-rw-r--r--Documentation/RCU/RTFP.txt77
-rw-r--r--Documentation/RCU/UP.txt34
-rw-r--r--Documentation/RCU/checklist.txt20
-rw-r--r--Documentation/RCU/rcu.txt10
-rw-r--r--Documentation/RCU/rcubarrier.txt7
-rw-r--r--Documentation/RCU/torture.txt23
-rw-r--r--Documentation/RCU/trace.txt7
-rw-r--r--Documentation/RCU/whatisRCU.txt22
-rw-r--r--Documentation/feature-removal-schedule.txt51
-rw-r--r--Documentation/filesystems/nfs.txt98
-rw-r--r--Documentation/kernel-parameters.txt34
-rw-r--r--Documentation/keys.txt39
-rw-r--r--Documentation/kmemleak.txt31
-rw-r--r--Documentation/s390/s390dbf.txt7
-rw-r--r--Documentation/sound/alsa/ALSA-Configuration.txt30
-rw-r--r--Documentation/sound/alsa/HD-Audio-Models.txt33
-rw-r--r--Documentation/sound/alsa/HD-Audio.txt64
-rw-r--r--Documentation/sysctl/kernel.txt16
-rw-r--r--Documentation/trace/events.txt9
-rw-r--r--Documentation/trace/ftrace.txt68
-rw-r--r--Documentation/trace/function-graph-fold.vim42
-rw-r--r--Documentation/trace/ring-buffer-design.txt955
-rw-r--r--MAINTAINERS12
-rw-r--r--Makefile2
-rw-r--r--arch/Kconfig12
-rw-r--r--arch/alpha/include/asm/thread_info.h5
-rw-r--r--arch/alpha/kernel/signal.c8
-rw-r--r--arch/arm/include/asm/thread_info.h3
-rw-r--r--arch/arm/kernel/entry-common.S2
-rw-r--r--arch/arm/kernel/signal.c8
-rw-r--r--arch/arm/mach-omap2/mcbsp.c5
-rw-r--r--arch/arm/mach-pxa/include/mach/audio.h3
-rw-r--r--arch/arm/plat-omap/dma.c10
-rw-r--r--arch/arm/plat-omap/include/mach/mcbsp.h51
-rw-r--r--arch/arm/plat-omap/mcbsp.c401
-rw-r--r--arch/arm/plat-s3c/include/plat/audio-simtec.h37
-rw-r--r--arch/arm/plat-s3c/include/plat/regs-s3c2412-iis.h5
-rw-r--r--arch/avr32/include/asm/thread_info.h6
-rw-r--r--arch/avr32/kernel/entry-avr32b.S2
-rw-r--r--arch/avr32/kernel/signal.c8
-rw-r--r--arch/cris/kernel/ptrace.c8
-rw-r--r--arch/frv/kernel/signal.c2
-rw-r--r--arch/h8300/include/asm/thread_info.h2
-rw-r--r--arch/h8300/kernel/signal.c8
-rw-r--r--arch/ia64/include/asm/dma-mapping.h19
-rw-r--r--arch/ia64/kernel/dma-mapping.c4
-rw-r--r--arch/ia64/kernel/process.c2
-rw-r--r--arch/ia64/lib/ip_fast_csum.S8
-rw-r--r--arch/ia64/xen/time.c3
-rw-r--r--arch/m32r/include/asm/thread_info.h2
-rw-r--r--arch/m32r/kernel/signal.c8
-rw-r--r--arch/m68k/include/asm/entry_mm.h4
-rw-r--r--arch/m68k/include/asm/entry_no.h8
-rw-r--r--arch/m68k/include/asm/math-emu.h20
-rw-r--r--arch/m68k/include/asm/thread_info_mm.h11
-rw-r--r--arch/m68k/kernel/asm-offsets.c39
-rw-r--r--arch/m68k/kernel/entry.S22
-rw-r--r--arch/m68k/math-emu/fp_entry.S38
-rw-r--r--arch/mips/include/asm/thread_info.h2
-rw-r--r--arch/mips/kernel/signal.c8
-rw-r--r--arch/mn10300/kernel/signal.c2
-rw-r--r--arch/parisc/include/asm/thread_info.h4
-rw-r--r--arch/parisc/kernel/entry.S2
-rw-r--r--arch/parisc/kernel/signal.c8
-rw-r--r--arch/powerpc/include/asm/dma-mapping.h23
-rw-r--r--arch/powerpc/include/asm/pgtable.h6
-rw-r--r--arch/powerpc/include/asm/spinlock.h20
-rw-r--r--arch/powerpc/kernel/Makefile2
-rw-r--r--arch/powerpc/kernel/asm-offsets.c2
-rw-r--r--arch/powerpc/kernel/dma-swiotlb.c48
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S19
-rw-r--r--arch/powerpc/kernel/perf_callchain.c527
-rw-r--r--arch/powerpc/kernel/power7-pmu.c6
-rw-r--r--arch/powerpc/mm/slb.c37
-rw-r--r--arch/powerpc/mm/stab.c11
-rw-r--r--arch/powerpc/sysdev/xilinx_intc.c1
-rw-r--r--arch/s390/Kconfig10
-rw-r--r--arch/s390/Makefile3
-rw-r--r--arch/s390/crypto/des_s390.c11
-rw-r--r--arch/s390/crypto/sha1_s390.c26
-rw-r--r--arch/s390/crypto/sha256_s390.c26
-rw-r--r--arch/s390/crypto/sha512_s390.c36
-rw-r--r--arch/s390/defconfig2
-rw-r--r--arch/s390/hypfs/inode.c6
-rw-r--r--arch/s390/include/asm/atomic.h205
-rw-r--r--arch/s390/include/asm/checksum.h25
-rw-r--r--arch/s390/include/asm/chsc.h28
-rw-r--r--arch/s390/include/asm/cio.h223
-rw-r--r--arch/s390/include/asm/cpu.h26
-rw-r--r--arch/s390/include/asm/cpuid.h25
-rw-r--r--arch/s390/include/asm/debug.h9
-rw-r--r--arch/s390/include/asm/hardirq.h7
-rw-r--r--arch/s390/include/asm/ipl.h5
-rw-r--r--arch/s390/include/asm/kvm_host.h6
-rw-r--r--arch/s390/include/asm/kvm_virtio.h10
-rw-r--r--arch/s390/include/asm/lowcore.h6
-rw-r--r--arch/s390/include/asm/mmu.h1
-rw-r--r--arch/s390/include/asm/page.h4
-rw-r--r--arch/s390/include/asm/pgalloc.h1
-rw-r--r--arch/s390/include/asm/processor.h4
-rw-r--r--arch/s390/include/asm/scatterlist.h20
-rw-r--r--arch/s390/include/asm/scsw.h (renamed from drivers/s390/cio/scsw.c)345
-rw-r--r--arch/s390/include/asm/setup.h2
-rw-r--r--arch/s390/include/asm/smp.h32
-rw-r--r--arch/s390/include/asm/spinlock.h29
-rw-r--r--arch/s390/include/asm/system.h4
-rw-r--r--arch/s390/include/asm/thread_info.h4
-rw-r--r--arch/s390/include/asm/timex.h14
-rw-r--r--arch/s390/kernel/Makefile4
-rw-r--r--arch/s390/kernel/early.c74
-rw-r--r--arch/s390/kernel/entry.S18
-rw-r--r--arch/s390/kernel/entry64.S6
-rw-r--r--arch/s390/kernel/ftrace.c36
-rw-r--r--arch/s390/kernel/head.S1
-rw-r--r--arch/s390/kernel/head31.S1
-rw-r--r--arch/s390/kernel/head64.S9
-rw-r--r--arch/s390/kernel/ipl.c166
-rw-r--r--arch/s390/kernel/mcount.S147
-rw-r--r--arch/s390/kernel/mcount64.S78
-rw-r--r--arch/s390/kernel/ptrace.c11
-rw-r--r--arch/s390/kernel/setup.c10
-rw-r--r--arch/s390/kernel/signal.c2
-rw-r--r--arch/s390/kernel/smp.c39
-rw-r--r--arch/s390/kernel/suspend.c (renamed from arch/s390/power/swsusp.c)35
-rw-r--r--arch/s390/kernel/swsusp_asm64.S (renamed from arch/s390/power/swsusp_asm64.S)2
-rw-r--r--arch/s390/kernel/time.c3
-rw-r--r--arch/s390/kernel/vmlinux.lds.S87
-rw-r--r--arch/s390/mm/Makefile4
-rw-r--r--arch/s390/mm/fault.c13
-rw-r--r--arch/s390/mm/page-states.c6
-rw-r--r--arch/s390/mm/pgtable.c24
-rw-r--r--arch/s390/mm/vmem.c1
-rw-r--r--arch/s390/power/Makefile8
-rw-r--r--arch/s390/power/suspend.c40
-rw-r--r--arch/s390/power/swsusp_64.c17
-rw-r--r--arch/sh/kernel/signal_32.c2
-rw-r--r--arch/sh/kernel/signal_64.c2
-rw-r--r--arch/sparc/Kconfig2
-rw-r--r--arch/sparc/include/asm/dma-mapping.h145
-rw-r--r--arch/sparc/include/asm/irq_64.h4
-rw-r--r--arch/sparc/include/asm/pci.h3
-rw-r--r--arch/sparc/include/asm/pci_32.h105
-rw-r--r--arch/sparc/include/asm/pci_64.h88
-rw-r--r--arch/sparc/include/asm/spinlock_32.h12
-rw-r--r--arch/sparc/include/asm/spinlock_64.h28
-rw-r--r--arch/sparc/kernel/Makefile2
-rw-r--r--arch/sparc/kernel/dma.c175
-rw-r--r--arch/sparc/kernel/dma.h14
-rw-r--r--arch/sparc/kernel/iommu.c20
-rw-r--r--arch/sparc/kernel/ioport.c190
-rw-r--r--arch/sparc/kernel/irq_64.c2
-rw-r--r--arch/sparc/kernel/nmi.c2
-rw-r--r--arch/sparc/kernel/pci.c2
-rw-r--r--arch/sparc/kernel/pci_sun4v.c30
-rw-r--r--arch/sparc/kernel/process_64.c4
-rw-r--r--arch/sparc/kernel/signal_32.c2
-rw-r--r--arch/sparc/kernel/signal_64.c3
-rw-r--r--arch/sparc/prom/misc_64.c2
-rw-r--r--arch/sparc/prom/printf.c7
-rw-r--r--arch/x86/Kconfig3
-rw-r--r--arch/x86/configs/i386_defconfig2
-rw-r--r--arch/x86/configs/x86_64_defconfig2
-rw-r--r--arch/x86/crypto/aesni-intel_glue.c2
-rw-r--r--arch/x86/include/asm/amd_iommu.h1
-rw-r--r--arch/x86/include/asm/amd_iommu_types.h50
-rw-r--r--arch/x86/include/asm/dma-mapping.h18
-rw-r--r--arch/x86/include/asm/ftrace.h7
-rw-r--r--arch/x86/include/asm/nmi.h4
-rw-r--r--arch/x86/include/asm/perf_counter.h10
-rw-r--r--arch/x86/include/asm/thread_info.h13
-rw-r--r--arch/x86/include/asm/topology.h47
-rw-r--r--arch/x86/include/asm/unistd_32.h2
-rw-r--r--arch/x86/include/asm/unistd_64.h6
-rw-r--r--arch/x86/kernel/amd_iommu.c489
-rw-r--r--arch/x86/kernel/amd_iommu_init.c42
-rw-r--r--arch/x86/kernel/aperture_64.c6
-rw-r--r--arch/x86/kernel/apic/nmi.c20
-rw-r--r--arch/x86/kernel/asm-offsets_64.c1
-rw-r--r--arch/x86/kernel/cpu/perf_counter.c329
-rw-r--r--arch/x86/kernel/ftrace.c51
-rw-r--r--arch/x86/kernel/pci-dma.c17
-rw-r--r--arch/x86/kernel/pci-gart_64.c5
-rw-r--r--arch/x86/kernel/pci-nommu.c29
-rw-r--r--arch/x86/kernel/pci-swiotlb.c25
-rw-r--r--arch/x86/kernel/ptrace.c13
-rw-r--r--arch/x86/kernel/signal.c2
-rw-r--r--arch/x86/kernel/sys_x86_64.c8
-rw-r--r--arch/x86/mm/kmemcheck/kmemcheck.c14
-rw-r--r--arch/x86/oprofile/nmi_int.c404
-rw-r--r--arch/x86/oprofile/op_counter.h2
-rw-r--r--arch/x86/oprofile/op_model_amd.c372
-rw-r--r--arch/x86/oprofile/op_model_p4.c72
-rw-r--r--arch/x86/oprofile/op_model_ppro.c101
-rw-r--r--arch/x86/oprofile/op_x86_model.h59
-rw-r--r--arch/x86/pci/direct.c5
-rw-r--r--block/blk-core.c1
-rw-r--r--block/blk-sysfs.c2
-rw-r--r--crypto/Kconfig30
-rw-r--r--crypto/Makefile5
-rw-r--r--crypto/ablkcipher.c29
-rw-r--r--crypto/aes_generic.c9
-rw-r--r--crypto/ahash.c336
-rw-r--r--crypto/algapi.c191
-rw-r--r--crypto/algboss.c5
-rw-r--r--crypto/ansi_cprng.c43
-rw-r--r--crypto/api.c54
-rw-r--r--crypto/authenc.c358
-rw-r--r--crypto/cryptd.c321
-rw-r--r--crypto/ctr.c2
-rw-r--r--crypto/gcm.c580
-rw-r--r--crypto/ghash-generic.c170
-rw-r--r--crypto/hmac.c302
-rw-r--r--crypto/internal.h28
-rw-r--r--crypto/pcompress.c6
-rw-r--r--crypto/rng.c2
-rw-r--r--crypto/sha1_generic.c41
-rw-r--r--crypto/sha256_generic.c100
-rw-r--r--crypto/sha512_generic.c48
-rw-r--r--crypto/shash.c270
-rw-r--r--crypto/tcrypt.c22
-rw-r--r--crypto/testmgr.c30
-rw-r--r--crypto/testmgr.h16
-rw-r--r--crypto/vmac.c678
-rw-r--r--crypto/xcbc.c370
-rw-r--r--drivers/acpi/blacklist.c5
-rw-r--r--drivers/ata/Kconfig21
-rw-r--r--drivers/ata/Makefile1
-rw-r--r--drivers/ata/ahci.c143
-rw-r--r--drivers/ata/libata-acpi.c7
-rw-r--r--drivers/ata/libata-core.c44
-rw-r--r--drivers/ata/libata-eh.c146
-rw-r--r--drivers/ata/libata-pmp.c2
-rw-r--r--drivers/ata/libata-scsi.c159
-rw-r--r--drivers/ata/libata.h1
-rw-r--r--drivers/ata/pata_atiixp.c1
-rw-r--r--drivers/ata/pata_cs5535.c3
-rw-r--r--drivers/ata/pata_octeon_cf.c4
-rw-r--r--drivers/ata/pata_platform.c8
-rw-r--r--drivers/ata/pata_rb532_cf.c2
-rw-r--r--drivers/ata/pata_rdc.c400
-rw-r--r--drivers/ata/pata_rz1000.c4
-rw-r--r--drivers/ata/sata_fsl.c1
-rw-r--r--drivers/ata/sata_inic162x.c2
-rw-r--r--drivers/ata/sata_mv.c2
-rw-r--r--drivers/ata/sata_sil.c13
-rw-r--r--drivers/ata/sata_sil24.c11
-rw-r--r--drivers/ata/sata_sis.c75
-rw-r--r--drivers/block/aoe/aoe.h2
-rw-r--r--drivers/block/aoe/aoeblk.c13
-rw-r--r--drivers/block/aoe/aoedev.c1
-rw-r--r--drivers/char/agp/intel-agp.c8
-rw-r--r--drivers/char/hvc_iucv.c2
-rw-r--r--drivers/char/hw_random/amd-rng.c4
-rw-r--r--drivers/char/hw_random/geode-rng.c3
-rw-r--r--drivers/char/mem.c1
-rw-r--r--drivers/char/n_tty.c3
-rw-r--r--drivers/char/pty.c10
-rw-r--r--drivers/char/random.c14
-rw-r--r--drivers/char/sysrq.c19
-rw-r--r--drivers/char/tpm/tpm_tis.c12
-rw-r--r--drivers/cpufreq/cpufreq.c95
-rw-r--r--drivers/crypto/Kconfig15
-rw-r--r--drivers/crypto/Makefile1
-rw-r--r--drivers/crypto/amcc/crypto4xx_alg.c3
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.c73
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.h25
-rw-r--r--drivers/crypto/mv_cesa.c606
-rw-r--r--drivers/crypto/mv_cesa.h119
-rw-r--r--drivers/crypto/padlock-sha.c329
-rw-r--r--drivers/crypto/talitos.c216
-rw-r--r--drivers/crypto/talitos.h1
-rw-r--r--drivers/firewire/core-iso.c4
-rw-r--r--drivers/firewire/ohci.c14
-rw-r--r--drivers/firewire/sbp2.c8
-rw-r--r--drivers/firmware/dmi_scan.c77
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c19
-rw-r--r--drivers/gpu/drm/i915/intel_display.c16
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c2
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h1
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c1
-rw-r--r--drivers/gpu/drm/radeon/r300.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h6
-rw-r--r--drivers/gpu/drm/radeon/rs600.c65
-rw-r--r--drivers/gpu/drm/radeon/rs690.c64
-rw-r--r--drivers/gpu/drm/radeon/rv515.c2
-rw-r--r--drivers/ide/atiixp.c1
-rw-r--r--drivers/ide/ide-cs.c1
-rw-r--r--drivers/infiniband/core/iwcm.c1
-rw-r--r--drivers/infiniband/core/mad.c35
-rw-r--r--drivers/infiniband/core/mad_priv.h3
-rw-r--r--drivers/infiniband/core/multicast.c10
-rw-r--r--drivers/infiniband/core/sa_query.c7
-rw-r--r--drivers/infiniband/core/smi.c8
-rw-r--r--drivers/infiniband/core/uverbs_main.c10
-rw-r--r--drivers/infiniband/hw/amso1100/c2.c6
-rw-r--r--drivers/infiniband/hw/amso1100/c2_provider.c24
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_hal.c5
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_wr.h6
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch.c37
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c68
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.h9
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_mem.c21
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c52
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_qp.c1
-rw-r--r--drivers/infiniband/hw/ehca/ehca_main.c8
-rw-r--r--drivers/infiniband/hw/ehca/ehca_reqs.c6
-rw-r--r--drivers/infiniband/hw/ehca/ehca_sqp.c47
-rw-r--r--drivers/infiniband/hw/ipath/ipath_file_ops.c2
-rw-r--r--drivers/infiniband/hw/ipath/ipath_mad.c2
-rw-r--r--drivers/infiniband/hw/mlx4/main.c12
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h1
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c12
-rw-r--r--drivers/infiniband/hw/mthca/mthca_catas.c1
-rw-r--r--drivers/infiniband/hw/mthca/mthca_config_reg.h2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_dev.h1
-rw-r--r--drivers/infiniband/hw/mthca/mthca_eq.c17
-rw-r--r--drivers/infiniband/hw/mthca/mthca_main.c8
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c3
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.h1
-rw-r--r--drivers/infiniband/hw/mthca/mthca_qp.c12
-rw-r--r--drivers/infiniband/hw/mthca/mthca_reset.c1
-rw-r--r--drivers/infiniband/hw/nes/nes.h2
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c128
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.h2
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c767
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.h103
-rw-r--r--drivers/infiniband/hw/nes/nes_utils.c5
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c204
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.h16
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c1
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c1
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c7
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c21
-rw-r--r--drivers/input/keyboard/atkbd.c35
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h8
-rw-r--r--drivers/md/dm-exception-store.c13
-rw-r--r--drivers/md/dm-exception-store.h4
-rw-r--r--drivers/md/dm-log-userspace-base.c39
-rw-r--r--drivers/md/dm-log-userspace-transfer.c6
-rw-r--r--drivers/md/dm-log-userspace-transfer.h2
-rw-r--r--drivers/md/dm-raid1.c8
-rw-r--r--drivers/md/dm-snap-persistent.c88
-rw-r--r--drivers/md/dm-snap.c23
-rw-r--r--drivers/md/dm-stripe.c13
-rw-r--r--drivers/md/dm-table.c51
-rw-r--r--drivers/md/dm.c15
-rw-r--r--drivers/mtd/devices/m25p80.c2
-rw-r--r--drivers/mtd/nftlcore.c15
-rw-r--r--drivers/net/cxgb3/cxgb3_main.c6
-rw-r--r--drivers/net/cxgb3/cxgb3_offload.c6
-rw-r--r--drivers/net/cxgb3/cxgb3_offload.h8
-rw-r--r--drivers/net/gianfar.c1
-rw-r--r--drivers/net/mlx4/cq.c1
-rw-r--r--drivers/net/mlx4/eq.c77
-rw-r--r--drivers/net/mlx4/icm.c1
-rw-r--r--drivers/net/mlx4/main.c37
-rw-r--r--drivers/net/mlx4/mcg.c1
-rw-r--r--drivers/net/mlx4/mlx4.h7
-rw-r--r--drivers/net/mlx4/mr.c1
-rw-r--r--drivers/net/mlx4/pd.c1
-rw-r--r--drivers/net/mlx4/profile.c2
-rw-r--r--drivers/net/mlx4/qp.c2
-rw-r--r--drivers/net/mlx4/reset.c1
-rw-r--r--drivers/net/mlx4/srq.c2
-rw-r--r--drivers/net/tun.c22
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c120
-rw-r--r--drivers/oprofile/cpu_buffer.c16
-rw-r--r--drivers/oprofile/oprof.c71
-rw-r--r--drivers/oprofile/oprof.h3
-rw-r--r--drivers/oprofile/oprofile_files.c46
-rw-r--r--drivers/oprofile/oprofile_stats.c5
-rw-r--r--drivers/oprofile/oprofile_stats.h1
-rw-r--r--drivers/pci/intr_remapping.c14
-rw-r--r--drivers/pci/iov.c23
-rw-r--r--drivers/pci/pci.h13
-rw-r--r--drivers/pci/quirks.c4
-rw-r--r--drivers/pci/setup-bus.c4
-rw-r--r--drivers/pci/setup-res.c8
-rw-r--r--drivers/s390/block/dasd.c26
-rw-r--r--drivers/s390/block/dasd_3990_erp.c2
-rw-r--r--drivers/s390/block/dasd_alias.c5
-rw-r--r--drivers/s390/block/dasd_diag.c5
-rw-r--r--drivers/s390/block/dasd_eckd.c47
-rw-r--r--drivers/s390/block/dasd_eer.c4
-rw-r--r--drivers/s390/block/dasd_erp.c4
-rw-r--r--drivers/s390/block/dasd_fba.c9
-rw-r--r--drivers/s390/block/dasd_int.h11
-rw-r--r--drivers/s390/block/dasd_ioctl.c24
-rw-r--r--drivers/s390/block/xpram.c65
-rw-r--r--drivers/s390/char/Kconfig10
-rw-r--r--drivers/s390/char/Makefile1
-rw-r--r--drivers/s390/char/monreader.c2
-rw-r--r--drivers/s390/char/sclp.h4
-rw-r--r--drivers/s390/char/sclp_async.c224
-rw-r--r--drivers/s390/char/tape_34xx.c2
-rw-r--r--drivers/s390/char/tape_3590.c4
-rw-r--r--drivers/s390/char/tape_block.c12
-rw-r--r--drivers/s390/char/tape_core.c18
-rw-r--r--drivers/s390/char/tape_std.c2
-rw-r--r--drivers/s390/char/vmlogrdr.c4
-rw-r--r--drivers/s390/char/vmur.c19
-rw-r--r--drivers/s390/char/zcore.c2
-rw-r--r--drivers/s390/cio/Makefile2
-rw-r--r--drivers/s390/cio/chp.c3
-rw-r--r--drivers/s390/cio/chsc.h24
-rw-r--r--drivers/s390/cio/cio.c56
-rw-r--r--drivers/s390/cio/cio.h4
-rw-r--r--drivers/s390/cio/css.c32
-rw-r--r--drivers/s390/cio/device.c172
-rw-r--r--drivers/s390/cio/device_fsm.c22
-rw-r--r--drivers/s390/cio/qdio.h4
-rw-r--r--drivers/s390/cio/qdio_debug.c55
-rw-r--r--drivers/s390/cio/qdio_main.c4
-rw-r--r--drivers/s390/crypto/ap_bus.c17
-rw-r--r--drivers/s390/kvm/kvm_virtio.c8
-rw-r--r--drivers/s390/net/netiucv.c9
-rw-r--r--drivers/s390/net/smsgiucv.c6
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_init.c12
-rw-r--r--drivers/staging/comedi/comedi_fops.c8
-rw-r--r--drivers/staging/pohmelfs/inode.c9
-rw-r--r--fs/binfmt_elf.c28
-rw-r--r--fs/btrfs/disk-io.c1
-rw-r--r--fs/buffer.c2
-rw-r--r--fs/char_dev.c40
-rw-r--r--fs/compat.c17
-rw-r--r--fs/configfs/inode.c1
-rw-r--r--fs/dcache.c1
-rw-r--r--fs/exec.c63
-rw-r--r--fs/ext2/acl.c8
-rw-r--r--fs/ext2/acl.h4
-rw-r--r--fs/ext2/file.c2
-rw-r--r--fs/ext2/namei.c8
-rw-r--r--fs/ext3/acl.c8
-rw-r--r--fs/ext3/acl.h4
-rw-r--r--fs/ext3/file.c2
-rw-r--r--fs/ext3/namei.c4
-rw-r--r--fs/ext4/acl.c8
-rw-r--r--fs/ext4/acl.h4
-rw-r--r--fs/ext4/file.c2
-rw-r--r--fs/ext4/namei.c4
-rw-r--r--fs/fs-writeback.c1065
-rw-r--r--fs/fuse/inode.c1
-rw-r--r--fs/hugetlbfs/inode.c1
-rw-r--r--fs/jffs2/acl.c7
-rw-r--r--fs/jffs2/acl.h4
-rw-r--r--fs/jffs2/dir.c2
-rw-r--r--fs/jffs2/file.c2
-rw-r--r--fs/jffs2/symlink.c2
-rw-r--r--fs/jffs2/wbuf.c10
-rw-r--r--fs/jfs/acl.c7
-rw-r--r--fs/jfs/file.c2
-rw-r--r--fs/jfs/jfs_acl.h2
-rw-r--r--fs/jfs/namei.c2
-rw-r--r--fs/lockd/host.c14
-rw-r--r--fs/lockd/mon.c44
-rw-r--r--fs/locks.c4
-rw-r--r--fs/namei.c110
-rw-r--r--fs/nfs/Makefile3
-rw-r--r--fs/nfs/cache_lib.c140
-rw-r--r--fs/nfs/cache_lib.h27
-rw-r--r--fs/nfs/callback.c26
-rw-r--r--fs/nfs/client.c16
-rw-r--r--fs/nfs/direct.c3
-rw-r--r--fs/nfs/dns_resolve.c335
-rw-r--r--fs/nfs/dns_resolve.h14
-rw-r--r--fs/nfs/file.c49
-rw-r--r--fs/nfs/idmap.c6
-rw-r--r--fs/nfs/inode.c100
-rw-r--r--fs/nfs/internal.h39
-rw-r--r--fs/nfs/mount_clnt.c83
-rw-r--r--fs/nfs/nfs3proc.c1
-rw-r--r--fs/nfs/nfs4namespace.c24
-rw-r--r--fs/nfs/nfs4proc.c40
-rw-r--r--fs/nfs/nfs4xdr.c1460
-rw-r--r--fs/nfs/super.c451
-rw-r--r--fs/nfs/write.c91
-rw-r--r--fs/nfsd/auth.c4
-rw-r--r--fs/nfsd/export.c14
-rw-r--r--fs/nfsd/nfs4idmap.c20
-rw-r--r--fs/nfsd/nfsctl.c21
-rw-r--r--fs/nfsd/nfssvc.c2
-rw-r--r--fs/nfsd/vfs.c3
-rw-r--r--fs/nilfs2/btnode.c2
-rw-r--r--fs/ocfs2/aops.c4
-rw-r--r--fs/ocfs2/dcache.c11
-rw-r--r--fs/ocfs2/dlm/dlmfs.c1
-rw-r--r--fs/open.c12
-rw-r--r--fs/ramfs/inode.c1
-rw-r--r--fs/super.c5
-rw-r--r--fs/sync.c20
-rw-r--r--fs/sysfs/dir.c1
-rw-r--r--fs/sysfs/inode.c135
-rw-r--r--fs/sysfs/symlink.c2
-rw-r--r--fs/sysfs/sysfs.h12
-rw-r--r--fs/ubifs/budget.c16
-rw-r--r--fs/ubifs/super.c9
-rw-r--r--fs/xattr.c55
-rw-r--r--fs/xfs/linux-2.6/xfs_ioctl32.c2
-rw-r--r--fs/xfs/linux-2.6/xfs_iops.c16
-rw-r--r--include/asm-generic/dma-mapping-common.h6
-rw-r--r--include/crypto/algapi.h38
-rw-r--r--include/crypto/cryptd.h17
-rw-r--r--include/crypto/hash.h147
-rw-r--r--include/crypto/internal/hash.h147
-rw-r--r--include/crypto/internal/skcipher.h4
-rw-r--r--include/crypto/sha.h20
-rw-r--r--include/crypto/vmac.h61
-rw-r--r--include/linux/ata.h36
-rw-r--r--include/linux/backing-dev.h55
-rw-r--r--include/linux/binfmts.h1
-rw-r--r--include/linux/cpu.h17
-rw-r--r--include/linux/cred.h69
-rw-r--r--include/linux/crypto.h43
-rw-r--r--include/linux/device-mapper.h4
-rw-r--r--include/linux/dm-log-userspace.h13
-rw-r--r--include/linux/dma-mapping.h5
-rw-r--r--include/linux/dmi.h13
-rw-r--r--include/linux/fips.h10
-rw-r--r--include/linux/fs.h29
-rw-r--r--include/linux/ftrace_event.h51
-rw-r--r--include/linux/hardirq.h10
-rw-r--r--include/linux/init_task.h11
-rw-r--r--include/linux/interrupt.h4
-rw-r--r--include/linux/irq.h18
-rw-r--r--include/linux/irqnr.h6
-rw-r--r--include/linux/kernel.h5
-rw-r--r--include/linux/key.h8
-rw-r--r--include/linux/keyctl.h1
-rw-r--r--include/linux/kmemcheck.h7
-rw-r--r--include/linux/kmemleak.h18
-rw-r--r--include/linux/libata.h3
-rw-r--r--include/linux/lockdep.h18
-rw-r--r--include/linux/lsm_audit.h12
-rw-r--r--include/linux/module.h14
-rw-r--r--include/linux/nfs4.h1
-rw-r--r--include/linux/nfs_fs_sb.h9
-rw-r--r--include/linux/nmi.h19
-rw-r--r--include/linux/oprofile.h5
-rw-r--r--include/linux/pagemap.h4
-rw-r--r--include/linux/pci_ids.h4
-rw-r--r--include/linux/perf_counter.h7
-rw-r--r--include/linux/rcuclassic.h178
-rw-r--r--include/linux/rcupdate.h98
-rw-r--r--include/linux/rcupreempt.h127
-rw-r--r--include/linux/rcupreempt_trace.h97
-rw-r--r--include/linux/rcutree.h262
-rw-r--r--include/linux/ring_buffer.h24
-rw-r--r--include/linux/sched.h129
-rw-r--r--include/linux/security.h154
-rw-r--r--include/linux/shmem_fs.h2
-rw-r--r--include/linux/spinlock.h64
-rw-r--r--include/linux/spinlock_api_smp.h394
-rw-r--r--include/linux/sunrpc/cache.h40
-rw-r--r--include/linux/sunrpc/clnt.h43
-rw-r--r--include/linux/sunrpc/msg_prot.h17
-rw-r--r--include/linux/sunrpc/rpc_pipe_fs.h20
-rw-r--r--include/linux/sunrpc/xdr.h10
-rw-r--r--include/linux/sunrpc/xprt.h2
-rw-r--r--include/linux/swiotlb.h11
-rw-r--r--include/linux/syscalls.h131
-rw-r--r--include/linux/topology.h168
-rw-r--r--include/linux/tracepoint.h29
-rw-r--r--include/linux/tty.h4
-rw-r--r--include/linux/workqueue.h15
-rw-r--r--include/linux/writeback.h23
-rw-r--r--include/linux/xattr.h1
-rw-r--r--include/net/pkt_sched.h4
-rw-r--r--include/sound/ac97_codec.h9
-rw-r--r--include/sound/asound.h2
-rw-r--r--include/sound/core.h51
-rw-r--r--include/sound/info.h4
-rw-r--r--include/sound/memalloc.h6
-rw-r--r--include/sound/pcm.h25
-rw-r--r--include/sound/sh_fsi.h83
-rw-r--r--include/sound/soc-dai.h40
-rw-r--r--include/sound/soc-dapm.h10
-rw-r--r--include/sound/soc.h49
-rw-r--r--include/sound/tlv.h14
-rw-r--r--include/sound/uda1380.h22
-rw-r--r--include/sound/version.h2
-rw-r--r--include/sound/wm8993.h44
-rw-r--r--include/sound/ymfpci.h1
-rw-r--r--include/trace/define_trace.h7
-rw-r--r--include/trace/events/module.h126
-rw-r--r--include/trace/events/sched.h107
-rw-r--r--include/trace/events/syscalls.h70
-rw-r--r--include/trace/ftrace.h93
-rw-r--r--include/trace/syscall.h48
-rw-r--r--init/Kconfig46
-rw-r--r--init/main.c4
-rw-r--r--kernel/Makefile4
-rw-r--r--kernel/acct.c8
-rw-r--r--kernel/cgroup.c1
-rw-r--r--kernel/cred.c293
-rw-r--r--kernel/exit.c5
-rw-r--r--kernel/fork.c11
-rw-r--r--kernel/futex.c47
-rw-r--r--kernel/irq/chip.c74
-rw-r--r--kernel/irq/handle.c5
-rw-r--r--kernel/irq/internals.h13
-rw-r--r--kernel/irq/manage.c102
-rw-r--r--kernel/irq/pm.c8
-rw-r--r--kernel/irq/resend.c3
-rw-r--r--kernel/irq/spurious.c1
-rw-r--r--kernel/kmod.c9
-rw-r--r--kernel/kprobes.c30
-rw-r--r--kernel/kthread.c4
-rw-r--r--kernel/lockdep.c792
-rw-r--r--kernel/lockdep_internals.h2
-rw-r--r--kernel/lockdep_proc.c128
-rw-r--r--kernel/module.c11
-rw-r--r--kernel/perf_counter.c176
-rw-r--r--kernel/printk.c175
-rw-r--r--kernel/ptrace.c2
-rw-r--r--kernel/rcuclassic.c807
-rw-r--r--kernel/rcupdate.c44
-rw-r--r--kernel/rcupreempt.c1539
-rw-r--r--kernel/rcupreempt_trace.c334
-rw-r--r--kernel/rcutorture.c202
-rw-r--r--kernel/rcutree.c280
-rw-r--r--kernel/rcutree.h253
-rw-r--r--kernel/rcutree_plugin.h532
-rw-r--r--kernel/rcutree_trace.c88
-rw-r--r--kernel/sched.c1232
-rw-r--r--kernel/sched_cpupri.c30
-rw-r--r--kernel/sched_debug.c4
-rw-r--r--kernel/sched_fair.c84
-rw-r--r--kernel/sched_features.h2
-rw-r--r--kernel/sched_rt.c62
-rw-r--r--kernel/softirq.c4
-rw-r--r--kernel/spinlock.c230
-rw-r--r--kernel/sysctl.c25
-rw-r--r--kernel/timer.c3
-rw-r--r--kernel/trace/Kconfig13
-rw-r--r--kernel/trace/blktrace.c12
-rw-r--r--kernel/trace/ftrace.c107
-rw-r--r--kernel/trace/kmemtrace.c149
-rw-r--r--kernel/trace/ring_buffer.c1112
-rw-r--r--kernel/trace/trace.c679
-rw-r--r--kernel/trace/trace.h76
-rw-r--r--kernel/trace/trace_boot.c16
-rw-r--r--kernel/trace/trace_events.c146
-rw-r--r--kernel/trace/trace_events_filter.c261
-rw-r--r--kernel/trace/trace_export.c28
-rw-r--r--kernel/trace/trace_functions.c4
-rw-r--r--kernel/trace/trace_functions_graph.c166
-rw-r--r--kernel/trace/trace_irqsoff.c3
-rw-r--r--kernel/trace/trace_mmiotrace.c10
-rw-r--r--kernel/trace/trace_power.c22
-rw-r--r--kernel/trace/trace_sched_switch.c59
-rw-r--r--kernel/trace/trace_sched_wakeup.c7
-rw-r--r--kernel/trace/trace_selftest.c1
-rw-r--r--kernel/trace/trace_stack.c43
-rw-r--r--kernel/trace/trace_stat.c17
-rw-r--r--kernel/trace/trace_stat.h2
-rw-r--r--kernel/trace/trace_syscalls.c471
-rw-r--r--kernel/trace/trace_workqueue.c32
-rw-r--r--kernel/tracepoint.c50
-rw-r--r--kernel/workqueue.c9
-rw-r--r--lib/Kconfig.debug17
-rw-r--r--lib/is_single_threaded.c61
-rw-r--r--lib/swiotlb.c124
-rw-r--r--mm/Makefile2
-rw-r--r--mm/backing-dev.c381
-rw-r--r--mm/bootmem.c6
-rw-r--r--mm/kmemleak.c336
-rw-r--r--mm/nommu.c3
-rw-r--r--mm/page-writeback.c182
-rw-r--r--mm/page_alloc.c6
-rw-r--r--mm/pdflush.c269
-rw-r--r--mm/percpu.c15
-rw-r--r--mm/shmem.c6
-rw-r--r--mm/shmem_acl.c11
-rw-r--r--mm/slub.c4
-rw-r--r--mm/swap_state.c1
-rw-r--r--mm/vmscan.c2
-rw-r--r--net/core/dev.c2
-rw-r--r--net/core/sock.c2
-rw-r--r--net/ipv4/tcp_cong.c4
-rw-r--r--net/sched/sch_api.c12
-rw-r--r--net/sched/sch_cbq.c25
-rw-r--r--net/sunrpc/Makefile2
-rw-r--r--net/sunrpc/addr.c364
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c12
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c7
-rw-r--r--net/sunrpc/cache.c622
-rw-r--r--net/sunrpc/clnt.c60
-rw-r--r--net/sunrpc/rpc_pipe.c685
-rw-r--r--net/sunrpc/rpcb_clnt.c420
-rw-r--r--net/sunrpc/sunrpc_syms.c2
-rw-r--r--net/sunrpc/svcauth_unix.c14
-rw-r--r--net/sunrpc/timer.c45
-rw-r--r--net/sunrpc/xdr.c12
-rw-r--r--net/sunrpc/xprtrdma/transport.c48
-rw-r--r--net/sunrpc/xprtsock.c287
-rwxr-xr-xscripts/recordmcount.pl1
-rw-r--r--security/Makefile4
-rw-r--r--security/capability.c63
-rw-r--r--security/commoncap.c4
-rw-r--r--security/integrity/ima/ima_main.c6
-rw-r--r--security/keys/Makefile1
-rw-r--r--security/keys/compat.c3
-rw-r--r--security/keys/gc.c194
-rw-r--r--security/keys/internal.h10
-rw-r--r--security/keys/key.c24
-rw-r--r--security/keys/keyctl.c161
-rw-r--r--security/keys/keyring.c85
-rw-r--r--security/keys/proc.c93
-rw-r--r--security/keys/process_keys.c69
-rw-r--r--security/keys/sysctl.c28
-rw-r--r--security/lsm_audit.c2
-rw-r--r--security/security.c62
-rw-r--r--security/selinux/avc.c205
-rw-r--r--security/selinux/hooks.c318
-rw-r--r--security/selinux/include/av_inherit.h1
-rw-r--r--security/selinux/include/av_perm_to_string.h1
-rw-r--r--security/selinux/include/av_permissions.h23
-rw-r--r--security/selinux/include/avc.h55
-rw-r--r--security/selinux/include/class_to_string.h1
-rw-r--r--security/selinux/include/flask.h1
-rw-r--r--security/selinux/include/netlabel.h4
-rw-r--r--security/selinux/include/xfrm.h8
-rw-r--r--security/selinux/netlabel.c2
-rw-r--r--security/selinux/ss/services.c142
-rw-r--r--security/selinux/xfrm.c4
-rw-r--r--security/smack/smack.h2
-rw-r--r--security/smack/smack_access.c11
-rw-r--r--security/smack/smack_lsm.c65
-rw-r--r--security/tomoyo/common.c30
-rw-r--r--security/tomoyo/common.h2
-rw-r--r--security/tomoyo/domain.c42
-rw-r--r--security/tomoyo/tomoyo.c27
-rw-r--r--security/tomoyo/tomoyo.h3
-rw-r--r--sound/Kconfig28
-rw-r--r--sound/arm/pxa2xx-ac97.c10
-rw-r--r--sound/arm/pxa2xx-pcm-lib.c3
-rw-r--r--sound/core/Kconfig4
-rw-r--r--sound/core/Makefile2
-rw-r--r--sound/core/control.c34
-rw-r--r--sound/core/info.c8
-rw-r--r--sound/core/init.c8
-rw-r--r--sound/core/memalloc.c4
-rw-r--r--sound/core/misc.c75
-rw-r--r--sound/core/oss/mixer_oss.c3
-rw-r--r--sound/core/oss/pcm_oss.c12
-rw-r--r--sound/core/pcm.c26
-rw-r--r--sound/core/pcm_lib.c12
-rw-r--r--sound/core/pcm_memory.c2
-rw-r--r--sound/core/pcm_native.c64
-rw-r--r--sound/core/rawmidi.c2
-rw-r--r--sound/core/seq/oss/seq_oss_midi.c14
-rw-r--r--sound/core/seq/seq_midi.c7
-rw-r--r--sound/core/vmaster.c8
-rw-r--r--sound/drivers/dummy.c700
-rw-r--r--sound/isa/cmi8330.c86
-rw-r--r--sound/oss/midibuf.c7
-rw-r--r--sound/oss/vwsnd.c6
-rw-r--r--sound/pci/Kconfig4
-rw-r--r--sound/pci/ali5451/ali5451.c65
-rw-r--r--sound/pci/azt3328.c1116
-rw-r--r--sound/pci/azt3328.h103
-rw-r--r--sound/pci/cs46xx/cs46xx_lib.h2
-rw-r--r--sound/pci/ctxfi/ct20k2reg.h9
-rw-r--r--sound/pci/ctxfi/ctamixer.c20
-rw-r--r--sound/pci/ctxfi/ctatc.c77
-rw-r--r--sound/pci/ctxfi/ctdaio.c30
-rw-r--r--sound/pci/ctxfi/cthw20k1.c22
-rw-r--r--sound/pci/ctxfi/cthw20k2.c73
-rw-r--r--sound/pci/ctxfi/ctmixer.c8
-rw-r--r--sound/pci/ctxfi/ctpcm.c6
-rw-r--r--sound/pci/ctxfi/ctresource.c4
-rw-r--r--sound/pci/ctxfi/ctsrc.c10
-rw-r--r--sound/pci/ctxfi/ctvmem.c6
-rw-r--r--sound/pci/hda/Kconfig27
-rw-r--r--sound/pci/hda/Makefile4
-rw-r--r--sound/pci/hda/hda_beep.c4
-rw-r--r--sound/pci/hda/hda_codec.c68
-rw-r--r--sound/pci/hda/hda_codec.h10
-rw-r--r--sound/pci/hda/hda_generic.c18
-rw-r--r--sound/pci/hda/hda_hwdep.c236
-rw-r--r--sound/pci/hda/hda_intel.c74
-rw-r--r--sound/pci/hda/hda_local.h14
-rw-r--r--sound/pci/hda/hda_proc.c7
-rw-r--r--sound/pci/hda/patch_analog.c131
-rw-r--r--sound/pci/hda/patch_atihdmi.c3
-rw-r--r--sound/pci/hda/patch_ca0110.c3
-rw-r--r--sound/pci/hda/patch_cirrus.c1194
-rw-r--r--sound/pci/hda/patch_cmedia.c3
-rw-r--r--sound/pci/hda/patch_conexant.c479
-rw-r--r--sound/pci/hda/patch_intelhdmi.c104
-rw-r--r--sound/pci/hda/patch_nvhdmi.c2
-rw-r--r--sound/pci/hda/patch_realtek.c4144
-rw-r--r--sound/pci/hda/patch_sigmatel.c1206
-rw-r--r--sound/pci/hda/patch_via.c3
-rw-r--r--sound/pci/ice1712/ice1712.h9
-rw-r--r--sound/pci/ice1712/ice1724.c112
-rw-r--r--sound/pci/ice1712/prodigy_hifi.c46
-rw-r--r--sound/pci/oxygen/oxygen_io.c11
-rw-r--r--sound/pci/oxygen/oxygen_lib.c3
-rw-r--r--sound/pci/oxygen/oxygen_pcm.c2
-rw-r--r--sound/pci/rme9652/hdsp.c39
-rw-r--r--sound/pci/ymfpci/ymfpci_main.c20
-rw-r--r--sound/soc/Kconfig1
-rw-r--r--sound/soc/Makefile3
-rw-r--r--sound/soc/atmel/sam9g20_wm8731.c138
-rw-r--r--sound/soc/au1x/psc-ac97.c129
-rw-r--r--sound/soc/au1x/psc.h1
-rw-r--r--sound/soc/blackfin/Kconfig31
-rw-r--r--sound/soc/blackfin/Makefile8
-rw-r--r--sound/soc/blackfin/bf5xx-ac97.c10
-rw-r--r--sound/soc/blackfin/bf5xx-ad1836.c128
-rw-r--r--sound/soc/blackfin/bf5xx-ad1938.c142
-rw-r--r--sound/soc/blackfin/bf5xx-ad73311.c16
-rw-r--r--sound/soc/blackfin/bf5xx-i2s.c8
-rw-r--r--sound/soc/blackfin/bf5xx-ssm2602.c16
-rw-r--r--sound/soc/blackfin/bf5xx-tdm-pcm.c330
-rw-r--r--sound/soc/blackfin/bf5xx-tdm-pcm.h21
-rw-r--r--sound/soc/blackfin/bf5xx-tdm.c343
-rw-r--r--sound/soc/blackfin/bf5xx-tdm.h14
-rw-r--r--sound/soc/codecs/Kconfig44
-rw-r--r--sound/soc/codecs/Makefile26
-rw-r--r--sound/soc/codecs/ad1836.c446
-rw-r--r--sound/soc/codecs/ad1836.h64
-rw-r--r--sound/soc/codecs/ad1938.c682
-rw-r--r--sound/soc/codecs/ad1938.h100
-rw-r--r--sound/soc/codecs/ak4535.c16
-rw-r--r--sound/soc/codecs/ak4642.c502
-rw-r--r--sound/soc/codecs/ak4642.h20
-rw-r--r--sound/soc/codecs/cs4270.c27
-rw-r--r--sound/soc/codecs/cx20442.c501
-rw-r--r--sound/soc/codecs/cx20442.h20
-rw-r--r--sound/soc/codecs/max9877.c308
-rw-r--r--sound/soc/codecs/max9877.h37
-rw-r--r--sound/soc/codecs/spdif_transciever.c3
-rw-r--r--sound/soc/codecs/stac9766.c4
-rw-r--r--sound/soc/codecs/tlv320aic3x.c233
-rw-r--r--sound/soc/codecs/tlv320aic3x.h2
-rw-r--r--sound/soc/codecs/twl4030.c260
-rw-r--r--sound/soc/codecs/twl4030.h2
-rw-r--r--sound/soc/codecs/uda134x.c2
-rw-r--r--sound/soc/codecs/uda1380.c313
-rw-r--r--sound/soc/codecs/uda1380.h8
-rw-r--r--sound/soc/codecs/wm8350.c51
-rw-r--r--sound/soc/codecs/wm8400.c26
-rw-r--r--sound/soc/codecs/wm8510.c175
-rw-r--r--sound/soc/codecs/wm8523.c699
-rw-r--r--sound/soc/codecs/wm8523.h160
-rw-r--r--sound/soc/codecs/wm8580.c211
-rw-r--r--sound/soc/codecs/wm8728.c111
-rw-r--r--sound/soc/codecs/wm8731.c218
-rw-r--r--sound/soc/codecs/wm8750.c154
-rw-r--r--sound/soc/codecs/wm8753.c35
-rw-r--r--sound/soc/codecs/wm8776.c744
-rw-r--r--sound/soc/codecs/wm8776.h51
-rw-r--r--sound/soc/codecs/wm8900.c345
-rw-r--r--sound/soc/codecs/wm8903.c267
-rw-r--r--sound/soc/codecs/wm8940.c160
-rw-r--r--sound/soc/codecs/wm8960.c233
-rw-r--r--sound/soc/codecs/wm8961.c1265
-rw-r--r--sound/soc/codecs/wm8961.h866
-rw-r--r--sound/soc/codecs/wm8971.c127
-rw-r--r--sound/soc/codecs/wm8974.c808
-rw-r--r--sound/soc/codecs/wm8974.h99
-rw-r--r--sound/soc/codecs/wm8988.c180
-rw-r--r--sound/soc/codecs/wm8990.c194
-rw-r--r--sound/soc/codecs/wm8993.c1675
-rw-r--r--sound/soc/codecs/wm8993.h2132
-rw-r--r--sound/soc/codecs/wm9081.c317
-rw-r--r--sound/soc/codecs/wm9705.c2
-rw-r--r--sound/soc/codecs/wm_hubs.c743
-rw-r--r--sound/soc/codecs/wm_hubs.h24
-rw-r--r--sound/soc/davinci/Kconfig33
-rw-r--r--sound/soc/davinci/Makefile5
-rw-r--r--sound/soc/davinci/davinci-evm.c140
-rw-r--r--sound/soc/davinci/davinci-i2s.c340
-rw-r--r--sound/soc/davinci/davinci-mcasp.c973
-rw-r--r--sound/soc/davinci/davinci-mcasp.h60
-rw-r--r--sound/soc/davinci/davinci-pcm.c10
-rw-r--r--sound/soc/davinci/davinci-pcm.h19
-rw-r--r--sound/soc/fsl/mpc5200_dma.c17
-rw-r--r--sound/soc/fsl/mpc5200_psc_ac97.c3
-rw-r--r--sound/soc/imx/Kconfig21
-rw-r--r--sound/soc/imx/Makefile10
-rw-r--r--sound/soc/imx/mx1_mx2-pcm.c488
-rw-r--r--sound/soc/imx/mx1_mx2-pcm.h26
-rw-r--r--sound/soc/imx/mx27vis_wm8974.c317
-rw-r--r--sound/soc/imx/mxc-ssi.c868
-rw-r--r--sound/soc/imx/mxc-ssi.h238
-rw-r--r--sound/soc/omap/Kconfig15
-rw-r--r--sound/soc/omap/Makefile4
-rw-r--r--sound/soc/omap/ams-delta.c646
-rw-r--r--sound/soc/omap/n810.c12
-rw-r--r--sound/soc/omap/omap-mcbsp.c123
-rw-r--r--sound/soc/omap/omap-mcbsp.h4
-rw-r--r--sound/soc/omap/omap-pcm.c53
-rw-r--r--sound/soc/omap/omap-pcm.h2
-rw-r--r--sound/soc/omap/sdp3430.c18
-rw-r--r--sound/soc/omap/zoom2.c314
-rw-r--r--sound/soc/pxa/magician.c56
-rw-r--r--sound/soc/pxa/palm27x.c204
-rw-r--r--sound/soc/pxa/pxa-ssp.c77
-rw-r--r--sound/soc/pxa/pxa2xx-ac97.c12
-rw-r--r--sound/soc/s3c24xx/Kconfig35
-rw-r--r--sound/soc/s3c24xx/Makefile9
-rw-r--r--sound/soc/s3c24xx/neo1973_gta02_wm8753.c498
-rw-r--r--sound/soc/s3c24xx/s3c-i2s-v2.c17
-rw-r--r--sound/soc/s3c24xx/s3c2443-ac97.c20
-rw-r--r--sound/soc/s3c24xx/s3c24xx-i2s.c5
-rw-r--r--sound/soc/s3c24xx/s3c24xx-pcm.c2
-rw-r--r--sound/soc/s3c24xx/s3c24xx_simtec.c394
-rw-r--r--sound/soc/s3c24xx/s3c24xx_simtec.h22
-rw-r--r--sound/soc/s3c24xx/s3c24xx_simtec_hermes.c153
-rw-r--r--sound/soc/s3c24xx/s3c24xx_simtec_tlv320aic23.c137
-rw-r--r--sound/soc/s6000/s6105-ipcam.c12
-rw-r--r--sound/soc/sh/Kconfig15
-rw-r--r--sound/soc/sh/Makefile4
-rw-r--r--sound/soc/sh/fsi-ak4642.c107
-rw-r--r--sound/soc/sh/fsi.c1004
-rw-r--r--sound/soc/soc-cache.c218
-rw-r--r--sound/soc/soc-core.c148
-rw-r--r--sound/soc/soc-dapm.c498
-rw-r--r--sound/soc/soc-jack.c24
-rw-r--r--sound/soc/txx9/txx9aclc.c10
-rw-r--r--sound/sound_core.c100
-rw-r--r--sound/usb/usbaudio.c6
-rw-r--r--sound/usb/usbmidi.c290
-rw-r--r--sound/usb/usbmixer.c73
-rw-r--r--tools/perf/Documentation/perf-record.txt4
-rw-r--r--tools/perf/Documentation/perf-report.txt13
-rw-r--r--tools/perf/Makefile39
-rw-r--r--tools/perf/builtin-annotate.c472
-rw-r--r--tools/perf/builtin-help.c1
-rw-r--r--tools/perf/builtin-record.c38
-rw-r--r--tools/perf/builtin-report.c719
-rw-r--r--tools/perf/builtin-stat.c239
-rw-r--r--tools/perf/builtin-top.c66
-rw-r--r--tools/perf/builtin-trace.c297
-rw-r--r--tools/perf/builtin.h1
-rw-r--r--tools/perf/perf.c1
-rw-r--r--tools/perf/util/abspath.c3
-rw-r--r--tools/perf/util/cache.h1
-rw-r--r--tools/perf/util/callchain.c2
-rw-r--r--tools/perf/util/callchain.h1
-rw-r--r--tools/perf/util/color.c16
-rw-r--r--tools/perf/util/color.h3
-rw-r--r--tools/perf/util/config.c22
-rw-r--r--tools/perf/util/debug.c95
-rw-r--r--tools/perf/util/debug.h8
-rw-r--r--tools/perf/util/event.h96
-rw-r--r--tools/perf/util/exec_cmd.c1
-rw-r--r--tools/perf/util/header.c37
-rw-r--r--tools/perf/util/header.h4
-rw-r--r--tools/perf/util/map.c97
-rw-r--r--tools/perf/util/module.c4
-rw-r--r--tools/perf/util/parse-events.c147
-rw-r--r--tools/perf/util/parse-events.h17
-rw-r--r--tools/perf/util/parse-options.c22
-rw-r--r--tools/perf/util/path.c25
-rw-r--r--tools/perf/util/run-command.c6
-rw-r--r--tools/perf/util/symbol.c199
-rw-r--r--tools/perf/util/symbol.h14
-rw-r--r--tools/perf/util/thread.c175
-rw-r--r--tools/perf/util/thread.h21
-rw-r--r--tools/perf/util/trace-event-info.c539
-rw-r--r--tools/perf/util/trace-event-parse.c2942
-rw-r--r--tools/perf/util/trace-event-read.c512
-rw-r--r--tools/perf/util/trace-event.h240
-rw-r--r--tools/perf/util/util.h6
-rw-r--r--tools/perf/util/values.c230
-rw-r--r--tools/perf/util/values.h27
970 files changed, 64660 insertions, 23652 deletions
diff --git a/Documentation/RCU/RTFP.txt b/Documentation/RCU/RTFP.txt
index 9f711d2df91b..d2b85237c76e 100644
--- a/Documentation/RCU/RTFP.txt
+++ b/Documentation/RCU/RTFP.txt
@@ -743,3 +743,80 @@ Revised:
  RCU, realtime RCU, sleepable RCU, performance.
 "
 }
+
+@article{PaulEMcKenney2008RCUOSR
+,author="Paul E. McKenney and Jonathan Walpole"
+,title="Introducing technology into the {Linux} kernel: a case study"
+,Year="2008"
+,journal="SIGOPS Oper. Syst. Rev."
+,volume="42"
+,number="5"
+,pages="4--17"
+,issn="0163-5980"
+,doi={http://doi.acm.org/10.1145/1400097.1400099}
+,publisher="ACM"
+,address="New York, NY, USA"
+,annotation={
+ Linux changed RCU to a far greater degree than RCU has changed Linux.
+}
+}
+
+@unpublished{PaulEMcKenney2008HierarchicalRCU
+,Author="Paul E. McKenney"
+,Title="Hierarchical {RCU}"
+,month="November"
+,day="3"
+,year="2008"
+,note="Available:
+\url{http://lwn.net/Articles/305782/}
+[Viewed November 6, 2008]"
+,annotation="
+ RCU with combining-tree-based grace-period detection,
+ permitting it to handle thousands of CPUs.
+"
+}
+
+@conference{PaulEMcKenney2009MaliciousURCU
+,Author="Paul E. McKenney"
+,Title="Using a Malicious User-Level {RCU} to Torture {RCU}-Based Algorithms"
+,Booktitle="linux.conf.au 2009"
+,month="January"
+,year="2009"
+,address="Hobart, Australia"
+,note="Available:
+\url{http://www.rdrop.com/users/paulmck/RCU/urcutorture.2009.01.22a.pdf}
+[Viewed February 2, 2009]"
+,annotation="
+ Realtime RCU and torture-testing RCU uses.
+"
+}
+
+@unpublished{MathieuDesnoyers2009URCU
+,Author="Mathieu Desnoyers"
+,Title="[{RFC} git tree] Userspace {RCU} (urcu) for {Linux}"
+,month="February"
+,day="5"
+,year="2009"
+,note="Available:
+\url{http://lkml.org/lkml/2009/2/5/572}
+\url{git://lttng.org/userspace-rcu.git}
+[Viewed February 20, 2009]"
+,annotation="
+ Mathieu Desnoyers's user-space RCU implementation.
+ git://lttng.org/userspace-rcu.git
+"
+}
+
+@unpublished{PaulEMcKenney2009BloatWatchRCU
+,Author="Paul E. McKenney"
+,Title="{RCU}: The {Bloatwatch} Edition"
+,month="March"
+,day="17"
+,year="2009"
+,note="Available:
+\url{http://lwn.net/Articles/323929/}
+[Viewed March 20, 2009]"
+,annotation="
+ Uniprocessor assumptions allow simplified RCU implementation.
+"
+}
diff --git a/Documentation/RCU/UP.txt b/Documentation/RCU/UP.txt
index aab4a9ec3931..90ec5341ee98 100644
--- a/Documentation/RCU/UP.txt
+++ b/Documentation/RCU/UP.txt
@@ -2,14 +2,13 @@ RCU on Uniprocessor Systems
 
 
 A common misconception is that, on UP systems, the call_rcu() primitive
-may immediately invoke its function, and that the synchronize_rcu()
-primitive may return immediately. The basis of this misconception
+may immediately invoke its function. The basis of this misconception
 is that since there is only one CPU, it should not be necessary to
 wait for anything else to get done, since there are no other CPUs for
 anything else to be happening on. Although this approach will -sort- -of-
 work a surprising amount of the time, it is a very bad idea in general.
-This document presents three examples that demonstrate exactly how bad an
-idea this is.
+This document presents three examples that demonstrate exactly how bad
+an idea this is.
 
 
 Example 1: softirq Suicide
@@ -82,11 +81,18 @@ Quick Quiz #2: What locking restriction must RCU callbacks respect?
 
 Summary
 
-Permitting call_rcu() to immediately invoke its arguments or permitting
-synchronize_rcu() to immediately return breaks RCU, even on a UP system.
-So do not do it! Even on a UP system, the RCU infrastructure -must-
-respect grace periods, and -must- invoke callbacks from a known environment
-in which no locks are held.
+Permitting call_rcu() to immediately invoke its arguments breaks RCU,
+even on a UP system. So do not do it! Even on a UP system, the RCU
+infrastructure -must- respect grace periods, and -must- invoke callbacks
+from a known environment in which no locks are held.
+
+It -is- safe for synchronize_sched() and synchronize_rcu_bh() to return
+immediately on an UP system. It is also safe for synchronize_rcu()
+to return immediately on UP systems, except when running preemptable
+RCU.
+
+Quick Quiz #3: Why can't synchronize_rcu() return immediately on
+ UP systems running preemptable RCU?
 
 
 Answer to Quick Quiz #1:
@@ -117,3 +123,13 @@ Answer to Quick Quiz #2:
  callbacks acquire locks directly. However, a great many RCU
  callbacks do acquire locks -indirectly-, for example, via
  the kfree() primitive.
+
+Answer to Quick Quiz #3:
+ Why can't synchronize_rcu() return immediately on UP systems
+ running preemptable RCU?
+
+ Because some other task might have been preempted in the middle
+ of an RCU read-side critical section. If synchronize_rcu()
+ simply immediately returned, it would prematurely signal the
+ end of the grace period, which would come as a nasty shock to
+ that other thread when it started running again.
diff --git a/Documentation/RCU/checklist.txt b/Documentation/RCU/checklist.txt
index accfe2f5247d..51525a30e8b4 100644
--- a/Documentation/RCU/checklist.txt
+++ b/Documentation/RCU/checklist.txt
@@ -11,7 +11,10 @@ over a rather long period of time, but improvements are always welcome!
  structure is updated more than about 10% of the time, then
  you should strongly consider some other approach, unless
  detailed performance measurements show that RCU is nonetheless
- the right tool for the job.
+ the right tool for the job. Yes, you might think of RCU
+ as simply cutting overhead off of the readers and imposing it
+ on the writers. That is exactly why normal uses of RCU will
+ do much more reading than updating.
 
  Another exception is where performance is not an issue, and RCU
  provides a simpler implementation. An example of this situation
@@ -240,10 +243,11 @@ over a rather long period of time, but improvements are always welcome!
  instead need to use synchronize_irq() or synchronize_sched().
 
 12. Any lock acquired by an RCU callback must be acquired elsewhere
- with irq disabled, e.g., via spin_lock_irqsave(). Failing to
- disable irq on a given acquisition of that lock will result in
- deadlock as soon as the RCU callback happens to interrupt that
- acquisition's critical section.
+ with softirq disabled, e.g., via spin_lock_irqsave(),
+ spin_lock_bh(), etc. Failing to disable irq on a given
+ acquisition of that lock will result in deadlock as soon as the
+ RCU callback happens to interrupt that acquisition's critical
+ section.
 
 13. RCU callbacks can be and are executed in parallel. In many cases,
  the callback code simply wrappers around kfree(), so that this
@@ -310,3 +314,9 @@ over a rather long period of time, but improvements are always welcome!
  Because these primitives only wait for pre-existing readers,
  it is the caller's responsibility to guarantee safety to
  any subsequent readers.
+
+16. The various RCU read-side primitives do -not- contain memory
+ barriers. The CPU (and in some cases, the compiler) is free
+ to reorder code into and out of RCU read-side critical sections.
+ It is the responsibility of the RCU update-side primitives to
+ deal with this.
diff --git a/Documentation/RCU/rcu.txt b/Documentation/RCU/rcu.txt
index 7aa2002ade77..2a23523ce471 100644
--- a/Documentation/RCU/rcu.txt
+++ b/Documentation/RCU/rcu.txt
@@ -36,7 +36,7 @@ o How can the updater tell when a grace period has completed
  executed in user mode, or executed in the idle loop, we can
  safely free up that item.
 
- Preemptible variants of RCU (CONFIG_PREEMPT_RCU) get the
+ Preemptible variants of RCU (CONFIG_TREE_PREEMPT_RCU) get the
  same effect, but require that the readers manipulate CPU-local
  counters. These counters allow limited types of blocking
  within RCU read-side critical sections. SRCU also uses
@@ -79,10 +79,10 @@ o I hear that RCU is patented? What is with that?
 o I hear that RCU needs work in order to support realtime kernels?
 
  This work is largely completed. Realtime-friendly RCU can be
- enabled via the CONFIG_PREEMPT_RCU kernel configuration parameter.
- However, work is in progress for enabling priority boosting of
- preempted RCU read-side critical sections. This is needed if you
- have CPU-bound realtime threads.
+ enabled via the CONFIG_TREE_PREEMPT_RCU kernel configuration
+ parameter. However, work is in progress for enabling priority
+ boosting of preempted RCU read-side critical sections. This is
+ needed if you have CPU-bound realtime threads.
 
 o Where can I find more information on RCU?
 
diff --git a/Documentation/RCU/rcubarrier.txt b/Documentation/RCU/rcubarrier.txt
index 909602d409bb..e439a0edee22 100644
--- a/Documentation/RCU/rcubarrier.txt
+++ b/Documentation/RCU/rcubarrier.txt
@@ -170,6 +170,13 @@ module invokes call_rcu() from timers, you will need to first cancel all
 the timers, and only then invoke rcu_barrier() to wait for any remaining
 RCU callbacks to complete.
 
+Of course, if you module uses call_rcu_bh(), you will need to invoke
+rcu_barrier_bh() before unloading. Similarly, if your module uses
+call_rcu_sched(), you will need to invoke rcu_barrier_sched() before
+unloading. If your module uses call_rcu(), call_rcu_bh(), -and-
+call_rcu_sched(), then you will need to invoke each of rcu_barrier(),
+rcu_barrier_bh(), and rcu_barrier_sched().
+
 
 Implementing rcu_barrier()
 
diff --git a/Documentation/RCU/torture.txt b/Documentation/RCU/torture.txt
index a342b6e1cc10..9dba3bb90e60 100644
--- a/Documentation/RCU/torture.txt
+++ b/Documentation/RCU/torture.txt
@@ -76,8 +76,10 @@ torture_type The type of RCU to test: "rcu" for the rcu_read_lock() API,
76 "rcu_sync" for rcu_read_lock() with synchronous reclamation, 76 "rcu_sync" for rcu_read_lock() with synchronous reclamation,
77 "rcu_bh" for the rcu_read_lock_bh() API, "rcu_bh_sync" for 77 "rcu_bh" for the rcu_read_lock_bh() API, "rcu_bh_sync" for
78 rcu_read_lock_bh() with synchronous reclamation, "srcu" for 78 rcu_read_lock_bh() with synchronous reclamation, "srcu" for
79 the "srcu_read_lock()" API, and "sched" for the use of 79 the "srcu_read_lock()" API, "sched" for the use of
80 preempt_disable() together with synchronize_sched(). 80 preempt_disable() together with synchronize_sched(),
81 and "sched_expedited" for the use of preempt_disable()
82 with synchronize_sched_expedited().
81 83
82verbose Enable debug printk()s. Default is disabled. 84verbose Enable debug printk()s. Default is disabled.
83 85
@@ -162,6 +164,23 @@ of the "old" and "current" counters for the corresponding CPU. The
162"idx" value maps the "old" and "current" values to the underlying array, 164"idx" value maps the "old" and "current" values to the underlying array,
163and is useful for debugging. 165and is useful for debugging.
164 166
167Similarly, sched_expedited RCU provides the following:
168
169 sched_expedited-torture: rtc: d0000000016c1880 ver: 1090796 tfle: 0 rta: 1090796 rtaf: 0 rtf: 1090787 rtmbe: 0 nt: 27713319
170 sched_expedited-torture: Reader Pipe: 12660320201 95875 0 0 0 0 0 0 0 0 0
171 sched_expedited-torture: Reader Batch: 12660424885 0 0 0 0 0 0 0 0 0 0
172 sched_expedited-torture: Free-Block Circulation: 1090795 1090795 1090794 1090793 1090792 1090791 1090790 1090789 1090788 1090787 0
173 state: -1 / 0:0 3:0 4:0
174
175As before, the first four lines are similar to those for RCU.
176The last line shows the task-migration state. The first number is
177-1 if synchronize_sched_expedited() is idle, -2 if in the process of
178posting wakeups to the migration kthreads, and N when waiting on CPU N.
179Each of the colon-separated fields following the "/" is a CPU:state pair.
180Valid states are "0" for idle, "1" for waiting for quiescent state,
181"2" for passed through quiescent state, and "3" when a race with a
182CPU-hotplug event forces use of the synchronize_sched() primitive.
183
165 184
166USAGE 185USAGE
167 186
diff --git a/Documentation/RCU/trace.txt b/Documentation/RCU/trace.txt
index 02cced183b2d..187bbf10c923 100644
--- a/Documentation/RCU/trace.txt
+++ b/Documentation/RCU/trace.txt
@@ -191,8 +191,7 @@ rcu/rcuhier (which displays the struct rcu_node hierarchy).
 
 The output of "cat rcu/rcudata" looks as follows:
 
-rcu:
-rcu:
+rcu_sched:
  0 c=17829 g=17829 pq=1 pqc=17829 qp=0 dt=10951/1 dn=0 df=1101 of=0 ri=36 ql=0 b=10
  1 c=17829 g=17829 pq=1 pqc=17829 qp=0 dt=16117/1 dn=0 df=1015 of=0 ri=0 ql=0 b=10
  2 c=17829 g=17829 pq=1 pqc=17829 qp=0 dt=1445/1 dn=0 df=1839 of=0 ri=0 ql=0 b=10
@@ -306,7 +305,7 @@ comma-separated-variable spreadsheet format.
 
 The output of "cat rcu/rcugp" looks as follows:
 
-rcu: completed=33062 gpnum=33063
+rcu_sched: completed=33062 gpnum=33063
 rcu_bh: completed=464 gpnum=464
 
 Again, this output is for both "rcu" and "rcu_bh". The fields are
@@ -413,7 +412,7 @@ o Each element of the form "1/1 0:127 ^0" represents one struct
 
 The output of "cat rcu/rcu_pending" looks as follows:
 
-rcu:
+rcu_sched:
  0 np=255892 qsp=53936 cbr=0 cng=14417 gpc=10033 gps=24320 nf=6445 nn=146741
  1 np=261224 qsp=54638 cbr=0 cng=25723 gpc=16310 gps=2849 nf=5912 nn=155792
  2 np=237496 qsp=49664 cbr=0 cng=2762 gpc=45478 gps=1762 nf=1201 nn=136629
diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt
index 96170824a717..e41a7fecf0d3 100644
--- a/Documentation/RCU/whatisRCU.txt
+++ b/Documentation/RCU/whatisRCU.txt
@@ -136,10 +136,10 @@ rcu_read_lock()
  Used by a reader to inform the reclaimer that the reader is
  entering an RCU read-side critical section. It is illegal
  to block while in an RCU read-side critical section, though
- kernels built with CONFIG_PREEMPT_RCU can preempt RCU read-side
- critical sections. Any RCU-protected data structure accessed
- during an RCU read-side critical section is guaranteed to remain
- unreclaimed for the full duration of that critical section.
+ kernels built with CONFIG_TREE_PREEMPT_RCU can preempt RCU
+ read-side critical sections. Any RCU-protected data structure
+ accessed during an RCU read-side critical section is guaranteed to
+ remain unreclaimed for the full duration of that critical section.
  Reference counts may be used in conjunction with RCU to maintain
  longer-term references to data structures.
 
@@ -785,6 +785,7 @@ RCU pointer/list traversal:
  rcu_dereference
  list_for_each_entry_rcu
  hlist_for_each_entry_rcu
+ hlist_nulls_for_each_entry_rcu
 
  list_for_each_continue_rcu (to be deprecated in favor of new
  list_for_each_entry_continue_rcu)
@@ -807,19 +808,23 @@ RCU: Critical sections Grace period Barrier
 
         rcu_read_lock           synchronize_net         rcu_barrier
         rcu_read_unlock         synchronize_rcu
+                                synchronize_rcu_expedited
                                 call_rcu
 
 
 bh:     Critical sections       Grace period            Barrier
 
         rcu_read_lock_bh        call_rcu_bh             rcu_barrier_bh
-        rcu_read_unlock_bh
+        rcu_read_unlock_bh      synchronize_rcu_bh
+                                synchronize_rcu_bh_expedited
 
 
 sched:  Critical sections       Grace period            Barrier
 
-        [preempt_disable]       synchronize_sched       rcu_barrier_sched
-        [and friends]           call_rcu_sched
+        rcu_read_lock_sched     synchronize_sched       rcu_barrier_sched
+        rcu_read_unlock_sched   call_rcu_sched
+        [preempt_disable]       synchronize_sched_expedited
+        [and friends]
 
 
 SRCU:   Critical sections       Grace period            Barrier
@@ -827,6 +832,9 @@ SRCU: Critical sections Grace period Barrier
         srcu_read_lock          synchronize_srcu        N/A
         srcu_read_unlock
 
+SRCU:   Initialization/cleanup
+        init_srcu_struct
+        cleanup_srcu_struct
 
 See the comment headers in the source code (or the docbook generated
 from them) for more information.
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 09e031c55887..bb3a53cdfbc3 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -206,24 +206,6 @@ Who: Len Brown <len.brown@intel.com>
206 206
207--------------------------- 207---------------------------
208 208
209What: libata spindown skipping and warning
210When: Dec 2008
211Why: Some halt(8) implementations synchronize caches for and spin
212 down libata disks because libata didn't use to spin down disk on
213 system halt (only synchronized caches).
214 Spin down on system halt is now implemented. sysfs node
215 /sys/class/scsi_disk/h:c:i:l/manage_start_stop is present if
216 spin down support is available.
217 Because issuing spin down command to an already spun down disk
218 makes some disks spin up just to spin down again, libata tracks
219 device spindown status to skip the extra spindown command and
220 warn about it.
221 This is to give userspace tools the time to get updated and will
222 be removed after userspace is reasonably updated.
223Who: Tejun Heo <htejun@gmail.com>
224
225---------------------------
226
227What: i386/x86_64 bzImage symlinks 209What: i386/x86_64 bzImage symlinks
228When: April 2010 210When: April 2010
229 211
@@ -394,15 +376,6 @@ Who: Thomas Gleixner <tglx@linutronix.de>
394 376
395----------------------------- 377-----------------------------
396 378
397What: obsolete generic irq defines and typedefs
398When: 2.6.30
399Why: The defines and typedefs (hw_interrupt_type, no_irq_type, irq_desc_t)
400 have been kept around for migration reasons. After more than two years
401 it's time to remove them finally
402Who: Thomas Gleixner <tglx@linutronix.de>
403
404---------------------------
405
406What: fakephp and associated sysfs files in /sys/bus/pci/slots/ 379What: fakephp and associated sysfs files in /sys/bus/pci/slots/
407When: 2011 380When: 2011
408Why: In 2.6.27, the semantics of /sys/bus/pci/slots was redefined to 381Why: In 2.6.27, the semantics of /sys/bus/pci/slots was redefined to
@@ -468,3 +441,27 @@ Why: cpu_policy_rwsem has a new cleaner definition making it local to
468 cpufreq core and contained inside cpufreq.c. Other dependent 441 cpufreq core and contained inside cpufreq.c. Other dependent
469 drivers should not use it in order to safely avoid lockdep issues. 442 drivers should not use it in order to safely avoid lockdep issues.
470Who: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> 443Who: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
444
445----------------------------
446
447What: sound-slot/service-* module aliases and related clutters in
448 sound/sound_core.c
449When: August 2010
450Why: OSS sound_core grabs all legacy minors (0-255) of SOUND_MAJOR
451 (14) and requests modules using custom sound-slot/service-*
452 module aliases. The only benefit of doing this is allowing
453 use of custom module aliases which might as well be considered
454 a bug at this point. This preemptive claiming prevents
455 alternative OSS implementations.
456
 457	Until the feature is removed, the kernel will request
 458	both sound-slot/service-* and the standard char-major-* module
 459	aliases, and the pre-claiming can be turned off selectively via
 460	CONFIG_SOUND_OSS_CORE_PRECLAIM and the soundcore.preclaim_oss
 461	kernel parameter.
462
 463	After the transition phase is complete, both the custom module
 464	aliases and the switches to disable the pre-claiming will go away.
 465	This removal will also allow making ALSA OSS emulation independent
 466	of sound_core, breaking that dependency as well.
467Who: Tejun Heo <tj@kernel.org>
diff --git a/Documentation/filesystems/nfs.txt b/Documentation/filesystems/nfs.txt
new file mode 100644
index 000000000000..f50f26ce6cd0
--- /dev/null
+++ b/Documentation/filesystems/nfs.txt
@@ -0,0 +1,98 @@
1
2The NFS client
3==============
4
5The NFS version 2 protocol was first documented in RFC1094 (March 1989).
6Since then two more major releases of NFS have been published, with NFSv3
7being documented in RFC1813 (June 1995), and NFSv4 in RFC3530 (April
82003).
9
10The Linux NFS client currently supports all the above published versions,
11and work is in progress on adding support for minor version 1 of the NFSv4
12protocol.
13
 14The purpose of this document is to provide information on some of the
 15upcall interfaces that are used to provide the NFS client with some of
 16the information that it requires in order to fully comply with the NFS
 17spec.
18
19The DNS resolver
20================
21
22NFSv4 allows for one server to refer the NFS client to data that has been
23migrated onto another server by means of the special "fs_locations"
24attribute. See
25 http://tools.ietf.org/html/rfc3530#section-6
26and
27 http://tools.ietf.org/html/draft-ietf-nfsv4-referrals-00
28
29The fs_locations information can take the form of either an ip address and
30a path, or a DNS hostname and a path. The latter requires the NFS client to
31do a DNS lookup in order to mount the new volume, and hence the need for an
32upcall to allow userland to provide this service.
33
34Assuming that the user has the 'rpc_pipefs' filesystem mounted in the usual
35/var/lib/nfs/rpc_pipefs, the upcall consists of the following steps:
36
37 (1) The process checks the dns_resolve cache to see if it contains a
38 valid entry. If so, it returns that entry and exits.
39
40 (2) If no valid entry exists, the helper script '/sbin/nfs_cache_getent'
41 (may be changed using the 'nfs.cache_getent' kernel boot parameter)
42 is run, with two arguments:
43 - the cache name, "dns_resolve"
44 - the hostname to resolve
45
46 (3) After looking up the corresponding ip address, the helper script
47 writes the result into the rpc_pipefs pseudo-file
48 '/var/lib/nfs/rpc_pipefs/cache/dns_resolve/channel'
49 in the following (text) format:
50
51 "<ip address> <hostname> <ttl>\n"
52
 53	 Where <ip address> is in the usual IPv4 (e.g. 192.168.78.90) or IPv6
54 (ffee:ddcc:bbaa:9988:7766:5544:3322:1100, ffee::1100, ...) format.
55 <hostname> is identical to the second argument of the helper
56 script, and <ttl> is the 'time to live' of this cache entry (in
57 units of seconds).
58
59 Note: If <ip address> is invalid, say the string "0", then a negative
60 entry is created, which will cause the kernel to treat the hostname
61 as having no valid DNS translation.
62
63
64
65
66A basic sample /sbin/nfs_cache_getent
67=====================================
68
69#!/bin/bash
70#
71ttl=600
72#
73cut=/usr/bin/cut
74getent=/usr/bin/getent
75rpc_pipefs=/var/lib/nfs/rpc_pipefs
76#
77die()
78{
79 echo "Usage: $0 cache_name entry_name"
80 exit 1
81}
82
83[ $# -lt 2 ] && die
84cachename="$1"
85cache_path=${rpc_pipefs}/cache/${cachename}/channel
86
87case "${cachename}" in
88 dns_resolve)
89 name="$2"
90 result="$(${getent} hosts ${name} | ${cut} -f1 -d\ )"
91 [ -z "${result}" ] && result="0"
92 ;;
93 *)
94 die
95 ;;
96esac
97echo "${result} ${name} ${ttl}" >${cache_path}
98
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 7936b801fe6a..5d4427d17281 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1503,6 +1503,14 @@ and is between 256 and 4096 characters. It is defined in the file
1503 [NFS] set the TCP port on which the NFSv4 callback 1503 [NFS] set the TCP port on which the NFSv4 callback
1504 channel should listen. 1504 channel should listen.
1505 1505
1506 nfs.cache_getent=
1507 [NFS] sets the pathname to the program which is used
1508 to update the NFS client cache entries.
1509
1510 nfs.cache_getent_timeout=
1511 [NFS] sets the timeout after which an attempt to
1512 update a cache entry is deemed to have failed.
1513
1506 nfs.idmap_cache_timeout= 1514 nfs.idmap_cache_timeout=
1507 [NFS] set the maximum lifetime for idmapper cache 1515 [NFS] set the maximum lifetime for idmapper cache
1508 entries. 1516 entries.
@@ -2395,6 +2403,18 @@ and is between 256 and 4096 characters. It is defined in the file
2395 stifb= [HW] 2403 stifb= [HW]
2396 Format: bpp:<bpp1>[:<bpp2>[:<bpp3>...]] 2404 Format: bpp:<bpp1>[:<bpp2>[:<bpp3>...]]
2397 2405
2406 sunrpc.min_resvport=
2407 sunrpc.max_resvport=
2408 [NFS,SUNRPC]
2409 SunRPC servers often require that client requests
2410 originate from a privileged port (i.e. a port in the
2411 range 0 < portnr < 1024).
2412 An administrator who wishes to reserve some of these
2413 ports for other uses may adjust the range that the
2414 kernel's sunrpc client considers to be privileged
2415 using these two parameters to set the minimum and
2416 maximum port values.
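 2417			For example (illustrative values only), booting with
 2418			sunrpc.min_resvport=800 sunrpc.max_resvport=1023 keeps
 2419			the privileged ports below 800 free for other uses.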
2417
2398 sunrpc.pool_mode= 2418 sunrpc.pool_mode=
2399 [NFS] 2419 [NFS]
2400 Control how the NFS server code allocates CPUs to 2420 Control how the NFS server code allocates CPUs to
@@ -2411,6 +2431,15 @@ and is between 256 and 4096 characters. It is defined in the file
2411 pernode one pool for each NUMA node (equivalent 2431 pernode one pool for each NUMA node (equivalent
2412 to global on non-NUMA machines) 2432 to global on non-NUMA machines)
2413 2433
2434 sunrpc.tcp_slot_table_entries=
2435 sunrpc.udp_slot_table_entries=
2436 [NFS,SUNRPC]
2437 Sets the upper limit on the number of simultaneous
2438 RPC calls that can be sent from the client to a
2439 server. Increasing these values may allow you to
2440 improve throughput, but will also increase the
2441 amount of memory reserved for use by the client.
2442
2414 swiotlb= [IA-64] Number of I/O TLB slabs 2443 swiotlb= [IA-64] Number of I/O TLB slabs
2415 2444
2416 switches= [HW,M68k] 2445 switches= [HW,M68k]
@@ -2480,6 +2509,11 @@ and is between 256 and 4096 characters. It is defined in the file
2480 trace_buf_size=nn[KMG] 2509 trace_buf_size=nn[KMG]
2481 [FTRACE] will set tracing buffer size. 2510 [FTRACE] will set tracing buffer size.
2482 2511
2512 trace_event=[event-list]
2513 [FTRACE] Set and start specified trace events in order
2514 to facilitate early boot debugging.
2515 See also Documentation/trace/events.txt
2516
2483 trix= [HW,OSS] MediaTrix AudioTrix Pro 2517 trix= [HW,OSS] MediaTrix AudioTrix Pro
2484 Format: 2518 Format:
2485 <io>,<irq>,<dma>,<dma2>,<sb_io>,<sb_irq>,<sb_dma>,<mpu_io>,<mpu_irq> 2519 <io>,<irq>,<dma>,<dma2>,<sb_io>,<sb_irq>,<sb_dma>,<mpu_io>,<mpu_irq>
diff --git a/Documentation/keys.txt b/Documentation/keys.txt
index b56aacc1fff8..e4dbbdb1bd96 100644
--- a/Documentation/keys.txt
+++ b/Documentation/keys.txt
@@ -26,7 +26,7 @@ This document has the following sections:
26 - Notes on accessing payload contents 26 - Notes on accessing payload contents
27 - Defining a key type 27 - Defining a key type
28 - Request-key callback service 28 - Request-key callback service
29 - Key access filesystem 29 - Garbage collection
30 30
31 31
32============ 32============
@@ -113,6 +113,9 @@ Each key has a number of attributes:
113 113
114 (*) Dead. The key's type was unregistered, and so the key is now useless. 114 (*) Dead. The key's type was unregistered, and so the key is now useless.
115 115
116Keys in the last three states are subject to garbage collection. See the
117section on "Garbage collection".
118
116 119
117==================== 120====================
118KEY SERVICE OVERVIEW 121KEY SERVICE OVERVIEW
@@ -754,6 +757,26 @@ The keyctl syscall functions are:
754 successful. 757 successful.
755 758
756 759
760 (*) Install the calling process's session keyring on its parent.
761
762 long keyctl(KEYCTL_SESSION_TO_PARENT);
763
 764     This function attempts to install the calling process's session keyring
 765     onto the calling process's parent, replacing the parent's current session
 766     keyring.
767
768 The calling process must have the same ownership as its parent, the
769 keyring must have the same ownership as the calling process, the calling
770 process must have LINK permission on the keyring and the active LSM module
771 mustn't deny permission, otherwise error EPERM will be returned.
772
773 Error ENOMEM will be returned if there was insufficient memory to complete
774 the operation, otherwise 0 will be returned to indicate success.
775
 776     The keyring will be replaced the next time the parent process leaves the
 777     kernel and resumes executing userspace.
778
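     A minimal userspace sketch of this call (using the keyctl() wrapper from
     libkeyutils; error handling is trimmed and the program is illustrative):

	#include <stdio.h>
	#include <keyutils.h>

	int main(void)
	{
		/* Ask the kernel to install our session keyring on our parent,
		 * typically the shell that launched this program. */
		if (keyctl(KEYCTL_SESSION_TO_PARENT) < 0) {
			perror("KEYCTL_SESSION_TO_PARENT");
			return 1;
		}
		/* The swap happens when the parent next returns to userspace. */
		return 0;
	}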
779
757=============== 780===============
758KERNEL SERVICES 781KERNEL SERVICES
759=============== 782===============
@@ -1231,3 +1254,17 @@ by executing:
1231 1254
1232In this case, the program isn't required to actually attach the key to a ring; 1255In this case, the program isn't required to actually attach the key to a ring;
1233the rings are provided for reference. 1256the rings are provided for reference.
1257
1258
1259==================
1260GARBAGE COLLECTION
1261==================
1262
1263Dead keys (for which the type has been removed) will be automatically unlinked
1264from those keyrings that point to them and deleted as soon as possible by a
1265background garbage collector.
1266
1267Similarly, revoked and expired keys will be garbage collected, but only after a
1268certain amount of time has passed. This time is set as a number of seconds in:
1269
1270 /proc/sys/kernel/keys/gc_delay
diff --git a/Documentation/kmemleak.txt b/Documentation/kmemleak.txt
index 89068030b01b..34f6638aa5ac 100644
--- a/Documentation/kmemleak.txt
+++ b/Documentation/kmemleak.txt
@@ -27,6 +27,13 @@ To trigger an intermediate memory scan:
27 27
28 # echo scan > /sys/kernel/debug/kmemleak 28 # echo scan > /sys/kernel/debug/kmemleak
29 29
30To clear the list of all current possible memory leaks:
31
32 # echo clear > /sys/kernel/debug/kmemleak
33
34New leaks will then come up upon reading /sys/kernel/debug/kmemleak
35again.
36
30Note that the orphan objects are listed in the order they were allocated 37Note that the orphan objects are listed in the order they were allocated
31and one object at the beginning of the list may cause other subsequent 38and one object at the beginning of the list may cause other subsequent
32objects to be reported as orphan. 39objects to be reported as orphan.
@@ -42,6 +49,9 @@ Memory scanning parameters can be modified at run-time by writing to the
42 scan=<secs> - set the automatic memory scanning period in seconds 49 scan=<secs> - set the automatic memory scanning period in seconds
43 (default 600, 0 to stop the automatic scanning) 50 (default 600, 0 to stop the automatic scanning)
44 scan - trigger a memory scan 51 scan - trigger a memory scan
52 clear - clear list of current memory leak suspects, done by
53 marking all current reported unreferenced objects grey
54 dump=<addr> - dump information about the object found at <addr>
45 55
46Kmemleak can also be disabled at boot-time by passing "kmemleak=off" on 56Kmemleak can also be disabled at boot-time by passing "kmemleak=off" on
47the kernel command line. 57the kernel command line.
@@ -86,6 +96,27 @@ avoid this, kmemleak can also store the number of values pointing to an
86address inside the block address range that need to be found so that the 96address inside the block address range that need to be found so that the
87block is not considered a leak. One example is __vmalloc(). 97block is not considered a leak. One example is __vmalloc().
88 98
99Testing specific sections with kmemleak
100---------------------------------------
101
102Upon initial bootup your /sys/kernel/debug/kmemleak output page may be
103quite extensive. This can also be the case if you have very buggy code
104when doing development. To work around these situations you can use the
105'clear' command to clear all reported unreferenced objects from the
106/sys/kernel/debug/kmemleak output. By issuing a 'scan' after a 'clear'
107you can find new unreferenced objects; this should help with testing
108specific sections of code.
109
110To test a critical section on demand with a clean kmemleak do:
111
112 # echo clear > /sys/kernel/debug/kmemleak
113 ... test your kernel or modules ...
114 # echo scan > /sys/kernel/debug/kmemleak
115
 116Then get your report as usual with:
117
118 # cat /sys/kernel/debug/kmemleak
119
89Kmemleak API 120Kmemleak API
90------------ 121------------
91 122
diff --git a/Documentation/s390/s390dbf.txt b/Documentation/s390/s390dbf.txt
index 2d10053dd97e..ae66f9b90a25 100644
--- a/Documentation/s390/s390dbf.txt
+++ b/Documentation/s390/s390dbf.txt
@@ -495,6 +495,13 @@ and for each vararg a long value. So e.g. for a debug entry with a format
495string plus two varargs one would need to allocate a (3 * sizeof(long)) 495string plus two varargs one would need to allocate a (3 * sizeof(long))
496byte data area in the debug_register() function. 496byte data area in the debug_register() function.
497 497
 498IMPORTANT: Using "%s" in sprintf event functions is dangerous. You may only
 499use "%s" in the sprintf event functions if the memory for the passed string
 500remains available for as long as the debug feature exists. The reason is
 501that, for performance reasons, only a pointer to the string is stored in
 502the debug feature. If you log a string that is freed afterwards, you will
 503get an OOPS when inspecting the debug feature, because the debug feature
 504will then access the already freed memory.
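A sketch of the difference (illustrative code; my_dbf stands for a
debug_info_t obtained from debug_register() with a sufficiently large
data area):

    static debug_info_t *my_dbf;

    static void log_names(void)
    {
	    char *tmp = kstrdup("tux", GFP_KERNEL);

	    debug_sprintf_event(my_dbf, 1, "ok:  %s\n", "string in .rodata");
	    debug_sprintf_event(my_dbf, 1, "bad: %s\n", tmp);  /* pointer stored */
	    kfree(tmp);  /* stored pointer now dangles; reading the view may OOPS */
    }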
498 505
499NOTE: If using the sprintf view do NOT use other event/exception functions 506NOTE: If using the sprintf view do NOT use other event/exception functions
500than the sprintf-event and -exception functions. 507than the sprintf-event and -exception functions.
diff --git a/Documentation/sound/alsa/ALSA-Configuration.txt b/Documentation/sound/alsa/ALSA-Configuration.txt
index 4252697a95d6..1c8eb4518ce0 100644
--- a/Documentation/sound/alsa/ALSA-Configuration.txt
+++ b/Documentation/sound/alsa/ALSA-Configuration.txt
@@ -60,6 +60,12 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
60 slots - Reserve the slot index for the given driver. 60 slots - Reserve the slot index for the given driver.
61 This option takes multiple strings. 61 This option takes multiple strings.
62 See "Module Autoloading Support" section for details. 62 See "Module Autoloading Support" section for details.
63 debug - Specifies the debug message level
64 (0 = disable debug prints, 1 = normal debug messages,
65 2 = verbose debug messages)
66 This option appears only when CONFIG_SND_DEBUG=y.
67 This option can be dynamically changed via sysfs
 68	    /sys/module/snd/parameters/debug file.
63 69
64 Module snd-pcm-oss 70 Module snd-pcm-oss
65 ------------------ 71 ------------------
@@ -513,6 +519,26 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
513 or input, but you may use this module for any application which 519 or input, but you may use this module for any application which
514 requires a sound card (like RealPlayer). 520 requires a sound card (like RealPlayer).
515 521
522 pcm_devs - Number of PCM devices assigned to each card
523 (default = 1, up to 4)
524 pcm_substreams - Number of PCM substreams assigned to each PCM
525 (default = 8, up to 16)
526 hrtimer - Use hrtimer (=1, default) or system timer (=0)
527 fake_buffer - Fake buffer allocations (default = 1)
528
529 When multiple PCM devices are created, snd-dummy gives different
530 behavior to each PCM device:
531 0 = interleaved with mmap support
532 1 = non-interleaved with mmap support
533 2 = interleaved without mmap
534 3 = non-interleaved without mmap
535
 536	    By default, the snd-dummy driver doesn't allocate real buffers
 537	    but either ignores read/write or mmaps a single dummy page to all
 538	    buffer pages, in order to save resources. If your apps need the
 539	    read/written buffer data to be consistent, pass the fake_buffer=0
 540	    option.
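 	    For example (values are only an illustration), a line like

		options snd-dummy pcm_devs=2 pcm_substreams=4 fake_buffer=0

 	    in /etc/modprobe.d/ creates two PCM devices per card and forces
 	    real buffer allocations.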
541
516 The power-management is supported. 542 The power-management is supported.
517 543
518 Module snd-echo3g 544 Module snd-echo3g
@@ -768,6 +794,10 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
768 bdl_pos_adj - Specifies the DMA IRQ timing delay in samples. 794 bdl_pos_adj - Specifies the DMA IRQ timing delay in samples.
769 Passing -1 will make the driver to choose the appropriate 795 Passing -1 will make the driver to choose the appropriate
770 value based on the controller chip. 796 value based on the controller chip.
797 patch - Specifies the early "patch" files to modify the HD-audio
798 setup before initializing the codecs. This option is
799 available only when CONFIG_SND_HDA_PATCH_LOADER=y is set.
800 See HD-Audio.txt for details.
771 801
772 [Single (global) options] 802 [Single (global) options]
773 single_cmd - Use single immediate commands to communicate with 803 single_cmd - Use single immediate commands to communicate with
diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt
index 939a3dd58148..97eebd63bedc 100644
--- a/Documentation/sound/alsa/HD-Audio-Models.txt
+++ b/Documentation/sound/alsa/HD-Audio-Models.txt
@@ -114,8 +114,8 @@ ALC662/663/272
114 samsung-nc10 Samsung NC10 mini notebook 114 samsung-nc10 Samsung NC10 mini notebook
115 auto auto-config reading BIOS (default) 115 auto auto-config reading BIOS (default)
116 116
117ALC882/885 117ALC882/883/885/888/889
118========== 118======================
119 3stack-dig 3-jack with SPDIF I/O 119 3stack-dig 3-jack with SPDIF I/O
120 6stack-dig 6-jack digital with SPDIF I/O 120 6stack-dig 6-jack digital with SPDIF I/O
121 arima Arima W820Di1 121 arima Arima W820Di1
@@ -127,12 +127,8 @@ ALC882/885
127 mbp3 Macbook Pro rev3 127 mbp3 Macbook Pro rev3
128 imac24 iMac 24'' with jack detection 128 imac24 iMac 24'' with jack detection
129 w2jc ASUS W2JC 129 w2jc ASUS W2JC
130 auto auto-config reading BIOS (default) 130 3stack-2ch-dig 3-jack with SPDIF I/O (ALC883)
131 131 alc883-6stack-dig 6-jack digital with SPDIF I/O (ALC883)
132ALC883/888
133==========
134 3stack-dig 3-jack with SPDIF I/O
135 6stack-dig 6-jack digital with SPDIF I/O
136 3stack-6ch 3-jack 6-channel 132 3stack-6ch 3-jack 6-channel
137 3stack-6ch-dig 3-jack 6-channel with SPDIF I/O 133 3stack-6ch-dig 3-jack 6-channel with SPDIF I/O
138 6stack-dig-demo 6-jack digital for Intel demo board 134 6stack-dig-demo 6-jack digital for Intel demo board
@@ -140,6 +136,7 @@ ALC883/888
140 acer-aspire Acer Aspire 9810 136 acer-aspire Acer Aspire 9810
141 acer-aspire-4930g Acer Aspire 4930G 137 acer-aspire-4930g Acer Aspire 4930G
142 acer-aspire-6530g Acer Aspire 6530G 138 acer-aspire-6530g Acer Aspire 6530G
139 acer-aspire-7730g Acer Aspire 7730G
143 acer-aspire-8930g Acer Aspire 8930G 140 acer-aspire-8930g Acer Aspire 8930G
144 medion Medion Laptops 141 medion Medion Laptops
145 medion-md2 Medion MD2 142 medion-md2 Medion MD2
@@ -155,10 +152,13 @@ ALC883/888
155 3stack-hp HP machines with 3stack (Lucknow, Samba boards) 152 3stack-hp HP machines with 3stack (Lucknow, Samba boards)
156 6stack-dell Dell machines with 6stack (Inspiron 530) 153 6stack-dell Dell machines with 6stack (Inspiron 530)
157 mitac Mitac 8252D 154 mitac Mitac 8252D
155 clevo-m540r Clevo M540R (6ch + digital)
158 clevo-m720 Clevo M720 laptop series 156 clevo-m720 Clevo M720 laptop series
159 fujitsu-pi2515 Fujitsu AMILO Pi2515 157 fujitsu-pi2515 Fujitsu AMILO Pi2515
160 fujitsu-xa3530 Fujitsu AMILO XA3530 158 fujitsu-xa3530 Fujitsu AMILO XA3530
161 3stack-6ch-intel Intel DG33* boards 159 3stack-6ch-intel Intel DG33* boards
160 intel-alc889a Intel IbexPeak with ALC889A
161 intel-x58 Intel DX58 with ALC889
162 asus-p5q ASUS P5Q-EM boards 162 asus-p5q ASUS P5Q-EM boards
163 mb31 MacBook 3,1 163 mb31 MacBook 3,1
164 sony-vaio-tt Sony VAIO TT 164 sony-vaio-tt Sony VAIO TT
@@ -229,7 +229,7 @@ AD1984
229====== 229======
230 basic default configuration 230 basic default configuration
231 thinkpad Lenovo Thinkpad T61/X61 231 thinkpad Lenovo Thinkpad T61/X61
232 dell Dell T3400 232 dell_desktop Dell T3400
233 233
234AD1986A 234AD1986A
235======= 235=======
@@ -258,6 +258,7 @@ Conexant 5045
258 laptop-micsense Laptop with Mic sense (old model fujitsu) 258 laptop-micsense Laptop with Mic sense (old model fujitsu)
259 laptop-hpmicsense Laptop with HP and Mic senses 259 laptop-hpmicsense Laptop with HP and Mic senses
260 benq Benq R55E 260 benq Benq R55E
261 laptop-hp530 HP 530 laptop
261 test for testing/debugging purpose, almost all controls 262 test for testing/debugging purpose, almost all controls
262 can be adjusted. Appearing only when compiled with 263 can be adjusted. Appearing only when compiled with
263 $CONFIG_SND_DEBUG=y 264 $CONFIG_SND_DEBUG=y
@@ -278,9 +279,16 @@ Conexant 5051
278 hp-dv6736 HP dv6736 279 hp-dv6736 HP dv6736
279 lenovo-x200 Lenovo X200 laptop 280 lenovo-x200 Lenovo X200 laptop
280 281
282Conexant 5066
283=============
284 laptop Basic Laptop config (default)
285 dell-laptop Dell laptops
286 olpc-xo-1_5 OLPC XO 1.5
287
281STAC9200 288STAC9200
282======== 289========
283 ref Reference board 290 ref Reference board
291 oqo OQO Model 2
284 dell-d21 Dell (unknown) 292 dell-d21 Dell (unknown)
285 dell-d22 Dell (unknown) 293 dell-d22 Dell (unknown)
286 dell-d23 Dell (unknown) 294 dell-d23 Dell (unknown)
@@ -368,10 +376,12 @@ STAC92HD73*
368=========== 376===========
369 ref Reference board 377 ref Reference board
370 no-jd BIOS setup but without jack-detection 378 no-jd BIOS setup but without jack-detection
379 intel Intel DG45* mobos
371 dell-m6-amic Dell desktops/laptops with analog mics 380 dell-m6-amic Dell desktops/laptops with analog mics
372 dell-m6-dmic Dell desktops/laptops with digital mics 381 dell-m6-dmic Dell desktops/laptops with digital mics
373 dell-m6 Dell desktops/laptops with both type of mics 382 dell-m6 Dell desktops/laptops with both type of mics
374 dell-eq Dell desktops/laptops 383 dell-eq Dell desktops/laptops
384 alienware Alienware M17x
375 auto BIOS setup (default) 385 auto BIOS setup (default)
376 386
377STAC92HD83* 387STAC92HD83*
@@ -385,3 +395,8 @@ STAC9872
385======== 395========
386 vaio VAIO laptop without SPDIF 396 vaio VAIO laptop without SPDIF
387 auto BIOS setup (default) 397 auto BIOS setup (default)
398
399Cirrus Logic CS4206/4207
400========================
401 mbp55 MacBook Pro 5,5
402 auto BIOS setup (default)
diff --git a/Documentation/sound/alsa/HD-Audio.txt b/Documentation/sound/alsa/HD-Audio.txt
index 71ac995b1915..7b8a5f947d1d 100644
--- a/Documentation/sound/alsa/HD-Audio.txt
+++ b/Documentation/sound/alsa/HD-Audio.txt
@@ -139,6 +139,10 @@ The driver checks PCI SSID and looks through the static configuration
139table until any matching entry is found. If you have a new machine, 139table until any matching entry is found. If you have a new machine,
140you may see a message like below: 140you may see a message like below:
141------------------------------------------------------------------------ 141------------------------------------------------------------------------
142 hda_codec: ALC880: BIOS auto-probing.
143------------------------------------------------------------------------
 144In earlier driver versions, you would instead see a message like:
145------------------------------------------------------------------------
142 hda_codec: Unknown model for ALC880, trying auto-probe from BIOS... 146 hda_codec: Unknown model for ALC880, trying auto-probe from BIOS...
143------------------------------------------------------------------------ 147------------------------------------------------------------------------
144Even if you see such a message, DON'T PANIC. Take a deep breath and 148Even if you see such a message, DON'T PANIC. Take a deep breath and
@@ -403,6 +407,66 @@ re-configure based on that state, run like below:
403------------------------------------------------------------------------ 407------------------------------------------------------------------------
404 408
405 409
410Early Patching
411~~~~~~~~~~~~~~
 412When CONFIG_SND_HDA_PATCH_LOADER=y is set, you can pass a "patch" as a
 413firmware file for modifying the HD-audio setup before initializing the
 414codec. This works basically like the reconfiguration via sysfs described
 415above, but it is applied before the first codec configuration.
416
417A patch file is a plain text file which looks like below:
418
419------------------------------------------------------------------------
420 [codec]
421 0x12345678 0xabcd1234 2
422
423 [model]
424 auto
425
426 [pincfg]
427 0x12 0x411111f0
428
429 [verb]
430 0x20 0x500 0x03
431 0x20 0x400 0xff
432
433 [hint]
434 hp_detect = yes
435------------------------------------------------------------------------
436
437The file needs to have a line `[codec]`. The next line should contain
438three numbers indicating the codec vendor-id (0x12345678 in the
439example), the codec subsystem-id (0xabcd1234) and the address (2) of
 440the codec. The remaining patch entries are applied to this specified
 441codec until another codec entry is given.
442
 443The `[model]` line allows you to change the model name of each codec.
 444In the example above, it will be changed to model=auto.
 445Note that this overrides the module option.
446
 447After the `[pincfg]` line, the contents are parsed as the initial
 448default pin-configurations just like the `user_pin_configs` sysfs entry
 449above. The values can be shown in the user_pin_configs sysfs file, too.
450
451Similarly, the lines after `[verb]` are parsed as `init_verbs`
452sysfs entries, and the lines after `[hint]` are parsed as `hints`
453sysfs entries, respectively.
454
 455The hd-audio driver reads the file via request_firmware(). Thus,
 456a patch file has to be located on the appropriate firmware path,
 457typically /lib/firmware. For example, when you pass the option
 458`patch=hda-init.fw`, the file /lib/firmware/hda-init.fw must be
 459present.
460
 461The patch module option is specific to each card instance, and you
 462need to give one file name for each instance, separated by commas.
 463For example, if you have two cards, one for an on-board analog and one
 464for an HDMI video board, you may pass the patch option like below:
465------------------------------------------------------------------------
466 options snd-hda-intel patch=on-board-patch,hdmi-patch
467------------------------------------------------------------------------
468
469
406Power-Saving 470Power-Saving
407~~~~~~~~~~~~ 471~~~~~~~~~~~~
408The power-saving is a kind of auto-suspend of the device. When the 472The power-saving is a kind of auto-suspend of the device. When the
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index 322a00bb99d9..2dbff53369d0 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -19,6 +19,7 @@ Currently, these files might (depending on your configuration)
19show up in /proc/sys/kernel: 19show up in /proc/sys/kernel:
20- acpi_video_flags 20- acpi_video_flags
21- acct 21- acct
22- callhome [ S390 only ]
22- auto_msgmni 23- auto_msgmni
23- core_pattern 24- core_pattern
24- core_uses_pid 25- core_uses_pid
@@ -91,6 +92,21 @@ valid for 30 seconds.
91 92
92============================================================== 93==============================================================
93 94
95callhome:
96
97Controls the kernel's callhome behavior in case of a kernel panic.
98
99The s390 hardware allows an operating system to send a notification
100to a service organization (callhome) in case of an operating system panic.
101
 102When the value in this file is 0 (which is the default behavior),
 103nothing happens in case of a kernel panic. If this value is set to "1",
 104the complete kernel oops message is sent to the IBM customer service
 105organization, provided the mainframe the Linux operating system is
 106running on has a service contract with IBM.
107
108==============================================================
109
94core_pattern: 110core_pattern:
95 111
96core_pattern is used to specify a core dumpfile pattern name. 112core_pattern is used to specify a core dumpfile pattern name.
diff --git a/Documentation/trace/events.txt b/Documentation/trace/events.txt
index f157d7594ea7..2bcc8d4dea29 100644
--- a/Documentation/trace/events.txt
+++ b/Documentation/trace/events.txt
@@ -83,6 +83,15 @@ When reading one of these enable files, there are four results:
83 X - there is a mixture of events enabled and disabled 83 X - there is a mixture of events enabled and disabled
84 ? - this file does not affect any event 84 ? - this file does not affect any event
85 85
862.3 Boot option
87---------------
88
 89In order to facilitate early boot debugging, use the boot option:
90
91 trace_event=[event-list]
92
93The format of this boot option is the same as described in section 2.1.
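
For example (the event names here are only an illustration), booting with

	trace_event=sched:sched_switch,sched:sched_wakeup

enables those two scheduler events before userspace starts.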
94
863. Defining an event-enabled tracepoint 953. Defining an event-enabled tracepoint
87======================================= 96=======================================
88 97
diff --git a/Documentation/trace/ftrace.txt b/Documentation/trace/ftrace.txt
index a39b3c749de5..355d0f1f8c50 100644
--- a/Documentation/trace/ftrace.txt
+++ b/Documentation/trace/ftrace.txt
@@ -85,26 +85,19 @@ of ftrace. Here is a list of some of the key files:
85 This file holds the output of the trace in a human 85 This file holds the output of the trace in a human
86 readable format (described below). 86 readable format (described below).
87 87
88 latency_trace:
89
90 This file shows the same trace but the information
91 is organized more to display possible latencies
92 in the system (described below).
93
94 trace_pipe: 88 trace_pipe:
95 89
96 The output is the same as the "trace" file but this 90 The output is the same as the "trace" file but this
97 file is meant to be streamed with live tracing. 91 file is meant to be streamed with live tracing.
98 Reads from this file will block until new data 92 Reads from this file will block until new data is
99 is retrieved. Unlike the "trace" and "latency_trace" 93 retrieved. Unlike the "trace" file, this file is a
100 files, this file is a consumer. This means reading 94 consumer. This means reading from this file causes
101 from this file causes sequential reads to display 95 sequential reads to display more current data. Once
102 more current data. Once data is read from this 96 data is read from this file, it is consumed, and
103 file, it is consumed, and will not be read 97 will not be read again with a sequential read. The
104 again with a sequential read. The "trace" and 98 "trace" file is static, and if the tracer is not
 105 "latency_trace" files are static, and if the 99 adding more data, it will display the same
 106 tracer is not adding more data, they will display 100 information every time it is read.
107 the same information every time they are read.
108 101
109 trace_options: 102 trace_options:
110 103
@@ -117,10 +110,10 @@ of ftrace. Here is a list of some of the key files:
117 Some of the tracers record the max latency. 110 Some of the tracers record the max latency.
118 For example, the time interrupts are disabled. 111 For example, the time interrupts are disabled.
119 This time is saved in this file. The max trace 112 This time is saved in this file. The max trace
120 will also be stored, and displayed by either 113 will also be stored, and displayed by "trace".
121 "trace" or "latency_trace". A new max trace will 114 A new max trace will only be recorded if the
122 only be recorded if the latency is greater than 115 latency is greater than the value in this
123 the value in this file. (in microseconds) 116 file. (in microseconds)
124 117
125 buffer_size_kb: 118 buffer_size_kb:
126 119
@@ -210,7 +203,7 @@ Here is the list of current tracers that may be configured.
210 the trace with the longest max latency. 203 the trace with the longest max latency.
211 See tracing_max_latency. When a new max is recorded, 204 See tracing_max_latency. When a new max is recorded,
212 it replaces the old trace. It is best to view this 205 it replaces the old trace. It is best to view this
213 trace via the latency_trace file. 206 trace with the latency-format option enabled.
214 207
215 "preemptoff" 208 "preemptoff"
216 209
@@ -307,8 +300,8 @@ the lowest priority thread (pid 0).
307Latency trace format 300Latency trace format
308-------------------- 301--------------------
309 302
310For traces that display latency times, the latency_trace file 303When the latency-format option is enabled, the trace file gives
311gives somewhat more information to see why a latency happened. 304somewhat more information to see why a latency happened.
312Here is a typical trace. 305Here is a typical trace.
313 306
314# tracer: irqsoff 307# tracer: irqsoff
@@ -380,9 +373,10 @@ explains which is which.
380 373
381The above is mostly meaningful for kernel developers. 374The above is mostly meaningful for kernel developers.
382 375
383 time: This differs from the trace file output. The trace file output 376 time: When the latency-format option is enabled, the trace file
384 includes an absolute timestamp. The timestamp used by the 377 output includes a timestamp relative to the start of the
385 latency_trace file is relative to the start of the trace. 378 trace. This differs from the output when latency-format
379 is disabled, which includes an absolute timestamp.
386 380
387 delay: This is just to help catch your eye a bit better. And 381 delay: This is just to help catch your eye a bit better. And
388 needs to be fixed to be only relative to the same CPU. 382 needs to be fixed to be only relative to the same CPU.
@@ -440,7 +434,8 @@ Here are the available options:
440 sym-addr: 434 sym-addr:
441 bash-4000 [01] 1477.606694: simple_strtoul <c0339346> 435 bash-4000 [01] 1477.606694: simple_strtoul <c0339346>
442 436
443 verbose - This deals with the latency_trace file. 437 verbose - This deals with the trace file when the
438 latency-format option is enabled.
444 439
445 bash 4000 1 0 00000000 00010a95 [58127d26] 1720.415ms \ 440 bash 4000 1 0 00000000 00010a95 [58127d26] 1720.415ms \
446 (+0.000ms): simple_strtoul (strict_strtoul) 441 (+0.000ms): simple_strtoul (strict_strtoul)
@@ -472,7 +467,7 @@ Here are the available options:
472 the app is no longer running 467 the app is no longer running
473 468
474 The lookup is performed when you read 469 The lookup is performed when you read
475 trace,trace_pipe,latency_trace. Example: 470 trace,trace_pipe. Example:
476 471
477 a.out-1623 [000] 40874.465068: /root/a.out[+0x480] <-/root/a.out[+0 472 a.out-1623 [000] 40874.465068: /root/a.out[+0x480] <-/root/a.out[+0
478x494] <- /root/a.out[+0x4a8] <- /lib/libc-2.7.so[+0x1e1a6] 473x494] <- /root/a.out[+0x4a8] <- /lib/libc-2.7.so[+0x1e1a6]
@@ -481,6 +476,11 @@ x494] <- /root/a.out[+0x4a8] <- /lib/libc-2.7.so[+0x1e1a6]
481 every scheduling event. Will add overhead if 476 every scheduling event. Will add overhead if
482 there's a lot of tasks running at once. 477 there's a lot of tasks running at once.
483 478
479 latency-format - This option changes the trace. When
480 it is enabled, the trace displays
481 additional information about the
482 latencies, as described in "Latency
483 trace format".
484 484
485sched_switch 485sched_switch
486------------ 486------------
@@ -596,12 +596,13 @@ To reset the maximum, echo 0 into tracing_max_latency. Here is
596an example: 596an example:
597 597
598 # echo irqsoff > current_tracer 598 # echo irqsoff > current_tracer
599 # echo latency-format > trace_options
599 # echo 0 > tracing_max_latency 600 # echo 0 > tracing_max_latency
600 # echo 1 > tracing_enabled 601 # echo 1 > tracing_enabled
601 # ls -ltr 602 # ls -ltr
602 [...] 603 [...]
603 # echo 0 > tracing_enabled 604 # echo 0 > tracing_enabled
604 # cat latency_trace 605 # cat trace
605# tracer: irqsoff 606# tracer: irqsoff
606# 607#
607irqsoff latency trace v1.1.5 on 2.6.26 608irqsoff latency trace v1.1.5 on 2.6.26
@@ -703,12 +704,13 @@ which preemption was disabled. The control of preemptoff tracer
703is much like the irqsoff tracer. 704is much like the irqsoff tracer.
704 705
705 # echo preemptoff > current_tracer 706 # echo preemptoff > current_tracer
707 # echo latency-format > trace_options
706 # echo 0 > tracing_max_latency 708 # echo 0 > tracing_max_latency
707 # echo 1 > tracing_enabled 709 # echo 1 > tracing_enabled
708 # ls -ltr 710 # ls -ltr
709 [...] 711 [...]
710 # echo 0 > tracing_enabled 712 # echo 0 > tracing_enabled
711 # cat latency_trace 713 # cat trace
712# tracer: preemptoff 714# tracer: preemptoff
713# 715#
714preemptoff latency trace v1.1.5 on 2.6.26-rc8 716preemptoff latency trace v1.1.5 on 2.6.26-rc8
@@ -850,12 +852,13 @@ Again, using this trace is much like the irqsoff and preemptoff
850tracers. 852tracers.
851 853
852 # echo preemptirqsoff > current_tracer 854 # echo preemptirqsoff > current_tracer
855 # echo latency-format > trace_options
853 # echo 0 > tracing_max_latency 856 # echo 0 > tracing_max_latency
854 # echo 1 > tracing_enabled 857 # echo 1 > tracing_enabled
855 # ls -ltr 858 # ls -ltr
856 [...] 859 [...]
857 # echo 0 > tracing_enabled 860 # echo 0 > tracing_enabled
858 # cat latency_trace 861 # cat trace
859# tracer: preemptirqsoff 862# tracer: preemptirqsoff
860# 863#
861preemptirqsoff latency trace v1.1.5 on 2.6.26-rc8 864preemptirqsoff latency trace v1.1.5 on 2.6.26-rc8
@@ -1012,11 +1015,12 @@ Instead of performing an 'ls', we will run 'sleep 1' under
1012'chrt' which changes the priority of the task. 1015'chrt' which changes the priority of the task.
1013 1016
1014 # echo wakeup > current_tracer 1017 # echo wakeup > current_tracer
1018 # echo latency-format > trace_options
1015 # echo 0 > tracing_max_latency 1019 # echo 0 > tracing_max_latency
1016 # echo 1 > tracing_enabled 1020 # echo 1 > tracing_enabled
1017 # chrt -f 5 sleep 1 1021 # chrt -f 5 sleep 1
1018 # echo 0 > tracing_enabled 1022 # echo 0 > tracing_enabled
1019 # cat latency_trace 1023 # cat trace
1020# tracer: wakeup 1024# tracer: wakeup
1021# 1025#
1022wakeup latency trace v1.1.5 on 2.6.26-rc8 1026wakeup latency trace v1.1.5 on 2.6.26-rc8
diff --git a/Documentation/trace/function-graph-fold.vim b/Documentation/trace/function-graph-fold.vim
new file mode 100644
index 000000000000..0544b504c8b0
--- /dev/null
+++ b/Documentation/trace/function-graph-fold.vim
@@ -0,0 +1,42 @@
1" Enable folding for ftrace function_graph traces.
2"
3" To use, :source this file while viewing a function_graph trace, or use vim's
4" -S option to load from the command-line together with a trace. You can then
5" use the usual vim fold commands, such as "za", to open and close nested
6" functions. While closed, a fold will show the total time taken for a call,
7" as would normally appear on the line with the closing brace. Folded
8" functions will not include finish_task_switch(), so folding should remain
9" relatively sane even through a context switch.
10"
11" Note that this will almost certainly only work well with a
12" single-CPU trace (e.g. trace-cmd report --cpu 1).
13
14function! FunctionGraphFoldExpr(lnum)
15 let line = getline(a:lnum)
16 if line[-1:] == '{'
17 if line =~ 'finish_task_switch() {$'
18 return '>1'
19 endif
20 return 'a1'
21 elseif line[-1:] == '}'
22 return 's1'
23 else
24 return '='
25 endif
26endfunction
27
28function! FunctionGraphFoldText()
29 let s = split(getline(v:foldstart), '|', 1)
30 if getline(v:foldend+1) =~ 'finish_task_switch() {$'
31 let s[2] = ' task switch '
32 else
33 let e = split(getline(v:foldend), '|', 1)
34 let s[2] = e[2]
35 endif
36 return join(s, '|')
37endfunction
38
39setlocal foldexpr=FunctionGraphFoldExpr(v:lnum)
40setlocal foldtext=FunctionGraphFoldText()
41setlocal foldcolumn=12
42setlocal foldmethod=expr
diff --git a/Documentation/trace/ring-buffer-design.txt b/Documentation/trace/ring-buffer-design.txt
new file mode 100644
index 000000000000..5b1d23d604c5
--- /dev/null
+++ b/Documentation/trace/ring-buffer-design.txt
@@ -0,0 +1,955 @@
1 Lockless Ring Buffer Design
2 ===========================
3
4Copyright 2009 Red Hat Inc.
5 Author: Steven Rostedt <srostedt@redhat.com>
6 License: The GNU Free Documentation License, Version 1.2
7 (dual licensed under the GPL v2)
8Reviewers: Mathieu Desnoyers, Huang Ying, Hidetoshi Seto,
9 and Frederic Weisbecker.
10
11
12Written for: 2.6.31
13
14Terminology used in this Document
15---------------------------------
16
17tail - where new writes happen in the ring buffer.
18
19head - where new reads happen in the ring buffer.
20
21producer - the task that writes into the ring buffer (same as writer)
22
23writer - same as producer
24
25consumer - the task that reads from the buffer (same as reader)
26
27reader - same as consumer.
28
29reader_page - A page outside the ring buffer used solely (for the most part)
30 by the reader.
31
32head_page - a pointer to the page that the reader will use next
33
34tail_page - a pointer to the page that will be written to next
35
 36commit_page - a pointer to the page with the last finished non-nested write.
37
38cmpxchg - hardware assisted atomic transaction that performs the following:
39
40 A = B iff previous A == C
41
42 R = cmpxchg(A, C, B) is saying that we replace A with B if and only if
43 current A is equal to C, and we put the old (current) A into R
44
 45 R gets the previous A regardless of whether A is updated with B or not.
 46
 47 To see if the update was successful, a compare of R == C may be used.
48
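In rough C, the semantics above amount to the following, with the whole
function executed as one indivisible step by the hardware (a sketch only,
not the real implementation):

 long cmpxchg_sketch(long *A, long C, long B)
 {
	long R = *A;		/* R always receives the previous value */

	if (R == C)
		*A = B;		/* update only if the old value matched C */
	return R;		/* caller checks R == C to see if it "won" */
 }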
49The Generic Ring Buffer
50-----------------------
51
52The ring buffer can be used in either an overwrite mode or in
53producer/consumer mode.
54
 55 Producer/consumer mode is where, if the producer were to fill up the
 56 buffer before the consumer could free up anything, the producer
 57 will stop writing to the buffer. This will lose the most recent events.
 58
 59 Overwrite mode is where, if the producer were to fill up the buffer
 60 before the consumer could free up anything, the producer will
 61 overwrite the older data. This will lose the oldest events.
62
 63No two writers can write at the same time (on the same per-CPU buffer),
 64but a writer may interrupt another writer; the interrupting writer must
 65finish writing before the previous writer may continue. This is very
 66important to the algorithm. The writers act like a "stack". The way
 67interrupts work enforces this behavior.
68
69
70 writer1 start
71 <preempted> writer2 start
72 <preempted> writer3 start
73 writer3 finishes
74 writer2 finishes
75 writer1 finishes
76
77This is very much like a writer being preempted by an interrupt and
78the interrupt doing a write as well.
79
 80Readers can happen at any time. But no two readers may run at the
 81same time, nor can a reader preempt/interrupt another reader. A reader
 82cannot preempt/interrupt a writer, but it may read/consume from the
 83buffer at the same time as a writer is writing; the reader must be
 84on another processor to do so. A reader may read on its own processor
 85and can be preempted by a writer.
 86
 87A writer can preempt a reader, but a reader cannot preempt a writer.
 88But a reader can read the buffer at the same time (on another processor)
 89as a writer.
90
 91The ring buffer is made up of a list of pages held together by a linked list.
92
93At initialization a reader page is allocated for the reader that is not
94part of the ring buffer.
95
96The head_page, tail_page and commit_page are all initialized to point
97to the same page.
98
99The reader page is initialized to have its next pointer pointing to
100the head page, and its previous pointer pointing to a page before
101the head page.
102
103The reader has its own page to use. At start up time, this page is
104allocated but is not attached to the list. When the reader wants
105to read from the buffer, if its page is empty (like it is on start up)
106it will swap its page with the head_page. The old reader page will
107become part of the ring buffer and the head_page will be removed.
108The page after the inserted page (old reader_page) will become the
109new head page.
110
111Once the new page is given to the reader, the reader could do what
112it wants with it, as long as a writer has left that page.
113
 114A sample of how the reader page is swapped: note this does not
 115show the head page in the buffer; it is for demonstrating a swap
 116only.
117
118 +------+
119 |reader| RING BUFFER
120 |page |
121 +------+
122 +---+ +---+ +---+
123 | |-->| |-->| |
124 | |<--| |<--| |
125 +---+ +---+ +---+
126 ^ | ^ |
127 | +-------------+ |
128 +-----------------+
129
130
131 +------+
132 |reader| RING BUFFER
133 |page |-------------------+
134 +------+ v
135 | +---+ +---+ +---+
136 | | |-->| |-->| |
137 | | |<--| |<--| |<-+
138 | +---+ +---+ +---+ |
139 | ^ | ^ | |
140 | | +-------------+ | |
141 | +-----------------+ |
142 +------------------------------------+
143
144 +------+
145 |reader| RING BUFFER
146 |page |-------------------+
147 +------+ <---------------+ v
148 | ^ +---+ +---+ +---+
149 | | | |-->| |-->| |
150 | | | | | |<--| |<-+
151 | | +---+ +---+ +---+ |
152 | | | ^ | |
153 | | +-------------+ | |
154 | +-----------------------------+ |
155 +------------------------------------+
156
157 +------+
158 |buffer| RING BUFFER
159 |page |-------------------+
160 +------+ <---------------+ v
161 | ^ +---+ +---+ +---+
162 | | | | | |-->| |
163 | | New | | | |<--| |<-+
164 | | Reader +---+ +---+ +---+ |
165 | | page ----^ | |
166 | | | |
167 | +-----------------------------+ |
168 +------------------------------------+
169
170
171
172It is possible that the page swapped is the commit page and the tail page,
173if what is in the ring buffer is less than what is held in a buffer page.
174
175
176 reader page commit page tail page
177 | | |
178 v | |
179 +---+ | |
180 | |<----------+ |
181 | |<------------------------+
182 | |------+
183 +---+ |
184 |
185 v
186 +---+ +---+ +---+ +---+
187<---| |--->| |--->| |--->| |--->
188--->| |<---| |<---| |<---| |<---
189 +---+ +---+ +---+ +---+
190
191This case is still valid for this algorithm.
192When the writer leaves the page, it simply goes into the ring buffer
193since the reader page still points to the next location in the ring
194buffer.
195
196
197The main pointers:
198
199 reader page - The page used solely by the reader and is not part
200 of the ring buffer (may be swapped in)
201
202 head page - the next page in the ring buffer that will be swapped
203 with the reader page.
204
205 tail page - the page where the next write will take place.
206
207 commit page - the page that last finished a write.
208
 209The commit page is only updated by the outermost writer in the
 210writer stack. A writer that preempts another writer will not move the
 211commit page.
212
213When data is written into the ring buffer, a position is reserved
214in the ring buffer and passed back to the writer. When the writer
215is finished writing data into that position, it commits the write.
216
 217Another write (or a read) may take place at any time during this
 218transaction. If another write happens, it must finish before the
 219previous write can continue.
220
221
222 Write reserve:
223
224 Buffer page
225 +---------+
226 |written |
227 +---------+ <--- given back to writer (current commit)
228 |reserved |
229 +---------+ <--- tail pointer
230 | empty |
231 +---------+
232
233 Write commit:
234
235 Buffer page
236 +---------+
237 |written |
238 +---------+
239 |written |
 240 +---------+ <--- next position for write (current commit)
241 | empty |
242 +---------+
243
244
245 If a write happens after the first reserve:
246
247 Buffer page
248 +---------+
249 |written |
250 +---------+ <-- current commit
251 |reserved |
252 +---------+ <--- given back to second writer
253 |reserved |
254 +---------+ <--- tail pointer
255
256 After second writer commits:
257
258
259 Buffer page
260 +---------+
261 |written |
262 +---------+ <--(last full commit)
263 |reserved |
264 +---------+
265 |pending |
266 |commit |
267 +---------+ <--- tail pointer
268
269 When the first writer commits:
270
271 Buffer page
272 +---------+
273 |written |
274 +---------+
275 |written |
276 +---------+
277 |written |
278 +---------+ <--(last full commit and tail pointer)
279
280
 281The commit pointer points to the last write location that was
 282committed without preempting another write. When a write that
 283preempted another write is committed, it only becomes a pending commit
 284and will not be a full commit until all writes have been committed.
285
286The commit page points to the page that has the last full commit.
287The tail page points to the page with the last write (before
288committing).
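
Roughly, this reserve/commit transaction is what a writer goes through
with the ring-buffer API (a sketch only; "buffer" and "struct my_data"
are illustrative and error handling is trimmed):

 struct my_data data = { /* ... */ };
 struct ring_buffer_event *event;

 event = ring_buffer_lock_reserve(buffer, sizeof(data));
 if (event) {
	memcpy(ring_buffer_event_data(event), &data, sizeof(data));
	ring_buffer_unlock_commit(buffer, event);	/* commit the write */
 }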
289
290The tail page is always equal to or after the commit page. It may
291be several pages ahead. If the tail page catches up to the commit
 292page then no more writes may take place (regardless of the mode
 293of the ring buffer: overwrite or producer/consumer).
294
 295The order of pages is:
296
297 head page
298 commit page
299 tail page
300
301Possible scenario:
302 tail page
303 head page commit page |
304 | | |
305 v v v
306 +---+ +---+ +---+ +---+
307<---| |--->| |--->| |--->| |--->
308--->| |<---| |<---| |<---| |<---
309 +---+ +---+ +---+ +---+
310
 311There is a special case where the head page is after the commit page
 312and possibly after the tail page. That is when the commit (and tail)
 313page has been swapped with the reader page. This is because the head page
 314is always part of the ring buffer, but the reader page is not. Whenever
 315less than a full page has been committed inside the ring buffer and a
 316reader swaps out a page, it will be swapping out the commit page.
317
318
319 reader page commit page tail page
320 | | |
321 v | |
322 +---+ | |
323 | |<----------+ |
324 | |<------------------------+
325 | |------+
326 +---+ |
327 |
328 v
329 +---+ +---+ +---+ +---+
330<---| |--->| |--->| |--->| |--->
331--->| |<---| |<---| |<---| |<---
332 +---+ +---+ +---+ +---+
333 ^
334 |
335 head page
336
337
338In this case, the head page will not move when the tail and commit
339move back into the ring buffer.
340
 341The reader cannot swap a page into the ring buffer if the commit page
 342is still on that page. If the reader meets the last commit (a real commit,
 343not a pending or reserved one), then there is nothing more to read.
 344The buffer is considered empty until another full commit finishes.
345
346When the tail meets the head page, if the buffer is in overwrite mode,
347the head page will be pushed ahead one. If the buffer is in producer/consumer
348mode, the write will fail.
349
350Overwrite mode:
351
352 tail page
353 |
354 v
355 +---+ +---+ +---+ +---+
356<---| |--->| |--->| |--->| |--->
357--->| |<---| |<---| |<---| |<---
358 +---+ +---+ +---+ +---+
359 ^
360 |
361 head page
362
363
364 tail page
365 |
366 v
367 +---+ +---+ +---+ +---+
368<---| |--->| |--->| |--->| |--->
369--->| |<---| |<---| |<---| |<---
370 +---+ +---+ +---+ +---+
371 ^
372 |
373 head page
374
375
376 tail page
377 |
378 v
379 +---+ +---+ +---+ +---+
380<---| |--->| |--->| |--->| |--->
381--->| |<---| |<---| |<---| |<---
382 +---+ +---+ +---+ +---+
383 ^
384 |
385 head page
386
387Note, the reader page will still point to the previous head page.
388But when a swap takes place, it will use the most recent head page.
389
390
391Making the Ring Buffer Lockless:
392--------------------------------
393
394The main idea behind the lockless algorithm is to combine the moving
395of the head_page pointer with the swapping of pages with the reader.
396State flags are placed inside the pointer to the page. To do this,
 397each page must be aligned in memory by 4 bytes. This will allow the 2
 398least significant bits of the address to be used as flags, since
 399they will always be zero for the address. To get the address,
 400simply mask out the flags.
401
402 MASK = ~3
403
404 address & MASK
405
406Two flags will be kept by these two bits:
407
408 HEADER - the page being pointed to is a head page
409
410 UPDATE - the page being pointed to is being updated by a writer
411 and was or is about to be a head page.
412
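A sketch of how such a flagged pointer can be handled (the names and
helpers below are illustrative, not the kernel's exact code):

 #define RB_FLAG_MASK	3UL	/* low two bits hold HEADER/UPDATE  */
 #define RB_PAGE_HEAD	1UL	/* next page is the head page       */
 #define RB_PAGE_UPDATE	2UL	/* a writer is moving the head page */

 /* Strip the flag bits to recover the real page address. */
 static struct buffer_page *rb_ptr_to_page(unsigned long val)
 {
	return (struct buffer_page *)(val & ~RB_FLAG_MASK);
 }

 /* Writer: atomically turn a HEADER pointer into an UPDATE pointer. */
 static int rb_head_to_update(unsigned long *ptr, struct buffer_page *next)
 {
	unsigned long old = (unsigned long)next | RB_PAGE_HEAD;
	unsigned long new = (unsigned long)next | RB_PAGE_UPDATE;

	return cmpxchg(ptr, old, new) == old;	/* nonzero if we won */
 }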
413
414 reader page
415 |
416 v
417 +---+
418 | |------+
419 +---+ |
420 |
421 v
422 +---+ +---+ +---+ +---+
423<---| |--->| |-H->| |--->| |--->
424--->| |<---| |<---| |<---| |<---
425 +---+ +---+ +---+ +---+
426
427
 428The above pointer "-H->" would have the HEADER flag set. That is,
 429the page it points to is the next page to be swapped out by the reader.
 430This pointer means the next page is the head page.
431
432When the tail page meets the head pointer, it will use cmpxchg to
433change the pointer to the UPDATE state:
434
435
436 tail page
437 |
438 v
439 +---+ +---+ +---+ +---+
440<---| |--->| |-H->| |--->| |--->
441--->| |<---| |<---| |<---| |<---
442 +---+ +---+ +---+ +---+
443
444 tail page
445 |
446 v
447 +---+ +---+ +---+ +---+
448<---| |--->| |-U->| |--->| |--->
449--->| |<---| |<---| |<---| |<---
450 +---+ +---+ +---+ +---+
451
452"-U->" represents a pointer in the UPDATE state.
453
454Any access to the reader will need to take some sort of lock to serialize
455the readers. But the writers will never take a lock to write to the
456ring buffer. This means we only need to worry about a single reader,
457and writes only preempt in "stack" formation.
458
459When the reader tries to swap the page with the ring buffer, it
460will also use cmpxchg. If the flag bit in the pointer to the
461head page does not have the HEADER flag set, the compare will fail
462and the reader will need to look for the new head page and try again.
463Note, the UPDATE and HEADER flags are never set at the same time.
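A minimal sketch of the swap attempt, reusing the illustrative names from
the earlier sketch (struct cpu_buffer, its head_page and reader_page
fields, and rb_page_before_head() are assumptions made for this sketch;
cmpxchg() is the kernel's compare-and-exchange primitive):

	/*
	 * Illustrative only.  The reader finds the page whose next pointer
	 * carries the HEADER flag and tries to redirect that pointer to the
	 * reader page.  A failed cmpxchg means a writer moved the head page,
	 * and the reader must look up the new head page and try again.
	 */
	static int rb_reader_try_swap(struct cpu_buffer *cpu_buffer)
	{
		struct buffer_page *prev = rb_page_before_head(cpu_buffer);
		struct buffer_page *head = cpu_buffer->head_page;
		unsigned long old = (unsigned long)head | RB_PAGE_HEAD;
		unsigned long new = (unsigned long)cpu_buffer->reader_page;

		/* the new pointer deliberately has no HEADER flag set */
		return cmpxchg(&prev->next, old, new) == old;
	}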
464
465The reader swaps the reader page as follows:
466
467 +------+
468 |reader| RING BUFFER
469 |page |
470 +------+
471 +---+ +---+ +---+
472 | |--->| |--->| |
473 | |<---| |<---| |
474 +---+ +---+ +---+
475 ^ | ^ |
476 | +---------------+ |
477 +-----H-------------+
478
479The reader sets the reader page's next pointer, with the HEADER flag set,
480to the page after the head page.
481
482
483 +------+
484 |reader| RING BUFFER
485 |page |-------H-----------+
486 +------+ v
487 | +---+ +---+ +---+
488 | | |--->| |--->| |
489 | | |<---| |<---| |<-+
490 | +---+ +---+ +---+ |
491 | ^ | ^ | |
492 | | +---------------+ | |
493 | +-----H-------------+ |
494 +--------------------------------------+
495
496It does a cmpxchg with the pointer to the previous head page to make it
497point to the reader page. Note that the new pointer does not have the HEADER
498flag set. This action atomically moves the head page forward.
499
500 +------+
501 |reader| RING BUFFER
502 |page |-------H-----------+
503 +------+ v
504 | ^ +---+ +---+ +---+
505 | | | |-->| |-->| |
506 | | | |<--| |<--| |<-+
507 | | +---+ +---+ +---+ |
508 | | | ^ | |
509 | | +-------------+ | |
510 | +-----------------------------+ |
511 +------------------------------------+
512
513After the new head page is set, the previous pointer of the head page is
514updated to the reader page.
515
516 +------+
517 |reader| RING BUFFER
518 |page |-------H-----------+
519 +------+ <---------------+ v
520 | ^ +---+ +---+ +---+
521 | | | |-->| |-->| |
522 | | | | | |<--| |<-+
523 | | +---+ +---+ +---+ |
524 | | | ^ | |
525 | | +-------------+ | |
526 | +-----------------------------+ |
527 +------------------------------------+
528
529 +------+
530 |buffer| RING BUFFER
531 |page |-------H-----------+ <--- New head page
532 +------+ <---------------+ v
533 | ^ +---+ +---+ +---+
534 | | | | | |-->| |
535 | | New | | | |<--| |<-+
536 | | Reader +---+ +---+ +---+ |
537 | | page ----^ | |
538 | | | |
539 | +-----------------------------+ |
540 +------------------------------------+
541
542Another important point: the page that the reader page points back to
543by its previous pointer (the one that now points to the new head page)
544never points back to the reader page. That is because the reader page is
545not part of the ring buffer. Traversing the ring buffer via the next pointers
546will always stay in the ring buffer. Traversing the ring buffer via the
547prev pointers may not.
548
549Note, the way to determine a reader page is simply by examining the previous
550pointer of the page. If the next pointer of the previous page does not
551point back to the original page, then the original page is a reader page:
552
553
554 +--------+
555 | reader | next +----+
556 | page |-------->| |<====== (buffer page)
557 +--------+ +----+
558 | | ^
559 | v | next
560 prev | +----+
561 +------------->| |
562 +----+
563
564The way the head page moves forward:
565
566When the tail page meets the head page and the buffer is in overwrite mode
567and more writes take place, the head page must be moved forward before the
568writer may move the tail page. To do this, the writer performs a cmpxchg
569to convert the pointer to the head page from the HEADER flag to the
570UPDATE flag. Once this is done, the reader will
571not be able to swap the head page from the buffer, nor will it be able to
572move the head page, until the writer is finished with the move.
573
574This eliminates any races that the reader can have on the writer. The reader
575must spin, and this is why the reader can not preempt the writer.
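The diagrams that follow illustrate the move step by step. As a rough C
sketch under the same illustrative names as before (this is not the actual
kernel function), the push looks something like:

	/*
	 * Illustrative only.  'prev' is the page whose next pointer carries
	 * the HEADER flag, 'head' is the current head page, and 'new_head'
	 * is the page after it.
	 */
	static void rb_push_head(struct buffer_page *prev,
				 struct buffer_page *head,
				 struct buffer_page *new_head)
	{
		unsigned long old = (unsigned long)head | RB_PAGE_HEAD;

		/* HEADER -> UPDATE: from here on the reader can neither swap
		 * nor move this head page until the move is finished */
		if (cmpxchg(&prev->next, old,
			    (unsigned long)head | RB_PAGE_UPDATE) != old)
			return;		/* another writer is already moving it */

		/* make the following page the new head page */
		cmpxchg(&head->next, (unsigned long)new_head,
			(unsigned long)new_head | RB_PAGE_HEAD);

		/* UPDATE -> NORMAL: only the writer that set UPDATE does this */
		cmpxchg(&prev->next, (unsigned long)head | RB_PAGE_UPDATE,
			(unsigned long)head);
	}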
576
577 tail page
578 |
579 v
580 +---+ +---+ +---+ +---+
581<---| |--->| |-H->| |--->| |--->
582--->| |<---| |<---| |<---| |<---
583 +---+ +---+ +---+ +---+
584
585 tail page
586 |
587 v
588 +---+ +---+ +---+ +---+
589<---| |--->| |-U->| |--->| |--->
590--->| |<---| |<---| |<---| |<---
591 +---+ +---+ +---+ +---+
592
593The following page will be made into the new head page.
594
595 tail page
596 |
597 v
598 +---+ +---+ +---+ +---+
599<---| |--->| |-U->| |-H->| |--->
600--->| |<---| |<---| |<---| |<---
601 +---+ +---+ +---+ +---+
602
603After the new head page has been set, we can set the old head page
604pointer back to NORMAL.
605
606 tail page
607 |
608 v
609 +---+ +---+ +---+ +---+
610<---| |--->| |--->| |-H->| |--->
611--->| |<---| |<---| |<---| |<---
612 +---+ +---+ +---+ +---+
613
614After the head page has been moved, the tail page may now move forward.
615
616 tail page
617 |
618 v
619 +---+ +---+ +---+ +---+
620<---| |--->| |--->| |-H->| |--->
621--->| |<---| |<---| |<---| |<---
622 +---+ +---+ +---+ +---+
623
624
625The above are the trivial updates. Now for the more complex scenarios.
626
627
628As stated before, if enough writes preempt the first write, the
629tail page may make it all the way around the buffer and meet the commit
630page. At this time, we must start dropping writes (usually with some kind
631of warning to the user). But what happens if the commit was still on the
632reader page? The commit page is then not part of the ring buffer, and the
633tail page must account for this.
634
635
636 reader page commit page
637 | |
638 v |
639 +---+ |
640 | |<----------+
641 | |
642 | |------+
643 +---+ |
644 |
645 v
646 +---+ +---+ +---+ +---+
647<---| |--->| |-H->| |--->| |--->
648--->| |<---| |<---| |<---| |<---
649 +---+ +---+ +---+ +---+
650 ^
651 |
652 tail page
653
654If the tail page were to simply push the head page forward, the commit,
655when it leaves the reader page, would not be pointing to the correct page.
656
657The solution to this is to test if the commit page is on the reader page
658before pushing the head page. If it is, then it can be assumed that the
659tail page wrapped the buffer, and we must drop new writes.
660
661This is not a race condition, because the commit page can only be moved
662by the outermost writer (the writer that was preempted).
663This means that the commit will not move while a writer is moving the
664tail page. The reader can not swap the reader page if it is also being
665used as the commit page. The reader can simply check that the commit
666is off the reader page. Once the commit page leaves the reader page
667it will never go back on it unless a reader does another swap with the
668buffer page that is also the commit page.
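A sketch of that check, with the same illustrative names as before (the
commit_page and reader_page fields are stand-ins for the per-cpu buffer
state):

	/*
	 * Illustrative only.  Before pushing the head page, give up if the
	 * commit page is still the reader page: the tail has wrapped the
	 * entire buffer and this write must be dropped.
	 */
	static int rb_tail_wrapped(struct cpu_buffer *cpu_buffer)
	{
		return cpu_buffer->commit_page == cpu_buffer->reader_page;
	}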
669
670
671Nested writes
672-------------
673
674When pushing the tail page forward, we must first push forward
675the head page if the head page is the next page. If the head page
676is not the next page, the tail page is simply updated with a cmpxchg.
677
678Only writers move the tail page. This must be done atomically to protect
679against nested writers.
680
681 temp_page = tail_page
682 next_page = temp_page->next
683 cmpxchg(tail_page, temp_page, next_page)
684
685The above will update the tail page if it is still pointing to the expected
686page. If this fails, a nested write pushed it forward, and the current write
687does not need to push it.
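In C, with the same illustrative names, the move is a single
compare-and-exchange whose failure can simply be ignored (the diagrams
below show the nested case):

	/*
	 * Illustrative only.  If the cmpxchg fails, a nested writer already
	 * advanced the tail page, which is exactly the state we wanted.
	 */
	static void rb_advance_tail(struct cpu_buffer *cpu_buffer)
	{
		struct buffer_page *temp_page = cpu_buffer->tail_page;
		struct buffer_page *next_page = rb_page_ptr(temp_page->next);

		cmpxchg(&cpu_buffer->tail_page, temp_page, next_page);
	}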
688
689
690 temp page
691 |
692 v
693 tail page
694 |
695 v
696 +---+ +---+ +---+ +---+
697<---| |--->| |--->| |--->| |--->
698--->| |<---| |<---| |<---| |<---
699 +---+ +---+ +---+ +---+
700
701Nested write comes in and moves the tail page forward:
702
703 tail page (moved by nested writer)
704 temp page |
705 | |
706 v v
707 +---+ +---+ +---+ +---+
708<---| |--->| |--->| |--->| |--->
709--->| |<---| |<---| |<---| |<---
710 +---+ +---+ +---+ +---+
711
712The above would fail the cmpxchg, but since the tail page has already
713been moved forward, the writer will just try again to reserve storage
714on the new tail page.
715
716But the moving of the head page is a bit more complex.
717
718 tail page
719 |
720 v
721 +---+ +---+ +---+ +---+
722<---| |--->| |-H->| |--->| |--->
723--->| |<---| |<---| |<---| |<---
724 +---+ +---+ +---+ +---+
725
726The write converts the head page pointer to UPDATE.
727
728 tail page
729 |
730 v
731 +---+ +---+ +---+ +---+
732<---| |--->| |-U->| |--->| |--->
733--->| |<---| |<---| |<---| |<---
734 +---+ +---+ +---+ +---+
735
736But a nested writer may preempt here. It will see that the next
737page is a head page, but it will also detect that it is nested
738and will save that information. The detection is the
739fact that it sees the UPDATE flag instead of a HEADER or NORMAL
740pointer.
741
742The nested writer will set the new head page pointer.
743
744 tail page
745 |
746 v
747 +---+ +---+ +---+ +---+
748<---| |--->| |-U->| |-H->| |--->
749--->| |<---| |<---| |<---| |<---
750 +---+ +---+ +---+ +---+
751
752But it will not reset the update back to normal. Only the writer
753that converted a pointer from HEAD to UPDATE will convert it back
754to NORMAL.
755
756 tail page
757 |
758 v
759 +---+ +---+ +---+ +---+
760<---| |--->| |-U->| |-H->| |--->
761--->| |<---| |<---| |<---| |<---
762 +---+ +---+ +---+ +---+
763
764After the nested writer finishes, the outermost writer will convert
765the UPDATE pointer to NORMAL.
766
767
768 tail page
769 |
770 v
771 +---+ +---+ +---+ +---+
772<---| |--->| |--->| |-H->| |--->
773--->| |<---| |<---| |<---| |<---
774 +---+ +---+ +---+ +---+
775
776
777It can be even more complex if several nested writes came in and moved
778the tail page ahead several pages:
779
780
781(first writer)
782
783 tail page
784 |
785 v
786 +---+ +---+ +---+ +---+
787<---| |--->| |-H->| |--->| |--->
788--->| |<---| |<---| |<---| |<---
789 +---+ +---+ +---+ +---+
790
791The write converts the head page pointer to UPDATE.
792
793 tail page
794 |
795 v
796 +---+ +---+ +---+ +---+
797<---| |--->| |-U->| |--->| |--->
798--->| |<---| |<---| |<---| |<---
799 +---+ +---+ +---+ +---+
800
801The next writer comes in, sees the UPDATE flag, and sets up the new
802head page.
803
804(second writer)
805
806 tail page
807 |
808 v
809 +---+ +---+ +---+ +---+
810<---| |--->| |-U->| |-H->| |--->
811--->| |<---| |<---| |<---| |<---
812 +---+ +---+ +---+ +---+
813
814The nested writer moves the tail page forward, but does not set the old
815update page to NORMAL because it is not the outermost writer.
816
817 tail page
818 |
819 v
820 +---+ +---+ +---+ +---+
821<---| |--->| |-U->| |-H->| |--->
822--->| |<---| |<---| |<---| |<---
823 +---+ +---+ +---+ +---+
824
825Another writer preempts and sees that the page after the tail page is a head page.
826It changes it from HEAD to UPDATE.
827
828(third writer)
829
830 tail page
831 |
832 v
833 +---+ +---+ +---+ +---+
834<---| |--->| |-U->| |-U->| |--->
835--->| |<---| |<---| |<---| |<---
836 +---+ +---+ +---+ +---+
837
838The writer will move the head page forward:
839
840
841(third writer)
842
843 tail page
844 |
845 v
846 +---+ +---+ +---+ +---+
847<---| |--->| |-U->| |-U->| |-H->
848--->| |<---| |<---| |<---| |<---
849 +---+ +---+ +---+ +---+
850
851But since the third writer did change the HEAD flag to UPDATE, it
852will convert it back to NORMAL:
853
854
855(third writer)
856
857 tail page
858 |
859 v
860 +---+ +---+ +---+ +---+
861<---| |--->| |-U->| |--->| |-H->
862--->| |<---| |<---| |<---| |<---
863 +---+ +---+ +---+ +---+
864
865
866Then it will move the tail page, and return to the second writer.
867
868
869(second writer)
870
871 tail page
872 |
873 v
874 +---+ +---+ +---+ +---+
875<---| |--->| |-U->| |--->| |-H->
876--->| |<---| |<---| |<---| |<---
877 +---+ +---+ +---+ +---+
878
879
880The second writer will fail to move the tail page because it was already
881moved, so it will try again and add its data to the new tail page.
882It will return to the first writer.
883
884
885(first writer)
886
887 tail page
888 |
889 v
890 +---+ +---+ +---+ +---+
891<---| |--->| |-U->| |--->| |-H->
892--->| |<---| |<---| |<---| |<---
893 +---+ +---+ +---+ +---+
894
895The first writer can not atomically test whether the tail page moved
896while it updates the HEAD page. It will then update the head page to
897what it thinks is the new head page.
898
899
900(first writer)
901
902 tail page
903 |
904 v
905 +---+ +---+ +---+ +---+
906<---| |--->| |-U->| |-H->| |-H->
907--->| |<---| |<---| |<---| |<---
908 +---+ +---+ +---+ +---+
909
910Since the cmpxchg returns the old value of the pointer, the first writer
911will see that it succeeded in updating the pointer from NORMAL to HEAD.
912But as we can see, this is not good enough. It must also check to see
913if the tail page is either where it used to be or on the next page:
914
915
916(first writer)
917
918 A B tail page
919 | | |
920 v v v
921 +---+ +---+ +---+ +---+
922<---| |--->| |-U->| |-H->| |-H->
923--->| |<---| |<---| |<---| |<---
924 +---+ +---+ +---+ +---+
925
926If the tail page is neither A nor B, then it must reset the
927pointer back to NORMAL. Because it only needs to worry about
928nested writers, it only needs to check this after setting the HEAD page.
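A sketch of that check and reset, using the same illustrative names
(page_A and page_B are the two positions marked in the diagram below;
'prev' and 'new_head' describe the pointer the first writer just flipped
from NORMAL to HEAD):

	/*
	 * Illustrative only.  If the tail page is somewhere other than A
	 * or B, a nested writer has already moved the head further on and
	 * the HEAD flag set by the first writer must be undone.
	 */
	static void rb_fixup_head(struct cpu_buffer *cpu_buffer,
				  struct buffer_page *prev,
				  struct buffer_page *new_head,
				  struct buffer_page *page_A,
				  struct buffer_page *page_B)
	{
		if (cpu_buffer->tail_page == page_A ||
		    cpu_buffer->tail_page == page_B)
			return;		/* the HEAD flag we set is correct */

		/* undo: flip the pointer from HEAD back to NORMAL */
		cmpxchg(&prev->next, (unsigned long)new_head | RB_PAGE_HEAD,
			(unsigned long)new_head);
	}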
929
930
931(first writer)
932
933 A B tail page
934 | | |
935 v v v
936 +---+ +---+ +---+ +---+
937<---| |--->| |-U->| |--->| |-H->
938--->| |<---| |<---| |<---| |<---
939 +---+ +---+ +---+ +---+
940
941Now the writer can update the head page. This is also why the head page must
942remain in UPDATE and only be reset by the outermost writer. This prevents
943the reader from seeing an incorrect head page.
944
945
946(first writer)
947
948 A B tail page
949 | | |
950 v v v
951 +---+ +---+ +---+ +---+
952<---| |--->| |--->| |--->| |-H->
953--->| |<---| |<---| |<---| |<---
954 +---+ +---+ +---+ +---+
955
diff --git a/MAINTAINERS b/MAINTAINERS
index 8dca9d89c6c1..989ff1149390 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -439,7 +439,7 @@ F: drivers/hwmon/ams/
439AMSO1100 RNIC DRIVER 439AMSO1100 RNIC DRIVER
440M: Tom Tucker <tom@opengridcomputing.com> 440M: Tom Tucker <tom@opengridcomputing.com>
441M: Steve Wise <swise@opengridcomputing.com> 441M: Steve Wise <swise@opengridcomputing.com>
442L: general@lists.openfabrics.org 442L: linux-rdma@vger.kernel.org
443S: Maintained 443S: Maintained
444F: drivers/infiniband/hw/amso1100/ 444F: drivers/infiniband/hw/amso1100/
445 445
@@ -1494,7 +1494,7 @@ F: drivers/net/cxgb3/
1494 1494
1495CXGB3 IWARP RNIC DRIVER (IW_CXGB3) 1495CXGB3 IWARP RNIC DRIVER (IW_CXGB3)
1496M: Steve Wise <swise@chelsio.com> 1496M: Steve Wise <swise@chelsio.com>
1497L: general@lists.openfabrics.org 1497L: linux-rdma@vger.kernel.org
1498W: http://www.openfabrics.org 1498W: http://www.openfabrics.org
1499S: Supported 1499S: Supported
1500F: drivers/infiniband/hw/cxgb3/ 1500F: drivers/infiniband/hw/cxgb3/
@@ -1868,7 +1868,7 @@ F: fs/efs/
1868EHCA (IBM GX bus InfiniBand adapter) DRIVER 1868EHCA (IBM GX bus InfiniBand adapter) DRIVER
1869M: Hoang-Nam Nguyen <hnguyen@de.ibm.com> 1869M: Hoang-Nam Nguyen <hnguyen@de.ibm.com>
1870M: Christoph Raisch <raisch@de.ibm.com> 1870M: Christoph Raisch <raisch@de.ibm.com>
1871L: general@lists.openfabrics.org 1871L: linux-rdma@vger.kernel.org
1872S: Supported 1872S: Supported
1873F: drivers/infiniband/hw/ehca/ 1873F: drivers/infiniband/hw/ehca/
1874 1874
@@ -2552,7 +2552,7 @@ INFINIBAND SUBSYSTEM
2552M: Roland Dreier <rolandd@cisco.com> 2552M: Roland Dreier <rolandd@cisco.com>
2553M: Sean Hefty <sean.hefty@intel.com> 2553M: Sean Hefty <sean.hefty@intel.com>
2554M: Hal Rosenstock <hal.rosenstock@gmail.com> 2554M: Hal Rosenstock <hal.rosenstock@gmail.com>
2555L: general@lists.openfabrics.org (moderated for non-subscribers) 2555L: linux-rdma@vger.kernel.org
2556W: http://www.openib.org/ 2556W: http://www.openib.org/
2557T: git git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband.git 2557T: git git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband.git
2558S: Supported 2558S: Supported
@@ -2729,7 +2729,7 @@ F: drivers/net/ipg.c
2729 2729
2730IPATH DRIVER 2730IPATH DRIVER
2731M: Ralph Campbell <infinipath@qlogic.com> 2731M: Ralph Campbell <infinipath@qlogic.com>
2732L: general@lists.openfabrics.org 2732L: linux-rdma@vger.kernel.org
2733T: git git://git.qlogic.com/ipath-linux-2.6 2733T: git git://git.qlogic.com/ipath-linux-2.6
2734S: Supported 2734S: Supported
2735F: drivers/infiniband/hw/ipath/ 2735F: drivers/infiniband/hw/ipath/
@@ -3485,7 +3485,7 @@ F: drivers/scsi/NCR_D700.*
3485NETEFFECT IWARP RNIC DRIVER (IW_NES) 3485NETEFFECT IWARP RNIC DRIVER (IW_NES)
3486M: Faisal Latif <faisal.latif@intel.com> 3486M: Faisal Latif <faisal.latif@intel.com>
3487M: Chien Tung <chien.tin.tung@intel.com> 3487M: Chien Tung <chien.tin.tung@intel.com>
3488L: general@lists.openfabrics.org 3488L: linux-rdma@vger.kernel.org
3489W: http://www.neteffect.com 3489W: http://www.neteffect.com
3490S: Supported 3490S: Supported
3491F: drivers/infiniband/hw/nes/ 3491F: drivers/infiniband/hw/nes/
diff --git a/Makefile b/Makefile
index 25c615e57302..60de4ef31254 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 2 1VERSION = 2
2PATCHLEVEL = 6 2PATCHLEVEL = 6
3SUBLEVEL = 31 3SUBLEVEL = 31
4EXTRAVERSION = -rc8 4EXTRAVERSION =
5NAME = Man-Eating Seals of Antiquity 5NAME = Man-Eating Seals of Antiquity
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/Kconfig b/arch/Kconfig
index 99193b160232..beea3ccebb5e 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -30,6 +30,18 @@ config OPROFILE_IBS
30 30
31 If unsure, say N. 31 If unsure, say N.
32 32
33config OPROFILE_EVENT_MULTIPLEX
34 bool "OProfile multiplexing support (EXPERIMENTAL)"
35 default n
36 depends on OPROFILE && X86
37 help
38 The number of hardware counters is limited. The multiplexing
39 feature enables OProfile to gather more events than counters
40 are provided by the hardware. This is realized by switching
41 between events at an user specified time interval.
42
43 If unsure, say N.
44
33config HAVE_OPROFILE 45config HAVE_OPROFILE
34 bool 46 bool
35 47
diff --git a/arch/alpha/include/asm/thread_info.h b/arch/alpha/include/asm/thread_info.h
index 60c83abfde70..5076a8860b18 100644
--- a/arch/alpha/include/asm/thread_info.h
+++ b/arch/alpha/include/asm/thread_info.h
@@ -75,6 +75,7 @@ register struct thread_info *__current_thread_info __asm__("$8");
75#define TIF_UAC_SIGBUS 7 75#define TIF_UAC_SIGBUS 7
76#define TIF_MEMDIE 8 76#define TIF_MEMDIE 8
77#define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal */ 77#define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal */
78#define TIF_NOTIFY_RESUME 10 /* callback before returning to user */
78#define TIF_FREEZE 16 /* is freezing for suspend */ 79#define TIF_FREEZE 16 /* is freezing for suspend */
79 80
80#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) 81#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
@@ -82,10 +83,12 @@ register struct thread_info *__current_thread_info __asm__("$8");
82#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) 83#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
83#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) 84#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
84#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) 85#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
86#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
85#define _TIF_FREEZE (1<<TIF_FREEZE) 87#define _TIF_FREEZE (1<<TIF_FREEZE)
86 88
87/* Work to do on interrupt/exception return. */ 89/* Work to do on interrupt/exception return. */
88#define _TIF_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED) 90#define _TIF_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
91 _TIF_NOTIFY_RESUME)
89 92
90/* Work to do on any return to userspace. */ 93/* Work to do on any return to userspace. */
91#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK \ 94#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK \
diff --git a/arch/alpha/kernel/signal.c b/arch/alpha/kernel/signal.c
index df65eaa84c4c..0932dbb1ef8e 100644
--- a/arch/alpha/kernel/signal.c
+++ b/arch/alpha/kernel/signal.c
@@ -20,6 +20,7 @@
20#include <linux/binfmts.h> 20#include <linux/binfmts.h>
21#include <linux/bitops.h> 21#include <linux/bitops.h>
22#include <linux/syscalls.h> 22#include <linux/syscalls.h>
23#include <linux/tracehook.h>
23 24
24#include <asm/uaccess.h> 25#include <asm/uaccess.h>
25#include <asm/sigcontext.h> 26#include <asm/sigcontext.h>
@@ -683,4 +684,11 @@ do_notify_resume(struct pt_regs *regs, struct switch_stack *sw,
683{ 684{
684 if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)) 685 if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
685 do_signal(regs, sw, r0, r19); 686 do_signal(regs, sw, r0, r19);
687
688 if (thread_info_flags & _TIF_NOTIFY_RESUME) {
689 clear_thread_flag(TIF_NOTIFY_RESUME);
690 tracehook_notify_resume(regs);
691 if (current->replacement_session_keyring)
692 key_replace_session_keyring();
693 }
686} 694}
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 73394e50cbca..d3a39b1e6c0f 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -130,11 +130,13 @@ extern void vfp_sync_state(struct thread_info *thread);
130 * TIF_SYSCALL_TRACE - syscall trace active 130 * TIF_SYSCALL_TRACE - syscall trace active
131 * TIF_SIGPENDING - signal pending 131 * TIF_SIGPENDING - signal pending
132 * TIF_NEED_RESCHED - rescheduling necessary 132 * TIF_NEED_RESCHED - rescheduling necessary
133 * TIF_NOTIFY_RESUME - callback before returning to user
133 * TIF_USEDFPU - FPU was used by this task this quantum (SMP) 134 * TIF_USEDFPU - FPU was used by this task this quantum (SMP)
134 * TIF_POLLING_NRFLAG - true if poll_idle() is polling TIF_NEED_RESCHED 135 * TIF_POLLING_NRFLAG - true if poll_idle() is polling TIF_NEED_RESCHED
135 */ 136 */
136#define TIF_SIGPENDING 0 137#define TIF_SIGPENDING 0
137#define TIF_NEED_RESCHED 1 138#define TIF_NEED_RESCHED 1
139#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
138#define TIF_SYSCALL_TRACE 8 140#define TIF_SYSCALL_TRACE 8
139#define TIF_POLLING_NRFLAG 16 141#define TIF_POLLING_NRFLAG 16
140#define TIF_USING_IWMMXT 17 142#define TIF_USING_IWMMXT 17
@@ -143,6 +145,7 @@ extern void vfp_sync_state(struct thread_info *thread);
143 145
144#define _TIF_SIGPENDING (1 << TIF_SIGPENDING) 146#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
145#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) 147#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
148#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
146#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) 149#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
147#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) 150#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
148#define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT) 151#define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 8c3de1a350b5..7813ab782fda 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -51,7 +51,7 @@ fast_work_pending:
51work_pending: 51work_pending:
52 tst r1, #_TIF_NEED_RESCHED 52 tst r1, #_TIF_NEED_RESCHED
53 bne work_resched 53 bne work_resched
54 tst r1, #_TIF_SIGPENDING 54 tst r1, #_TIF_SIGPENDING|_TIF_NOTIFY_RESUME
55 beq no_work_pending 55 beq no_work_pending
56 mov r0, sp @ 'regs' 56 mov r0, sp @ 'regs'
57 mov r2, why @ 'syscall' 57 mov r2, why @ 'syscall'
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index f6bc5d442782..b76fe06d92e7 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -12,6 +12,7 @@
12#include <linux/personality.h> 12#include <linux/personality.h>
13#include <linux/freezer.h> 13#include <linux/freezer.h>
14#include <linux/uaccess.h> 14#include <linux/uaccess.h>
15#include <linux/tracehook.h>
15 16
16#include <asm/elf.h> 17#include <asm/elf.h>
17#include <asm/cacheflush.h> 18#include <asm/cacheflush.h>
@@ -707,4 +708,11 @@ do_notify_resume(struct pt_regs *regs, unsigned int thread_flags, int syscall)
707{ 708{
708 if (thread_flags & _TIF_SIGPENDING) 709 if (thread_flags & _TIF_SIGPENDING)
709 do_signal(&current->blocked, regs, syscall); 710 do_signal(&current->blocked, regs, syscall);
711
712 if (thread_flags & _TIF_NOTIFY_RESUME) {
713 clear_thread_flag(TIF_NOTIFY_RESUME);
714 tracehook_notify_resume(regs);
715 if (current->replacement_session_keyring)
716 key_replace_session_keyring();
717 }
710} 718}
diff --git a/arch/arm/mach-omap2/mcbsp.c b/arch/arm/mach-omap2/mcbsp.c
index 99b6e1546311..0447d26d454b 100644
--- a/arch/arm/mach-omap2/mcbsp.c
+++ b/arch/arm/mach-omap2/mcbsp.c
@@ -128,6 +128,7 @@ static struct omap_mcbsp_platform_data omap34xx_mcbsp_pdata[] = {
128 .rx_irq = INT_24XX_MCBSP1_IRQ_RX, 128 .rx_irq = INT_24XX_MCBSP1_IRQ_RX,
129 .tx_irq = INT_24XX_MCBSP1_IRQ_TX, 129 .tx_irq = INT_24XX_MCBSP1_IRQ_TX,
130 .ops = &omap2_mcbsp_ops, 130 .ops = &omap2_mcbsp_ops,
131 .buffer_size = 0x6F,
131 }, 132 },
132 { 133 {
133 .phys_base = OMAP34XX_MCBSP2_BASE, 134 .phys_base = OMAP34XX_MCBSP2_BASE,
@@ -136,6 +137,7 @@ static struct omap_mcbsp_platform_data omap34xx_mcbsp_pdata[] = {
136 .rx_irq = INT_24XX_MCBSP2_IRQ_RX, 137 .rx_irq = INT_24XX_MCBSP2_IRQ_RX,
137 .tx_irq = INT_24XX_MCBSP2_IRQ_TX, 138 .tx_irq = INT_24XX_MCBSP2_IRQ_TX,
138 .ops = &omap2_mcbsp_ops, 139 .ops = &omap2_mcbsp_ops,
140 .buffer_size = 0x3FF,
139 }, 141 },
140 { 142 {
141 .phys_base = OMAP34XX_MCBSP3_BASE, 143 .phys_base = OMAP34XX_MCBSP3_BASE,
@@ -144,6 +146,7 @@ static struct omap_mcbsp_platform_data omap34xx_mcbsp_pdata[] = {
144 .rx_irq = INT_24XX_MCBSP3_IRQ_RX, 146 .rx_irq = INT_24XX_MCBSP3_IRQ_RX,
145 .tx_irq = INT_24XX_MCBSP3_IRQ_TX, 147 .tx_irq = INT_24XX_MCBSP3_IRQ_TX,
146 .ops = &omap2_mcbsp_ops, 148 .ops = &omap2_mcbsp_ops,
149 .buffer_size = 0x6F,
147 }, 150 },
148 { 151 {
149 .phys_base = OMAP34XX_MCBSP4_BASE, 152 .phys_base = OMAP34XX_MCBSP4_BASE,
@@ -152,6 +155,7 @@ static struct omap_mcbsp_platform_data omap34xx_mcbsp_pdata[] = {
152 .rx_irq = INT_24XX_MCBSP4_IRQ_RX, 155 .rx_irq = INT_24XX_MCBSP4_IRQ_RX,
153 .tx_irq = INT_24XX_MCBSP4_IRQ_TX, 156 .tx_irq = INT_24XX_MCBSP4_IRQ_TX,
154 .ops = &omap2_mcbsp_ops, 157 .ops = &omap2_mcbsp_ops,
158 .buffer_size = 0x6F,
155 }, 159 },
156 { 160 {
157 .phys_base = OMAP34XX_MCBSP5_BASE, 161 .phys_base = OMAP34XX_MCBSP5_BASE,
@@ -160,6 +164,7 @@ static struct omap_mcbsp_platform_data omap34xx_mcbsp_pdata[] = {
160 .rx_irq = INT_24XX_MCBSP5_IRQ_RX, 164 .rx_irq = INT_24XX_MCBSP5_IRQ_RX,
161 .tx_irq = INT_24XX_MCBSP5_IRQ_TX, 165 .tx_irq = INT_24XX_MCBSP5_IRQ_TX,
162 .ops = &omap2_mcbsp_ops, 166 .ops = &omap2_mcbsp_ops,
167 .buffer_size = 0x6F,
163 }, 168 },
164}; 169};
165#define OMAP34XX_MCBSP_PDATA_SZ ARRAY_SIZE(omap34xx_mcbsp_pdata) 170#define OMAP34XX_MCBSP_PDATA_SZ ARRAY_SIZE(omap34xx_mcbsp_pdata)
diff --git a/arch/arm/mach-pxa/include/mach/audio.h b/arch/arm/mach-pxa/include/mach/audio.h
index 16eb02552d5d..a3449e35a6f5 100644
--- a/arch/arm/mach-pxa/include/mach/audio.h
+++ b/arch/arm/mach-pxa/include/mach/audio.h
@@ -3,10 +3,12 @@
3 3
4#include <sound/core.h> 4#include <sound/core.h>
5#include <sound/pcm.h> 5#include <sound/pcm.h>
6#include <sound/ac97_codec.h>
6 7
7/* 8/*
8 * @reset_gpio: AC97 reset gpio (normally gpio113 or gpio95) 9 * @reset_gpio: AC97 reset gpio (normally gpio113 or gpio95)
9 * a -1 value means no gpio will be used for reset 10 * a -1 value means no gpio will be used for reset
11 * @codec_pdata: AC97 codec platform_data
10 12
11 * reset_gpio should only be specified for pxa27x CPUs where a silicon 13 * reset_gpio should only be specified for pxa27x CPUs where a silicon
12 * bug prevents correct operation of the reset line. If not specified, 14 * bug prevents correct operation of the reset line. If not specified,
@@ -20,6 +22,7 @@ typedef struct {
20 void (*resume)(void *); 22 void (*resume)(void *);
21 void *priv; 23 void *priv;
22 int reset_gpio; 24 int reset_gpio;
25 void *codec_pdata[AC97_BUS_MAX_DEVICES];
23} pxa2xx_audio_ops_t; 26} pxa2xx_audio_ops_t;
24 27
25extern void pxa_set_ac97_info(pxa2xx_audio_ops_t *ops); 28extern void pxa_set_ac97_info(pxa2xx_audio_ops_t *ops);
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index e3ac94f09006..9b00f4cbc903 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -1127,6 +1127,11 @@ int omap_dma_running(void)
1127void omap_dma_link_lch(int lch_head, int lch_queue) 1127void omap_dma_link_lch(int lch_head, int lch_queue)
1128{ 1128{
1129 if (omap_dma_in_1510_mode()) { 1129 if (omap_dma_in_1510_mode()) {
1130 if (lch_head == lch_queue) {
1131 dma_write(dma_read(CCR(lch_head)) | (3 << 8),
1132 CCR(lch_head));
1133 return;
1134 }
1130 printk(KERN_ERR "DMA linking is not supported in 1510 mode\n"); 1135 printk(KERN_ERR "DMA linking is not supported in 1510 mode\n");
1131 BUG(); 1136 BUG();
1132 return; 1137 return;
@@ -1149,6 +1154,11 @@ EXPORT_SYMBOL(omap_dma_link_lch);
1149void omap_dma_unlink_lch(int lch_head, int lch_queue) 1154void omap_dma_unlink_lch(int lch_head, int lch_queue)
1150{ 1155{
1151 if (omap_dma_in_1510_mode()) { 1156 if (omap_dma_in_1510_mode()) {
1157 if (lch_head == lch_queue) {
1158 dma_write(dma_read(CCR(lch_head)) & ~(3 << 8),
1159 CCR(lch_head));
1160 return;
1161 }
1152 printk(KERN_ERR "DMA linking is not supported in 1510 mode\n"); 1162 printk(KERN_ERR "DMA linking is not supported in 1510 mode\n");
1153 BUG(); 1163 BUG();
1154 return; 1164 return;
diff --git a/arch/arm/plat-omap/include/mach/mcbsp.h b/arch/arm/plat-omap/include/mach/mcbsp.h
index bb154ea76769..63a3f254af7b 100644
--- a/arch/arm/plat-omap/include/mach/mcbsp.h
+++ b/arch/arm/plat-omap/include/mach/mcbsp.h
@@ -134,6 +134,11 @@
134#define OMAP_MCBSP_REG_XCERG 0x74 134#define OMAP_MCBSP_REG_XCERG 0x74
135#define OMAP_MCBSP_REG_XCERH 0x78 135#define OMAP_MCBSP_REG_XCERH 0x78
136#define OMAP_MCBSP_REG_SYSCON 0x8C 136#define OMAP_MCBSP_REG_SYSCON 0x8C
137#define OMAP_MCBSP_REG_THRSH2 0x90
138#define OMAP_MCBSP_REG_THRSH1 0x94
139#define OMAP_MCBSP_REG_IRQST 0xA0
140#define OMAP_MCBSP_REG_IRQEN 0xA4
141#define OMAP_MCBSP_REG_WAKEUPEN 0xA8
137#define OMAP_MCBSP_REG_XCCR 0xAC 142#define OMAP_MCBSP_REG_XCCR 0xAC
138#define OMAP_MCBSP_REG_RCCR 0xB0 143#define OMAP_MCBSP_REG_RCCR 0xB0
139 144
@@ -249,8 +254,27 @@
249#define RDISABLE 0x0001 254#define RDISABLE 0x0001
250 255
251/********************** McBSP SYSCONFIG bit definitions ********************/ 256/********************** McBSP SYSCONFIG bit definitions ********************/
257#define CLOCKACTIVITY(value) ((value)<<8)
258#define SIDLEMODE(value) ((value)<<3)
259#define ENAWAKEUP 0x0004
252#define SOFTRST 0x0002 260#define SOFTRST 0x0002
253 261
262/********************** McBSP DMA operating modes **************************/
263#define MCBSP_DMA_MODE_ELEMENT 0
264#define MCBSP_DMA_MODE_THRESHOLD 1
265#define MCBSP_DMA_MODE_FRAME 2
266
267/********************** McBSP WAKEUPEN bit definitions *********************/
268#define XEMPTYEOFEN 0x4000
269#define XRDYEN 0x0400
270#define XEOFEN 0x0200
271#define XFSXEN 0x0100
272#define XSYNCERREN 0x0080
273#define RRDYEN 0x0008
274#define REOFEN 0x0004
275#define RFSREN 0x0002
276#define RSYNCERREN 0x0001
277
254/* we don't do multichannel for now */ 278/* we don't do multichannel for now */
255struct omap_mcbsp_reg_cfg { 279struct omap_mcbsp_reg_cfg {
256 u16 spcr2; 280 u16 spcr2;
@@ -344,6 +368,9 @@ struct omap_mcbsp_platform_data {
344 u8 dma_rx_sync, dma_tx_sync; 368 u8 dma_rx_sync, dma_tx_sync;
345 u16 rx_irq, tx_irq; 369 u16 rx_irq, tx_irq;
346 struct omap_mcbsp_ops *ops; 370 struct omap_mcbsp_ops *ops;
371#ifdef CONFIG_ARCH_OMAP34XX
372 u16 buffer_size;
373#endif
347}; 374};
348 375
349struct omap_mcbsp { 376struct omap_mcbsp {
@@ -377,6 +404,11 @@ struct omap_mcbsp {
377 struct omap_mcbsp_platform_data *pdata; 404 struct omap_mcbsp_platform_data *pdata;
378 struct clk *iclk; 405 struct clk *iclk;
379 struct clk *fclk; 406 struct clk *fclk;
407#ifdef CONFIG_ARCH_OMAP34XX
408 int dma_op_mode;
409 u16 max_tx_thres;
410 u16 max_rx_thres;
411#endif
380}; 412};
381extern struct omap_mcbsp **mcbsp_ptr; 413extern struct omap_mcbsp **mcbsp_ptr;
382extern int omap_mcbsp_count; 414extern int omap_mcbsp_count;
@@ -385,10 +417,25 @@ int omap_mcbsp_init(void);
385void omap_mcbsp_register_board_cfg(struct omap_mcbsp_platform_data *config, 417void omap_mcbsp_register_board_cfg(struct omap_mcbsp_platform_data *config,
386 int size); 418 int size);
387void omap_mcbsp_config(unsigned int id, const struct omap_mcbsp_reg_cfg * config); 419void omap_mcbsp_config(unsigned int id, const struct omap_mcbsp_reg_cfg * config);
420#ifdef CONFIG_ARCH_OMAP34XX
421void omap_mcbsp_set_tx_threshold(unsigned int id, u16 threshold);
422void omap_mcbsp_set_rx_threshold(unsigned int id, u16 threshold);
423u16 omap_mcbsp_get_max_tx_threshold(unsigned int id);
424u16 omap_mcbsp_get_max_rx_threshold(unsigned int id);
425int omap_mcbsp_get_dma_op_mode(unsigned int id);
426#else
427static inline void omap_mcbsp_set_tx_threshold(unsigned int id, u16 threshold)
428{ }
429static inline void omap_mcbsp_set_rx_threshold(unsigned int id, u16 threshold)
430{ }
431static inline u16 omap_mcbsp_get_max_tx_threshold(unsigned int id) { return 0; }
432static inline u16 omap_mcbsp_get_max_rx_threshold(unsigned int id) { return 0; }
433static inline int omap_mcbsp_get_dma_op_mode(unsigned int id) { return 0; }
434#endif
388int omap_mcbsp_request(unsigned int id); 435int omap_mcbsp_request(unsigned int id);
389void omap_mcbsp_free(unsigned int id); 436void omap_mcbsp_free(unsigned int id);
390void omap_mcbsp_start(unsigned int id); 437void omap_mcbsp_start(unsigned int id, int tx, int rx);
391void omap_mcbsp_stop(unsigned int id); 438void omap_mcbsp_stop(unsigned int id, int tx, int rx);
392void omap_mcbsp_xmit_word(unsigned int id, u32 word); 439void omap_mcbsp_xmit_word(unsigned int id, u32 word);
393u32 omap_mcbsp_recv_word(unsigned int id); 440u32 omap_mcbsp_recv_word(unsigned int id);
394 441
diff --git a/arch/arm/plat-omap/mcbsp.c b/arch/arm/plat-omap/mcbsp.c
index efa0e0111f38..8dc7927906f1 100644
--- a/arch/arm/plat-omap/mcbsp.c
+++ b/arch/arm/plat-omap/mcbsp.c
@@ -198,6 +198,170 @@ void omap_mcbsp_config(unsigned int id, const struct omap_mcbsp_reg_cfg *config)
198} 198}
199EXPORT_SYMBOL(omap_mcbsp_config); 199EXPORT_SYMBOL(omap_mcbsp_config);
200 200
201#ifdef CONFIG_ARCH_OMAP34XX
202/*
203 * omap_mcbsp_set_tx_threshold configures how to deal
204 * with transmit threshold. the threshold value and handler can be
205 * configure in here.
206 */
207void omap_mcbsp_set_tx_threshold(unsigned int id, u16 threshold)
208{
209 struct omap_mcbsp *mcbsp;
210 void __iomem *io_base;
211
212 if (!cpu_is_omap34xx())
213 return;
214
215 if (!omap_mcbsp_check_valid_id(id)) {
216 printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
217 return;
218 }
219 mcbsp = id_to_mcbsp_ptr(id);
220 io_base = mcbsp->io_base;
221
222 OMAP_MCBSP_WRITE(io_base, THRSH2, threshold);
223}
224EXPORT_SYMBOL(omap_mcbsp_set_tx_threshold);
225
226/*
227 * omap_mcbsp_set_rx_threshold configures how to deal
228 * with receive threshold. the threshold value and handler can be
229 * configure in here.
230 */
231void omap_mcbsp_set_rx_threshold(unsigned int id, u16 threshold)
232{
233 struct omap_mcbsp *mcbsp;
234 void __iomem *io_base;
235
236 if (!cpu_is_omap34xx())
237 return;
238
239 if (!omap_mcbsp_check_valid_id(id)) {
240 printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
241 return;
242 }
243 mcbsp = id_to_mcbsp_ptr(id);
244 io_base = mcbsp->io_base;
245
246 OMAP_MCBSP_WRITE(io_base, THRSH1, threshold);
247}
248EXPORT_SYMBOL(omap_mcbsp_set_rx_threshold);
249
250/*
251 * omap_mcbsp_get_max_tx_thres just return the current configured
252 * maximum threshold for transmission
253 */
254u16 omap_mcbsp_get_max_tx_threshold(unsigned int id)
255{
256 struct omap_mcbsp *mcbsp;
257
258 if (!omap_mcbsp_check_valid_id(id)) {
259 printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
260 return -ENODEV;
261 }
262 mcbsp = id_to_mcbsp_ptr(id);
263
264 return mcbsp->max_tx_thres;
265}
266EXPORT_SYMBOL(omap_mcbsp_get_max_tx_threshold);
267
268/*
269 * omap_mcbsp_get_max_rx_thres just return the current configured
270 * maximum threshold for reception
271 */
272u16 omap_mcbsp_get_max_rx_threshold(unsigned int id)
273{
274 struct omap_mcbsp *mcbsp;
275
276 if (!omap_mcbsp_check_valid_id(id)) {
277 printk(KERN_ERR "%s: Invalid id (%d)\n", __func__, id + 1);
278 return -ENODEV;
279 }
280 mcbsp = id_to_mcbsp_ptr(id);
281
282 return mcbsp->max_rx_thres;
283}
284EXPORT_SYMBOL(omap_mcbsp_get_max_rx_threshold);
285
286/*
287 * omap_mcbsp_get_dma_op_mode just return the current configured
288 * operating mode for the mcbsp channel
289 */
290int omap_mcbsp_get_dma_op_mode(unsigned int id)
291{
292 struct omap_mcbsp *mcbsp;
293 int dma_op_mode;
294
295 if (!omap_mcbsp_check_valid_id(id)) {
296 printk(KERN_ERR "%s: Invalid id (%u)\n", __func__, id + 1);
297 return -ENODEV;
298 }
299 mcbsp = id_to_mcbsp_ptr(id);
300
301 spin_lock_irq(&mcbsp->lock);
302 dma_op_mode = mcbsp->dma_op_mode;
303 spin_unlock_irq(&mcbsp->lock);
304
305 return dma_op_mode;
306}
307EXPORT_SYMBOL(omap_mcbsp_get_dma_op_mode);
308
309static inline void omap34xx_mcbsp_request(struct omap_mcbsp *mcbsp)
310{
311 /*
312 * Enable wakup behavior, smart idle and all wakeups
313 * REVISIT: some wakeups may be unnecessary
314 */
315 if (cpu_is_omap34xx()) {
316 u16 syscon;
317
318 syscon = OMAP_MCBSP_READ(mcbsp->io_base, SYSCON);
319 syscon &= ~(ENAWAKEUP | SIDLEMODE(0x03) | CLOCKACTIVITY(0x03));
320
321 spin_lock_irq(&mcbsp->lock);
322 if (mcbsp->dma_op_mode == MCBSP_DMA_MODE_THRESHOLD) {
323 syscon |= (ENAWAKEUP | SIDLEMODE(0x02) |
324 CLOCKACTIVITY(0x02));
325 OMAP_MCBSP_WRITE(mcbsp->io_base, WAKEUPEN,
326 XRDYEN | RRDYEN);
327 } else {
328 syscon |= SIDLEMODE(0x01);
329 }
330 spin_unlock_irq(&mcbsp->lock);
331
332 OMAP_MCBSP_WRITE(mcbsp->io_base, SYSCON, syscon);
333 }
334}
335
336static inline void omap34xx_mcbsp_free(struct omap_mcbsp *mcbsp)
337{
338 /*
339 * Disable wakup behavior, smart idle and all wakeups
340 */
341 if (cpu_is_omap34xx()) {
342 u16 syscon;
343
344 syscon = OMAP_MCBSP_READ(mcbsp->io_base, SYSCON);
345 syscon &= ~(ENAWAKEUP | SIDLEMODE(0x03) | CLOCKACTIVITY(0x03));
346 /*
347 * HW bug workaround - If no_idle mode is taken, we need to
348 * go to smart_idle before going to always_idle, or the
349 * device will not hit retention anymore.
350 */
351 syscon |= SIDLEMODE(0x02);
352 OMAP_MCBSP_WRITE(mcbsp->io_base, SYSCON, syscon);
353
354 syscon &= ~(SIDLEMODE(0x03));
355 OMAP_MCBSP_WRITE(mcbsp->io_base, SYSCON, syscon);
356
357 OMAP_MCBSP_WRITE(mcbsp->io_base, WAKEUPEN, 0);
358 }
359}
360#else
361static inline void omap34xx_mcbsp_request(struct omap_mcbsp *mcbsp) {}
362static inline void omap34xx_mcbsp_free(struct omap_mcbsp *mcbsp) {}
363#endif
364
201/* 365/*
202 * We can choose between IRQ based or polled IO. 366 * We can choose between IRQ based or polled IO.
203 * This needs to be called before omap_mcbsp_request(). 367 * This needs to be called before omap_mcbsp_request().
@@ -257,6 +421,9 @@ int omap_mcbsp_request(unsigned int id)
257 clk_enable(mcbsp->iclk); 421 clk_enable(mcbsp->iclk);
258 clk_enable(mcbsp->fclk); 422 clk_enable(mcbsp->fclk);
259 423
424 /* Do procedure specific to omap34xx arch, if applicable */
425 omap34xx_mcbsp_request(mcbsp);
426
260 /* 427 /*
261 * Make sure that transmitter, receiver and sample-rate generator are 428 * Make sure that transmitter, receiver and sample-rate generator are
262 * not running before activating IRQs. 429 * not running before activating IRQs.
@@ -305,6 +472,9 @@ void omap_mcbsp_free(unsigned int id)
305 if (mcbsp->pdata && mcbsp->pdata->ops && mcbsp->pdata->ops->free) 472 if (mcbsp->pdata && mcbsp->pdata->ops && mcbsp->pdata->ops->free)
306 mcbsp->pdata->ops->free(id); 473 mcbsp->pdata->ops->free(id);
307 474
475 /* Do procedure specific to omap34xx arch, if applicable */
476 omap34xx_mcbsp_free(mcbsp);
477
308 clk_disable(mcbsp->fclk); 478 clk_disable(mcbsp->fclk);
309 clk_disable(mcbsp->iclk); 479 clk_disable(mcbsp->iclk);
310 480
@@ -328,14 +498,15 @@ void omap_mcbsp_free(unsigned int id)
328EXPORT_SYMBOL(omap_mcbsp_free); 498EXPORT_SYMBOL(omap_mcbsp_free);
329 499
330/* 500/*
331 * Here we start the McBSP, by enabling the sample 501 * Here we start the McBSP, by enabling transmitter, receiver or both.
332 * generator, both transmitter and receivers, 502 * If no transmitter or receiver is active prior calling, then sample-rate
333 * and the frame sync. 503 * generator and frame sync are started.
334 */ 504 */
335void omap_mcbsp_start(unsigned int id) 505void omap_mcbsp_start(unsigned int id, int tx, int rx)
336{ 506{
337 struct omap_mcbsp *mcbsp; 507 struct omap_mcbsp *mcbsp;
338 void __iomem *io_base; 508 void __iomem *io_base;
509 int idle;
339 u16 w; 510 u16 w;
340 511
341 if (!omap_mcbsp_check_valid_id(id)) { 512 if (!omap_mcbsp_check_valid_id(id)) {
@@ -348,32 +519,58 @@ void omap_mcbsp_start(unsigned int id)
348 mcbsp->rx_word_length = (OMAP_MCBSP_READ(io_base, RCR1) >> 5) & 0x7; 519 mcbsp->rx_word_length = (OMAP_MCBSP_READ(io_base, RCR1) >> 5) & 0x7;
349 mcbsp->tx_word_length = (OMAP_MCBSP_READ(io_base, XCR1) >> 5) & 0x7; 520 mcbsp->tx_word_length = (OMAP_MCBSP_READ(io_base, XCR1) >> 5) & 0x7;
350 521
351 /* Start the sample generator */ 522 idle = !((OMAP_MCBSP_READ(io_base, SPCR2) |
352 w = OMAP_MCBSP_READ(io_base, SPCR2); 523 OMAP_MCBSP_READ(io_base, SPCR1)) & 1);
353 OMAP_MCBSP_WRITE(io_base, SPCR2, w | (1 << 6)); 524
525 if (idle) {
526 /* Start the sample generator */
527 w = OMAP_MCBSP_READ(io_base, SPCR2);
528 OMAP_MCBSP_WRITE(io_base, SPCR2, w | (1 << 6));
529 }
354 530
355 /* Enable transmitter and receiver */ 531 /* Enable transmitter and receiver */
532 tx &= 1;
356 w = OMAP_MCBSP_READ(io_base, SPCR2); 533 w = OMAP_MCBSP_READ(io_base, SPCR2);
357 OMAP_MCBSP_WRITE(io_base, SPCR2, w | 1); 534 OMAP_MCBSP_WRITE(io_base, SPCR2, w | tx);
358 535
536 rx &= 1;
359 w = OMAP_MCBSP_READ(io_base, SPCR1); 537 w = OMAP_MCBSP_READ(io_base, SPCR1);
360 OMAP_MCBSP_WRITE(io_base, SPCR1, w | 1); 538 OMAP_MCBSP_WRITE(io_base, SPCR1, w | rx);
361 539
362 udelay(100); 540 /*
541 * Worst case: CLKSRG*2 = 8000khz: (1/8000) * 2 * 2 usec
542 * REVISIT: 100us may give enough time for two CLKSRG, however
543 * due to some unknown PM related, clock gating etc. reason it
544 * is now at 500us.
545 */
546 udelay(500);
363 547
364 /* Start frame sync */ 548 if (idle) {
365 w = OMAP_MCBSP_READ(io_base, SPCR2); 549 /* Start frame sync */
366 OMAP_MCBSP_WRITE(io_base, SPCR2, w | (1 << 7)); 550 w = OMAP_MCBSP_READ(io_base, SPCR2);
551 OMAP_MCBSP_WRITE(io_base, SPCR2, w | (1 << 7));
552 }
553
554 if (cpu_is_omap2430() || cpu_is_omap34xx()) {
555 /* Release the transmitter and receiver */
556 w = OMAP_MCBSP_READ(io_base, XCCR);
557 w &= ~(tx ? XDISABLE : 0);
558 OMAP_MCBSP_WRITE(io_base, XCCR, w);
559 w = OMAP_MCBSP_READ(io_base, RCCR);
560 w &= ~(rx ? RDISABLE : 0);
561 OMAP_MCBSP_WRITE(io_base, RCCR, w);
562 }
367 563
368 /* Dump McBSP Regs */ 564 /* Dump McBSP Regs */
369 omap_mcbsp_dump_reg(id); 565 omap_mcbsp_dump_reg(id);
370} 566}
371EXPORT_SYMBOL(omap_mcbsp_start); 567EXPORT_SYMBOL(omap_mcbsp_start);
372 568
373void omap_mcbsp_stop(unsigned int id) 569void omap_mcbsp_stop(unsigned int id, int tx, int rx)
374{ 570{
375 struct omap_mcbsp *mcbsp; 571 struct omap_mcbsp *mcbsp;
376 void __iomem *io_base; 572 void __iomem *io_base;
573 int idle;
377 u16 w; 574 u16 w;
378 575
379 if (!omap_mcbsp_check_valid_id(id)) { 576 if (!omap_mcbsp_check_valid_id(id)) {
@@ -385,16 +582,33 @@ void omap_mcbsp_stop(unsigned int id)
385 io_base = mcbsp->io_base; 582 io_base = mcbsp->io_base;
386 583
387 /* Reset transmitter */ 584 /* Reset transmitter */
585 tx &= 1;
586 if (cpu_is_omap2430() || cpu_is_omap34xx()) {
587 w = OMAP_MCBSP_READ(io_base, XCCR);
588 w |= (tx ? XDISABLE : 0);
589 OMAP_MCBSP_WRITE(io_base, XCCR, w);
590 }
388 w = OMAP_MCBSP_READ(io_base, SPCR2); 591 w = OMAP_MCBSP_READ(io_base, SPCR2);
389 OMAP_MCBSP_WRITE(io_base, SPCR2, w & ~(1)); 592 OMAP_MCBSP_WRITE(io_base, SPCR2, w & ~tx);
390 593
391 /* Reset receiver */ 594 /* Reset receiver */
595 rx &= 1;
596 if (cpu_is_omap2430() || cpu_is_omap34xx()) {
597 w = OMAP_MCBSP_READ(io_base, RCCR);
598 w |= (tx ? RDISABLE : 0);
599 OMAP_MCBSP_WRITE(io_base, RCCR, w);
600 }
392 w = OMAP_MCBSP_READ(io_base, SPCR1); 601 w = OMAP_MCBSP_READ(io_base, SPCR1);
393 OMAP_MCBSP_WRITE(io_base, SPCR1, w & ~(1)); 602 OMAP_MCBSP_WRITE(io_base, SPCR1, w & ~rx);
394 603
395 /* Reset the sample rate generator */ 604 idle = !((OMAP_MCBSP_READ(io_base, SPCR2) |
396 w = OMAP_MCBSP_READ(io_base, SPCR2); 605 OMAP_MCBSP_READ(io_base, SPCR1)) & 1);
397 OMAP_MCBSP_WRITE(io_base, SPCR2, w & ~(1 << 6)); 606
607 if (idle) {
608 /* Reset the sample rate generator */
609 w = OMAP_MCBSP_READ(io_base, SPCR2);
610 OMAP_MCBSP_WRITE(io_base, SPCR2, w & ~(1 << 6));
611 }
398} 612}
399EXPORT_SYMBOL(omap_mcbsp_stop); 613EXPORT_SYMBOL(omap_mcbsp_stop);
400 614
@@ -883,6 +1097,149 @@ void omap_mcbsp_set_spi_mode(unsigned int id,
883} 1097}
884EXPORT_SYMBOL(omap_mcbsp_set_spi_mode); 1098EXPORT_SYMBOL(omap_mcbsp_set_spi_mode);
885 1099
1100#ifdef CONFIG_ARCH_OMAP34XX
1101#define max_thres(m) (mcbsp->pdata->buffer_size)
1102#define valid_threshold(m, val) ((val) <= max_thres(m))
1103#define THRESHOLD_PROP_BUILDER(prop) \
1104static ssize_t prop##_show(struct device *dev, \
1105 struct device_attribute *attr, char *buf) \
1106{ \
1107 struct omap_mcbsp *mcbsp = dev_get_drvdata(dev); \
1108 \
1109 return sprintf(buf, "%u\n", mcbsp->prop); \
1110} \
1111 \
1112static ssize_t prop##_store(struct device *dev, \
1113 struct device_attribute *attr, \
1114 const char *buf, size_t size) \
1115{ \
1116 struct omap_mcbsp *mcbsp = dev_get_drvdata(dev); \
1117 unsigned long val; \
1118 int status; \
1119 \
1120 status = strict_strtoul(buf, 0, &val); \
1121 if (status) \
1122 return status; \
1123 \
1124 if (!valid_threshold(mcbsp, val)) \
1125 return -EDOM; \
1126 \
1127 mcbsp->prop = val; \
1128 return size; \
1129} \
1130 \
1131static DEVICE_ATTR(prop, 0644, prop##_show, prop##_store);
1132
1133THRESHOLD_PROP_BUILDER(max_tx_thres);
1134THRESHOLD_PROP_BUILDER(max_rx_thres);
1135
1136static const char *dma_op_modes[] = {
1137 "element", "threshold", "frame",
1138};
1139
1140static ssize_t dma_op_mode_show(struct device *dev,
1141 struct device_attribute *attr, char *buf)
1142{
1143 struct omap_mcbsp *mcbsp = dev_get_drvdata(dev);
1144 int dma_op_mode, i = 0;
1145 ssize_t len = 0;
1146 const char * const *s;
1147
1148 spin_lock_irq(&mcbsp->lock);
1149 dma_op_mode = mcbsp->dma_op_mode;
1150 spin_unlock_irq(&mcbsp->lock);
1151
1152 for (s = &dma_op_modes[i]; i < ARRAY_SIZE(dma_op_modes); s++, i++) {
1153 if (dma_op_mode == i)
1154 len += sprintf(buf + len, "[%s] ", *s);
1155 else
1156 len += sprintf(buf + len, "%s ", *s);
1157 }
1158 len += sprintf(buf + len, "\n");
1159
1160 return len;
1161}
1162
1163static ssize_t dma_op_mode_store(struct device *dev,
1164 struct device_attribute *attr,
1165 const char *buf, size_t size)
1166{
1167 struct omap_mcbsp *mcbsp = dev_get_drvdata(dev);
1168 const char * const *s;
1169 int i = 0;
1170
1171 for (s = &dma_op_modes[i]; i < ARRAY_SIZE(dma_op_modes); s++, i++)
1172 if (sysfs_streq(buf, *s))
1173 break;
1174
1175 if (i == ARRAY_SIZE(dma_op_modes))
1176 return -EINVAL;
1177
1178 spin_lock_irq(&mcbsp->lock);
1179 if (!mcbsp->free) {
1180 size = -EBUSY;
1181 goto unlock;
1182 }
1183 mcbsp->dma_op_mode = i;
1184
1185unlock:
1186 spin_unlock_irq(&mcbsp->lock);
1187
1188 return size;
1189}
1190
1191static DEVICE_ATTR(dma_op_mode, 0644, dma_op_mode_show, dma_op_mode_store);
1192
1193static const struct attribute *additional_attrs[] = {
1194 &dev_attr_max_tx_thres.attr,
1195 &dev_attr_max_rx_thres.attr,
1196 &dev_attr_dma_op_mode.attr,
1197 NULL,
1198};
1199
1200static const struct attribute_group additional_attr_group = {
1201 .attrs = (struct attribute **)additional_attrs,
1202};
1203
1204static inline int __devinit omap_additional_add(struct device *dev)
1205{
1206 return sysfs_create_group(&dev->kobj, &additional_attr_group);
1207}
1208
1209static inline void __devexit omap_additional_remove(struct device *dev)
1210{
1211 sysfs_remove_group(&dev->kobj, &additional_attr_group);
1212}
1213
1214static inline void __devinit omap34xx_device_init(struct omap_mcbsp *mcbsp)
1215{
1216 mcbsp->dma_op_mode = MCBSP_DMA_MODE_ELEMENT;
1217 if (cpu_is_omap34xx()) {
1218 mcbsp->max_tx_thres = max_thres(mcbsp);
1219 mcbsp->max_rx_thres = max_thres(mcbsp);
1220 /*
1221 * REVISIT: Set dmap_op_mode to THRESHOLD as default
1222 * for mcbsp2 instances.
1223 */
1224 if (omap_additional_add(mcbsp->dev))
1225 dev_warn(mcbsp->dev,
1226 "Unable to create additional controls\n");
1227 } else {
1228 mcbsp->max_tx_thres = -EINVAL;
1229 mcbsp->max_rx_thres = -EINVAL;
1230 }
1231}
1232
1233static inline void __devexit omap34xx_device_exit(struct omap_mcbsp *mcbsp)
1234{
1235 if (cpu_is_omap34xx())
1236 omap_additional_remove(mcbsp->dev);
1237}
1238#else
1239static inline void __devinit omap34xx_device_init(struct omap_mcbsp *mcbsp) {}
1240static inline void __devexit omap34xx_device_exit(struct omap_mcbsp *mcbsp) {}
1241#endif /* CONFIG_ARCH_OMAP34XX */
1242
886/* 1243/*
887 * McBSP1 and McBSP3 are directly mapped on 1610 and 1510. 1244 * McBSP1 and McBSP3 are directly mapped on 1610 and 1510.
888 * 730 has only 2 McBSP, and both of them are MPU peripherals. 1245 * 730 has only 2 McBSP, and both of them are MPU peripherals.
@@ -953,6 +1310,10 @@ static int __devinit omap_mcbsp_probe(struct platform_device *pdev)
953 mcbsp->dev = &pdev->dev; 1310 mcbsp->dev = &pdev->dev;
954 mcbsp_ptr[id] = mcbsp; 1311 mcbsp_ptr[id] = mcbsp;
955 platform_set_drvdata(pdev, mcbsp); 1312 platform_set_drvdata(pdev, mcbsp);
1313
1314 /* Initialize mcbsp properties for OMAP34XX if needed / applicable */
1315 omap34xx_device_init(mcbsp);
1316
956 return 0; 1317 return 0;
957 1318
958err_fclk: 1319err_fclk:
@@ -976,6 +1337,8 @@ static int __devexit omap_mcbsp_remove(struct platform_device *pdev)
976 mcbsp->pdata->ops->free) 1337 mcbsp->pdata->ops->free)
977 mcbsp->pdata->ops->free(mcbsp->id); 1338 mcbsp->pdata->ops->free(mcbsp->id);
978 1339
1340 omap34xx_device_exit(mcbsp);
1341
979 clk_disable(mcbsp->fclk); 1342 clk_disable(mcbsp->fclk);
980 clk_disable(mcbsp->iclk); 1343 clk_disable(mcbsp->iclk);
981 clk_put(mcbsp->fclk); 1344 clk_put(mcbsp->fclk);
diff --git a/arch/arm/plat-s3c/include/plat/audio-simtec.h b/arch/arm/plat-s3c/include/plat/audio-simtec.h
new file mode 100644
index 000000000000..0f440b9168db
--- /dev/null
+++ b/arch/arm/plat-s3c/include/plat/audio-simtec.h
@@ -0,0 +1,37 @@
1/* arch/arm/plat-s3c/include/plat/audio-simtec.h
2 *
3 * Copyright 2008 Simtec Electronics
4 * http://armlinux.simtec.co.uk/
5 * Ben Dooks <ben@simtec.co.uk>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * Simtec Audio support.
12*/
13
14/**
15 * struct s3c24xx_audio_simtec_pdata - platform data for simtec audio
16 * @use_mpllin: Select codec clock from MPLLin
17 * @output_cdclk: Need to output CDCLK to the codec
18 * @have_mic: Set if we have a MIC socket
19 * @have_lout: Set if we have a LineOut socket
20 * @amp_gpio: GPIO pin to enable the AMP
21 * @amp_gain: Option GPIO to control AMP gain
22 */
23struct s3c24xx_audio_simtec_pdata {
24 unsigned int use_mpllin:1;
25 unsigned int output_cdclk:1;
26
27 unsigned int have_mic:1;
28 unsigned int have_lout:1;
29
30 int amp_gpio;
31 int amp_gain[2];
32
33 void (*startup)(void);
34};
35
36extern int simtec_audio_add(const char *codec_name,
37 struct s3c24xx_audio_simtec_pdata *pdata);
diff --git a/arch/arm/plat-s3c/include/plat/regs-s3c2412-iis.h b/arch/arm/plat-s3c/include/plat/regs-s3c2412-iis.h
index 0fad7571030e..07659dad1748 100644
--- a/arch/arm/plat-s3c/include/plat/regs-s3c2412-iis.h
+++ b/arch/arm/plat-s3c/include/plat/regs-s3c2412-iis.h
@@ -33,6 +33,11 @@
33#define S3C2412_IISCON_RXDMA_ACTIVE (1 << 1) 33#define S3C2412_IISCON_RXDMA_ACTIVE (1 << 1)
34#define S3C2412_IISCON_IIS_ACTIVE (1 << 0) 34#define S3C2412_IISCON_IIS_ACTIVE (1 << 0)
35 35
36#define S3C64XX_IISMOD_BLC_16BIT (0 << 13)
37#define S3C64XX_IISMOD_BLC_8BIT (1 << 13)
38#define S3C64XX_IISMOD_BLC_24BIT (2 << 13)
39#define S3C64XX_IISMOD_BLC_MASK (3 << 13)
40
36#define S3C64XX_IISMOD_IMS_PCLK (0 << 10) 41#define S3C64XX_IISMOD_IMS_PCLK (0 << 10)
37#define S3C64XX_IISMOD_IMS_SYSMUX (1 << 10) 42#define S3C64XX_IISMOD_IMS_SYSMUX (1 << 10)
38 43
diff --git a/arch/avr32/include/asm/thread_info.h b/arch/avr32/include/asm/thread_info.h
index fc42de5ca209..fd0c5d7e9337 100644
--- a/arch/avr32/include/asm/thread_info.h
+++ b/arch/avr32/include/asm/thread_info.h
@@ -84,6 +84,7 @@ static inline struct thread_info *current_thread_info(void)
84#define TIF_MEMDIE 6 84#define TIF_MEMDIE 6
85#define TIF_RESTORE_SIGMASK 7 /* restore signal mask in do_signal */ 85#define TIF_RESTORE_SIGMASK 7 /* restore signal mask in do_signal */
86#define TIF_CPU_GOING_TO_SLEEP 8 /* CPU is entering sleep 0 mode */ 86#define TIF_CPU_GOING_TO_SLEEP 8 /* CPU is entering sleep 0 mode */
87#define TIF_NOTIFY_RESUME 9 /* callback before returning to user */
87#define TIF_FREEZE 29 88#define TIF_FREEZE 29
88#define TIF_DEBUG 30 /* debugging enabled */ 89#define TIF_DEBUG 30 /* debugging enabled */
89#define TIF_USERSPACE 31 /* true if FS sets userspace */ 90#define TIF_USERSPACE 31 /* true if FS sets userspace */
@@ -96,6 +97,7 @@ static inline struct thread_info *current_thread_info(void)
96#define _TIF_MEMDIE (1 << TIF_MEMDIE) 97#define _TIF_MEMDIE (1 << TIF_MEMDIE)
97#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) 98#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
98#define _TIF_CPU_GOING_TO_SLEEP (1 << TIF_CPU_GOING_TO_SLEEP) 99#define _TIF_CPU_GOING_TO_SLEEP (1 << TIF_CPU_GOING_TO_SLEEP)
100#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
99#define _TIF_FREEZE (1 << TIF_FREEZE) 101#define _TIF_FREEZE (1 << TIF_FREEZE)
100 102
101/* Note: The masks below must never span more than 16 bits! */ 103/* Note: The masks below must never span more than 16 bits! */
@@ -103,13 +105,15 @@ static inline struct thread_info *current_thread_info(void)
103/* work to do on interrupt/exception return */ 105/* work to do on interrupt/exception return */
104#define _TIF_WORK_MASK \ 106#define _TIF_WORK_MASK \
105 ((1 << TIF_SIGPENDING) \ 107 ((1 << TIF_SIGPENDING) \
108 | _TIF_NOTIFY_RESUME \
106 | (1 << TIF_NEED_RESCHED) \ 109 | (1 << TIF_NEED_RESCHED) \
107 | (1 << TIF_POLLING_NRFLAG) \ 110 | (1 << TIF_POLLING_NRFLAG) \
108 | (1 << TIF_BREAKPOINT) \ 111 | (1 << TIF_BREAKPOINT) \
109 | (1 << TIF_RESTORE_SIGMASK)) 112 | (1 << TIF_RESTORE_SIGMASK))
110 113
111/* work to do on any return to userspace */ 114/* work to do on any return to userspace */
112#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK | (1 << TIF_SYSCALL_TRACE)) 115#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK | (1 << TIF_SYSCALL_TRACE) | \
116 _TIF_NOTIFY_RESUME)
113/* work to do on return from debug mode */ 117/* work to do on return from debug mode */
114#define _TIF_DBGWORK_MASK (_TIF_WORK_MASK & ~(1 << TIF_BREAKPOINT)) 118#define _TIF_DBGWORK_MASK (_TIF_WORK_MASK & ~(1 << TIF_BREAKPOINT))
115 119
diff --git a/arch/avr32/kernel/entry-avr32b.S b/arch/avr32/kernel/entry-avr32b.S
index 009a80155d67..169268c40ae2 100644
--- a/arch/avr32/kernel/entry-avr32b.S
+++ b/arch/avr32/kernel/entry-avr32b.S
@@ -281,7 +281,7 @@ syscall_exit_work:
281 ld.w r1, r0[TI_flags] 281 ld.w r1, r0[TI_flags]
282 rjmp 1b 282 rjmp 1b
283 283
2842: mov r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK 2842: mov r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | _TIF_NOTIFY_RESUME
285 tst r1, r2 285 tst r1, r2
286 breq 3f 286 breq 3f
287 unmask_interrupts 287 unmask_interrupts
diff --git a/arch/avr32/kernel/signal.c b/arch/avr32/kernel/signal.c
index 27227561bad6..64f886fac2ef 100644
--- a/arch/avr32/kernel/signal.c
+++ b/arch/avr32/kernel/signal.c
@@ -16,6 +16,7 @@
16#include <linux/ptrace.h> 16#include <linux/ptrace.h>
17#include <linux/unistd.h> 17#include <linux/unistd.h>
18#include <linux/freezer.h> 18#include <linux/freezer.h>
19#include <linux/tracehook.h>
19 20
20#include <asm/uaccess.h> 21#include <asm/uaccess.h>
21#include <asm/ucontext.h> 22#include <asm/ucontext.h>
@@ -322,4 +323,11 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, struct thread_info *ti)
322 323
323 if (ti->flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)) 324 if (ti->flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
324 do_signal(regs, &current->blocked, syscall); 325 do_signal(regs, &current->blocked, syscall);
326
327 if (ti->flags & _TIF_NOTIFY_RESUME) {
328 clear_thread_flag(TIF_NOTIFY_RESUME);
329 tracehook_notify_resume(regs);
330 if (current->replacement_session_keyring)
331 key_replace_session_keyring();
332 }
325} 333}
diff --git a/arch/cris/kernel/ptrace.c b/arch/cris/kernel/ptrace.c
index b326023baab2..48b0f3912632 100644
--- a/arch/cris/kernel/ptrace.c
+++ b/arch/cris/kernel/ptrace.c
@@ -16,6 +16,7 @@
16#include <linux/errno.h> 16#include <linux/errno.h>
17#include <linux/ptrace.h> 17#include <linux/ptrace.h>
18#include <linux/user.h> 18#include <linux/user.h>
19#include <linux/tracehook.h>
19 20
20#include <asm/uaccess.h> 21#include <asm/uaccess.h>
21#include <asm/page.h> 22#include <asm/page.h>
@@ -36,4 +37,11 @@ void do_notify_resume(int canrestart, struct pt_regs *regs,
36 /* deal with pending signal delivery */ 37 /* deal with pending signal delivery */
37 if (thread_info_flags & _TIF_SIGPENDING) 38 if (thread_info_flags & _TIF_SIGPENDING)
38 do_signal(canrestart,regs); 39 do_signal(canrestart,regs);
40
41 if (thread_info_flags & _TIF_NOTIFY_RESUME) {
42 clear_thread_flag(TIF_NOTIFY_RESUME);
43 tracehook_notify_resume(regs);
44 if (current->replacement_session_keyring)
45 key_replace_session_keyring();
46 }
39} 47}
diff --git a/arch/frv/kernel/signal.c b/arch/frv/kernel/signal.c
index 4a7a62c6e783..6b0a2b6fed6a 100644
--- a/arch/frv/kernel/signal.c
+++ b/arch/frv/kernel/signal.c
@@ -572,6 +572,8 @@ asmlinkage void do_notify_resume(__u32 thread_info_flags)
572 if (thread_info_flags & _TIF_NOTIFY_RESUME) { 572 if (thread_info_flags & _TIF_NOTIFY_RESUME) {
573 clear_thread_flag(TIF_NOTIFY_RESUME); 573 clear_thread_flag(TIF_NOTIFY_RESUME);
574 tracehook_notify_resume(__frame); 574 tracehook_notify_resume(__frame);
575 if (current->replacement_session_keyring)
576 key_replace_session_keyring();
575 } 577 }
576 578
577} /* end do_notify_resume() */ 579} /* end do_notify_resume() */
diff --git a/arch/h8300/include/asm/thread_info.h b/arch/h8300/include/asm/thread_info.h
index 8bbc8b0ee45d..70e67e47d020 100644
--- a/arch/h8300/include/asm/thread_info.h
+++ b/arch/h8300/include/asm/thread_info.h
@@ -89,6 +89,7 @@ static inline struct thread_info *current_thread_info(void)
89 TIF_NEED_RESCHED */ 89 TIF_NEED_RESCHED */
90#define TIF_MEMDIE 4 90#define TIF_MEMDIE 4
91#define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */ 91#define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */
92#define TIF_NOTIFY_RESUME 6 /* callback before returning to user */
92#define TIF_FREEZE 16 /* is freezing for suspend */ 93#define TIF_FREEZE 16 /* is freezing for suspend */
93 94
94/* as above, but as bit values */ 95/* as above, but as bit values */
@@ -97,6 +98,7 @@ static inline struct thread_info *current_thread_info(void)
97#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) 98#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
98#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) 99#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
99#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) 100#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
101#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
100#define _TIF_FREEZE (1<<TIF_FREEZE) 102#define _TIF_FREEZE (1<<TIF_FREEZE)
101 103
102#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */ 104#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
diff --git a/arch/h8300/kernel/signal.c b/arch/h8300/kernel/signal.c
index cf3472f7389b..af842c369d24 100644
--- a/arch/h8300/kernel/signal.c
+++ b/arch/h8300/kernel/signal.c
@@ -39,6 +39,7 @@
39#include <linux/tty.h> 39#include <linux/tty.h>
40#include <linux/binfmts.h> 40#include <linux/binfmts.h>
41#include <linux/freezer.h> 41#include <linux/freezer.h>
42#include <linux/tracehook.h>
42 43
43#include <asm/setup.h> 44#include <asm/setup.h>
44#include <asm/uaccess.h> 45#include <asm/uaccess.h>
@@ -552,4 +553,11 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags)
552{ 553{
553 if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)) 554 if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
554 do_signal(regs, NULL); 555 do_signal(regs, NULL);
556
557 if (thread_info_flags & _TIF_NOTIFY_RESUME) {
558 clear_thread_flag(TIF_NOTIFY_RESUME);
559 tracehook_notify_resume(regs);
560 if (current->replacement_session_keyring)
561 key_replace_session_keyring();
562 }
555} 563}
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index 5a61b5c2e18f..8d3c79cd81e7 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -44,7 +44,6 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
44#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) 44#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
45 45
46#define get_dma_ops(dev) platform_dma_get_ops(dev) 46#define get_dma_ops(dev) platform_dma_get_ops(dev)
47#define flush_write_buffers()
48 47
49#include <asm-generic/dma-mapping-common.h> 48#include <asm-generic/dma-mapping-common.h>
50 49
@@ -69,6 +68,24 @@ dma_set_mask (struct device *dev, u64 mask)
69 return 0; 68 return 0;
70} 69}
71 70
71static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
72{
73 if (!dev->dma_mask)
74 return 0;
75
76 return addr + size <= *dev->dma_mask;
77}
78
79static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
80{
81 return paddr;
82}
83
84static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
85{
86 return daddr;
87}
88
72extern int dma_get_cache_alignment(void); 89extern int dma_get_cache_alignment(void);
73 90
74static inline void 91static inline void
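On ia64 the new dma_capable()/phys_to_dma()/dma_to_phys() helpers are an identity mapping plus a mask check; the powerpc versions further down apply get_dma_direct_offset(). The point of standardising them shows up in the powerpc dma-swiotlb.c hunks later in this patch, where the arch-private bus/phys conversions and address_needs_mapping hook are deleted: a generic layer can now make the bounce decision itself. A rough sketch of such a caller (the helper name and usage are assumptions, not code from this patch):

#include <linux/dma-mapping.h>
#include <linux/io.h>           /* virt_to_phys() */

static bool buffer_needs_bounce(struct device *dev, void *cpu_addr, size_t size)
{
        dma_addr_t dev_addr = phys_to_dma(dev, virt_to_phys(cpu_addr));

        /* If the device cannot reach the buffer directly, a bounce buffer
         * (e.g. swiotlb) would have to be used instead. */
        return !dma_capable(dev, dev_addr, size);
}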
diff --git a/arch/ia64/kernel/dma-mapping.c b/arch/ia64/kernel/dma-mapping.c
index 39a3cd0a4173..f2c1600da097 100644
--- a/arch/ia64/kernel/dma-mapping.c
+++ b/arch/ia64/kernel/dma-mapping.c
@@ -10,7 +10,9 @@ EXPORT_SYMBOL(dma_ops);
10 10
11static int __init dma_init(void) 11static int __init dma_init(void)
12{ 12{
13 dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); 13 dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
14
15 return 0;
14} 16}
15fs_initcall(dma_init); 17fs_initcall(dma_init);
16 18
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 5d7c0e5b9e76..89969e950045 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -192,6 +192,8 @@ do_notify_resume_user(sigset_t *unused, struct sigscratch *scr, long in_syscall)
192 if (test_thread_flag(TIF_NOTIFY_RESUME)) { 192 if (test_thread_flag(TIF_NOTIFY_RESUME)) {
193 clear_thread_flag(TIF_NOTIFY_RESUME); 193 clear_thread_flag(TIF_NOTIFY_RESUME);
194 tracehook_notify_resume(&scr->pt); 194 tracehook_notify_resume(&scr->pt);
195 if (current->replacement_session_keyring)
196 key_replace_session_keyring();
195 } 197 }
196 198
197 /* copy user rbs to kernel rbs */ 199 /* copy user rbs to kernel rbs */
diff --git a/arch/ia64/lib/ip_fast_csum.S b/arch/ia64/lib/ip_fast_csum.S
index 1f86aeb2c948..620d9dc5220f 100644
--- a/arch/ia64/lib/ip_fast_csum.S
+++ b/arch/ia64/lib/ip_fast_csum.S
@@ -96,20 +96,22 @@ END(ip_fast_csum)
96GLOBAL_ENTRY(csum_ipv6_magic) 96GLOBAL_ENTRY(csum_ipv6_magic)
97 ld4 r20=[in0],4 97 ld4 r20=[in0],4
98 ld4 r21=[in1],4 98 ld4 r21=[in1],4
99 dep r15=in3,in2,32,16 99 zxt4 in2=in2
100 ;; 100 ;;
101 ld4 r22=[in0],4 101 ld4 r22=[in0],4
102 ld4 r23=[in1],4 102 ld4 r23=[in1],4
103 mux1 r15=r15,@rev 103 dep r15=in3,in2,32,16
104 ;; 104 ;;
105 ld4 r24=[in0],4 105 ld4 r24=[in0],4
106 ld4 r25=[in1],4 106 ld4 r25=[in1],4
107 shr.u r15=r15,16 107 mux1 r15=r15,@rev
108 add r16=r20,r21 108 add r16=r20,r21
109 add r17=r22,r23 109 add r17=r22,r23
110 zxt4 in4=in4
110 ;; 111 ;;
111 ld4 r26=[in0],4 112 ld4 r26=[in0],4
112 ld4 r27=[in1],4 113 ld4 r27=[in1],4
114 shr.u r15=r15,16
113 add r18=r24,r25 115 add r18=r24,r25
114 add r8=r16,r17 116 add r8=r16,r17
115 ;; 117 ;;
diff --git a/arch/ia64/xen/time.c b/arch/ia64/xen/time.c
index fb8332690179..dbeadb9c8e20 100644
--- a/arch/ia64/xen/time.c
+++ b/arch/ia64/xen/time.c
@@ -133,8 +133,7 @@ consider_steal_time(unsigned long new_itm)
133 account_idle_ticks(blocked); 133 account_idle_ticks(blocked);
134 run_local_timers(); 134 run_local_timers();
135 135
136 if (rcu_pending(cpu)) 136 rcu_check_callbacks(cpu, user_mode(get_irq_regs()));
137 rcu_check_callbacks(cpu, user_mode(get_irq_regs()));
138 137
139 scheduler_tick(); 138 scheduler_tick();
140 run_posix_cpu_timers(p); 139 run_posix_cpu_timers(p);
diff --git a/arch/m32r/include/asm/thread_info.h b/arch/m32r/include/asm/thread_info.h
index 07bb5bd00e2a..71578151a403 100644
--- a/arch/m32r/include/asm/thread_info.h
+++ b/arch/m32r/include/asm/thread_info.h
@@ -149,6 +149,7 @@ static inline unsigned int get_thread_fault_code(void)
149#define TIF_NEED_RESCHED 2 /* rescheduling necessary */ 149#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
150#define TIF_SINGLESTEP 3 /* restore singlestep on return to user mode */ 150#define TIF_SINGLESTEP 3 /* restore singlestep on return to user mode */
151#define TIF_IRET 4 /* return with iret */ 151#define TIF_IRET 4 /* return with iret */
152#define TIF_NOTIFY_RESUME 5 /* callback before returning to user */
152#define TIF_RESTORE_SIGMASK 8 /* restore signal mask in do_signal() */ 153#define TIF_RESTORE_SIGMASK 8 /* restore signal mask in do_signal() */
153#define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */ 154#define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
154#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */ 155#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */
@@ -160,6 +161,7 @@ static inline unsigned int get_thread_fault_code(void)
160#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) 161#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
161#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP) 162#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
162#define _TIF_IRET (1<<TIF_IRET) 163#define _TIF_IRET (1<<TIF_IRET)
164#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
163#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) 165#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
164#define _TIF_USEDFPU (1<<TIF_USEDFPU) 166#define _TIF_USEDFPU (1<<TIF_USEDFPU)
165#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) 167#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
diff --git a/arch/m32r/kernel/signal.c b/arch/m32r/kernel/signal.c
index 18124542a6eb..144b0f124fc7 100644
--- a/arch/m32r/kernel/signal.c
+++ b/arch/m32r/kernel/signal.c
@@ -21,6 +21,7 @@
21#include <linux/stddef.h> 21#include <linux/stddef.h>
22#include <linux/personality.h> 22#include <linux/personality.h>
23#include <linux/freezer.h> 23#include <linux/freezer.h>
24#include <linux/tracehook.h>
24#include <asm/cacheflush.h> 25#include <asm/cacheflush.h>
25#include <asm/ucontext.h> 26#include <asm/ucontext.h>
26#include <asm/uaccess.h> 27#include <asm/uaccess.h>
@@ -408,5 +409,12 @@ void do_notify_resume(struct pt_regs *regs, sigset_t *oldset,
408 if (thread_info_flags & _TIF_SIGPENDING) 409 if (thread_info_flags & _TIF_SIGPENDING)
409 do_signal(regs,oldset); 410 do_signal(regs,oldset);
410 411
412 if (thread_info_flags & _TIF_NOTIFY_RESUME) {
413 clear_thread_flag(TIF_NOTIFY_RESUME);
414 tracehook_notify_resume(regs);
415 if (current->replacement_session_keyring)
416 key_replace_session_keyring();
417 }
418
411 clear_thread_flag(TIF_IRET); 419 clear_thread_flag(TIF_IRET);
412} 420}
diff --git a/arch/m68k/include/asm/entry_mm.h b/arch/m68k/include/asm/entry_mm.h
index 5202f5a5b420..474125886218 100644
--- a/arch/m68k/include/asm/entry_mm.h
+++ b/arch/m68k/include/asm/entry_mm.h
@@ -46,7 +46,6 @@
46#define curptr a2 46#define curptr a2
47 47
48LFLUSH_I_AND_D = 0x00000808 48LFLUSH_I_AND_D = 0x00000808
49LSIGTRAP = 5
50 49
51/* process bits for task_struct.ptrace */ 50/* process bits for task_struct.ptrace */
52PT_TRACESYS_OFF = 3 51PT_TRACESYS_OFF = 3
@@ -118,9 +117,6 @@ PT_DTRACE_BIT = 2
118#define STR(X) STR1(X) 117#define STR(X) STR1(X)
119#define STR1(X) #X 118#define STR1(X) #X
120 119
121#define PT_OFF_ORIG_D0 0x24
122#define PT_OFF_FORMATVEC 0x32
123#define PT_OFF_SR 0x2C
124#define SAVE_ALL_INT \ 120#define SAVE_ALL_INT \
125 "clrl %%sp@-;" /* stk_adj */ \ 121 "clrl %%sp@-;" /* stk_adj */ \
126 "pea -1:w;" /* orig d0 = -1 */ \ 122 "pea -1:w;" /* orig d0 = -1 */ \
diff --git a/arch/m68k/include/asm/entry_no.h b/arch/m68k/include/asm/entry_no.h
index c2553d26273d..907ed03d792f 100644
--- a/arch/m68k/include/asm/entry_no.h
+++ b/arch/m68k/include/asm/entry_no.h
@@ -72,8 +72,8 @@ LENOSYS = 38
72 lea %sp@(-32),%sp /* space for 8 regs */ 72 lea %sp@(-32),%sp /* space for 8 regs */
73 moveml %d1-%d5/%a0-%a2,%sp@ 73 moveml %d1-%d5/%a0-%a2,%sp@
74 movel sw_usp,%a0 /* get usp */ 74 movel sw_usp,%a0 /* get usp */
75 movel %a0@-,%sp@(PT_PC) /* copy exception program counter */ 75 movel %a0@-,%sp@(PT_OFF_PC) /* copy exception program counter */
76 movel %a0@-,%sp@(PT_FORMATVEC)/* copy exception format/vector/sr */ 76 movel %a0@-,%sp@(PT_OFF_FORMATVEC)/*copy exception format/vector/sr */
77 bra 7f 77 bra 7f
78 6: 78 6:
79 clrl %sp@- /* stkadj */ 79 clrl %sp@- /* stkadj */
@@ -89,8 +89,8 @@ LENOSYS = 38
89 bnes 8f /* no, skip */ 89 bnes 8f /* no, skip */
90 move #0x2700,%sr /* disable intrs */ 90 move #0x2700,%sr /* disable intrs */
91 movel sw_usp,%a0 /* get usp */ 91 movel sw_usp,%a0 /* get usp */
92 movel %sp@(PT_PC),%a0@- /* copy exception program counter */ 92 movel %sp@(PT_OFF_PC),%a0@- /* copy exception program counter */
93 movel %sp@(PT_FORMATVEC),%a0@-/* copy exception format/vector/sr */ 93 movel %sp@(PT_OFF_FORMATVEC),%a0@-/*copy exception format/vector/sr */
94 moveml %sp@,%d1-%d5/%a0-%a2 94 moveml %sp@,%d1-%d5/%a0-%a2
95 lea %sp@(32),%sp /* space for 8 regs */ 95 lea %sp@(32),%sp /* space for 8 regs */
96 movel %sp@+,%d0 96 movel %sp@+,%d0
diff --git a/arch/m68k/include/asm/math-emu.h b/arch/m68k/include/asm/math-emu.h
index ddfab96403cb..5e9249b0014c 100644
--- a/arch/m68k/include/asm/math-emu.h
+++ b/arch/m68k/include/asm/math-emu.h
@@ -145,16 +145,16 @@ extern unsigned int fp_debugprint;
145 * these are only used during instruction decoding 145 * these are only used during instruction decoding
146 * where we always know how deep we're on the stack. 146 * where we always know how deep we're on the stack.
147 */ 147 */
148#define FPS_DO (PT_D0) 148#define FPS_DO (PT_OFF_D0)
149#define FPS_D1 (PT_D1) 149#define FPS_D1 (PT_OFF_D1)
150#define FPS_D2 (PT_D2) 150#define FPS_D2 (PT_OFF_D2)
151#define FPS_A0 (PT_A0) 151#define FPS_A0 (PT_OFF_A0)
152#define FPS_A1 (PT_A1) 152#define FPS_A1 (PT_OFF_A1)
153#define FPS_A2 (PT_A2) 153#define FPS_A2 (PT_OFF_A2)
154#define FPS_SR (PT_SR) 154#define FPS_SR (PT_OFF_SR)
155#define FPS_PC (PT_PC) 155#define FPS_PC (PT_OFF_PC)
156#define FPS_EA (PT_PC+6) 156#define FPS_EA (PT_OFF_PC+6)
157#define FPS_PC2 (PT_PC+10) 157#define FPS_PC2 (PT_OFF_PC+10)
158 158
159.macro fp_get_fp_reg 159.macro fp_get_fp_reg
160 lea (FPD_FPREG,FPDATA,%d0.w*4),%a0 160 lea (FPD_FPREG,FPDATA,%d0.w*4),%a0
diff --git a/arch/m68k/include/asm/thread_info_mm.h b/arch/m68k/include/asm/thread_info_mm.h
index 6ea5c33b3c56..b6da3882be9b 100644
--- a/arch/m68k/include/asm/thread_info_mm.h
+++ b/arch/m68k/include/asm/thread_info_mm.h
@@ -1,6 +1,10 @@
1#ifndef _ASM_M68K_THREAD_INFO_H 1#ifndef _ASM_M68K_THREAD_INFO_H
2#define _ASM_M68K_THREAD_INFO_H 2#define _ASM_M68K_THREAD_INFO_H
3 3
4#ifndef ASM_OFFSETS_C
5#include <asm/asm-offsets.h>
6#endif
7#include <asm/current.h>
4#include <asm/types.h> 8#include <asm/types.h>
5#include <asm/page.h> 9#include <asm/page.h>
6 10
@@ -31,7 +35,12 @@ struct thread_info {
31#define init_thread_info (init_task.thread.info) 35#define init_thread_info (init_task.thread.info)
32#define init_stack (init_thread_union.stack) 36#define init_stack (init_thread_union.stack)
33 37
34#define task_thread_info(tsk) (&(tsk)->thread.info) 38#ifdef ASM_OFFSETS_C
39#define task_thread_info(tsk) ((struct thread_info *) NULL)
40#else
41#define task_thread_info(tsk) ((struct thread_info *)((char *)tsk+TASK_TINFO))
42#endif
43
35#define task_stack_page(tsk) ((tsk)->stack) 44#define task_stack_page(tsk) ((tsk)->stack)
36#define current_thread_info() task_thread_info(current) 45#define current_thread_info() task_thread_info(current)
37 46
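task_thread_info() now needs the generated TASK_TINFO offset, but the file that generates it (asm-offsets.c, next hunk) itself drags this header in before asm-offsets.h exists; the ASM_OFFSETS_C guard breaks that cycle by substituting a type-correct placeholder during the offsets pass. A self-contained reduction of the idiom, using hypothetical names rather than the kernel's, looks like this (compile once with -DGENERATING_OFFSETS for the offsets pass, once without for normal code):

#include <stddef.h>

struct task { long state; struct { int flags; } tinfo; };

#ifdef GENERATING_OFFSETS
/* Offsets are still being produced; give the compiler a placeholder. */
#define task_tinfo(t)   ((void *)0)
#else
/* In the kernel this constant comes from the generated asm-offsets.h. */
#define TASK_TINFO      offsetof(struct task, tinfo)
#define task_tinfo(t)   ((void *)((char *)(t) + TASK_TINFO))
#endif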
diff --git a/arch/m68k/kernel/asm-offsets.c b/arch/m68k/kernel/asm-offsets.c
index b1f012f6c493..73e5e581245b 100644
--- a/arch/m68k/kernel/asm-offsets.c
+++ b/arch/m68k/kernel/asm-offsets.c
@@ -8,6 +8,8 @@
8 * #defines from the assembly-language output. 8 * #defines from the assembly-language output.
9 */ 9 */
10 10
11#define ASM_OFFSETS_C
12
11#include <linux/stddef.h> 13#include <linux/stddef.h>
12#include <linux/sched.h> 14#include <linux/sched.h>
13#include <linux/kernel_stat.h> 15#include <linux/kernel_stat.h>
@@ -27,6 +29,9 @@ int main(void)
27 DEFINE(TASK_INFO, offsetof(struct task_struct, thread.info)); 29 DEFINE(TASK_INFO, offsetof(struct task_struct, thread.info));
28 DEFINE(TASK_MM, offsetof(struct task_struct, mm)); 30 DEFINE(TASK_MM, offsetof(struct task_struct, mm));
29 DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm)); 31 DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
32#ifdef CONFIG_MMU
33 DEFINE(TASK_TINFO, offsetof(struct task_struct, thread.info));
34#endif
30 35
31 /* offsets into the thread struct */ 36 /* offsets into the thread struct */
32 DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp)); 37 DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
@@ -44,20 +49,20 @@ int main(void)
44 DEFINE(TINFO_FLAGS, offsetof(struct thread_info, flags)); 49 DEFINE(TINFO_FLAGS, offsetof(struct thread_info, flags));
45 50
46 /* offsets into the pt_regs */ 51 /* offsets into the pt_regs */
47 DEFINE(PT_D0, offsetof(struct pt_regs, d0)); 52 DEFINE(PT_OFF_D0, offsetof(struct pt_regs, d0));
48 DEFINE(PT_ORIG_D0, offsetof(struct pt_regs, orig_d0)); 53 DEFINE(PT_OFF_ORIG_D0, offsetof(struct pt_regs, orig_d0));
49 DEFINE(PT_D1, offsetof(struct pt_regs, d1)); 54 DEFINE(PT_OFF_D1, offsetof(struct pt_regs, d1));
50 DEFINE(PT_D2, offsetof(struct pt_regs, d2)); 55 DEFINE(PT_OFF_D2, offsetof(struct pt_regs, d2));
51 DEFINE(PT_D3, offsetof(struct pt_regs, d3)); 56 DEFINE(PT_OFF_D3, offsetof(struct pt_regs, d3));
52 DEFINE(PT_D4, offsetof(struct pt_regs, d4)); 57 DEFINE(PT_OFF_D4, offsetof(struct pt_regs, d4));
53 DEFINE(PT_D5, offsetof(struct pt_regs, d5)); 58 DEFINE(PT_OFF_D5, offsetof(struct pt_regs, d5));
54 DEFINE(PT_A0, offsetof(struct pt_regs, a0)); 59 DEFINE(PT_OFF_A0, offsetof(struct pt_regs, a0));
55 DEFINE(PT_A1, offsetof(struct pt_regs, a1)); 60 DEFINE(PT_OFF_A1, offsetof(struct pt_regs, a1));
56 DEFINE(PT_A2, offsetof(struct pt_regs, a2)); 61 DEFINE(PT_OFF_A2, offsetof(struct pt_regs, a2));
57 DEFINE(PT_PC, offsetof(struct pt_regs, pc)); 62 DEFINE(PT_OFF_PC, offsetof(struct pt_regs, pc));
58 DEFINE(PT_SR, offsetof(struct pt_regs, sr)); 63 DEFINE(PT_OFF_SR, offsetof(struct pt_regs, sr));
59 /* bitfields are a bit difficult */ 64 /* bitfields are a bit difficult */
60 DEFINE(PT_VECTOR, offsetof(struct pt_regs, pc) + 4); 65 DEFINE(PT_OFF_FORMATVEC, offsetof(struct pt_regs, pc) + 4);
61 66
62 /* offsets into the irq_handler struct */ 67 /* offsets into the irq_handler struct */
63 DEFINE(IRQ_HANDLER, offsetof(struct irq_node, handler)); 68 DEFINE(IRQ_HANDLER, offsetof(struct irq_node, handler));
@@ -84,10 +89,10 @@ int main(void)
84 DEFINE(FONT_DESC_PREF, offsetof(struct font_desc, pref)); 89 DEFINE(FONT_DESC_PREF, offsetof(struct font_desc, pref));
85 90
86 /* signal defines */ 91 /* signal defines */
87 DEFINE(SIGSEGV, SIGSEGV); 92 DEFINE(LSIGSEGV, SIGSEGV);
88 DEFINE(SEGV_MAPERR, SEGV_MAPERR); 93 DEFINE(LSEGV_MAPERR, SEGV_MAPERR);
89 DEFINE(SIGTRAP, SIGTRAP); 94 DEFINE(LSIGTRAP, SIGTRAP);
90 DEFINE(TRAP_TRACE, TRAP_TRACE); 95 DEFINE(LTRAP_TRACE, TRAP_TRACE);
91 96
92 /* offsets into the custom struct */ 97 /* offsets into the custom struct */
93 DEFINE(CUSTOMBASE, &amiga_custom); 98 DEFINE(CUSTOMBASE, &amiga_custom);
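The renames above (PT_* to PT_OFF_*, and SIGSEGV/SEGV_MAPERR/SIGTRAP/TRAP_TRACE to L-prefixed names) follow from the thread_info change: asm-offsets.h is now pulled into ordinary C files, and presumably the old generated names would then collide with long-standing macros of the same name, such as the ptrace register indices in m68k <asm/ptrace.h> and the signal constants in <asm/signal.h>, which mean something entirely different from pt_regs byte offsets. An illustration of the clash the prefixes avoid (hypothetical include order, not code from the patch):

#include <asm/ptrace.h>         /* defines PT_D0 as a ptrace register index */
#include <asm/asm-offsets.h>    /* old scheme would re-#define PT_D0 as a
                                 * byte offset into struct pt_regs -> clash */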
diff --git a/arch/m68k/kernel/entry.S b/arch/m68k/kernel/entry.S
index c3735cd6207e..922f52e7ed1a 100644
--- a/arch/m68k/kernel/entry.S
+++ b/arch/m68k/kernel/entry.S
@@ -77,17 +77,17 @@ ENTRY(ret_from_fork)
77 jra .Lret_from_exception 77 jra .Lret_from_exception
78 78
79do_trace_entry: 79do_trace_entry:
80 movel #-ENOSYS,%sp@(PT_D0) | needed for strace 80 movel #-ENOSYS,%sp@(PT_OFF_D0)| needed for strace
81 subql #4,%sp 81 subql #4,%sp
82 SAVE_SWITCH_STACK 82 SAVE_SWITCH_STACK
83 jbsr syscall_trace 83 jbsr syscall_trace
84 RESTORE_SWITCH_STACK 84 RESTORE_SWITCH_STACK
85 addql #4,%sp 85 addql #4,%sp
86 movel %sp@(PT_ORIG_D0),%d0 86 movel %sp@(PT_OFF_ORIG_D0),%d0
87 cmpl #NR_syscalls,%d0 87 cmpl #NR_syscalls,%d0
88 jcs syscall 88 jcs syscall
89badsys: 89badsys:
90 movel #-ENOSYS,%sp@(PT_D0) 90 movel #-ENOSYS,%sp@(PT_OFF_D0)
91 jra ret_from_syscall 91 jra ret_from_syscall
92 92
93do_trace_exit: 93do_trace_exit:
@@ -103,7 +103,7 @@ ENTRY(ret_from_signal)
103 addql #4,%sp 103 addql #4,%sp
104/* on 68040 complete pending writebacks if any */ 104/* on 68040 complete pending writebacks if any */
105#ifdef CONFIG_M68040 105#ifdef CONFIG_M68040
106 bfextu %sp@(PT_VECTOR){#0,#4},%d0 106 bfextu %sp@(PT_OFF_FORMATVEC){#0,#4},%d0
107 subql #7,%d0 | bus error frame ? 107 subql #7,%d0 | bus error frame ?
108 jbne 1f 108 jbne 1f
109 movel %sp,%sp@- 109 movel %sp,%sp@-
@@ -127,7 +127,7 @@ ENTRY(system_call)
127 jcc badsys 127 jcc badsys
128syscall: 128syscall:
129 jbsr @(sys_call_table,%d0:l:4)@(0) 129 jbsr @(sys_call_table,%d0:l:4)@(0)
130 movel %d0,%sp@(PT_D0) | save the return value 130 movel %d0,%sp@(PT_OFF_D0) | save the return value
131ret_from_syscall: 131ret_from_syscall:
132 |oriw #0x0700,%sr 132 |oriw #0x0700,%sr
133 movew %curptr@(TASK_INFO+TINFO_FLAGS+2),%d0 133 movew %curptr@(TASK_INFO+TINFO_FLAGS+2),%d0
@@ -135,7 +135,7 @@ ret_from_syscall:
1351: RESTORE_ALL 1351: RESTORE_ALL
136 136
137syscall_exit_work: 137syscall_exit_work:
138 btst #5,%sp@(PT_SR) | check if returning to kernel 138 btst #5,%sp@(PT_OFF_SR) | check if returning to kernel
139 bnes 1b | if so, skip resched, signals 139 bnes 1b | if so, skip resched, signals
140 lslw #1,%d0 140 lslw #1,%d0
141 jcs do_trace_exit 141 jcs do_trace_exit
@@ -148,7 +148,7 @@ syscall_exit_work:
148 148
149ENTRY(ret_from_exception) 149ENTRY(ret_from_exception)
150.Lret_from_exception: 150.Lret_from_exception:
151 btst #5,%sp@(PT_SR) | check if returning to kernel 151 btst #5,%sp@(PT_OFF_SR) | check if returning to kernel
152 bnes 1f | if so, skip resched, signals 152 bnes 1f | if so, skip resched, signals
153 | only allow interrupts when we are really the last one on the 153 | only allow interrupts when we are really the last one on the
154 | kernel stack, otherwise stack overflow can occur during 154 | kernel stack, otherwise stack overflow can occur during
@@ -182,7 +182,7 @@ do_signal_return:
182 jbra resume_userspace 182 jbra resume_userspace
183 183
184do_delayed_trace: 184do_delayed_trace:
185 bclr #7,%sp@(PT_SR) | clear trace bit in SR 185 bclr #7,%sp@(PT_OFF_SR) | clear trace bit in SR
186 pea 1 | send SIGTRAP 186 pea 1 | send SIGTRAP
187 movel %curptr,%sp@- 187 movel %curptr,%sp@-
188 pea LSIGTRAP 188 pea LSIGTRAP
@@ -199,7 +199,7 @@ ENTRY(auto_inthandler)
199 GET_CURRENT(%d0) 199 GET_CURRENT(%d0)
200 addqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1) 200 addqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
201 | put exception # in d0 201 | put exception # in d0
202 bfextu %sp@(PT_VECTOR){#4,#10},%d0 202 bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0
203 subw #VEC_SPUR,%d0 203 subw #VEC_SPUR,%d0
204 204
205 movel %sp,%sp@- 205 movel %sp,%sp@-
@@ -216,7 +216,7 @@ ret_from_interrupt:
216 ALIGN 216 ALIGN
217ret_from_last_interrupt: 217ret_from_last_interrupt:
218 moveq #(~ALLOWINT>>8)&0xff,%d0 218 moveq #(~ALLOWINT>>8)&0xff,%d0
219 andb %sp@(PT_SR),%d0 219 andb %sp@(PT_OFF_SR),%d0
220 jne 2b 220 jne 2b
221 221
222 /* check if we need to do software interrupts */ 222 /* check if we need to do software interrupts */
@@ -232,7 +232,7 @@ ENTRY(user_inthandler)
232 GET_CURRENT(%d0) 232 GET_CURRENT(%d0)
233 addqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1) 233 addqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
234 | put exception # in d0 234 | put exception # in d0
235 bfextu %sp@(PT_VECTOR){#4,#10},%d0 235 bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0
236user_irqvec_fixup = . + 2 236user_irqvec_fixup = . + 2
237 subw #VEC_USER,%d0 237 subw #VEC_USER,%d0
238 238
diff --git a/arch/m68k/math-emu/fp_entry.S b/arch/m68k/math-emu/fp_entry.S
index 954b4f304a7d..a3fe1f348dfe 100644
--- a/arch/m68k/math-emu/fp_entry.S
+++ b/arch/m68k/math-emu/fp_entry.S
@@ -85,8 +85,8 @@ fp_err_ua2:
85fp_err_ua1: 85fp_err_ua1:
86 addq.l #4,%sp 86 addq.l #4,%sp
87 move.l %a0,-(%sp) 87 move.l %a0,-(%sp)
88 pea SEGV_MAPERR 88 pea LSEGV_MAPERR
89 pea SIGSEGV 89 pea LSIGSEGV
90 jsr fpemu_signal 90 jsr fpemu_signal
91 add.w #12,%sp 91 add.w #12,%sp
92 jra ret_from_exception 92 jra ret_from_exception
@@ -96,8 +96,8 @@ fp_err_ua1:
96 | it does not really belong here, but... 96 | it does not really belong here, but...
97fp_sendtrace060: 97fp_sendtrace060:
98 move.l (FPS_PC,%sp),-(%sp) 98 move.l (FPS_PC,%sp),-(%sp)
99 pea TRAP_TRACE 99 pea LTRAP_TRACE
100 pea SIGTRAP 100 pea LSIGTRAP
101 jsr fpemu_signal 101 jsr fpemu_signal
102 add.w #12,%sp 102 add.w #12,%sp
103 jra ret_from_exception 103 jra ret_from_exception
@@ -122,17 +122,17 @@ fp_get_data_reg:
122 .long fp_get_d6, fp_get_d7 122 .long fp_get_d6, fp_get_d7
123 123
124fp_get_d0: 124fp_get_d0:
125 move.l (PT_D0+8,%sp),%d0 125 move.l (PT_OFF_D0+8,%sp),%d0
126 printf PREGISTER,"{d0->%08x}",1,%d0 126 printf PREGISTER,"{d0->%08x}",1,%d0
127 rts 127 rts
128 128
129fp_get_d1: 129fp_get_d1:
130 move.l (PT_D1+8,%sp),%d0 130 move.l (PT_OFF_D1+8,%sp),%d0
131 printf PREGISTER,"{d1->%08x}",1,%d0 131 printf PREGISTER,"{d1->%08x}",1,%d0
132 rts 132 rts
133 133
134fp_get_d2: 134fp_get_d2:
135 move.l (PT_D2+8,%sp),%d0 135 move.l (PT_OFF_D2+8,%sp),%d0
136 printf PREGISTER,"{d2->%08x}",1,%d0 136 printf PREGISTER,"{d2->%08x}",1,%d0
137 rts 137 rts
138 138
@@ -173,35 +173,35 @@ fp_put_data_reg:
173 173
174fp_put_d0: 174fp_put_d0:
175 printf PREGISTER,"{d0<-%08x}",1,%d0 175 printf PREGISTER,"{d0<-%08x}",1,%d0
176 move.l %d0,(PT_D0+8,%sp) 176 move.l %d0,(PT_OFF_D0+8,%sp)
177 rts 177 rts
178 178
179fp_put_d1: 179fp_put_d1:
180 printf PREGISTER,"{d1<-%08x}",1,%d0 180 printf PREGISTER,"{d1<-%08x}",1,%d0
181 move.l %d0,(PT_D1+8,%sp) 181 move.l %d0,(PT_OFF_D1+8,%sp)
182 rts 182 rts
183 183
184fp_put_d2: 184fp_put_d2:
185 printf PREGISTER,"{d2<-%08x}",1,%d0 185 printf PREGISTER,"{d2<-%08x}",1,%d0
186 move.l %d0,(PT_D2+8,%sp) 186 move.l %d0,(PT_OFF_D2+8,%sp)
187 rts 187 rts
188 188
189fp_put_d3: 189fp_put_d3:
190 printf PREGISTER,"{d3<-%08x}",1,%d0 190 printf PREGISTER,"{d3<-%08x}",1,%d0
191| move.l %d0,%d3 191| move.l %d0,%d3
192 move.l %d0,(PT_D3+8,%sp) 192 move.l %d0,(PT_OFF_D3+8,%sp)
193 rts 193 rts
194 194
195fp_put_d4: 195fp_put_d4:
196 printf PREGISTER,"{d4<-%08x}",1,%d0 196 printf PREGISTER,"{d4<-%08x}",1,%d0
197| move.l %d0,%d4 197| move.l %d0,%d4
198 move.l %d0,(PT_D4+8,%sp) 198 move.l %d0,(PT_OFF_D4+8,%sp)
199 rts 199 rts
200 200
201fp_put_d5: 201fp_put_d5:
202 printf PREGISTER,"{d5<-%08x}",1,%d0 202 printf PREGISTER,"{d5<-%08x}",1,%d0
203| move.l %d0,%d5 203| move.l %d0,%d5
204 move.l %d0,(PT_D5+8,%sp) 204 move.l %d0,(PT_OFF_D5+8,%sp)
205 rts 205 rts
206 206
207fp_put_d6: 207fp_put_d6:
@@ -225,17 +225,17 @@ fp_get_addr_reg:
225 .long fp_get_a6, fp_get_a7 225 .long fp_get_a6, fp_get_a7
226 226
227fp_get_a0: 227fp_get_a0:
228 move.l (PT_A0+8,%sp),%a0 228 move.l (PT_OFF_A0+8,%sp),%a0
229 printf PREGISTER,"{a0->%08x}",1,%a0 229 printf PREGISTER,"{a0->%08x}",1,%a0
230 rts 230 rts
231 231
232fp_get_a1: 232fp_get_a1:
233 move.l (PT_A1+8,%sp),%a0 233 move.l (PT_OFF_A1+8,%sp),%a0
234 printf PREGISTER,"{a1->%08x}",1,%a0 234 printf PREGISTER,"{a1->%08x}",1,%a0
235 rts 235 rts
236 236
237fp_get_a2: 237fp_get_a2:
238 move.l (PT_A2+8,%sp),%a0 238 move.l (PT_OFF_A2+8,%sp),%a0
239 printf PREGISTER,"{a2->%08x}",1,%a0 239 printf PREGISTER,"{a2->%08x}",1,%a0
240 rts 240 rts
241 241
@@ -276,17 +276,17 @@ fp_put_addr_reg:
276 276
277fp_put_a0: 277fp_put_a0:
278 printf PREGISTER,"{a0<-%08x}",1,%a0 278 printf PREGISTER,"{a0<-%08x}",1,%a0
279 move.l %a0,(PT_A0+8,%sp) 279 move.l %a0,(PT_OFF_A0+8,%sp)
280 rts 280 rts
281 281
282fp_put_a1: 282fp_put_a1:
283 printf PREGISTER,"{a1<-%08x}",1,%a0 283 printf PREGISTER,"{a1<-%08x}",1,%a0
284 move.l %a0,(PT_A1+8,%sp) 284 move.l %a0,(PT_OFF_A1+8,%sp)
285 rts 285 rts
286 286
287fp_put_a2: 287fp_put_a2:
288 printf PREGISTER,"{a2<-%08x}",1,%a0 288 printf PREGISTER,"{a2<-%08x}",1,%a0
289 move.l %a0,(PT_A2+8,%sp) 289 move.l %a0,(PT_OFF_A2+8,%sp)
290 rts 290 rts
291 291
292fp_put_a3: 292fp_put_a3:
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index f9df720d2e40..01cc1630b66c 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -115,6 +115,7 @@ register struct thread_info *__current_thread_info __asm__("$28");
115#define TIF_NEED_RESCHED 2 /* rescheduling necessary */ 115#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
116#define TIF_SYSCALL_AUDIT 3 /* syscall auditing active */ 116#define TIF_SYSCALL_AUDIT 3 /* syscall auditing active */
117#define TIF_SECCOMP 4 /* secure computing */ 117#define TIF_SECCOMP 4 /* secure computing */
118#define TIF_NOTIFY_RESUME 5 /* callback before returning to user */
118#define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */ 119#define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */
119#define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */ 120#define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
120#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */ 121#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */
@@ -139,6 +140,7 @@ register struct thread_info *__current_thread_info __asm__("$28");
139#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) 140#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
140#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) 141#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
141#define _TIF_SECCOMP (1<<TIF_SECCOMP) 142#define _TIF_SECCOMP (1<<TIF_SECCOMP)
143#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
142#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) 144#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
143#define _TIF_USEDFPU (1<<TIF_USEDFPU) 145#define _TIF_USEDFPU (1<<TIF_USEDFPU)
144#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) 146#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 830c5ef9932b..6254041b942f 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -21,6 +21,7 @@
21#include <linux/compiler.h> 21#include <linux/compiler.h>
22#include <linux/syscalls.h> 22#include <linux/syscalls.h>
23#include <linux/uaccess.h> 23#include <linux/uaccess.h>
24#include <linux/tracehook.h>
24 25
25#include <asm/abi.h> 26#include <asm/abi.h>
26#include <asm/asm.h> 27#include <asm/asm.h>
@@ -700,4 +701,11 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
700 /* deal with pending signal delivery */ 701 /* deal with pending signal delivery */
701 if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)) 702 if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
702 do_signal(regs); 703 do_signal(regs);
704
705 if (thread_info_flags & _TIF_NOTIFY_RESUME) {
706 clear_thread_flag(TIF_NOTIFY_RESUME);
707 tracehook_notify_resume(regs);
708 if (current->replacement_session_keyring)
709 key_replace_session_keyring();
710 }
703} 711}
diff --git a/arch/mn10300/kernel/signal.c b/arch/mn10300/kernel/signal.c
index feb2f2e810db..a21f43bc68e2 100644
--- a/arch/mn10300/kernel/signal.c
+++ b/arch/mn10300/kernel/signal.c
@@ -568,5 +568,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags)
568 if (thread_info_flags & _TIF_NOTIFY_RESUME) { 568 if (thread_info_flags & _TIF_NOTIFY_RESUME) {
569 clear_thread_flag(TIF_NOTIFY_RESUME); 569 clear_thread_flag(TIF_NOTIFY_RESUME);
570 tracehook_notify_resume(__frame); 570 tracehook_notify_resume(__frame);
571 if (current->replacement_session_keyring)
572 key_replace_session_keyring();
571 } 573 }
572} 574}
diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h
index 4ce0edfbe969..ac775a76bff7 100644
--- a/arch/parisc/include/asm/thread_info.h
+++ b/arch/parisc/include/asm/thread_info.h
@@ -59,6 +59,7 @@ struct thread_info {
59#define TIF_MEMDIE 5 59#define TIF_MEMDIE 5
60#define TIF_RESTORE_SIGMASK 6 /* restore saved signal mask */ 60#define TIF_RESTORE_SIGMASK 6 /* restore saved signal mask */
61#define TIF_FREEZE 7 /* is freezing for suspend */ 61#define TIF_FREEZE 7 /* is freezing for suspend */
62#define TIF_NOTIFY_RESUME 8 /* callback before returning to user */
62 63
63#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) 64#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
64#define _TIF_SIGPENDING (1 << TIF_SIGPENDING) 65#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
@@ -67,8 +68,9 @@ struct thread_info {
67#define _TIF_32BIT (1 << TIF_32BIT) 68#define _TIF_32BIT (1 << TIF_32BIT)
68#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) 69#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
69#define _TIF_FREEZE (1 << TIF_FREEZE) 70#define _TIF_FREEZE (1 << TIF_FREEZE)
71#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
70 72
71#define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | \ 73#define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | \
72 _TIF_NEED_RESCHED | _TIF_RESTORE_SIGMASK) 74 _TIF_NEED_RESCHED | _TIF_RESTORE_SIGMASK)
73 75
74#endif /* __KERNEL__ */ 76#endif /* __KERNEL__ */
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index e552e547cb93..8c4712b74dc1 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -948,7 +948,7 @@ intr_check_sig:
948 /* As above */ 948 /* As above */
949 mfctl %cr30,%r1 949 mfctl %cr30,%r1
950 LDREG TI_FLAGS(%r1),%r19 950 LDREG TI_FLAGS(%r1),%r19
951 ldi (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK), %r20 951 ldi (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK|_TIF_NOTIFY_RESUME), %r20
952 and,COND(<>) %r19, %r20, %r0 952 and,COND(<>) %r19, %r20, %r0
953 b,n intr_restore /* skip past if we've nothing to do */ 953 b,n intr_restore /* skip past if we've nothing to do */
954 954
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
index f82544225e8e..8eb3c63c407a 100644
--- a/arch/parisc/kernel/signal.c
+++ b/arch/parisc/kernel/signal.c
@@ -25,6 +25,7 @@
25#include <linux/stddef.h> 25#include <linux/stddef.h>
26#include <linux/compat.h> 26#include <linux/compat.h>
27#include <linux/elf.h> 27#include <linux/elf.h>
28#include <linux/tracehook.h>
28#include <asm/ucontext.h> 29#include <asm/ucontext.h>
29#include <asm/rt_sigframe.h> 30#include <asm/rt_sigframe.h>
30#include <asm/uaccess.h> 31#include <asm/uaccess.h>
@@ -645,4 +646,11 @@ void do_notify_resume(struct pt_regs *regs, long in_syscall)
645 if (test_thread_flag(TIF_SIGPENDING) || 646 if (test_thread_flag(TIF_SIGPENDING) ||
646 test_thread_flag(TIF_RESTORE_SIGMASK)) 647 test_thread_flag(TIF_RESTORE_SIGMASK))
647 do_signal(regs, in_syscall); 648 do_signal(regs, in_syscall);
649
650 if (test_thread_flag(TIF_NOTIFY_RESUME)) {
651 clear_thread_flag(TIF_NOTIFY_RESUME);
652 tracehook_notify_resume(regs);
653 if (current->replacement_session_keyring)
654 key_replace_session_keyring();
655 }
648} 656}
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index b44aaabdd1a6..0c34371ec49c 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -424,6 +424,29 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
424#endif 424#endif
425} 425}
426 426
427static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
428{
429 struct dma_mapping_ops *ops = get_dma_ops(dev);
430
431 if (ops->addr_needs_map && ops->addr_needs_map(dev, addr, size))
432 return 0;
433
434 if (!dev->dma_mask)
435 return 0;
436
437 return addr + size <= *dev->dma_mask;
438}
439
440static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
441{
442 return paddr + get_dma_direct_offset(dev);
443}
444
445static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
446{
447 return daddr - get_dma_direct_offset(dev);
448}
449
427#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) 450#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
428#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) 451#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
429#ifdef CONFIG_NOT_COHERENT_CACHE 452#ifdef CONFIG_NOT_COHERENT_CACHE
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index eb17da781128..2a5da069714e 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -104,8 +104,8 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
104 else 104 else
105 pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte)); 105 pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));
106 106
107#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP) 107#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
108 /* Second case is 32-bit with 64-bit PTE in SMP mode. In this case, we 108 /* Second case is 32-bit with 64-bit PTE. In this case, we
109 * can just store as long as we do the two halves in the right order 109 * can just store as long as we do the two halves in the right order
110 * with a barrier in between. This is possible because we take care, 110 * with a barrier in between. This is possible because we take care,
111 * in the hash code, to pre-invalidate if the PTE was already hashed, 111 * in the hash code, to pre-invalidate if the PTE was already hashed,
@@ -140,7 +140,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
140 140
141#else 141#else
142 /* Anything else just stores the PTE normally. That covers all 64-bit 142 /* Anything else just stores the PTE normally. That covers all 64-bit
143 * cases, and 32-bit non-hash with 64-bit PTEs in UP mode 143 * cases, and 32-bit non-hash with 32-bit PTEs.
144 */ 144 */
145 *ptep = pte; 145 *ptep = pte;
146#endif 146#endif
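Dropping the CONFIG_SMP condition means the barrier-ordered two-half store is now used for every 32-bit build with 64-bit PTEs, not just SMP ones. The sketch below only illustrates the ordering argument made in the comment; which half carries the present bit, and the barrier used, are assumptions rather than details taken from the powerpc code:

#include <linux/types.h>

/* A walker that sees the present bit set is then guaranteed to also see
 * the other, already-written half of the new PTE. */
static inline void store_pte_in_halves(u32 *hi, u32 *lo, u64 pte)
{
        *hi = (u32)(pte >> 32); /* half without the present bit first */
        wmb();                  /* order the two 32-bit stores        */
        *lo = (u32)pte;         /* half carrying the present bit last */
}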
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index c3b193121f81..198266cf9e2d 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -54,7 +54,7 @@
54 * This returns the old value in the lock, so we succeeded 54 * This returns the old value in the lock, so we succeeded
55 * in getting the lock if the return value is 0. 55 * in getting the lock if the return value is 0.
56 */ 56 */
57static inline unsigned long __spin_trylock(raw_spinlock_t *lock) 57static inline unsigned long arch_spin_trylock(raw_spinlock_t *lock)
58{ 58{
59 unsigned long tmp, token; 59 unsigned long tmp, token;
60 60
@@ -76,7 +76,7 @@ static inline unsigned long __spin_trylock(raw_spinlock_t *lock)
76static inline int __raw_spin_trylock(raw_spinlock_t *lock) 76static inline int __raw_spin_trylock(raw_spinlock_t *lock)
77{ 77{
78 CLEAR_IO_SYNC; 78 CLEAR_IO_SYNC;
79 return __spin_trylock(lock) == 0; 79 return arch_spin_trylock(lock) == 0;
80} 80}
81 81
82/* 82/*
@@ -108,7 +108,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
108{ 108{
109 CLEAR_IO_SYNC; 109 CLEAR_IO_SYNC;
110 while (1) { 110 while (1) {
111 if (likely(__spin_trylock(lock) == 0)) 111 if (likely(arch_spin_trylock(lock) == 0))
112 break; 112 break;
113 do { 113 do {
114 HMT_low(); 114 HMT_low();
@@ -126,7 +126,7 @@ void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
126 126
127 CLEAR_IO_SYNC; 127 CLEAR_IO_SYNC;
128 while (1) { 128 while (1) {
129 if (likely(__spin_trylock(lock) == 0)) 129 if (likely(arch_spin_trylock(lock) == 0))
130 break; 130 break;
131 local_save_flags(flags_dis); 131 local_save_flags(flags_dis);
132 local_irq_restore(flags); 132 local_irq_restore(flags);
@@ -181,7 +181,7 @@ extern void __raw_spin_unlock_wait(raw_spinlock_t *lock);
181 * This returns the old value in the lock + 1, 181 * This returns the old value in the lock + 1,
182 * so we got a read lock if the return value is > 0. 182 * so we got a read lock if the return value is > 0.
183 */ 183 */
184static inline long __read_trylock(raw_rwlock_t *rw) 184static inline long arch_read_trylock(raw_rwlock_t *rw)
185{ 185{
186 long tmp; 186 long tmp;
187 187
@@ -205,7 +205,7 @@ static inline long __read_trylock(raw_rwlock_t *rw)
205 * This returns the old value in the lock, 205 * This returns the old value in the lock,
206 * so we got the write lock if the return value is 0. 206 * so we got the write lock if the return value is 0.
207 */ 207 */
208static inline long __write_trylock(raw_rwlock_t *rw) 208static inline long arch_write_trylock(raw_rwlock_t *rw)
209{ 209{
210 long tmp, token; 210 long tmp, token;
211 211
@@ -228,7 +228,7 @@ static inline long __write_trylock(raw_rwlock_t *rw)
228static inline void __raw_read_lock(raw_rwlock_t *rw) 228static inline void __raw_read_lock(raw_rwlock_t *rw)
229{ 229{
230 while (1) { 230 while (1) {
231 if (likely(__read_trylock(rw) > 0)) 231 if (likely(arch_read_trylock(rw) > 0))
232 break; 232 break;
233 do { 233 do {
234 HMT_low(); 234 HMT_low();
@@ -242,7 +242,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
242static inline void __raw_write_lock(raw_rwlock_t *rw) 242static inline void __raw_write_lock(raw_rwlock_t *rw)
243{ 243{
244 while (1) { 244 while (1) {
245 if (likely(__write_trylock(rw) == 0)) 245 if (likely(arch_write_trylock(rw) == 0))
246 break; 246 break;
247 do { 247 do {
248 HMT_low(); 248 HMT_low();
@@ -255,12 +255,12 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
255 255
256static inline int __raw_read_trylock(raw_rwlock_t *rw) 256static inline int __raw_read_trylock(raw_rwlock_t *rw)
257{ 257{
258 return __read_trylock(rw) > 0; 258 return arch_read_trylock(rw) > 0;
259} 259}
260 260
261static inline int __raw_write_trylock(raw_rwlock_t *rw) 261static inline int __raw_write_trylock(raw_rwlock_t *rw)
262{ 262{
263 return __write_trylock(rw) == 0; 263 return arch_write_trylock(rw) == 0;
264} 264}
265 265
266static inline void __raw_read_unlock(raw_rwlock_t *rw) 266static inline void __raw_read_unlock(raw_rwlock_t *rw)
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index b73396b93905..9619285f64e8 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -97,7 +97,7 @@ obj64-$(CONFIG_AUDIT) += compat_audit.o
97 97
98obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o 98obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
99obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o 99obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
100obj-$(CONFIG_PPC_PERF_CTRS) += perf_counter.o 100obj-$(CONFIG_PPC_PERF_CTRS) += perf_counter.o perf_callchain.o
101obj64-$(CONFIG_PPC_PERF_CTRS) += power4-pmu.o ppc970-pmu.o power5-pmu.o \ 101obj64-$(CONFIG_PPC_PERF_CTRS) += power4-pmu.o ppc970-pmu.o power5-pmu.o \
102 power5+-pmu.o power6-pmu.o power7-pmu.o 102 power5+-pmu.o power6-pmu.o power7-pmu.o
103obj32-$(CONFIG_PPC_PERF_CTRS) += mpc7450-pmu.o 103obj32-$(CONFIG_PPC_PERF_CTRS) += mpc7450-pmu.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 561b64652311..197b15646eeb 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -67,6 +67,8 @@ int main(void)
67 DEFINE(MMCONTEXTID, offsetof(struct mm_struct, context.id)); 67 DEFINE(MMCONTEXTID, offsetof(struct mm_struct, context.id));
68#ifdef CONFIG_PPC64 68#ifdef CONFIG_PPC64
69 DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context)); 69 DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context));
70 DEFINE(SIGSEGV, SIGSEGV);
71 DEFINE(NMI_MASK, NMI_MASK);
70#else 72#else
71 DEFINE(THREAD_INFO, offsetof(struct task_struct, stack)); 73 DEFINE(THREAD_INFO, offsetof(struct task_struct, stack));
72#endif /* CONFIG_PPC64 */ 74#endif /* CONFIG_PPC64 */
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
index 68ccf11e4f19..e8a57de85bcf 100644
--- a/arch/powerpc/kernel/dma-swiotlb.c
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -24,50 +24,12 @@
24int swiotlb __read_mostly; 24int swiotlb __read_mostly;
25unsigned int ppc_swiotlb_enable; 25unsigned int ppc_swiotlb_enable;
26 26
27void *swiotlb_bus_to_virt(struct device *hwdev, dma_addr_t addr)
28{
29 unsigned long pfn = PFN_DOWN(swiotlb_bus_to_phys(hwdev, addr));
30 void *pageaddr = page_address(pfn_to_page(pfn));
31
32 if (pageaddr != NULL)
33 return pageaddr + (addr % PAGE_SIZE);
34 return NULL;
35}
36
37dma_addr_t swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr)
38{
39 return paddr + get_dma_direct_offset(hwdev);
40}
41
42phys_addr_t swiotlb_bus_to_phys(struct device *hwdev, dma_addr_t baddr)
43
44{
45 return baddr - get_dma_direct_offset(hwdev);
46}
47
48/*
49 * Determine if an address needs bounce buffering via swiotlb.
50 * Going forward I expect the swiotlb code to generalize on using
51 * a dma_ops->addr_needs_map, and this function will move from here to the
52 * generic swiotlb code.
53 */
54int
55swiotlb_arch_address_needs_mapping(struct device *hwdev, dma_addr_t addr,
56 size_t size)
57{
58 struct dma_mapping_ops *dma_ops = get_dma_ops(hwdev);
59
60 BUG_ON(!dma_ops);
61 return dma_ops->addr_needs_map(hwdev, addr, size);
62}
63
64/* 27/*
65 * Determine if an address is reachable by a pci device, or if we must bounce. 28 * Determine if an address is reachable by a pci device, or if we must bounce.
66 */ 29 */
67static int 30static int
68swiotlb_pci_addr_needs_map(struct device *hwdev, dma_addr_t addr, size_t size) 31swiotlb_pci_addr_needs_map(struct device *hwdev, dma_addr_t addr, size_t size)
69{ 32{
70 u64 mask = dma_get_mask(hwdev);
71 dma_addr_t max; 33 dma_addr_t max;
72 struct pci_controller *hose; 34 struct pci_controller *hose;
73 struct pci_dev *pdev = to_pci_dev(hwdev); 35 struct pci_dev *pdev = to_pci_dev(hwdev);
@@ -79,16 +41,9 @@ swiotlb_pci_addr_needs_map(struct device *hwdev, dma_addr_t addr, size_t size)
79 if ((addr + size > max) | (addr < hose->dma_window_base_cur)) 41 if ((addr + size > max) | (addr < hose->dma_window_base_cur))
80 return 1; 42 return 1;
81 43
82 return !is_buffer_dma_capable(mask, addr, size); 44 return 0;
83}
84
85static int
86swiotlb_addr_needs_map(struct device *hwdev, dma_addr_t addr, size_t size)
87{
88 return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
89} 45}
90 46
91
92/* 47/*
93 * At the moment, all platforms that use this code only require 48 * At the moment, all platforms that use this code only require
94 * swiotlb to be used if we're operating on HIGHMEM. Since 49 * swiotlb to be used if we're operating on HIGHMEM. Since
@@ -104,7 +59,6 @@ struct dma_mapping_ops swiotlb_dma_ops = {
104 .dma_supported = swiotlb_dma_supported, 59 .dma_supported = swiotlb_dma_supported,
105 .map_page = swiotlb_map_page, 60 .map_page = swiotlb_map_page,
106 .unmap_page = swiotlb_unmap_page, 61 .unmap_page = swiotlb_unmap_page,
107 .addr_needs_map = swiotlb_addr_needs_map,
108 .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu, 62 .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
109 .sync_single_range_for_device = swiotlb_sync_single_range_for_device, 63 .sync_single_range_for_device = swiotlb_sync_single_range_for_device,
110 .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, 64 .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index eb898112e577..8ac85e08ffae 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -729,6 +729,11 @@ BEGIN_FTR_SECTION
729 bne- do_ste_alloc /* If so handle it */ 729 bne- do_ste_alloc /* If so handle it */
730END_FTR_SECTION_IFCLR(CPU_FTR_SLB) 730END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
731 731
732 clrrdi r11,r1,THREAD_SHIFT
733 lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */
734 andis. r0,r0,NMI_MASK@h /* (i.e. an irq when soft-disabled) */
735 bne 77f /* then don't call hash_page now */
736
732 /* 737 /*
733 * On iSeries, we soft-disable interrupts here, then 738 * On iSeries, we soft-disable interrupts here, then
734 * hard-enable interrupts so that the hash_page code can spin on 739 * hard-enable interrupts so that the hash_page code can spin on
@@ -833,6 +838,20 @@ handle_page_fault:
833 bl .low_hash_fault 838 bl .low_hash_fault
834 b .ret_from_except 839 b .ret_from_except
835 840
841/*
842 * We come here as a result of a DSI at a point where we don't want
843 * to call hash_page, such as when we are accessing memory (possibly
844 * user memory) inside a PMU interrupt that occurred while interrupts
845 * were soft-disabled. We want to invoke the exception handler for
846 * the access, or panic if there isn't a handler.
847 */
84877: bl .save_nvgprs
849 mr r4,r3
850 addi r3,r1,STACK_FRAME_OVERHEAD
851 li r5,SIGSEGV
852 bl .bad_page_fault
853 b .ret_from_except
854
836 /* here we have a segment miss */ 855 /* here we have a segment miss */
837do_ste_alloc: 856do_ste_alloc:
838 bl .ste_allocate /* try to insert stab entry */ 857 bl .ste_allocate /* try to insert stab entry */
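The added check makes the DSI path refuse to call hash_page() when the fault was taken in what the code treats as an "NMI", i.e. an interrupt that arrived while interrupts were soft-disabled; the new perf_callchain.c below reads user memory from exactly such PMU interrupts. In C terms the test is essentially an in_nmi() check on the interrupted thread's preempt count; a sketch, not code from the patch:

#include <linux/hardirq.h>      /* NMI_MASK, preempt_count() */
#include <linux/types.h>

static inline bool fault_taken_in_nmi(void)
{
        /* Mirrors the assembly: TI_PREEMPT & NMI_MASK != 0 means we must
         * not re-enter hash_page(); bad_page_fault() handles it instead. */
        return (preempt_count() & NMI_MASK) != 0;
}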
diff --git a/arch/powerpc/kernel/perf_callchain.c b/arch/powerpc/kernel/perf_callchain.c
new file mode 100644
index 000000000000..f74b62c67511
--- /dev/null
+++ b/arch/powerpc/kernel/perf_callchain.c
@@ -0,0 +1,527 @@
1/*
2 * Performance counter callchain support - powerpc architecture code
3 *
4 * Copyright © 2009 Paul Mackerras, IBM Corporation.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11#include <linux/kernel.h>
12#include <linux/sched.h>
13#include <linux/perf_counter.h>
14#include <linux/percpu.h>
15#include <linux/uaccess.h>
16#include <linux/mm.h>
17#include <asm/ptrace.h>
18#include <asm/pgtable.h>
19#include <asm/sigcontext.h>
20#include <asm/ucontext.h>
21#include <asm/vdso.h>
22#ifdef CONFIG_PPC64
23#include "ppc32.h"
24#endif
25
26/*
27 * Store another value in a callchain_entry.
28 */
29static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip)
30{
31 unsigned int nr = entry->nr;
32
33 if (nr < PERF_MAX_STACK_DEPTH) {
34 entry->ip[nr] = ip;
35 entry->nr = nr + 1;
36 }
37}
38
39/*
40 * Is sp valid as the address of the next kernel stack frame after prev_sp?
41 * The next frame may be in a different stack area but should not go
42 * back down in the same stack area.
43 */
44static int valid_next_sp(unsigned long sp, unsigned long prev_sp)
45{
46 if (sp & 0xf)
47 return 0; /* must be 16-byte aligned */
48 if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
49 return 0;
50 if (sp >= prev_sp + STACK_FRAME_OVERHEAD)
51 return 1;
52 /*
53 * sp could decrease when we jump off an interrupt stack
54 * back to the regular process stack.
55 */
56 if ((sp & ~(THREAD_SIZE - 1)) != (prev_sp & ~(THREAD_SIZE - 1)))
57 return 1;
58 return 0;
59}
60
61static void perf_callchain_kernel(struct pt_regs *regs,
62 struct perf_callchain_entry *entry)
63{
64 unsigned long sp, next_sp;
65 unsigned long next_ip;
66 unsigned long lr;
67 long level = 0;
68 unsigned long *fp;
69
70 lr = regs->link;
71 sp = regs->gpr[1];
72 callchain_store(entry, PERF_CONTEXT_KERNEL);
73 callchain_store(entry, regs->nip);
74
75 if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
76 return;
77
78 for (;;) {
79 fp = (unsigned long *) sp;
80 next_sp = fp[0];
81
82 if (next_sp == sp + STACK_INT_FRAME_SIZE &&
83 fp[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
84 /*
85 * This looks like an interrupt frame for an
86 * interrupt that occurred in the kernel
87 */
88 regs = (struct pt_regs *)(sp + STACK_FRAME_OVERHEAD);
89 next_ip = regs->nip;
90 lr = regs->link;
91 level = 0;
92 callchain_store(entry, PERF_CONTEXT_KERNEL);
93
94 } else {
95 if (level == 0)
96 next_ip = lr;
97 else
98 next_ip = fp[STACK_FRAME_LR_SAVE];
99
100 /*
101 * We can't tell which of the first two addresses
102 * we get are valid, but we can filter out the
103 * obviously bogus ones here. We replace them
104 * with 0 rather than removing them entirely so
105 * that userspace can tell which is which.
106 */
107 if ((level == 1 && next_ip == lr) ||
108 (level <= 1 && !kernel_text_address(next_ip)))
109 next_ip = 0;
110
111 ++level;
112 }
113
114 callchain_store(entry, next_ip);
115 if (!valid_next_sp(next_sp, sp))
116 return;
117 sp = next_sp;
118 }
119}
120
121#ifdef CONFIG_PPC64
122
123#ifdef CONFIG_HUGETLB_PAGE
124#define is_huge_psize(pagesize) (HPAGE_SHIFT && mmu_huge_psizes[pagesize])
125#else
126#define is_huge_psize(pagesize) 0
127#endif
128
129/*
130 * On 64-bit we don't want to invoke hash_page on user addresses from
131 * interrupt context, so if the access faults, we read the page tables
132 * to find which page (if any) is mapped and access it directly.
133 */
134static int read_user_stack_slow(void __user *ptr, void *ret, int nb)
135{
136 pgd_t *pgdir;
137 pte_t *ptep, pte;
138 int pagesize;
139 unsigned long addr = (unsigned long) ptr;
140 unsigned long offset;
141 unsigned long pfn;
142 void *kaddr;
143
144 pgdir = current->mm->pgd;
145 if (!pgdir)
146 return -EFAULT;
147
148 pagesize = get_slice_psize(current->mm, addr);
149
150 /* align address to page boundary */
151 offset = addr & ((1ul << mmu_psize_defs[pagesize].shift) - 1);
152 addr -= offset;
153
154 if (is_huge_psize(pagesize))
155 ptep = huge_pte_offset(current->mm, addr);
156 else
157 ptep = find_linux_pte(pgdir, addr);
158
159 if (ptep == NULL)
160 return -EFAULT;
161 pte = *ptep;
162 if (!pte_present(pte) || !(pte_val(pte) & _PAGE_USER))
163 return -EFAULT;
164 pfn = pte_pfn(pte);
165 if (!page_is_ram(pfn))
166 return -EFAULT;
167
168 /* no highmem to worry about here */
169 kaddr = pfn_to_kaddr(pfn);
170 memcpy(ret, kaddr + offset, nb);
171 return 0;
172}
173
174static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret)
175{
176 if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned long) ||
177 ((unsigned long)ptr & 7))
178 return -EFAULT;
179
180 if (!__get_user_inatomic(*ret, ptr))
181 return 0;
182
183 return read_user_stack_slow(ptr, ret, 8);
184}
185
186static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
187{
188 if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
189 ((unsigned long)ptr & 3))
190 return -EFAULT;
191
192 if (!__get_user_inatomic(*ret, ptr))
193 return 0;
194
195 return read_user_stack_slow(ptr, ret, 4);
196}
197
198static inline int valid_user_sp(unsigned long sp, int is_64)
199{
200 if (!sp || (sp & 7) || sp > (is_64 ? TASK_SIZE : 0x100000000UL) - 32)
201 return 0;
202 return 1;
203}
204
205/*
206 * 64-bit user processes use the same stack frame for RT and non-RT signals.
207 */
208struct signal_frame_64 {
209 char dummy[__SIGNAL_FRAMESIZE];
210 struct ucontext uc;
211 unsigned long unused[2];
212 unsigned int tramp[6];
213 struct siginfo *pinfo;
214 void *puc;
215 struct siginfo info;
216 char abigap[288];
217};
218
219static int is_sigreturn_64_address(unsigned long nip, unsigned long fp)
220{
221 if (nip == fp + offsetof(struct signal_frame_64, tramp))
222 return 1;
223 if (vdso64_rt_sigtramp && current->mm->context.vdso_base &&
224 nip == current->mm->context.vdso_base + vdso64_rt_sigtramp)
225 return 1;
226 return 0;
227}
228
229/*
230 * Do some sanity checking on the signal frame pointed to by sp.
231 * We check the pinfo and puc pointers in the frame.
232 */
233static int sane_signal_64_frame(unsigned long sp)
234{
235 struct signal_frame_64 __user *sf;
236 unsigned long pinfo, puc;
237
238 sf = (struct signal_frame_64 __user *) sp;
239 if (read_user_stack_64((unsigned long __user *) &sf->pinfo, &pinfo) ||
240 read_user_stack_64((unsigned long __user *) &sf->puc, &puc))
241 return 0;
242 return pinfo == (unsigned long) &sf->info &&
243 puc == (unsigned long) &sf->uc;
244}
245
246static void perf_callchain_user_64(struct pt_regs *regs,
247 struct perf_callchain_entry *entry)
248{
249 unsigned long sp, next_sp;
250 unsigned long next_ip;
251 unsigned long lr;
252 long level = 0;
253 struct signal_frame_64 __user *sigframe;
254 unsigned long __user *fp, *uregs;
255
256 next_ip = regs->nip;
257 lr = regs->link;
258 sp = regs->gpr[1];
259 callchain_store(entry, PERF_CONTEXT_USER);
260 callchain_store(entry, next_ip);
261
262 for (;;) {
263 fp = (unsigned long __user *) sp;
264 if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp))
265 return;
266 if (level > 0 && read_user_stack_64(&fp[2], &next_ip))
267 return;
268
 269		/*
 270		 * Note: the next_sp - sp >= signal frame size check
 271		 * is also true when next_sp < sp (the unsigned subtraction
 272		 * wraps around), which can happen when transitioning from
 273		 * an alternate signal stack to the normal stack.
 274		 */
275 if (next_sp - sp >= sizeof(struct signal_frame_64) &&
276 (is_sigreturn_64_address(next_ip, sp) ||
277 (level <= 1 && is_sigreturn_64_address(lr, sp))) &&
278 sane_signal_64_frame(sp)) {
279 /*
 280			 * This looks like a signal frame
281 */
282 sigframe = (struct signal_frame_64 __user *) sp;
283 uregs = sigframe->uc.uc_mcontext.gp_regs;
284 if (read_user_stack_64(&uregs[PT_NIP], &next_ip) ||
285 read_user_stack_64(&uregs[PT_LNK], &lr) ||
286 read_user_stack_64(&uregs[PT_R1], &sp))
287 return;
288 level = 0;
289 callchain_store(entry, PERF_CONTEXT_USER);
290 callchain_store(entry, next_ip);
291 continue;
292 }
293
294 if (level == 0)
295 next_ip = lr;
296 callchain_store(entry, next_ip);
297 ++level;
298 sp = next_sp;
299 }
300}
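
perf_callchain_user_64() walks user frames using only the 64-bit ABI convention that the word at the stack pointer (fp[0]) is the back chain to the caller's frame and the third word (fp[2], offset 16) is the saved LR. The sketch below just illustrates that layout; the struct is an assumption for demonstration, not a kernel type.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct ppc64_frame_sketch {		/* illustrative only */
	uint64_t back_chain;		/* fp[0]: caller's stack pointer */
	uint64_t cr_save;		/* fp[1] */
	uint64_t lr_save;		/* fp[2]: caller's return address */
};

int main(void)
{
	printf("back chain at offset %zu, saved LR at offset %zu\n",
	       offsetof(struct ppc64_frame_sketch, back_chain),
	       offsetof(struct ppc64_frame_sketch, lr_save));
	return 0;
}
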
301
302static inline int current_is_64bit(void)
303{
304 /*
305 * We can't use test_thread_flag() here because we may be on an
306 * interrupt stack, and the thread flags don't get copied over
307 * from the thread_info on the main stack to the interrupt stack.
308 */
309 return !test_ti_thread_flag(task_thread_info(current), TIF_32BIT);
310}
311
312#else /* CONFIG_PPC64 */
313/*
 314 * On 32-bit we just access the address and let hash_page create an
 315 * HPTE if necessary, so there is no need to fall back to reading
316 * the page tables. Since this is called at interrupt level,
317 * do_page_fault() won't treat a DSI as a page fault.
318 */
319static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
320{
321 if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
322 ((unsigned long)ptr & 3))
323 return -EFAULT;
324
325 return __get_user_inatomic(*ret, ptr);
326}
327
328static inline void perf_callchain_user_64(struct pt_regs *regs,
329 struct perf_callchain_entry *entry)
330{
331}
332
333static inline int current_is_64bit(void)
334{
335 return 0;
336}
337
338static inline int valid_user_sp(unsigned long sp, int is_64)
339{
340 if (!sp || (sp & 7) || sp > TASK_SIZE - 32)
341 return 0;
342 return 1;
343}
344
345#define __SIGNAL_FRAMESIZE32 __SIGNAL_FRAMESIZE
346#define sigcontext32 sigcontext
347#define mcontext32 mcontext
348#define ucontext32 ucontext
349#define compat_siginfo_t struct siginfo
350
351#endif /* CONFIG_PPC64 */
352
353/*
354 * Layout for non-RT signal frames
355 */
356struct signal_frame_32 {
357 char dummy[__SIGNAL_FRAMESIZE32];
358 struct sigcontext32 sctx;
359 struct mcontext32 mctx;
360 int abigap[56];
361};
362
363/*
364 * Layout for RT signal frames
365 */
366struct rt_signal_frame_32 {
367 char dummy[__SIGNAL_FRAMESIZE32 + 16];
368 compat_siginfo_t info;
369 struct ucontext32 uc;
370 int abigap[56];
371};
372
373static int is_sigreturn_32_address(unsigned int nip, unsigned int fp)
374{
375 if (nip == fp + offsetof(struct signal_frame_32, mctx.mc_pad))
376 return 1;
377 if (vdso32_sigtramp && current->mm->context.vdso_base &&
378 nip == current->mm->context.vdso_base + vdso32_sigtramp)
379 return 1;
380 return 0;
381}
382
383static int is_rt_sigreturn_32_address(unsigned int nip, unsigned int fp)
384{
385 if (nip == fp + offsetof(struct rt_signal_frame_32,
386 uc.uc_mcontext.mc_pad))
387 return 1;
388 if (vdso32_rt_sigtramp && current->mm->context.vdso_base &&
389 nip == current->mm->context.vdso_base + vdso32_rt_sigtramp)
390 return 1;
391 return 0;
392}
393
394static int sane_signal_32_frame(unsigned int sp)
395{
396 struct signal_frame_32 __user *sf;
397 unsigned int regs;
398
399 sf = (struct signal_frame_32 __user *) (unsigned long) sp;
400 if (read_user_stack_32((unsigned int __user *) &sf->sctx.regs, &regs))
401 return 0;
402 return regs == (unsigned long) &sf->mctx;
403}
404
405static int sane_rt_signal_32_frame(unsigned int sp)
406{
407 struct rt_signal_frame_32 __user *sf;
408 unsigned int regs;
409
410 sf = (struct rt_signal_frame_32 __user *) (unsigned long) sp;
411 if (read_user_stack_32((unsigned int __user *) &sf->uc.uc_regs, &regs))
412 return 0;
413 return regs == (unsigned long) &sf->uc.uc_mcontext;
414}
415
416static unsigned int __user *signal_frame_32_regs(unsigned int sp,
417 unsigned int next_sp, unsigned int next_ip)
418{
419 struct mcontext32 __user *mctx = NULL;
420 struct signal_frame_32 __user *sf;
421 struct rt_signal_frame_32 __user *rt_sf;
422
423 /*
 424	 * Note: the next_sp - sp >= signal frame size check
 425	 * is also true when next_sp < sp (the unsigned subtraction
 426	 * wraps around), for example when transitioning from an
 427	 * alternate signal stack to the normal stack.
428 */
429 if (next_sp - sp >= sizeof(struct signal_frame_32) &&
430 is_sigreturn_32_address(next_ip, sp) &&
431 sane_signal_32_frame(sp)) {
432 sf = (struct signal_frame_32 __user *) (unsigned long) sp;
433 mctx = &sf->mctx;
434 }
435
436 if (!mctx && next_sp - sp >= sizeof(struct rt_signal_frame_32) &&
437 is_rt_sigreturn_32_address(next_ip, sp) &&
438 sane_rt_signal_32_frame(sp)) {
439 rt_sf = (struct rt_signal_frame_32 __user *) (unsigned long) sp;
440 mctx = &rt_sf->uc.uc_mcontext;
441 }
442
443 if (!mctx)
444 return NULL;
445 return mctx->mc_gregs;
446}
447
448static void perf_callchain_user_32(struct pt_regs *regs,
449 struct perf_callchain_entry *entry)
450{
451 unsigned int sp, next_sp;
452 unsigned int next_ip;
453 unsigned int lr;
454 long level = 0;
455 unsigned int __user *fp, *uregs;
456
457 next_ip = regs->nip;
458 lr = regs->link;
459 sp = regs->gpr[1];
460 callchain_store(entry, PERF_CONTEXT_USER);
461 callchain_store(entry, next_ip);
462
463 while (entry->nr < PERF_MAX_STACK_DEPTH) {
464 fp = (unsigned int __user *) (unsigned long) sp;
465 if (!valid_user_sp(sp, 0) || read_user_stack_32(fp, &next_sp))
466 return;
467 if (level > 0 && read_user_stack_32(&fp[1], &next_ip))
468 return;
469
470 uregs = signal_frame_32_regs(sp, next_sp, next_ip);
471 if (!uregs && level <= 1)
472 uregs = signal_frame_32_regs(sp, next_sp, lr);
473 if (uregs) {
474 /*
 475			 * This looks like a signal frame, so restart
476 * the stack trace with the values in it.
477 */
478 if (read_user_stack_32(&uregs[PT_NIP], &next_ip) ||
479 read_user_stack_32(&uregs[PT_LNK], &lr) ||
480 read_user_stack_32(&uregs[PT_R1], &sp))
481 return;
482 level = 0;
483 callchain_store(entry, PERF_CONTEXT_USER);
484 callchain_store(entry, next_ip);
485 continue;
486 }
487
488 if (level == 0)
489 next_ip = lr;
490 callchain_store(entry, next_ip);
491 ++level;
492 sp = next_sp;
493 }
494}
495
496/*
497 * Since we can't get PMU interrupts inside a PMU interrupt handler,
498 * we don't need separate irq and nmi entries here.
499 */
500static DEFINE_PER_CPU(struct perf_callchain_entry, callchain);
501
502struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
503{
504 struct perf_callchain_entry *entry = &__get_cpu_var(callchain);
505
506 entry->nr = 0;
507
508 if (current->pid == 0) /* idle task? */
509 return entry;
510
511 if (!user_mode(regs)) {
512 perf_callchain_kernel(regs, entry);
513 if (current->mm)
514 regs = task_pt_regs(current);
515 else
516 regs = NULL;
517 }
518
519 if (regs) {
520 if (current_is_64bit())
521 perf_callchain_user_64(regs, entry);
522 else
523 perf_callchain_user_32(regs, entry);
524 }
525
526 return entry;
527}
diff --git a/arch/powerpc/kernel/power7-pmu.c b/arch/powerpc/kernel/power7-pmu.c
index 388cf57ad827..018d094d92f9 100644
--- a/arch/powerpc/kernel/power7-pmu.c
+++ b/arch/powerpc/kernel/power7-pmu.c
@@ -317,7 +317,7 @@ static int power7_generic_events[] = {
317 */ 317 */
318static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { 318static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
319 [C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */ 319 [C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */
320 [C(OP_READ)] = { 0x400f0, 0xc880 }, 320 [C(OP_READ)] = { 0xc880, 0x400f0 },
321 [C(OP_WRITE)] = { 0, 0x300f0 }, 321 [C(OP_WRITE)] = { 0, 0x300f0 },
322 [C(OP_PREFETCH)] = { 0xd8b8, 0 }, 322 [C(OP_PREFETCH)] = { 0xd8b8, 0 },
323 }, 323 },
@@ -327,8 +327,8 @@ static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
327 [C(OP_PREFETCH)] = { 0x408a, 0 }, 327 [C(OP_PREFETCH)] = { 0x408a, 0 },
328 }, 328 },
329 [C(LL)] = { /* RESULT_ACCESS RESULT_MISS */ 329 [C(LL)] = { /* RESULT_ACCESS RESULT_MISS */
330 [C(OP_READ)] = { 0x6080, 0x6084 }, 330 [C(OP_READ)] = { 0x16080, 0x26080 },
331 [C(OP_WRITE)] = { 0x6082, 0x6086 }, 331 [C(OP_WRITE)] = { 0x16082, 0x26082 },
332 [C(OP_PREFETCH)] = { 0, 0 }, 332 [C(OP_PREFETCH)] = { 0, 0 },
333 }, 333 },
334 [C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */ 334 [C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */
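
The hunk above moves the POWER7 L1D read codes into the correct RESULT_ACCESS/RESULT_MISS columns and corrects the last-level cache codes. For readers less familiar with the generic cache-event table, the sketch below shows how such a [cache][op][result] array is indexed; the enum names are local stand-ins, not the kernel's PERF_COUNT_HW_CACHE_* identifiers, and only one entry is filled in.

#include <stdio.h>

enum cache  { L1D, LL, DTLB, CACHE_MAX };
enum op     { OP_READ, OP_WRITE, OP_PREFETCH, OP_MAX };
enum result { RES_ACCESS, RES_MISS, RES_MAX };

static const int events[CACHE_MAX][OP_MAX][RES_MAX] = {
	[L1D] = {
		/*             ACCESS    MISS   */
		[OP_READ] = { 0xc880, 0x400f0 },	/* corrected ordering */
	},
};

int main(void)
{
	printf("L1D read miss event code: %#x\n",
	       events[L1D][OP_READ][RES_MISS]);
	return 0;
}
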
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 5b7038f248b6..a685652effeb 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -92,15 +92,13 @@ static inline void create_shadowed_slbe(unsigned long ea, int ssize,
92 : "memory" ); 92 : "memory" );
93} 93}
94 94
95void slb_flush_and_rebolt(void) 95static void __slb_flush_and_rebolt(void)
96{ 96{
97 /* If you change this make sure you change SLB_NUM_BOLTED 97 /* If you change this make sure you change SLB_NUM_BOLTED
98 * appropriately too. */ 98 * appropriately too. */
99 unsigned long linear_llp, vmalloc_llp, lflags, vflags; 99 unsigned long linear_llp, vmalloc_llp, lflags, vflags;
100 unsigned long ksp_esid_data, ksp_vsid_data; 100 unsigned long ksp_esid_data, ksp_vsid_data;
101 101
102 WARN_ON(!irqs_disabled());
103
104 linear_llp = mmu_psize_defs[mmu_linear_psize].sllp; 102 linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
105 vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp; 103 vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
106 lflags = SLB_VSID_KERNEL | linear_llp; 104 lflags = SLB_VSID_KERNEL | linear_llp;
@@ -117,12 +115,6 @@ void slb_flush_and_rebolt(void)
117 ksp_vsid_data = get_slb_shadow()->save_area[2].vsid; 115 ksp_vsid_data = get_slb_shadow()->save_area[2].vsid;
118 } 116 }
119 117
120 /*
121 * We can't take a PMU exception in the following code, so hard
122 * disable interrupts.
123 */
124 hard_irq_disable();
125
126 /* We need to do this all in asm, so we're sure we don't touch 118 /* We need to do this all in asm, so we're sure we don't touch
127 * the stack between the slbia and rebolting it. */ 119 * the stack between the slbia and rebolting it. */
128 asm volatile("isync\n" 120 asm volatile("isync\n"
@@ -139,6 +131,21 @@ void slb_flush_and_rebolt(void)
139 : "memory"); 131 : "memory");
140} 132}
141 133
134void slb_flush_and_rebolt(void)
135{
136
137 WARN_ON(!irqs_disabled());
138
139 /*
140 * We can't take a PMU exception in the following code, so hard
141 * disable interrupts.
142 */
143 hard_irq_disable();
144
145 __slb_flush_and_rebolt();
146 get_paca()->slb_cache_ptr = 0;
147}
148
142void slb_vmalloc_update(void) 149void slb_vmalloc_update(void)
143{ 150{
144 unsigned long vflags; 151 unsigned long vflags;
@@ -180,12 +187,20 @@ static inline int esids_match(unsigned long addr1, unsigned long addr2)
180/* Flush all user entries from the segment table of the current processor. */ 187/* Flush all user entries from the segment table of the current processor. */
181void switch_slb(struct task_struct *tsk, struct mm_struct *mm) 188void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
182{ 189{
183 unsigned long offset = get_paca()->slb_cache_ptr; 190 unsigned long offset;
184 unsigned long slbie_data = 0; 191 unsigned long slbie_data = 0;
185 unsigned long pc = KSTK_EIP(tsk); 192 unsigned long pc = KSTK_EIP(tsk);
186 unsigned long stack = KSTK_ESP(tsk); 193 unsigned long stack = KSTK_ESP(tsk);
187 unsigned long unmapped_base; 194 unsigned long unmapped_base;
188 195
196 /*
197 * We need interrupts hard-disabled here, not just soft-disabled,
198 * so that a PMU interrupt can't occur, which might try to access
 199 * user memory (to get a stack trace) and possibly cause an SLB miss
200 * which would update the slb_cache/slb_cache_ptr fields in the PACA.
201 */
202 hard_irq_disable();
203 offset = get_paca()->slb_cache_ptr;
189 if (!cpu_has_feature(CPU_FTR_NO_SLBIE_B) && 204 if (!cpu_has_feature(CPU_FTR_NO_SLBIE_B) &&
190 offset <= SLB_CACHE_ENTRIES) { 205 offset <= SLB_CACHE_ENTRIES) {
191 int i; 206 int i;
@@ -200,7 +215,7 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
200 } 215 }
201 asm volatile("isync" : : : "memory"); 216 asm volatile("isync" : : : "memory");
202 } else { 217 } else {
203 slb_flush_and_rebolt(); 218 __slb_flush_and_rebolt();
204 } 219 }
205 220
206 /* Workaround POWER5 < DD2.1 issue */ 221 /* Workaround POWER5 < DD2.1 issue */
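
The comment added above explains the change for slb.c, and the stab.c hunk below repeats it for the segment table: the per-CPU cache pointer must not be read while a PMU interrupt can still run and update it. As a rough user-space analogue, sketched under the assumption that a signal handler plays the role of the PMU interrupt, the reader blocks the signal before sampling the shared value.

#include <signal.h>
#include <stdio.h>

static volatile sig_atomic_t cache_ptr;	/* stands in for slb_cache_ptr */

static void prof_handler(int sig)
{
	(void)sig;
	cache_ptr = 0;		/* the asynchronous side may reset the cache */
}

int main(void)
{
	sigset_t block, old;
	int snapshot;

	signal(SIGPROF, prof_handler);
	sigemptyset(&block);
	sigaddset(&block, SIGPROF);

	sigprocmask(SIG_BLOCK, &block, &old);	/* "hard disable" the handler */
	snapshot = cache_ptr;			/* value stays consistent while used */
	sigprocmask(SIG_SETMASK, &old, NULL);	/* re-enable */

	printf("snapshot %d\n", snapshot);
	return 0;
}
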
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index 98cd1dc2ae75..ab5fb48b3e90 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -164,7 +164,7 @@ void switch_stab(struct task_struct *tsk, struct mm_struct *mm)
164{ 164{
165 struct stab_entry *stab = (struct stab_entry *) get_paca()->stab_addr; 165 struct stab_entry *stab = (struct stab_entry *) get_paca()->stab_addr;
166 struct stab_entry *ste; 166 struct stab_entry *ste;
167 unsigned long offset = __get_cpu_var(stab_cache_ptr); 167 unsigned long offset;
168 unsigned long pc = KSTK_EIP(tsk); 168 unsigned long pc = KSTK_EIP(tsk);
169 unsigned long stack = KSTK_ESP(tsk); 169 unsigned long stack = KSTK_ESP(tsk);
170 unsigned long unmapped_base; 170 unsigned long unmapped_base;
@@ -172,6 +172,15 @@ void switch_stab(struct task_struct *tsk, struct mm_struct *mm)
172 /* Force previous translations to complete. DRENG */ 172 /* Force previous translations to complete. DRENG */
173 asm volatile("isync" : : : "memory"); 173 asm volatile("isync" : : : "memory");
174 174
175 /*
176 * We need interrupts hard-disabled here, not just soft-disabled,
177 * so that a PMU interrupt can't occur, which might try to access
 178 * user memory (to get a stack trace) and possibly cause an STAB miss
179 * which would update the stab_cache/stab_cache_ptr per-cpu variables.
180 */
181 hard_irq_disable();
182
183 offset = __get_cpu_var(stab_cache_ptr);
175 if (offset <= NR_STAB_CACHE_ENTRIES) { 184 if (offset <= NR_STAB_CACHE_ENTRIES) {
176 int i; 185 int i;
177 186
diff --git a/arch/powerpc/sysdev/xilinx_intc.c b/arch/powerpc/sysdev/xilinx_intc.c
index 3ee1fd37bbfc..40edad520770 100644
--- a/arch/powerpc/sysdev/xilinx_intc.c
+++ b/arch/powerpc/sysdev/xilinx_intc.c
@@ -234,7 +234,6 @@ static void xilinx_i8259_cascade(unsigned int irq, struct irq_desc *desc)
234 generic_handle_irq(cascade_irq); 234 generic_handle_irq(cascade_irq);
235 235
236 /* Let xilinx_intc end the interrupt */ 236 /* Let xilinx_intc end the interrupt */
237 desc->chip->ack(irq);
238 desc->chip->unmask(irq); 237 desc->chip->unmask(irq);
239} 238}
240 239
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 2ae5d72f47ed..1c866efd217d 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -84,7 +84,7 @@ config S390
84 select HAVE_FUNCTION_TRACER 84 select HAVE_FUNCTION_TRACER
85 select HAVE_FUNCTION_TRACE_MCOUNT_TEST 85 select HAVE_FUNCTION_TRACE_MCOUNT_TEST
86 select HAVE_FTRACE_MCOUNT_RECORD 86 select HAVE_FTRACE_MCOUNT_RECORD
87 select HAVE_FTRACE_SYSCALLS 87 select HAVE_SYSCALL_TRACEPOINTS
88 select HAVE_DYNAMIC_FTRACE 88 select HAVE_DYNAMIC_FTRACE
89 select HAVE_FUNCTION_GRAPH_TRACER 89 select HAVE_FUNCTION_GRAPH_TRACER
90 select HAVE_DEFAULT_NO_SPIN_MUTEXES 90 select HAVE_DEFAULT_NO_SPIN_MUTEXES
@@ -95,7 +95,6 @@ config S390
95 select HAVE_ARCH_TRACEHOOK 95 select HAVE_ARCH_TRACEHOOK
96 select INIT_ALL_POSSIBLE 96 select INIT_ALL_POSSIBLE
97 select HAVE_PERF_COUNTERS 97 select HAVE_PERF_COUNTERS
98 select GENERIC_ATOMIC64 if !64BIT
99 98
100config SCHED_OMIT_FRAME_POINTER 99config SCHED_OMIT_FRAME_POINTER
101 bool 100 bool
@@ -481,13 +480,6 @@ config CMM_IUCV
481 Select this option to enable the special message interface to 480 Select this option to enable the special message interface to
482 the cooperative memory management. 481 the cooperative memory management.
483 482
484config PAGE_STATES
485 bool "Unused page notification"
486 help
487 This enables the notification of unused pages to the
488 hypervisor. The ESSA instruction is used to do the states
489 changes between a page that has content and the unused state.
490
491config APPLDATA_BASE 483config APPLDATA_BASE
492 bool "Linux - VM Monitor Stream, base infrastructure" 484 bool "Linux - VM Monitor Stream, base infrastructure"
493 depends on PROC_FS 485 depends on PROC_FS
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 0ff387cebf88..fc8fb20e7fc0 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -88,8 +88,7 @@ LDFLAGS_vmlinux := -e start
88head-y := arch/s390/kernel/head.o arch/s390/kernel/init_task.o 88head-y := arch/s390/kernel/head.o arch/s390/kernel/init_task.o
89 89
90core-y += arch/s390/mm/ arch/s390/kernel/ arch/s390/crypto/ \ 90core-y += arch/s390/mm/ arch/s390/kernel/ arch/s390/crypto/ \
91 arch/s390/appldata/ arch/s390/hypfs/ arch/s390/kvm/ \ 91 arch/s390/appldata/ arch/s390/hypfs/ arch/s390/kvm/
92 arch/s390/power/
93 92
94libs-y += arch/s390/lib/ 93libs-y += arch/s390/lib/
95drivers-y += drivers/s390/ 94drivers-y += drivers/s390/
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index 4aba83b31596..2bc479ab3a66 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -250,8 +250,9 @@ static int des3_128_setkey(struct crypto_tfm *tfm, const u8 *key,
250 const u8 *temp_key = key; 250 const u8 *temp_key = key;
251 u32 *flags = &tfm->crt_flags; 251 u32 *flags = &tfm->crt_flags;
252 252
253 if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE))) { 253 if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE)) &&
254 *flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED; 254 (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
255 *flags |= CRYPTO_TFM_RES_WEAK_KEY;
255 return -EINVAL; 256 return -EINVAL;
256 } 257 }
257 for (i = 0; i < 2; i++, temp_key += DES_KEY_SIZE) { 258 for (i = 0; i < 2; i++, temp_key += DES_KEY_SIZE) {
@@ -411,9 +412,9 @@ static int des3_192_setkey(struct crypto_tfm *tfm, const u8 *key,
411 412
412 if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) && 413 if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) &&
413 memcmp(&key[DES_KEY_SIZE], &key[DES_KEY_SIZE * 2], 414 memcmp(&key[DES_KEY_SIZE], &key[DES_KEY_SIZE * 2],
414 DES_KEY_SIZE))) { 415 DES_KEY_SIZE)) &&
415 416 (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
416 *flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED; 417 *flags |= CRYPTO_TFM_RES_WEAK_KEY;
417 return -EINVAL; 418 return -EINVAL;
418 } 419 }
419 for (i = 0; i < 3; i++, temp_key += DES_KEY_SIZE) { 420 for (i = 0; i < 3; i++, temp_key += DES_KEY_SIZE) {
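
The des3 setkey changes above stop treating a repeated DES key as a hard error: the degenerate key (K1 == K2, which collapses EDE to single DES) is now rejected only when the caller asked for weak-key checking, and it is reported as a weak key rather than a bad key schedule. A standalone sketch of that check follows; DES_KEY_LEN and the flag values are local placeholders, not the kernel definitions.

#include <stdio.h>
#include <string.h>

#define DES_KEY_LEN	8
#define REQ_WEAK_KEY	0x1	/* caller wants weak keys rejected */
#define RES_WEAK_KEY	0x2	/* result flag: the key was weak */

static int des3_128_check(const unsigned char *key, unsigned int req,
			  unsigned int *res)
{
	if (!memcmp(key, key + DES_KEY_LEN, DES_KEY_LEN) &&
	    (req & REQ_WEAK_KEY)) {
		*res |= RES_WEAK_KEY;
		return -1;
	}
	return 0;
}

int main(void)
{
	unsigned char key[2 * DES_KEY_LEN] = { 0 };	/* K1 == K2: degenerate */
	unsigned int res = 0;

	printf("%d (res flags %#x)\n",
	       des3_128_check(key, REQ_WEAK_KEY, &res), res);
	return 0;
}
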
diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c
index e85ba348722a..f6de7826c979 100644
--- a/arch/s390/crypto/sha1_s390.c
+++ b/arch/s390/crypto/sha1_s390.c
@@ -46,12 +46,38 @@ static int sha1_init(struct shash_desc *desc)
46 return 0; 46 return 0;
47} 47}
48 48
49static int sha1_export(struct shash_desc *desc, void *out)
50{
51 struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
52 struct sha1_state *octx = out;
53
54 octx->count = sctx->count;
55 memcpy(octx->state, sctx->state, sizeof(octx->state));
56 memcpy(octx->buffer, sctx->buf, sizeof(octx->buffer));
57 return 0;
58}
59
60static int sha1_import(struct shash_desc *desc, const void *in)
61{
62 struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
63 const struct sha1_state *ictx = in;
64
65 sctx->count = ictx->count;
66 memcpy(sctx->state, ictx->state, sizeof(ictx->state));
67 memcpy(sctx->buf, ictx->buffer, sizeof(ictx->buffer));
68 sctx->func = KIMD_SHA_1;
69 return 0;
70}
71
49static struct shash_alg alg = { 72static struct shash_alg alg = {
50 .digestsize = SHA1_DIGEST_SIZE, 73 .digestsize = SHA1_DIGEST_SIZE,
51 .init = sha1_init, 74 .init = sha1_init,
52 .update = s390_sha_update, 75 .update = s390_sha_update,
53 .final = s390_sha_final, 76 .final = s390_sha_final,
77 .export = sha1_export,
78 .import = sha1_import,
54 .descsize = sizeof(struct s390_sha_ctx), 79 .descsize = sizeof(struct s390_sha_ctx),
80 .statesize = sizeof(struct sha1_state),
55 .base = { 81 .base = {
56 .cra_name = "sha1", 82 .cra_name = "sha1",
57 .cra_driver_name= "sha1-s390", 83 .cra_driver_name= "sha1-s390",
diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c
index f9fefc569632..61a7db372121 100644
--- a/arch/s390/crypto/sha256_s390.c
+++ b/arch/s390/crypto/sha256_s390.c
@@ -42,12 +42,38 @@ static int sha256_init(struct shash_desc *desc)
42 return 0; 42 return 0;
43} 43}
44 44
45static int sha256_export(struct shash_desc *desc, void *out)
46{
47 struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
48 struct sha256_state *octx = out;
49
50 octx->count = sctx->count;
51 memcpy(octx->state, sctx->state, sizeof(octx->state));
52 memcpy(octx->buf, sctx->buf, sizeof(octx->buf));
53 return 0;
54}
55
56static int sha256_import(struct shash_desc *desc, const void *in)
57{
58 struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
59 const struct sha256_state *ictx = in;
60
61 sctx->count = ictx->count;
62 memcpy(sctx->state, ictx->state, sizeof(ictx->state));
63 memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
64 sctx->func = KIMD_SHA_256;
65 return 0;
66}
67
45static struct shash_alg alg = { 68static struct shash_alg alg = {
46 .digestsize = SHA256_DIGEST_SIZE, 69 .digestsize = SHA256_DIGEST_SIZE,
47 .init = sha256_init, 70 .init = sha256_init,
48 .update = s390_sha_update, 71 .update = s390_sha_update,
49 .final = s390_sha_final, 72 .final = s390_sha_final,
73 .export = sha256_export,
74 .import = sha256_import,
50 .descsize = sizeof(struct s390_sha_ctx), 75 .descsize = sizeof(struct s390_sha_ctx),
76 .statesize = sizeof(struct sha256_state),
51 .base = { 77 .base = {
52 .cra_name = "sha256", 78 .cra_name = "sha256",
53 .cra_driver_name= "sha256-s390", 79 .cra_driver_name= "sha256-s390",
diff --git a/arch/s390/crypto/sha512_s390.c b/arch/s390/crypto/sha512_s390.c
index 83192bfc8048..4bf73d0dc525 100644
--- a/arch/s390/crypto/sha512_s390.c
+++ b/arch/s390/crypto/sha512_s390.c
@@ -13,7 +13,10 @@
13 * 13 *
14 */ 14 */
15#include <crypto/internal/hash.h> 15#include <crypto/internal/hash.h>
16#include <crypto/sha.h>
17#include <linux/errno.h>
16#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/kernel.h>
17#include <linux/module.h> 20#include <linux/module.h>
18 21
19#include "sha.h" 22#include "sha.h"
@@ -37,12 +40,42 @@ static int sha512_init(struct shash_desc *desc)
37 return 0; 40 return 0;
38} 41}
39 42
43static int sha512_export(struct shash_desc *desc, void *out)
44{
45 struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
46 struct sha512_state *octx = out;
47
48 octx->count[0] = sctx->count;
49 octx->count[1] = 0;
50 memcpy(octx->state, sctx->state, sizeof(octx->state));
51 memcpy(octx->buf, sctx->buf, sizeof(octx->buf));
52 return 0;
53}
54
55static int sha512_import(struct shash_desc *desc, const void *in)
56{
57 struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
58 const struct sha512_state *ictx = in;
59
60 if (unlikely(ictx->count[1]))
61 return -ERANGE;
62 sctx->count = ictx->count[0];
63
64 memcpy(sctx->state, ictx->state, sizeof(ictx->state));
65 memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
66 sctx->func = KIMD_SHA_512;
67 return 0;
68}
69
40static struct shash_alg sha512_alg = { 70static struct shash_alg sha512_alg = {
41 .digestsize = SHA512_DIGEST_SIZE, 71 .digestsize = SHA512_DIGEST_SIZE,
42 .init = sha512_init, 72 .init = sha512_init,
43 .update = s390_sha_update, 73 .update = s390_sha_update,
44 .final = s390_sha_final, 74 .final = s390_sha_final,
75 .export = sha512_export,
76 .import = sha512_import,
45 .descsize = sizeof(struct s390_sha_ctx), 77 .descsize = sizeof(struct s390_sha_ctx),
78 .statesize = sizeof(struct sha512_state),
46 .base = { 79 .base = {
47 .cra_name = "sha512", 80 .cra_name = "sha512",
48 .cra_driver_name= "sha512-s390", 81 .cra_driver_name= "sha512-s390",
@@ -78,7 +111,10 @@ static struct shash_alg sha384_alg = {
78 .init = sha384_init, 111 .init = sha384_init,
79 .update = s390_sha_update, 112 .update = s390_sha_update,
80 .final = s390_sha_final, 113 .final = s390_sha_final,
114 .export = sha512_export,
115 .import = sha512_import,
81 .descsize = sizeof(struct s390_sha_ctx), 116 .descsize = sizeof(struct s390_sha_ctx),
117 .statesize = sizeof(struct sha512_state),
82 .base = { 118 .base = {
83 .cra_name = "sha384", 119 .cra_name = "sha384",
84 .cra_driver_name= "sha384-s390", 120 .cra_driver_name= "sha384-s390",
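
The export/import callbacks added for sha1, sha256, sha512 and sha384 let a caller checkpoint a partially hashed message and resume it later, possibly in a different descriptor. The fragment below is only a sketch of how that path is normally reached through the generic shash API from crypto/hash.h; it assumes both descriptors were set up by the caller for the same algorithm and leaves out allocation and most error handling.

/* assumes: #include <crypto/hash.h> and #include <crypto/sha.h> */
static int shash_export_import_demo(struct shash_desc *d1,
				    struct shash_desc *d2,
				    const u8 *msg, unsigned int len,
				    u8 *digest)
{
	u8 state[sizeof(struct sha512_state)];	/* covers the .statesize values above */
	int ret;

	ret = crypto_shash_init(d1);
	if (!ret)
		ret = crypto_shash_update(d1, msg, len / 2);
	if (!ret)
		ret = crypto_shash_export(d1, state);	/* reaches sha*_export() */
	if (!ret)
		ret = crypto_shash_import(d2, state);	/* reaches sha*_import() */
	if (!ret)
		ret = crypto_shash_update(d2, msg + len / 2, len - len / 2);
	if (!ret)
		ret = crypto_shash_final(d2, digest);
	return ret;
}
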
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index fcba206529f3..4e91a2573cc4 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -900,7 +900,7 @@ CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
900CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y 900CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
901CONFIG_HAVE_DYNAMIC_FTRACE=y 901CONFIG_HAVE_DYNAMIC_FTRACE=y
902CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y 902CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
903CONFIG_HAVE_FTRACE_SYSCALLS=y 903CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
904CONFIG_TRACING_SUPPORT=y 904CONFIG_TRACING_SUPPORT=y
905CONFIG_FTRACE=y 905CONFIG_FTRACE=y
906# CONFIG_FUNCTION_TRACER is not set 906# CONFIG_FUNCTION_TRACER is not set
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 5a805df216bb..bd9914b89488 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -355,11 +355,7 @@ static struct dentry *hypfs_create_file(struct super_block *sb,
355{ 355{
356 struct dentry *dentry; 356 struct dentry *dentry;
357 struct inode *inode; 357 struct inode *inode;
358 struct qstr qname;
359 358
360 qname.name = name;
361 qname.len = strlen(name);
362 qname.hash = full_name_hash(name, qname.len);
363 mutex_lock(&parent->d_inode->i_mutex); 359 mutex_lock(&parent->d_inode->i_mutex);
364 dentry = lookup_one_len(name, parent, strlen(name)); 360 dentry = lookup_one_len(name, parent, strlen(name));
365 if (IS_ERR(dentry)) { 361 if (IS_ERR(dentry)) {
@@ -426,7 +422,7 @@ struct dentry *hypfs_create_u64(struct super_block *sb, struct dentry *dir,
426 char tmp[TMP_SIZE]; 422 char tmp[TMP_SIZE];
427 struct dentry *dentry; 423 struct dentry *dentry;
428 424
429 snprintf(tmp, TMP_SIZE, "%lld\n", (unsigned long long int)value); 425 snprintf(tmp, TMP_SIZE, "%llu\n", (unsigned long long int)value);
430 buffer = kstrdup(tmp, GFP_KERNEL); 426 buffer = kstrdup(tmp, GFP_KERNEL);
431 if (!buffer) 427 if (!buffer)
432 return ERR_PTR(-ENOMEM); 428 return ERR_PTR(-ENOMEM);
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index c7d0abfb0f00..ae7c8f9f94a5 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -1,33 +1,23 @@
1#ifndef __ARCH_S390_ATOMIC__ 1#ifndef __ARCH_S390_ATOMIC__
2#define __ARCH_S390_ATOMIC__ 2#define __ARCH_S390_ATOMIC__
3 3
4#include <linux/compiler.h>
5#include <linux/types.h>
6
7/* 4/*
8 * include/asm-s390/atomic.h 5 * Copyright 1999,2009 IBM Corp.
6 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
7 * Denis Joseph Barrow,
8 * Arnd Bergmann <arndb@de.ibm.com>,
9 * 9 *
10 * S390 version 10 * Atomic operations that C can't guarantee us.
11 * Copyright (C) 1999-2005 IBM Deutschland Entwicklung GmbH, IBM Corporation 11 * Useful for resource counting etc.
 12 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), 12 * s390 uses 'Compare And Swap' for atomicity in SMP environment.
13 * Denis Joseph Barrow,
14 * Arnd Bergmann (arndb@de.ibm.com)
15 *
16 * Derived from "include/asm-i386/bitops.h"
17 * Copyright (C) 1992, Linus Torvalds
18 * 13 *
19 */ 14 */
20 15
21/* 16#include <linux/compiler.h>
22 * Atomic operations that C can't guarantee us. Useful for 17#include <linux/types.h>
23 * resource counting etc..
24 * S390 uses 'Compare And Swap' for atomicity in SMP enviroment
25 */
26 18
27#define ATOMIC_INIT(i) { (i) } 19#define ATOMIC_INIT(i) { (i) }
28 20
29#ifdef __KERNEL__
30
31#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) 21#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
32 22
33#define __CS_LOOP(ptr, op_val, op_string) ({ \ 23#define __CS_LOOP(ptr, op_val, op_string) ({ \
@@ -77,7 +67,7 @@ static inline void atomic_set(atomic_t *v, int i)
77 barrier(); 67 barrier();
78} 68}
79 69
80static __inline__ int atomic_add_return(int i, atomic_t * v) 70static inline int atomic_add_return(int i, atomic_t *v)
81{ 71{
82 return __CS_LOOP(v, i, "ar"); 72 return __CS_LOOP(v, i, "ar");
83} 73}
@@ -87,7 +77,7 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
87#define atomic_inc_return(_v) atomic_add_return(1, _v) 77#define atomic_inc_return(_v) atomic_add_return(1, _v)
88#define atomic_inc_and_test(_v) (atomic_add_return(1, _v) == 0) 78#define atomic_inc_and_test(_v) (atomic_add_return(1, _v) == 0)
89 79
90static __inline__ int atomic_sub_return(int i, atomic_t * v) 80static inline int atomic_sub_return(int i, atomic_t *v)
91{ 81{
92 return __CS_LOOP(v, i, "sr"); 82 return __CS_LOOP(v, i, "sr");
93} 83}
@@ -97,19 +87,19 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
97#define atomic_dec_return(_v) atomic_sub_return(1, _v) 87#define atomic_dec_return(_v) atomic_sub_return(1, _v)
98#define atomic_dec_and_test(_v) (atomic_sub_return(1, _v) == 0) 88#define atomic_dec_and_test(_v) (atomic_sub_return(1, _v) == 0)
99 89
100static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t * v) 90static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
101{ 91{
102 __CS_LOOP(v, ~mask, "nr"); 92 __CS_LOOP(v, ~mask, "nr");
103} 93}
104 94
105static __inline__ void atomic_set_mask(unsigned long mask, atomic_t * v) 95static inline void atomic_set_mask(unsigned long mask, atomic_t *v)
106{ 96{
107 __CS_LOOP(v, mask, "or"); 97 __CS_LOOP(v, mask, "or");
108} 98}
109 99
110#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) 100#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
111 101
112static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new) 102static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
113{ 103{
114#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) 104#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
115 asm volatile( 105 asm volatile(
@@ -127,7 +117,7 @@ static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new)
127 return old; 117 return old;
128} 118}
129 119
130static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) 120static inline int atomic_add_unless(atomic_t *v, int a, int u)
131{ 121{
132 int c, old; 122 int c, old;
133 c = atomic_read(v); 123 c = atomic_read(v);
@@ -146,9 +136,10 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
146 136
147#undef __CS_LOOP 137#undef __CS_LOOP
148 138
149#ifdef __s390x__
150#define ATOMIC64_INIT(i) { (i) } 139#define ATOMIC64_INIT(i) { (i) }
151 140
141#ifdef CONFIG_64BIT
142
152#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) 143#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
153 144
154#define __CSG_LOOP(ptr, op_val, op_string) ({ \ 145#define __CSG_LOOP(ptr, op_val, op_string) ({ \
@@ -162,7 +153,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
162 : "=&d" (old_val), "=&d" (new_val), \ 153 : "=&d" (old_val), "=&d" (new_val), \
163 "=Q" (((atomic_t *)(ptr))->counter) \ 154 "=Q" (((atomic_t *)(ptr))->counter) \
164 : "d" (op_val), "Q" (((atomic_t *)(ptr))->counter) \ 155 : "d" (op_val), "Q" (((atomic_t *)(ptr))->counter) \
165 : "cc", "memory" ); \ 156 : "cc", "memory"); \
166 new_val; \ 157 new_val; \
167}) 158})
168 159
@@ -180,7 +171,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
180 "=m" (((atomic_t *)(ptr))->counter) \ 171 "=m" (((atomic_t *)(ptr))->counter) \
181 : "a" (ptr), "d" (op_val), \ 172 : "a" (ptr), "d" (op_val), \
182 "m" (((atomic_t *)(ptr))->counter) \ 173 "m" (((atomic_t *)(ptr))->counter) \
183 : "cc", "memory" ); \ 174 : "cc", "memory"); \
184 new_val; \ 175 new_val; \
185}) 176})
186 177
@@ -198,39 +189,29 @@ static inline void atomic64_set(atomic64_t *v, long long i)
198 barrier(); 189 barrier();
199} 190}
200 191
201static __inline__ long long atomic64_add_return(long long i, atomic64_t * v) 192static inline long long atomic64_add_return(long long i, atomic64_t *v)
202{ 193{
203 return __CSG_LOOP(v, i, "agr"); 194 return __CSG_LOOP(v, i, "agr");
204} 195}
205#define atomic64_add(_i, _v) atomic64_add_return(_i, _v)
206#define atomic64_add_negative(_i, _v) (atomic64_add_return(_i, _v) < 0)
207#define atomic64_inc(_v) atomic64_add_return(1, _v)
208#define atomic64_inc_return(_v) atomic64_add_return(1, _v)
209#define atomic64_inc_and_test(_v) (atomic64_add_return(1, _v) == 0)
210 196
211static __inline__ long long atomic64_sub_return(long long i, atomic64_t * v) 197static inline long long atomic64_sub_return(long long i, atomic64_t *v)
212{ 198{
213 return __CSG_LOOP(v, i, "sgr"); 199 return __CSG_LOOP(v, i, "sgr");
214} 200}
215#define atomic64_sub(_i, _v) atomic64_sub_return(_i, _v)
216#define atomic64_sub_and_test(_i, _v) (atomic64_sub_return(_i, _v) == 0)
217#define atomic64_dec(_v) atomic64_sub_return(1, _v)
218#define atomic64_dec_return(_v) atomic64_sub_return(1, _v)
219#define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
220 201
221static __inline__ void atomic64_clear_mask(unsigned long mask, atomic64_t * v) 202static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
222{ 203{
223 __CSG_LOOP(v, ~mask, "ngr"); 204 __CSG_LOOP(v, ~mask, "ngr");
224} 205}
225 206
226static __inline__ void atomic64_set_mask(unsigned long mask, atomic64_t * v) 207static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
227{ 208{
228 __CSG_LOOP(v, mask, "ogr"); 209 __CSG_LOOP(v, mask, "ogr");
229} 210}
230 211
231#define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) 212#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
232 213
233static __inline__ long long atomic64_cmpxchg(atomic64_t *v, 214static inline long long atomic64_cmpxchg(atomic64_t *v,
234 long long old, long long new) 215 long long old, long long new)
235{ 216{
236#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) 217#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
@@ -249,8 +230,112 @@ static __inline__ long long atomic64_cmpxchg(atomic64_t *v,
249 return old; 230 return old;
250} 231}
251 232
252static __inline__ int atomic64_add_unless(atomic64_t *v, 233#undef __CSG_LOOP
253 long long a, long long u) 234
235#else /* CONFIG_64BIT */
236
237typedef struct {
238 long long counter;
239} atomic64_t;
240
241static inline long long atomic64_read(const atomic64_t *v)
242{
243 register_pair rp;
244
245 asm volatile(
246 " lm %0,%N0,0(%1)"
247 : "=&d" (rp)
248 : "a" (&v->counter), "m" (v->counter)
249 );
250 return rp.pair;
251}
252
253static inline void atomic64_set(atomic64_t *v, long long i)
254{
255 register_pair rp = {.pair = i};
256
257 asm volatile(
258 " stm %1,%N1,0(%2)"
259 : "=m" (v->counter)
260 : "d" (rp), "a" (&v->counter)
261 );
262}
263
264static inline long long atomic64_xchg(atomic64_t *v, long long new)
265{
266 register_pair rp_new = {.pair = new};
267 register_pair rp_old;
268
269 asm volatile(
270 " lm %0,%N0,0(%2)\n"
271 "0: cds %0,%3,0(%2)\n"
272 " jl 0b\n"
273 : "=&d" (rp_old), "+m" (v->counter)
274 : "a" (&v->counter), "d" (rp_new)
275 : "cc");
276 return rp_old.pair;
277}
278
279static inline long long atomic64_cmpxchg(atomic64_t *v,
280 long long old, long long new)
281{
282 register_pair rp_old = {.pair = old};
283 register_pair rp_new = {.pair = new};
284
285 asm volatile(
286 " cds %0,%3,0(%2)"
287 : "+&d" (rp_old), "+m" (v->counter)
288 : "a" (&v->counter), "d" (rp_new)
289 : "cc");
290 return rp_old.pair;
291}
292
293
294static inline long long atomic64_add_return(long long i, atomic64_t *v)
295{
296 long long old, new;
297
298 do {
299 old = atomic64_read(v);
300 new = old + i;
301 } while (atomic64_cmpxchg(v, old, new) != old);
302 return new;
303}
304
305static inline long long atomic64_sub_return(long long i, atomic64_t *v)
306{
307 long long old, new;
308
309 do {
310 old = atomic64_read(v);
311 new = old - i;
312 } while (atomic64_cmpxchg(v, old, new) != old);
313 return new;
314}
315
316static inline void atomic64_set_mask(unsigned long long mask, atomic64_t *v)
317{
318 long long old, new;
319
320 do {
321 old = atomic64_read(v);
322 new = old | mask;
323 } while (atomic64_cmpxchg(v, old, new) != old);
324}
325
326static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v)
327{
328 long long old, new;
329
330 do {
331 old = atomic64_read(v);
332 new = old & mask;
333 } while (atomic64_cmpxchg(v, old, new) != old);
334}
335
336#endif /* CONFIG_64BIT */
337
338static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
254{ 339{
255 long long c, old; 340 long long c, old;
256 c = atomic64_read(v); 341 c = atomic64_read(v);
@@ -265,15 +350,17 @@ static __inline__ int atomic64_add_unless(atomic64_t *v,
265 return c != u; 350 return c != u;
266} 351}
267 352
268#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) 353#define atomic64_add(_i, _v) atomic64_add_return(_i, _v)
269 354#define atomic64_add_negative(_i, _v) (atomic64_add_return(_i, _v) < 0)
270#undef __CSG_LOOP 355#define atomic64_inc(_v) atomic64_add_return(1, _v)
271 356#define atomic64_inc_return(_v) atomic64_add_return(1, _v)
272#else /* __s390x__ */ 357#define atomic64_inc_and_test(_v) (atomic64_add_return(1, _v) == 0)
273 358#define atomic64_sub(_i, _v) atomic64_sub_return(_i, _v)
274#include <asm-generic/atomic64.h> 359#define atomic64_sub_and_test(_i, _v) (atomic64_sub_return(_i, _v) == 0)
275 360#define atomic64_dec(_v) atomic64_sub_return(1, _v)
276#endif /* __s390x__ */ 361#define atomic64_dec_return(_v) atomic64_sub_return(1, _v)
362#define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
363#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
277 364
278#define smp_mb__before_atomic_dec() smp_mb() 365#define smp_mb__before_atomic_dec() smp_mb()
279#define smp_mb__after_atomic_dec() smp_mb() 366#define smp_mb__after_atomic_dec() smp_mb()
@@ -281,5 +368,5 @@ static __inline__ int atomic64_add_unless(atomic64_t *v,
281#define smp_mb__after_atomic_inc() smp_mb() 368#define smp_mb__after_atomic_inc() smp_mb()
282 369
283#include <asm-generic/atomic-long.h> 370#include <asm-generic/atomic-long.h>
284#endif /* __KERNEL__ */ 371
285#endif /* __ARCH_S390_ATOMIC__ */ 372#endif /* __ARCH_S390_ATOMIC__ */
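
On 31-bit kernels the new atomic64_t operations above are all built from atomic64_cmpxchg() retry loops around the CDS (compare double and swap) instruction. The standalone sketch below shows the same read/modify/compare-and-swap pattern, with the compiler's generic atomic builtins standing in for CDS.

#include <stdio.h>

static long long add_return_sketch(long long *v, long long i)
{
	long long old, new;

	do {
		old = __atomic_load_n(v, __ATOMIC_RELAXED);
		new = old + i;
		/* retry if another thread changed *v between the read and
		 * the compare-and-swap */
	} while (!__atomic_compare_exchange_n(v, &old, new, 0,
					      __ATOMIC_SEQ_CST,
					      __ATOMIC_SEQ_CST));
	return new;
}

int main(void)
{
	long long counter = 40;

	printf("%lld\n", add_return_sketch(&counter, 2));	/* prints 42 */
	return 0;
}
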
diff --git a/arch/s390/include/asm/checksum.h b/arch/s390/include/asm/checksum.h
index d5a8e7c1477c..6c00f6800a34 100644
--- a/arch/s390/include/asm/checksum.h
+++ b/arch/s390/include/asm/checksum.h
@@ -78,28 +78,11 @@ csum_partial_copy_nocheck (const void *src, void *dst, int len, __wsum sum)
78 */ 78 */
79static inline __sum16 csum_fold(__wsum sum) 79static inline __sum16 csum_fold(__wsum sum)
80{ 80{
81#ifndef __s390x__ 81 u32 csum = (__force u32) sum;
82 register_pair rp;
83 82
84 asm volatile( 83 csum += (csum >> 16) + (csum << 16);
85 " slr %N1,%N1\n" /* %0 = H L */ 84 csum >>= 16;
86 " lr %1,%0\n" /* %0 = H L, %1 = H L 0 0 */ 85 return (__force __sum16) ~csum;
87 " srdl %1,16\n" /* %0 = H L, %1 = 0 H L 0 */
88 " alr %1,%N1\n" /* %0 = H L, %1 = L H L 0 */
89 " alr %0,%1\n" /* %0 = H+L+C L+H */
90 " srl %0,16\n" /* %0 = H+L+C */
91 : "+&d" (sum), "=d" (rp) : : "cc");
92#else /* __s390x__ */
93 asm volatile(
94 " sr 3,3\n" /* %0 = H*65536 + L */
95 " lr 2,%0\n" /* %0 = H L, 2/3 = H L / 0 0 */
96 " srdl 2,16\n" /* %0 = H L, 2/3 = 0 H / L 0 */
97 " alr 2,3\n" /* %0 = H L, 2/3 = L H / L 0 */
98 " alr %0,2\n" /* %0 = H+L+C L+H */
99 " srl %0,16\n" /* %0 = H+L+C */
100 : "+&d" (sum) : : "cc", "2", "3");
101#endif /* __s390x__ */
102 return (__force __sum16) ~sum;
103} 86}
104 87
105/* 88/*
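
The rewritten csum_fold() replaces two inline-assembly variants with three lines of C: add the two 16-bit halves of the running sum (the end-around carry is picked up by the 32-bit add), keep the folded high half, and complement it. A standalone version for checking the arithmetic:

#include <stdio.h>
#include <stdint.h>

static uint16_t csum_fold_sketch(uint32_t csum)
{
	csum += (csum >> 16) + (csum << 16);	/* high half now holds low + high plus carry */
	csum >>= 16;				/* keep the folded half */
	return (uint16_t)~csum;
}

int main(void)
{
	/* halves 0x0001 and 0x2345 fold to 0x2346, complemented to 0xdcb9 */
	printf("%#x\n", csum_fold_sketch(0x00012345));
	return 0;
}
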
diff --git a/arch/s390/include/asm/chsc.h b/arch/s390/include/asm/chsc.h
index 807997f7414b..4943654ed7fd 100644
--- a/arch/s390/include/asm/chsc.h
+++ b/arch/s390/include/asm/chsc.h
@@ -125,4 +125,32 @@ struct chsc_cpd_info {
125#define CHSC_INFO_CPD _IOWR(CHSC_IOCTL_MAGIC, 0x87, struct chsc_cpd_info) 125#define CHSC_INFO_CPD _IOWR(CHSC_IOCTL_MAGIC, 0x87, struct chsc_cpd_info)
126#define CHSC_INFO_DCAL _IOWR(CHSC_IOCTL_MAGIC, 0x88, struct chsc_dcal) 126#define CHSC_INFO_DCAL _IOWR(CHSC_IOCTL_MAGIC, 0x88, struct chsc_dcal)
127 127
128#ifdef __KERNEL__
129
130struct css_general_char {
131 u64 : 12;
132 u32 dynio : 1; /* bit 12 */
133 u32 : 28;
134 u32 aif : 1; /* bit 41 */
135 u32 : 3;
136 u32 mcss : 1; /* bit 45 */
137 u32 fcs : 1; /* bit 46 */
138 u32 : 1;
139 u32 ext_mb : 1; /* bit 48 */
140 u32 : 7;
141 u32 aif_tdd : 1; /* bit 56 */
142 u32 : 1;
143 u32 qebsm : 1; /* bit 58 */
144 u32 : 8;
145 u32 aif_osa : 1; /* bit 67 */
146 u32 : 14;
147 u32 cib : 1; /* bit 82 */
148 u32 : 5;
149 u32 fcx : 1; /* bit 88 */
150 u32 : 7;
151}__attribute__((packed));
152
153extern struct css_general_char css_general_characteristics;
154
155#endif /* __KERNEL__ */
128#endif 156#endif
diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h
index 619bf94b11f1..e85679af54dd 100644
--- a/arch/s390/include/asm/cio.h
+++ b/arch/s390/include/asm/cio.h
@@ -15,228 +15,7 @@
15#define LPM_ANYPATH 0xff 15#define LPM_ANYPATH 0xff
16#define __MAX_CSSID 0 16#define __MAX_CSSID 0
17 17
18/** 18#include <asm/scsw.h>
19 * struct cmd_scsw - command-mode subchannel status word
20 * @key: subchannel key
21 * @sctl: suspend control
22 * @eswf: esw format
23 * @cc: deferred condition code
24 * @fmt: format
25 * @pfch: prefetch
26 * @isic: initial-status interruption control
27 * @alcc: address-limit checking control
28 * @ssi: suppress-suspended interruption
29 * @zcc: zero condition code
30 * @ectl: extended control
31 * @pno: path not operational
32 * @res: reserved
33 * @fctl: function control
34 * @actl: activity control
35 * @stctl: status control
36 * @cpa: channel program address
37 * @dstat: device status
38 * @cstat: subchannel status
39 * @count: residual count
40 */
41struct cmd_scsw {
42 __u32 key : 4;
43 __u32 sctl : 1;
44 __u32 eswf : 1;
45 __u32 cc : 2;
46 __u32 fmt : 1;
47 __u32 pfch : 1;
48 __u32 isic : 1;
49 __u32 alcc : 1;
50 __u32 ssi : 1;
51 __u32 zcc : 1;
52 __u32 ectl : 1;
53 __u32 pno : 1;
54 __u32 res : 1;
55 __u32 fctl : 3;
56 __u32 actl : 7;
57 __u32 stctl : 5;
58 __u32 cpa;
59 __u32 dstat : 8;
60 __u32 cstat : 8;
61 __u32 count : 16;
62} __attribute__ ((packed));
63
64/**
65 * struct tm_scsw - transport-mode subchannel status word
66 * @key: subchannel key
67 * @eswf: esw format
68 * @cc: deferred condition code
69 * @fmt: format
70 * @x: IRB-format control
71 * @q: interrogate-complete
72 * @ectl: extended control
73 * @pno: path not operational
74 * @fctl: function control
75 * @actl: activity control
76 * @stctl: status control
77 * @tcw: TCW address
78 * @dstat: device status
79 * @cstat: subchannel status
80 * @fcxs: FCX status
81 * @schxs: subchannel-extended status
82 */
83struct tm_scsw {
84 u32 key:4;
85 u32 :1;
86 u32 eswf:1;
87 u32 cc:2;
88 u32 fmt:3;
89 u32 x:1;
90 u32 q:1;
91 u32 :1;
92 u32 ectl:1;
93 u32 pno:1;
94 u32 :1;
95 u32 fctl:3;
96 u32 actl:7;
97 u32 stctl:5;
98 u32 tcw;
99 u32 dstat:8;
100 u32 cstat:8;
101 u32 fcxs:8;
102 u32 schxs:8;
103} __attribute__ ((packed));
104
105/**
106 * union scsw - subchannel status word
107 * @cmd: command-mode SCSW
108 * @tm: transport-mode SCSW
109 */
110union scsw {
111 struct cmd_scsw cmd;
112 struct tm_scsw tm;
113} __attribute__ ((packed));
114
115int scsw_is_tm(union scsw *scsw);
116u32 scsw_key(union scsw *scsw);
117u32 scsw_eswf(union scsw *scsw);
118u32 scsw_cc(union scsw *scsw);
119u32 scsw_ectl(union scsw *scsw);
120u32 scsw_pno(union scsw *scsw);
121u32 scsw_fctl(union scsw *scsw);
122u32 scsw_actl(union scsw *scsw);
123u32 scsw_stctl(union scsw *scsw);
124u32 scsw_dstat(union scsw *scsw);
125u32 scsw_cstat(union scsw *scsw);
126int scsw_is_solicited(union scsw *scsw);
127int scsw_is_valid_key(union scsw *scsw);
128int scsw_is_valid_eswf(union scsw *scsw);
129int scsw_is_valid_cc(union scsw *scsw);
130int scsw_is_valid_ectl(union scsw *scsw);
131int scsw_is_valid_pno(union scsw *scsw);
132int scsw_is_valid_fctl(union scsw *scsw);
133int scsw_is_valid_actl(union scsw *scsw);
134int scsw_is_valid_stctl(union scsw *scsw);
135int scsw_is_valid_dstat(union scsw *scsw);
136int scsw_is_valid_cstat(union scsw *scsw);
137int scsw_cmd_is_valid_key(union scsw *scsw);
138int scsw_cmd_is_valid_sctl(union scsw *scsw);
139int scsw_cmd_is_valid_eswf(union scsw *scsw);
140int scsw_cmd_is_valid_cc(union scsw *scsw);
141int scsw_cmd_is_valid_fmt(union scsw *scsw);
142int scsw_cmd_is_valid_pfch(union scsw *scsw);
143int scsw_cmd_is_valid_isic(union scsw *scsw);
144int scsw_cmd_is_valid_alcc(union scsw *scsw);
145int scsw_cmd_is_valid_ssi(union scsw *scsw);
146int scsw_cmd_is_valid_zcc(union scsw *scsw);
147int scsw_cmd_is_valid_ectl(union scsw *scsw);
148int scsw_cmd_is_valid_pno(union scsw *scsw);
149int scsw_cmd_is_valid_fctl(union scsw *scsw);
150int scsw_cmd_is_valid_actl(union scsw *scsw);
151int scsw_cmd_is_valid_stctl(union scsw *scsw);
152int scsw_cmd_is_valid_dstat(union scsw *scsw);
153int scsw_cmd_is_valid_cstat(union scsw *scsw);
154int scsw_cmd_is_solicited(union scsw *scsw);
155int scsw_tm_is_valid_key(union scsw *scsw);
156int scsw_tm_is_valid_eswf(union scsw *scsw);
157int scsw_tm_is_valid_cc(union scsw *scsw);
158int scsw_tm_is_valid_fmt(union scsw *scsw);
159int scsw_tm_is_valid_x(union scsw *scsw);
160int scsw_tm_is_valid_q(union scsw *scsw);
161int scsw_tm_is_valid_ectl(union scsw *scsw);
162int scsw_tm_is_valid_pno(union scsw *scsw);
163int scsw_tm_is_valid_fctl(union scsw *scsw);
164int scsw_tm_is_valid_actl(union scsw *scsw);
165int scsw_tm_is_valid_stctl(union scsw *scsw);
166int scsw_tm_is_valid_dstat(union scsw *scsw);
167int scsw_tm_is_valid_cstat(union scsw *scsw);
168int scsw_tm_is_valid_fcxs(union scsw *scsw);
169int scsw_tm_is_valid_schxs(union scsw *scsw);
170int scsw_tm_is_solicited(union scsw *scsw);
171
172#define SCSW_FCTL_CLEAR_FUNC 0x1
173#define SCSW_FCTL_HALT_FUNC 0x2
174#define SCSW_FCTL_START_FUNC 0x4
175
176#define SCSW_ACTL_SUSPENDED 0x1
177#define SCSW_ACTL_DEVACT 0x2
178#define SCSW_ACTL_SCHACT 0x4
179#define SCSW_ACTL_CLEAR_PEND 0x8
180#define SCSW_ACTL_HALT_PEND 0x10
181#define SCSW_ACTL_START_PEND 0x20
182#define SCSW_ACTL_RESUME_PEND 0x40
183
184#define SCSW_STCTL_STATUS_PEND 0x1
185#define SCSW_STCTL_SEC_STATUS 0x2
186#define SCSW_STCTL_PRIM_STATUS 0x4
187#define SCSW_STCTL_INTER_STATUS 0x8
188#define SCSW_STCTL_ALERT_STATUS 0x10
189
190#define DEV_STAT_ATTENTION 0x80
191#define DEV_STAT_STAT_MOD 0x40
192#define DEV_STAT_CU_END 0x20
193#define DEV_STAT_BUSY 0x10
194#define DEV_STAT_CHN_END 0x08
195#define DEV_STAT_DEV_END 0x04
196#define DEV_STAT_UNIT_CHECK 0x02
197#define DEV_STAT_UNIT_EXCEP 0x01
198
199#define SCHN_STAT_PCI 0x80
200#define SCHN_STAT_INCORR_LEN 0x40
201#define SCHN_STAT_PROG_CHECK 0x20
202#define SCHN_STAT_PROT_CHECK 0x10
203#define SCHN_STAT_CHN_DATA_CHK 0x08
204#define SCHN_STAT_CHN_CTRL_CHK 0x04
205#define SCHN_STAT_INTF_CTRL_CHK 0x02
206#define SCHN_STAT_CHAIN_CHECK 0x01
207
208/*
209 * architectured values for first sense byte
210 */
211#define SNS0_CMD_REJECT 0x80
212#define SNS_CMD_REJECT SNS0_CMD_REJEC
213#define SNS0_INTERVENTION_REQ 0x40
214#define SNS0_BUS_OUT_CHECK 0x20
215#define SNS0_EQUIPMENT_CHECK 0x10
216#define SNS0_DATA_CHECK 0x08
217#define SNS0_OVERRUN 0x04
218#define SNS0_INCOMPL_DOMAIN 0x01
219
220/*
221 * architectured values for second sense byte
222 */
223#define SNS1_PERM_ERR 0x80
224#define SNS1_INV_TRACK_FORMAT 0x40
225#define SNS1_EOC 0x20
226#define SNS1_MESSAGE_TO_OPER 0x10
227#define SNS1_NO_REC_FOUND 0x08
228#define SNS1_FILE_PROTECTED 0x04
229#define SNS1_WRITE_INHIBITED 0x02
230#define SNS1_INPRECISE_END 0x01
231
232/*
233 * architectured values for third sense byte
234 */
235#define SNS2_REQ_INH_WRITE 0x80
236#define SNS2_CORRECTABLE 0x40
237#define SNS2_FIRST_LOG_ERR 0x20
238#define SNS2_ENV_DATA_PRESENT 0x10
239#define SNS2_INPRECISE_END 0x04
240 19
241/** 20/**
242 * struct ccw1 - channel command word 21 * struct ccw1 - channel command word
diff --git a/arch/s390/include/asm/cpu.h b/arch/s390/include/asm/cpu.h
new file mode 100644
index 000000000000..471234b90574
--- /dev/null
+++ b/arch/s390/include/asm/cpu.h
@@ -0,0 +1,26 @@
1/*
2 * Copyright IBM Corp. 2000,2009
3 * Author(s): Hartmut Penner <hp@de.ibm.com>,
4 * Martin Schwidefsky <schwidefsky@de.ibm.com>,
5 * Christian Ehrhardt <ehrhardt@de.ibm.com>,
6 */
7
8#ifndef _ASM_S390_CPU_H
9#define _ASM_S390_CPU_H
10
11#define MAX_CPU_ADDRESS 255
12
13#ifndef __ASSEMBLY__
14
15#include <linux/types.h>
16
17struct cpuid
18{
19 unsigned int version : 8;
20 unsigned int ident : 24;
21 unsigned int machine : 16;
22 unsigned int unused : 16;
23} __packed;
24
25#endif /* __ASSEMBLY__ */
26#endif /* _ASM_S390_CPU_H */
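
The new struct cpuid describes, as bit-fields, the 8 bytes stored by the STIDP instruction: an 8-bit version code, a 24-bit CPU identification number, a 16-bit machine type and an unused halfword. The sketch below merely demonstrates that such a declaration adds up to 8 bytes; it is a local stand-in, not the kernel structure, with the packed attribute written out in full.

#include <stdio.h>

struct cpuid_sketch {
	unsigned int version : 8;
	unsigned int ident   : 24;
	unsigned int machine : 16;
	unsigned int unused  : 16;
} __attribute__((packed));

int main(void)
{
	/* the fields cover exactly the 8 bytes written by STIDP */
	printf("sizeof(struct cpuid_sketch) = %zu\n",
	       sizeof(struct cpuid_sketch));
	return 0;
}
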
diff --git a/arch/s390/include/asm/cpuid.h b/arch/s390/include/asm/cpuid.h
deleted file mode 100644
index 07836a2e5222..000000000000
--- a/arch/s390/include/asm/cpuid.h
+++ /dev/null
@@ -1,25 +0,0 @@
1/*
2 * Copyright IBM Corp. 2000,2009
3 * Author(s): Hartmut Penner <hp@de.ibm.com>,
4 * Martin Schwidefsky <schwidefsky@de.ibm.com>
5 * Christian Ehrhardt <ehrhardt@de.ibm.com>
6 */
7
8#ifndef _ASM_S390_CPUID_H_
9#define _ASM_S390_CPUID_H_
10
11/*
12 * CPU type and hardware bug flags. Kept separately for each CPU.
13 * Members of this structure are referenced in head.S, so think twice
14 * before touching them. [mj]
15 */
16
17typedef struct
18{
19 unsigned int version : 8;
20 unsigned int ident : 24;
21 unsigned int machine : 16;
22 unsigned int unused : 16;
23} __attribute__ ((packed)) cpuid_t;
24
25#endif /* _ASM_S390_CPUID_H_ */
diff --git a/arch/s390/include/asm/debug.h b/arch/s390/include/asm/debug.h
index 31ed5686a968..18124b75a7ab 100644
--- a/arch/s390/include/asm/debug.h
+++ b/arch/s390/include/asm/debug.h
@@ -167,6 +167,10 @@ debug_text_event(debug_info_t* id, int level, const char* txt)
167 return debug_event_common(id,level,txt,strlen(txt)); 167 return debug_event_common(id,level,txt,strlen(txt));
168} 168}
169 169
170/*
171 * IMPORTANT: Use "%s" in sprintf format strings with care! Only pointers are
172 * stored in the s390dbf. See Documentation/s390/s390dbf.txt for more details!
173 */
170extern debug_entry_t * 174extern debug_entry_t *
171debug_sprintf_event(debug_info_t* id,int level,char *string,...) 175debug_sprintf_event(debug_info_t* id,int level,char *string,...)
172 __attribute__ ((format(printf, 3, 4))); 176 __attribute__ ((format(printf, 3, 4)));
@@ -206,7 +210,10 @@ debug_text_exception(debug_info_t* id, int level, const char* txt)
206 return debug_exception_common(id,level,txt,strlen(txt)); 210 return debug_exception_common(id,level,txt,strlen(txt));
207} 211}
208 212
209 213/*
214 * IMPORTANT: Use "%s" in sprintf format strings with care! Only pointers are
215 * stored in the s390dbf. See Documentation/s390/s390dbf.txt for more details!
216 */
210extern debug_entry_t * 217extern debug_entry_t *
211debug_sprintf_exception(debug_info_t* id,int level,char *string,...) 218debug_sprintf_exception(debug_info_t* id,int level,char *string,...)
212 __attribute__ ((format(printf, 3, 4))); 219 __attribute__ ((format(printf, 3, 4)));
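
The comments added above warn that debug_sprintf_event() and debug_sprintf_exception() record their arguments rather than the rendered string, so a "%s" argument is kept as a bare pointer until the trace is read back. The fragment below is an illustrative sketch of that pitfall, not code from an actual caller; it assumes a debug_info_t handle obtained elsewhere via debug_register().

static void s390dbf_format_demo(debug_info_t *dbf)
{
	char stack_buf[16];
	static const char tag[] = "online";

	snprintf(stack_buf, sizeof(stack_buf), "cpu%d", 3);

	/* WRONG: only the pointer to stack_buf is stored; by the time the
	 * entry is formatted for reading, the buffer may hold anything. */
	debug_sprintf_event(dbf, 2, "state %s\n", stack_buf);

	/* OK: the pointed-to string stays valid for the life of the trace. */
	debug_sprintf_event(dbf, 2, "state %s\n", tag);
}
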
diff --git a/arch/s390/include/asm/hardirq.h b/arch/s390/include/asm/hardirq.h
index 89ec7056da28..498bc3892385 100644
--- a/arch/s390/include/asm/hardirq.h
+++ b/arch/s390/include/asm/hardirq.h
@@ -18,13 +18,6 @@
18#include <linux/interrupt.h> 18#include <linux/interrupt.h>
19#include <asm/lowcore.h> 19#include <asm/lowcore.h>
20 20
21/* irq_cpustat_t is unused currently, but could be converted
22 * into a percpu variable instead of storing softirq_pending
23 * on the lowcore */
24typedef struct {
25 unsigned int __softirq_pending;
26} irq_cpustat_t;
27
28#define local_softirq_pending() (S390_lowcore.softirq_pending) 21#define local_softirq_pending() (S390_lowcore.softirq_pending)
29 22
30#define __ARCH_IRQ_STAT 23#define __ARCH_IRQ_STAT
diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h
index 1171e6d144a3..5e95d95450b3 100644
--- a/arch/s390/include/asm/ipl.h
+++ b/arch/s390/include/asm/ipl.h
@@ -57,6 +57,8 @@ struct ipl_block_fcp {
57} __attribute__((packed)); 57} __attribute__((packed));
58 58
59#define DIAG308_VMPARM_SIZE 64 59#define DIAG308_VMPARM_SIZE 64
60#define DIAG308_SCPDATA_SIZE (PAGE_SIZE - (sizeof(struct ipl_list_hdr) + \
61 offsetof(struct ipl_block_fcp, scp_data)))
60 62
61struct ipl_block_ccw { 63struct ipl_block_ccw {
62 u8 load_parm[8]; 64 u8 load_parm[8];
@@ -91,7 +93,8 @@ extern void do_halt(void);
91extern void do_poff(void); 93extern void do_poff(void);
92extern void ipl_save_parameters(void); 94extern void ipl_save_parameters(void);
93extern void ipl_update_parameters(void); 95extern void ipl_update_parameters(void);
94extern void get_ipl_vmparm(char *); 96extern size_t append_ipl_vmparm(char *, size_t);
97extern size_t append_ipl_scpdata(char *, size_t);
95 98
96enum { 99enum {
97 IPL_DEVNO_VALID = 1, 100 IPL_DEVNO_VALID = 1,
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 1cd02f6073a0..698988f69403 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -17,7 +17,7 @@
17#include <linux/interrupt.h> 17#include <linux/interrupt.h>
18#include <linux/kvm_host.h> 18#include <linux/kvm_host.h>
19#include <asm/debug.h> 19#include <asm/debug.h>
20#include <asm/cpuid.h> 20#include <asm/cpu.h>
21 21
22#define KVM_MAX_VCPUS 64 22#define KVM_MAX_VCPUS 64
23#define KVM_MEMORY_SLOTS 32 23#define KVM_MEMORY_SLOTS 32
@@ -217,8 +217,8 @@ struct kvm_vcpu_arch {
217 struct hrtimer ckc_timer; 217 struct hrtimer ckc_timer;
218 struct tasklet_struct tasklet; 218 struct tasklet_struct tasklet;
219 union { 219 union {
220 cpuid_t cpu_id; 220 struct cpuid cpu_id;
221 u64 stidp_data; 221 u64 stidp_data;
222 }; 222 };
223}; 223};
224 224
diff --git a/arch/s390/include/asm/kvm_virtio.h b/arch/s390/include/asm/kvm_virtio.h
index 0503936f101f..acdfdff26611 100644
--- a/arch/s390/include/asm/kvm_virtio.h
+++ b/arch/s390/include/asm/kvm_virtio.h
@@ -54,14 +54,4 @@ struct kvm_vqconfig {
54 * This is pagesize for historical reasons. */ 54 * This is pagesize for historical reasons. */
55#define KVM_S390_VIRTIO_RING_ALIGN 4096 55#define KVM_S390_VIRTIO_RING_ALIGN 4096
56 56
57#ifdef __KERNEL__
58/* early virtio console setup */
59#ifdef CONFIG_S390_GUEST
60extern void s390_virtio_console_init(void);
61#else
62static inline void s390_virtio_console_init(void)
63{
64}
65#endif /* CONFIG_VIRTIO_CONSOLE */
66#endif /* __KERNEL__ */
67#endif 57#endif
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 5046ad6b7a63..6bc9426a6fbf 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -132,7 +132,7 @@
132 132
133#ifndef __ASSEMBLY__ 133#ifndef __ASSEMBLY__
134 134
135#include <asm/cpuid.h> 135#include <asm/cpu.h>
136#include <asm/ptrace.h> 136#include <asm/ptrace.h>
137#include <linux/types.h> 137#include <linux/types.h>
138 138
@@ -275,7 +275,7 @@ struct _lowcore
275 __u32 user_exec_asce; /* 0x02ac */ 275 __u32 user_exec_asce; /* 0x02ac */
276 276
277 /* SMP info area */ 277 /* SMP info area */
278 cpuid_t cpu_id; /* 0x02b0 */ 278 struct cpuid cpu_id; /* 0x02b0 */
279 __u32 cpu_nr; /* 0x02b8 */ 279 __u32 cpu_nr; /* 0x02b8 */
280 __u32 softirq_pending; /* 0x02bc */ 280 __u32 softirq_pending; /* 0x02bc */
281 __u32 percpu_offset; /* 0x02c0 */ 281 __u32 percpu_offset; /* 0x02c0 */
@@ -380,7 +380,7 @@ struct _lowcore
380 __u64 user_exec_asce; /* 0x0318 */ 380 __u64 user_exec_asce; /* 0x0318 */
381 381
382 /* SMP info area */ 382 /* SMP info area */
383 cpuid_t cpu_id; /* 0x0320 */ 383 struct cpuid cpu_id; /* 0x0320 */
384 __u32 cpu_nr; /* 0x0328 */ 384 __u32 cpu_nr; /* 0x0328 */
385 __u32 softirq_pending; /* 0x032c */ 385 __u32 softirq_pending; /* 0x032c */
386 __u64 percpu_offset; /* 0x0330 */ 386 __u64 percpu_offset; /* 0x0330 */
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index 3b59216e6284..03be99919d62 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -2,6 +2,7 @@
2#define __MMU_H 2#define __MMU_H
3 3
4typedef struct { 4typedef struct {
5 spinlock_t list_lock;
5 struct list_head crst_list; 6 struct list_head crst_list;
6 struct list_head pgtable_list; 7 struct list_head pgtable_list;
7 unsigned long asce_bits; 8 unsigned long asce_bits;
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 3e3594d01f83..5e9daf5d7f22 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -125,8 +125,6 @@ page_get_storage_key(unsigned long addr)
125 return skey; 125 return skey;
126} 126}
127 127
128#ifdef CONFIG_PAGE_STATES
129
130struct page; 128struct page;
131void arch_free_page(struct page *page, int order); 129void arch_free_page(struct page *page, int order);
132void arch_alloc_page(struct page *page, int order); 130void arch_alloc_page(struct page *page, int order);
@@ -134,8 +132,6 @@ void arch_alloc_page(struct page *page, int order);
134#define HAVE_ARCH_FREE_PAGE 132#define HAVE_ARCH_FREE_PAGE
135#define HAVE_ARCH_ALLOC_PAGE 133#define HAVE_ARCH_ALLOC_PAGE
136 134
137#endif
138
139#endif /* !__ASSEMBLY__ */ 135#endif /* !__ASSEMBLY__ */
140 136
141#define __PAGE_OFFSET 0x0UL 137#define __PAGE_OFFSET 0x0UL
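
Dropping the CONFIG_PAGE_STATES guard means arch_free_page()/arch_alloc_page() and the HAVE_ARCH_* macros are now always visible on s390. The generic allocator only calls the arch hooks when those macros are defined and otherwise falls back to empty stubs, roughly following the include/linux/gfp.h convention sketched below (shown for orientation, not part of this patch):

    #ifndef HAVE_ARCH_FREE_PAGE
    static inline void arch_free_page(struct page *page, int order) { }
    #endif
    #ifndef HAVE_ARCH_ALLOC_PAGE
    static inline void arch_alloc_page(struct page *page, int order) { }
    #endif

With the compile-time guard gone, whether the s390 page-state (CMM) hooks actually do anything is decided at run time in the arch implementation.
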
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index b2658b9220fe..ddad5903341c 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -140,6 +140,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
140 140
141static inline pgd_t *pgd_alloc(struct mm_struct *mm) 141static inline pgd_t *pgd_alloc(struct mm_struct *mm)
142{ 142{
143 spin_lock_init(&mm->context.list_lock);
143 INIT_LIST_HEAD(&mm->context.crst_list); 144 INIT_LIST_HEAD(&mm->context.crst_list);
144 INIT_LIST_HEAD(&mm->context.pgtable_list); 145 INIT_LIST_HEAD(&mm->context.pgtable_list);
145 return (pgd_t *) crst_table_alloc(mm, s390_noexec); 146 return (pgd_t *) crst_table_alloc(mm, s390_noexec);
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index c139fa7b8e89..cf8eed3fa779 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -14,7 +14,7 @@
14#define __ASM_S390_PROCESSOR_H 14#define __ASM_S390_PROCESSOR_H
15 15
16#include <linux/linkage.h> 16#include <linux/linkage.h>
17#include <asm/cpuid.h> 17#include <asm/cpu.h>
18#include <asm/page.h> 18#include <asm/page.h>
19#include <asm/ptrace.h> 19#include <asm/ptrace.h>
20#include <asm/setup.h> 20#include <asm/setup.h>
@@ -26,7 +26,7 @@
26 */ 26 */
27#define current_text_addr() ({ void *pc; asm("basr %0,0" : "=a" (pc)); pc; }) 27#define current_text_addr() ({ void *pc; asm("basr %0,0" : "=a" (pc)); pc; })
28 28
29static inline void get_cpu_id(cpuid_t *ptr) 29static inline void get_cpu_id(struct cpuid *ptr)
30{ 30{
31 asm volatile("stidp 0(%1)" : "=m" (*ptr) : "a" (ptr)); 31 asm volatile("stidp 0(%1)" : "=m" (*ptr) : "a" (ptr));
32} 32}
diff --git a/arch/s390/include/asm/scatterlist.h b/arch/s390/include/asm/scatterlist.h
index 29ec8e28c8df..35d786fe93ae 100644
--- a/arch/s390/include/asm/scatterlist.h
+++ b/arch/s390/include/asm/scatterlist.h
@@ -1,19 +1 @@
1#ifndef _ASMS390_SCATTERLIST_H #include <asm-generic/scatterlist.h>
2#define _ASMS390_SCATTERLIST_H
3
4struct scatterlist {
5#ifdef CONFIG_DEBUG_SG
6 unsigned long sg_magic;
7#endif
8 unsigned long page_link;
9 unsigned int offset;
10 unsigned int length;
11};
12
13#ifdef __s390x__
14#define ISA_DMA_THRESHOLD (0xffffffffffffffffUL)
15#else
16#define ISA_DMA_THRESHOLD (0xffffffffUL)
17#endif
18
19#endif /* _ASMS390X_SCATTERLIST_H */
diff --git a/drivers/s390/cio/scsw.c b/arch/s390/include/asm/scsw.h
index f8da25ab576d..de389cb54d28 100644
--- a/drivers/s390/cio/scsw.c
+++ b/arch/s390/include/asm/scsw.h
@@ -1,15 +1,182 @@
1/* 1/*
2 * Helper functions for scsw access. 2 * Helper functions for scsw access.
3 * 3 *
4 * Copyright IBM Corp. 2008 4 * Copyright IBM Corp. 2008,2009
5 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> 5 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
6 */ 6 */
7 7
8#ifndef _ASM_S390_SCSW_H_
9#define _ASM_S390_SCSW_H_
10
8#include <linux/types.h> 11#include <linux/types.h>
9#include <linux/module.h> 12#include <asm/chsc.h>
10#include <asm/cio.h> 13#include <asm/cio.h>
11#include "css.h" 14
12#include "chsc.h" 15/**
16 * struct cmd_scsw - command-mode subchannel status word
17 * @key: subchannel key
18 * @sctl: suspend control
19 * @eswf: esw format
20 * @cc: deferred condition code
21 * @fmt: format
22 * @pfch: prefetch
23 * @isic: initial-status interruption control
24 * @alcc: address-limit checking control
25 * @ssi: suppress-suspended interruption
26 * @zcc: zero condition code
27 * @ectl: extended control
28 * @pno: path not operational
29 * @res: reserved
30 * @fctl: function control
31 * @actl: activity control
32 * @stctl: status control
33 * @cpa: channel program address
34 * @dstat: device status
35 * @cstat: subchannel status
36 * @count: residual count
37 */
38struct cmd_scsw {
39 __u32 key : 4;
40 __u32 sctl : 1;
41 __u32 eswf : 1;
42 __u32 cc : 2;
43 __u32 fmt : 1;
44 __u32 pfch : 1;
45 __u32 isic : 1;
46 __u32 alcc : 1;
47 __u32 ssi : 1;
48 __u32 zcc : 1;
49 __u32 ectl : 1;
50 __u32 pno : 1;
51 __u32 res : 1;
52 __u32 fctl : 3;
53 __u32 actl : 7;
54 __u32 stctl : 5;
55 __u32 cpa;
56 __u32 dstat : 8;
57 __u32 cstat : 8;
58 __u32 count : 16;
59} __attribute__ ((packed));
60
61/**
62 * struct tm_scsw - transport-mode subchannel status word
63 * @key: subchannel key
64 * @eswf: esw format
65 * @cc: deferred condition code
66 * @fmt: format
67 * @x: IRB-format control
68 * @q: interrogate-complete
69 * @ectl: extended control
70 * @pno: path not operational
71 * @fctl: function control
72 * @actl: activity control
73 * @stctl: status control
74 * @tcw: TCW address
75 * @dstat: device status
76 * @cstat: subchannel status
77 * @fcxs: FCX status
78 * @schxs: subchannel-extended status
79 */
80struct tm_scsw {
81 u32 key:4;
82 u32 :1;
83 u32 eswf:1;
84 u32 cc:2;
85 u32 fmt:3;
86 u32 x:1;
87 u32 q:1;
88 u32 :1;
89 u32 ectl:1;
90 u32 pno:1;
91 u32 :1;
92 u32 fctl:3;
93 u32 actl:7;
94 u32 stctl:5;
95 u32 tcw;
96 u32 dstat:8;
97 u32 cstat:8;
98 u32 fcxs:8;
99 u32 schxs:8;
100} __attribute__ ((packed));
101
102/**
103 * union scsw - subchannel status word
104 * @cmd: command-mode SCSW
105 * @tm: transport-mode SCSW
106 */
107union scsw {
108 struct cmd_scsw cmd;
109 struct tm_scsw tm;
110} __attribute__ ((packed));
111
112#define SCSW_FCTL_CLEAR_FUNC 0x1
113#define SCSW_FCTL_HALT_FUNC 0x2
114#define SCSW_FCTL_START_FUNC 0x4
115
116#define SCSW_ACTL_SUSPENDED 0x1
117#define SCSW_ACTL_DEVACT 0x2
118#define SCSW_ACTL_SCHACT 0x4
119#define SCSW_ACTL_CLEAR_PEND 0x8
120#define SCSW_ACTL_HALT_PEND 0x10
121#define SCSW_ACTL_START_PEND 0x20
122#define SCSW_ACTL_RESUME_PEND 0x40
123
124#define SCSW_STCTL_STATUS_PEND 0x1
125#define SCSW_STCTL_SEC_STATUS 0x2
126#define SCSW_STCTL_PRIM_STATUS 0x4
127#define SCSW_STCTL_INTER_STATUS 0x8
128#define SCSW_STCTL_ALERT_STATUS 0x10
129
130#define DEV_STAT_ATTENTION 0x80
131#define DEV_STAT_STAT_MOD 0x40
132#define DEV_STAT_CU_END 0x20
133#define DEV_STAT_BUSY 0x10
134#define DEV_STAT_CHN_END 0x08
135#define DEV_STAT_DEV_END 0x04
136#define DEV_STAT_UNIT_CHECK 0x02
137#define DEV_STAT_UNIT_EXCEP 0x01
138
139#define SCHN_STAT_PCI 0x80
140#define SCHN_STAT_INCORR_LEN 0x40
141#define SCHN_STAT_PROG_CHECK 0x20
142#define SCHN_STAT_PROT_CHECK 0x10
143#define SCHN_STAT_CHN_DATA_CHK 0x08
144#define SCHN_STAT_CHN_CTRL_CHK 0x04
145#define SCHN_STAT_INTF_CTRL_CHK 0x02
146#define SCHN_STAT_CHAIN_CHECK 0x01
147
148/*
149 * architectured values for first sense byte
150 */
151#define SNS0_CMD_REJECT 0x80
 152#define SNS_CMD_REJECT SNS0_CMD_REJECT
153#define SNS0_INTERVENTION_REQ 0x40
154#define SNS0_BUS_OUT_CHECK 0x20
155#define SNS0_EQUIPMENT_CHECK 0x10
156#define SNS0_DATA_CHECK 0x08
157#define SNS0_OVERRUN 0x04
158#define SNS0_INCOMPL_DOMAIN 0x01
159
160/*
161 * architectured values for second sense byte
162 */
163#define SNS1_PERM_ERR 0x80
164#define SNS1_INV_TRACK_FORMAT 0x40
165#define SNS1_EOC 0x20
166#define SNS1_MESSAGE_TO_OPER 0x10
167#define SNS1_NO_REC_FOUND 0x08
168#define SNS1_FILE_PROTECTED 0x04
169#define SNS1_WRITE_INHIBITED 0x02
170#define SNS1_INPRECISE_END 0x01
171
172/*
173 * architectured values for third sense byte
174 */
175#define SNS2_REQ_INH_WRITE 0x80
176#define SNS2_CORRECTABLE 0x40
177#define SNS2_FIRST_LOG_ERR 0x20
178#define SNS2_ENV_DATA_PRESENT 0x10
179#define SNS2_INPRECISE_END 0x04
13 180
14/** 181/**
15 * scsw_is_tm - check for transport mode scsw 182 * scsw_is_tm - check for transport mode scsw
@@ -18,11 +185,10 @@
18 * Return non-zero if the specified scsw is a transport mode scsw, zero 185 * Return non-zero if the specified scsw is a transport mode scsw, zero
19 * otherwise. 186 * otherwise.
20 */ 187 */
21int scsw_is_tm(union scsw *scsw) 188static inline int scsw_is_tm(union scsw *scsw)
22{ 189{
23 return css_general_characteristics.fcx && (scsw->tm.x == 1); 190 return css_general_characteristics.fcx && (scsw->tm.x == 1);
24} 191}
25EXPORT_SYMBOL(scsw_is_tm);
26 192
27/** 193/**
28 * scsw_key - return scsw key field 194 * scsw_key - return scsw key field
@@ -31,14 +197,13 @@ EXPORT_SYMBOL(scsw_is_tm);
31 * Return the value of the key field of the specified scsw, regardless of 197 * Return the value of the key field of the specified scsw, regardless of
32 * whether it is a transport mode or command mode scsw. 198 * whether it is a transport mode or command mode scsw.
33 */ 199 */
34u32 scsw_key(union scsw *scsw) 200static inline u32 scsw_key(union scsw *scsw)
35{ 201{
36 if (scsw_is_tm(scsw)) 202 if (scsw_is_tm(scsw))
37 return scsw->tm.key; 203 return scsw->tm.key;
38 else 204 else
39 return scsw->cmd.key; 205 return scsw->cmd.key;
40} 206}
41EXPORT_SYMBOL(scsw_key);
42 207
43/** 208/**
44 * scsw_eswf - return scsw eswf field 209 * scsw_eswf - return scsw eswf field
@@ -47,14 +212,13 @@ EXPORT_SYMBOL(scsw_key);
47 * Return the value of the eswf field of the specified scsw, regardless of 212 * Return the value of the eswf field of the specified scsw, regardless of
48 * whether it is a transport mode or command mode scsw. 213 * whether it is a transport mode or command mode scsw.
49 */ 214 */
50u32 scsw_eswf(union scsw *scsw) 215static inline u32 scsw_eswf(union scsw *scsw)
51{ 216{
52 if (scsw_is_tm(scsw)) 217 if (scsw_is_tm(scsw))
53 return scsw->tm.eswf; 218 return scsw->tm.eswf;
54 else 219 else
55 return scsw->cmd.eswf; 220 return scsw->cmd.eswf;
56} 221}
57EXPORT_SYMBOL(scsw_eswf);
58 222
59/** 223/**
60 * scsw_cc - return scsw cc field 224 * scsw_cc - return scsw cc field
@@ -63,14 +227,13 @@ EXPORT_SYMBOL(scsw_eswf);
63 * Return the value of the cc field of the specified scsw, regardless of 227 * Return the value of the cc field of the specified scsw, regardless of
64 * whether it is a transport mode or command mode scsw. 228 * whether it is a transport mode or command mode scsw.
65 */ 229 */
66u32 scsw_cc(union scsw *scsw) 230static inline u32 scsw_cc(union scsw *scsw)
67{ 231{
68 if (scsw_is_tm(scsw)) 232 if (scsw_is_tm(scsw))
69 return scsw->tm.cc; 233 return scsw->tm.cc;
70 else 234 else
71 return scsw->cmd.cc; 235 return scsw->cmd.cc;
72} 236}
73EXPORT_SYMBOL(scsw_cc);
74 237
75/** 238/**
76 * scsw_ectl - return scsw ectl field 239 * scsw_ectl - return scsw ectl field
@@ -79,14 +242,13 @@ EXPORT_SYMBOL(scsw_cc);
79 * Return the value of the ectl field of the specified scsw, regardless of 242 * Return the value of the ectl field of the specified scsw, regardless of
80 * whether it is a transport mode or command mode scsw. 243 * whether it is a transport mode or command mode scsw.
81 */ 244 */
82u32 scsw_ectl(union scsw *scsw) 245static inline u32 scsw_ectl(union scsw *scsw)
83{ 246{
84 if (scsw_is_tm(scsw)) 247 if (scsw_is_tm(scsw))
85 return scsw->tm.ectl; 248 return scsw->tm.ectl;
86 else 249 else
87 return scsw->cmd.ectl; 250 return scsw->cmd.ectl;
88} 251}
89EXPORT_SYMBOL(scsw_ectl);
90 252
91/** 253/**
92 * scsw_pno - return scsw pno field 254 * scsw_pno - return scsw pno field
@@ -95,14 +257,13 @@ EXPORT_SYMBOL(scsw_ectl);
95 * Return the value of the pno field of the specified scsw, regardless of 257 * Return the value of the pno field of the specified scsw, regardless of
96 * whether it is a transport mode or command mode scsw. 258 * whether it is a transport mode or command mode scsw.
97 */ 259 */
98u32 scsw_pno(union scsw *scsw) 260static inline u32 scsw_pno(union scsw *scsw)
99{ 261{
100 if (scsw_is_tm(scsw)) 262 if (scsw_is_tm(scsw))
101 return scsw->tm.pno; 263 return scsw->tm.pno;
102 else 264 else
103 return scsw->cmd.pno; 265 return scsw->cmd.pno;
104} 266}
105EXPORT_SYMBOL(scsw_pno);
106 267
107/** 268/**
108 * scsw_fctl - return scsw fctl field 269 * scsw_fctl - return scsw fctl field
@@ -111,14 +272,13 @@ EXPORT_SYMBOL(scsw_pno);
111 * Return the value of the fctl field of the specified scsw, regardless of 272 * Return the value of the fctl field of the specified scsw, regardless of
112 * whether it is a transport mode or command mode scsw. 273 * whether it is a transport mode or command mode scsw.
113 */ 274 */
114u32 scsw_fctl(union scsw *scsw) 275static inline u32 scsw_fctl(union scsw *scsw)
115{ 276{
116 if (scsw_is_tm(scsw)) 277 if (scsw_is_tm(scsw))
117 return scsw->tm.fctl; 278 return scsw->tm.fctl;
118 else 279 else
119 return scsw->cmd.fctl; 280 return scsw->cmd.fctl;
120} 281}
121EXPORT_SYMBOL(scsw_fctl);
122 282
123/** 283/**
124 * scsw_actl - return scsw actl field 284 * scsw_actl - return scsw actl field
@@ -127,14 +287,13 @@ EXPORT_SYMBOL(scsw_fctl);
127 * Return the value of the actl field of the specified scsw, regardless of 287 * Return the value of the actl field of the specified scsw, regardless of
128 * whether it is a transport mode or command mode scsw. 288 * whether it is a transport mode or command mode scsw.
129 */ 289 */
130u32 scsw_actl(union scsw *scsw) 290static inline u32 scsw_actl(union scsw *scsw)
131{ 291{
132 if (scsw_is_tm(scsw)) 292 if (scsw_is_tm(scsw))
133 return scsw->tm.actl; 293 return scsw->tm.actl;
134 else 294 else
135 return scsw->cmd.actl; 295 return scsw->cmd.actl;
136} 296}
137EXPORT_SYMBOL(scsw_actl);
138 297
139/** 298/**
140 * scsw_stctl - return scsw stctl field 299 * scsw_stctl - return scsw stctl field
@@ -143,14 +302,13 @@ EXPORT_SYMBOL(scsw_actl);
143 * Return the value of the stctl field of the specified scsw, regardless of 302 * Return the value of the stctl field of the specified scsw, regardless of
144 * whether it is a transport mode or command mode scsw. 303 * whether it is a transport mode or command mode scsw.
145 */ 304 */
146u32 scsw_stctl(union scsw *scsw) 305static inline u32 scsw_stctl(union scsw *scsw)
147{ 306{
148 if (scsw_is_tm(scsw)) 307 if (scsw_is_tm(scsw))
149 return scsw->tm.stctl; 308 return scsw->tm.stctl;
150 else 309 else
151 return scsw->cmd.stctl; 310 return scsw->cmd.stctl;
152} 311}
153EXPORT_SYMBOL(scsw_stctl);
154 312
155/** 313/**
156 * scsw_dstat - return scsw dstat field 314 * scsw_dstat - return scsw dstat field
@@ -159,14 +317,13 @@ EXPORT_SYMBOL(scsw_stctl);
159 * Return the value of the dstat field of the specified scsw, regardless of 317 * Return the value of the dstat field of the specified scsw, regardless of
160 * whether it is a transport mode or command mode scsw. 318 * whether it is a transport mode or command mode scsw.
161 */ 319 */
162u32 scsw_dstat(union scsw *scsw) 320static inline u32 scsw_dstat(union scsw *scsw)
163{ 321{
164 if (scsw_is_tm(scsw)) 322 if (scsw_is_tm(scsw))
165 return scsw->tm.dstat; 323 return scsw->tm.dstat;
166 else 324 else
167 return scsw->cmd.dstat; 325 return scsw->cmd.dstat;
168} 326}
169EXPORT_SYMBOL(scsw_dstat);
170 327
171/** 328/**
172 * scsw_cstat - return scsw cstat field 329 * scsw_cstat - return scsw cstat field
@@ -175,14 +332,13 @@ EXPORT_SYMBOL(scsw_dstat);
175 * Return the value of the cstat field of the specified scsw, regardless of 332 * Return the value of the cstat field of the specified scsw, regardless of
176 * whether it is a transport mode or command mode scsw. 333 * whether it is a transport mode or command mode scsw.
177 */ 334 */
178u32 scsw_cstat(union scsw *scsw) 335static inline u32 scsw_cstat(union scsw *scsw)
179{ 336{
180 if (scsw_is_tm(scsw)) 337 if (scsw_is_tm(scsw))
181 return scsw->tm.cstat; 338 return scsw->tm.cstat;
182 else 339 else
183 return scsw->cmd.cstat; 340 return scsw->cmd.cstat;
184} 341}
185EXPORT_SYMBOL(scsw_cstat);
186 342
187/** 343/**
188 * scsw_cmd_is_valid_key - check key field validity 344 * scsw_cmd_is_valid_key - check key field validity
@@ -191,11 +347,10 @@ EXPORT_SYMBOL(scsw_cstat);
191 * Return non-zero if the key field of the specified command mode scsw is 347 * Return non-zero if the key field of the specified command mode scsw is
192 * valid, zero otherwise. 348 * valid, zero otherwise.
193 */ 349 */
194int scsw_cmd_is_valid_key(union scsw *scsw) 350static inline int scsw_cmd_is_valid_key(union scsw *scsw)
195{ 351{
196 return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); 352 return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
197} 353}
198EXPORT_SYMBOL(scsw_cmd_is_valid_key);
199 354
200/** 355/**
201 * scsw_cmd_is_valid_sctl - check fctl field validity 356 * scsw_cmd_is_valid_sctl - check fctl field validity
@@ -204,11 +359,10 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_key);
204 * Return non-zero if the fctl field of the specified command mode scsw is 359 * Return non-zero if the fctl field of the specified command mode scsw is
205 * valid, zero otherwise. 360 * valid, zero otherwise.
206 */ 361 */
207int scsw_cmd_is_valid_sctl(union scsw *scsw) 362static inline int scsw_cmd_is_valid_sctl(union scsw *scsw)
208{ 363{
209 return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); 364 return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
210} 365}
211EXPORT_SYMBOL(scsw_cmd_is_valid_sctl);
212 366
213/** 367/**
214 * scsw_cmd_is_valid_eswf - check eswf field validity 368 * scsw_cmd_is_valid_eswf - check eswf field validity
@@ -217,11 +371,10 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_sctl);
217 * Return non-zero if the eswf field of the specified command mode scsw is 371 * Return non-zero if the eswf field of the specified command mode scsw is
218 * valid, zero otherwise. 372 * valid, zero otherwise.
219 */ 373 */
220int scsw_cmd_is_valid_eswf(union scsw *scsw) 374static inline int scsw_cmd_is_valid_eswf(union scsw *scsw)
221{ 375{
222 return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND); 376 return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND);
223} 377}
224EXPORT_SYMBOL(scsw_cmd_is_valid_eswf);
225 378
226/** 379/**
227 * scsw_cmd_is_valid_cc - check cc field validity 380 * scsw_cmd_is_valid_cc - check cc field validity
@@ -230,12 +383,11 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_eswf);
230 * Return non-zero if the cc field of the specified command mode scsw is 383 * Return non-zero if the cc field of the specified command mode scsw is
231 * valid, zero otherwise. 384 * valid, zero otherwise.
232 */ 385 */
233int scsw_cmd_is_valid_cc(union scsw *scsw) 386static inline int scsw_cmd_is_valid_cc(union scsw *scsw)
234{ 387{
235 return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) && 388 return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) &&
236 (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND); 389 (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND);
237} 390}
238EXPORT_SYMBOL(scsw_cmd_is_valid_cc);
239 391
240/** 392/**
241 * scsw_cmd_is_valid_fmt - check fmt field validity 393 * scsw_cmd_is_valid_fmt - check fmt field validity
@@ -244,11 +396,10 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_cc);
244 * Return non-zero if the fmt field of the specified command mode scsw is 396 * Return non-zero if the fmt field of the specified command mode scsw is
245 * valid, zero otherwise. 397 * valid, zero otherwise.
246 */ 398 */
247int scsw_cmd_is_valid_fmt(union scsw *scsw) 399static inline int scsw_cmd_is_valid_fmt(union scsw *scsw)
248{ 400{
249 return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); 401 return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
250} 402}
251EXPORT_SYMBOL(scsw_cmd_is_valid_fmt);
252 403
253/** 404/**
254 * scsw_cmd_is_valid_pfch - check pfch field validity 405 * scsw_cmd_is_valid_pfch - check pfch field validity
@@ -257,11 +408,10 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_fmt);
257 * Return non-zero if the pfch field of the specified command mode scsw is 408 * Return non-zero if the pfch field of the specified command mode scsw is
258 * valid, zero otherwise. 409 * valid, zero otherwise.
259 */ 410 */
260int scsw_cmd_is_valid_pfch(union scsw *scsw) 411static inline int scsw_cmd_is_valid_pfch(union scsw *scsw)
261{ 412{
262 return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); 413 return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
263} 414}
264EXPORT_SYMBOL(scsw_cmd_is_valid_pfch);
265 415
266/** 416/**
267 * scsw_cmd_is_valid_isic - check isic field validity 417 * scsw_cmd_is_valid_isic - check isic field validity
@@ -270,11 +420,10 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_pfch);
270 * Return non-zero if the isic field of the specified command mode scsw is 420 * Return non-zero if the isic field of the specified command mode scsw is
271 * valid, zero otherwise. 421 * valid, zero otherwise.
272 */ 422 */
273int scsw_cmd_is_valid_isic(union scsw *scsw) 423static inline int scsw_cmd_is_valid_isic(union scsw *scsw)
274{ 424{
275 return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); 425 return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
276} 426}
277EXPORT_SYMBOL(scsw_cmd_is_valid_isic);
278 427
279/** 428/**
280 * scsw_cmd_is_valid_alcc - check alcc field validity 429 * scsw_cmd_is_valid_alcc - check alcc field validity
@@ -283,11 +432,10 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_isic);
283 * Return non-zero if the alcc field of the specified command mode scsw is 432 * Return non-zero if the alcc field of the specified command mode scsw is
284 * valid, zero otherwise. 433 * valid, zero otherwise.
285 */ 434 */
286int scsw_cmd_is_valid_alcc(union scsw *scsw) 435static inline int scsw_cmd_is_valid_alcc(union scsw *scsw)
287{ 436{
288 return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); 437 return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
289} 438}
290EXPORT_SYMBOL(scsw_cmd_is_valid_alcc);
291 439
292/** 440/**
293 * scsw_cmd_is_valid_ssi - check ssi field validity 441 * scsw_cmd_is_valid_ssi - check ssi field validity
@@ -296,11 +444,10 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_alcc);
296 * Return non-zero if the ssi field of the specified command mode scsw is 444 * Return non-zero if the ssi field of the specified command mode scsw is
297 * valid, zero otherwise. 445 * valid, zero otherwise.
298 */ 446 */
299int scsw_cmd_is_valid_ssi(union scsw *scsw) 447static inline int scsw_cmd_is_valid_ssi(union scsw *scsw)
300{ 448{
301 return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); 449 return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
302} 450}
303EXPORT_SYMBOL(scsw_cmd_is_valid_ssi);
304 451
305/** 452/**
306 * scsw_cmd_is_valid_zcc - check zcc field validity 453 * scsw_cmd_is_valid_zcc - check zcc field validity
@@ -309,12 +456,11 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_ssi);
309 * Return non-zero if the zcc field of the specified command mode scsw is 456 * Return non-zero if the zcc field of the specified command mode scsw is
310 * valid, zero otherwise. 457 * valid, zero otherwise.
311 */ 458 */
312int scsw_cmd_is_valid_zcc(union scsw *scsw) 459static inline int scsw_cmd_is_valid_zcc(union scsw *scsw)
313{ 460{
314 return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) && 461 return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) &&
315 (scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS); 462 (scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS);
316} 463}
317EXPORT_SYMBOL(scsw_cmd_is_valid_zcc);
318 464
319/** 465/**
320 * scsw_cmd_is_valid_ectl - check ectl field validity 466 * scsw_cmd_is_valid_ectl - check ectl field validity
@@ -323,13 +469,12 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_zcc);
323 * Return non-zero if the ectl field of the specified command mode scsw is 469 * Return non-zero if the ectl field of the specified command mode scsw is
324 * valid, zero otherwise. 470 * valid, zero otherwise.
325 */ 471 */
326int scsw_cmd_is_valid_ectl(union scsw *scsw) 472static inline int scsw_cmd_is_valid_ectl(union scsw *scsw)
327{ 473{
328 return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) && 474 return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
329 !(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) && 475 !(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) &&
330 (scsw->cmd.stctl & SCSW_STCTL_ALERT_STATUS); 476 (scsw->cmd.stctl & SCSW_STCTL_ALERT_STATUS);
331} 477}
332EXPORT_SYMBOL(scsw_cmd_is_valid_ectl);
333 478
334/** 479/**
335 * scsw_cmd_is_valid_pno - check pno field validity 480 * scsw_cmd_is_valid_pno - check pno field validity
@@ -338,7 +483,7 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_ectl);
338 * Return non-zero if the pno field of the specified command mode scsw is 483 * Return non-zero if the pno field of the specified command mode scsw is
339 * valid, zero otherwise. 484 * valid, zero otherwise.
340 */ 485 */
341int scsw_cmd_is_valid_pno(union scsw *scsw) 486static inline int scsw_cmd_is_valid_pno(union scsw *scsw)
342{ 487{
343 return (scsw->cmd.fctl != 0) && 488 return (scsw->cmd.fctl != 0) &&
344 (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) && 489 (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
@@ -346,7 +491,6 @@ int scsw_cmd_is_valid_pno(union scsw *scsw)
346 ((scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) && 491 ((scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) &&
347 (scsw->cmd.actl & SCSW_ACTL_SUSPENDED))); 492 (scsw->cmd.actl & SCSW_ACTL_SUSPENDED)));
348} 493}
349EXPORT_SYMBOL(scsw_cmd_is_valid_pno);
350 494
351/** 495/**
352 * scsw_cmd_is_valid_fctl - check fctl field validity 496 * scsw_cmd_is_valid_fctl - check fctl field validity
@@ -355,12 +499,11 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_pno);
355 * Return non-zero if the fctl field of the specified command mode scsw is 499 * Return non-zero if the fctl field of the specified command mode scsw is
356 * valid, zero otherwise. 500 * valid, zero otherwise.
357 */ 501 */
358int scsw_cmd_is_valid_fctl(union scsw *scsw) 502static inline int scsw_cmd_is_valid_fctl(union scsw *scsw)
359{ 503{
360 /* Only valid if pmcw.dnv == 1*/ 504 /* Only valid if pmcw.dnv == 1*/
361 return 1; 505 return 1;
362} 506}
363EXPORT_SYMBOL(scsw_cmd_is_valid_fctl);
364 507
365/** 508/**
366 * scsw_cmd_is_valid_actl - check actl field validity 509 * scsw_cmd_is_valid_actl - check actl field validity
@@ -369,12 +512,11 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_fctl);
369 * Return non-zero if the actl field of the specified command mode scsw is 512 * Return non-zero if the actl field of the specified command mode scsw is
370 * valid, zero otherwise. 513 * valid, zero otherwise.
371 */ 514 */
372int scsw_cmd_is_valid_actl(union scsw *scsw) 515static inline int scsw_cmd_is_valid_actl(union scsw *scsw)
373{ 516{
374 /* Only valid if pmcw.dnv == 1*/ 517 /* Only valid if pmcw.dnv == 1*/
375 return 1; 518 return 1;
376} 519}
377EXPORT_SYMBOL(scsw_cmd_is_valid_actl);
378 520
379/** 521/**
380 * scsw_cmd_is_valid_stctl - check stctl field validity 522 * scsw_cmd_is_valid_stctl - check stctl field validity
@@ -383,12 +525,11 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_actl);
383 * Return non-zero if the stctl field of the specified command mode scsw is 525 * Return non-zero if the stctl field of the specified command mode scsw is
384 * valid, zero otherwise. 526 * valid, zero otherwise.
385 */ 527 */
386int scsw_cmd_is_valid_stctl(union scsw *scsw) 528static inline int scsw_cmd_is_valid_stctl(union scsw *scsw)
387{ 529{
388 /* Only valid if pmcw.dnv == 1*/ 530 /* Only valid if pmcw.dnv == 1*/
389 return 1; 531 return 1;
390} 532}
391EXPORT_SYMBOL(scsw_cmd_is_valid_stctl);
392 533
393/** 534/**
394 * scsw_cmd_is_valid_dstat - check dstat field validity 535 * scsw_cmd_is_valid_dstat - check dstat field validity
@@ -397,12 +538,11 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_stctl);
397 * Return non-zero if the dstat field of the specified command mode scsw is 538 * Return non-zero if the dstat field of the specified command mode scsw is
398 * valid, zero otherwise. 539 * valid, zero otherwise.
399 */ 540 */
400int scsw_cmd_is_valid_dstat(union scsw *scsw) 541static inline int scsw_cmd_is_valid_dstat(union scsw *scsw)
401{ 542{
402 return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) && 543 return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
403 (scsw->cmd.cc != 3); 544 (scsw->cmd.cc != 3);
404} 545}
405EXPORT_SYMBOL(scsw_cmd_is_valid_dstat);
406 546
407/** 547/**
408 * scsw_cmd_is_valid_cstat - check cstat field validity 548 * scsw_cmd_is_valid_cstat - check cstat field validity
@@ -411,12 +551,11 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_dstat);
411 * Return non-zero if the cstat field of the specified command mode scsw is 551 * Return non-zero if the cstat field of the specified command mode scsw is
412 * valid, zero otherwise. 552 * valid, zero otherwise.
413 */ 553 */
414int scsw_cmd_is_valid_cstat(union scsw *scsw) 554static inline int scsw_cmd_is_valid_cstat(union scsw *scsw)
415{ 555{
416 return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) && 556 return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
417 (scsw->cmd.cc != 3); 557 (scsw->cmd.cc != 3);
418} 558}
419EXPORT_SYMBOL(scsw_cmd_is_valid_cstat);
420 559
421/** 560/**
422 * scsw_tm_is_valid_key - check key field validity 561 * scsw_tm_is_valid_key - check key field validity
@@ -425,11 +564,10 @@ EXPORT_SYMBOL(scsw_cmd_is_valid_cstat);
425 * Return non-zero if the key field of the specified transport mode scsw is 564 * Return non-zero if the key field of the specified transport mode scsw is
426 * valid, zero otherwise. 565 * valid, zero otherwise.
427 */ 566 */
428int scsw_tm_is_valid_key(union scsw *scsw) 567static inline int scsw_tm_is_valid_key(union scsw *scsw)
429{ 568{
430 return (scsw->tm.fctl & SCSW_FCTL_START_FUNC); 569 return (scsw->tm.fctl & SCSW_FCTL_START_FUNC);
431} 570}
432EXPORT_SYMBOL(scsw_tm_is_valid_key);
433 571
434/** 572/**
435 * scsw_tm_is_valid_eswf - check eswf field validity 573 * scsw_tm_is_valid_eswf - check eswf field validity
@@ -438,11 +576,10 @@ EXPORT_SYMBOL(scsw_tm_is_valid_key);
438 * Return non-zero if the eswf field of the specified transport mode scsw is 576 * Return non-zero if the eswf field of the specified transport mode scsw is
439 * valid, zero otherwise. 577 * valid, zero otherwise.
440 */ 578 */
441int scsw_tm_is_valid_eswf(union scsw *scsw) 579static inline int scsw_tm_is_valid_eswf(union scsw *scsw)
442{ 580{
443 return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND); 581 return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND);
444} 582}
445EXPORT_SYMBOL(scsw_tm_is_valid_eswf);
446 583
447/** 584/**
448 * scsw_tm_is_valid_cc - check cc field validity 585 * scsw_tm_is_valid_cc - check cc field validity
@@ -451,12 +588,11 @@ EXPORT_SYMBOL(scsw_tm_is_valid_eswf);
451 * Return non-zero if the cc field of the specified transport mode scsw is 588 * Return non-zero if the cc field of the specified transport mode scsw is
452 * valid, zero otherwise. 589 * valid, zero otherwise.
453 */ 590 */
454int scsw_tm_is_valid_cc(union scsw *scsw) 591static inline int scsw_tm_is_valid_cc(union scsw *scsw)
455{ 592{
456 return (scsw->tm.fctl & SCSW_FCTL_START_FUNC) && 593 return (scsw->tm.fctl & SCSW_FCTL_START_FUNC) &&
457 (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND); 594 (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND);
458} 595}
459EXPORT_SYMBOL(scsw_tm_is_valid_cc);
460 596
461/** 597/**
462 * scsw_tm_is_valid_fmt - check fmt field validity 598 * scsw_tm_is_valid_fmt - check fmt field validity
@@ -465,11 +601,10 @@ EXPORT_SYMBOL(scsw_tm_is_valid_cc);
465 * Return non-zero if the fmt field of the specified transport mode scsw is 601 * Return non-zero if the fmt field of the specified transport mode scsw is
466 * valid, zero otherwise. 602 * valid, zero otherwise.
467 */ 603 */
468int scsw_tm_is_valid_fmt(union scsw *scsw) 604static inline int scsw_tm_is_valid_fmt(union scsw *scsw)
469{ 605{
470 return 1; 606 return 1;
471} 607}
472EXPORT_SYMBOL(scsw_tm_is_valid_fmt);
473 608
474/** 609/**
475 * scsw_tm_is_valid_x - check x field validity 610 * scsw_tm_is_valid_x - check x field validity
@@ -478,11 +613,10 @@ EXPORT_SYMBOL(scsw_tm_is_valid_fmt);
478 * Return non-zero if the x field of the specified transport mode scsw is 613 * Return non-zero if the x field of the specified transport mode scsw is
479 * valid, zero otherwise. 614 * valid, zero otherwise.
480 */ 615 */
481int scsw_tm_is_valid_x(union scsw *scsw) 616static inline int scsw_tm_is_valid_x(union scsw *scsw)
482{ 617{
483 return 1; 618 return 1;
484} 619}
485EXPORT_SYMBOL(scsw_tm_is_valid_x);
486 620
487/** 621/**
488 * scsw_tm_is_valid_q - check q field validity 622 * scsw_tm_is_valid_q - check q field validity
@@ -491,11 +625,10 @@ EXPORT_SYMBOL(scsw_tm_is_valid_x);
491 * Return non-zero if the q field of the specified transport mode scsw is 625 * Return non-zero if the q field of the specified transport mode scsw is
492 * valid, zero otherwise. 626 * valid, zero otherwise.
493 */ 627 */
494int scsw_tm_is_valid_q(union scsw *scsw) 628static inline int scsw_tm_is_valid_q(union scsw *scsw)
495{ 629{
496 return 1; 630 return 1;
497} 631}
498EXPORT_SYMBOL(scsw_tm_is_valid_q);
499 632
500/** 633/**
501 * scsw_tm_is_valid_ectl - check ectl field validity 634 * scsw_tm_is_valid_ectl - check ectl field validity
@@ -504,13 +637,12 @@ EXPORT_SYMBOL(scsw_tm_is_valid_q);
504 * Return non-zero if the ectl field of the specified transport mode scsw is 637 * Return non-zero if the ectl field of the specified transport mode scsw is
505 * valid, zero otherwise. 638 * valid, zero otherwise.
506 */ 639 */
507int scsw_tm_is_valid_ectl(union scsw *scsw) 640static inline int scsw_tm_is_valid_ectl(union scsw *scsw)
508{ 641{
509 return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) && 642 return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
510 !(scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) && 643 !(scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) &&
511 (scsw->tm.stctl & SCSW_STCTL_ALERT_STATUS); 644 (scsw->tm.stctl & SCSW_STCTL_ALERT_STATUS);
512} 645}
513EXPORT_SYMBOL(scsw_tm_is_valid_ectl);
514 646
515/** 647/**
516 * scsw_tm_is_valid_pno - check pno field validity 648 * scsw_tm_is_valid_pno - check pno field validity
@@ -519,7 +651,7 @@ EXPORT_SYMBOL(scsw_tm_is_valid_ectl);
519 * Return non-zero if the pno field of the specified transport mode scsw is 651 * Return non-zero if the pno field of the specified transport mode scsw is
520 * valid, zero otherwise. 652 * valid, zero otherwise.
521 */ 653 */
522int scsw_tm_is_valid_pno(union scsw *scsw) 654static inline int scsw_tm_is_valid_pno(union scsw *scsw)
523{ 655{
524 return (scsw->tm.fctl != 0) && 656 return (scsw->tm.fctl != 0) &&
525 (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) && 657 (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
@@ -527,7 +659,6 @@ int scsw_tm_is_valid_pno(union scsw *scsw)
527 ((scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) && 659 ((scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) &&
528 (scsw->tm.actl & SCSW_ACTL_SUSPENDED))); 660 (scsw->tm.actl & SCSW_ACTL_SUSPENDED)));
529} 661}
530EXPORT_SYMBOL(scsw_tm_is_valid_pno);
531 662
532/** 663/**
533 * scsw_tm_is_valid_fctl - check fctl field validity 664 * scsw_tm_is_valid_fctl - check fctl field validity
@@ -536,12 +667,11 @@ EXPORT_SYMBOL(scsw_tm_is_valid_pno);
536 * Return non-zero if the fctl field of the specified transport mode scsw is 667 * Return non-zero if the fctl field of the specified transport mode scsw is
537 * valid, zero otherwise. 668 * valid, zero otherwise.
538 */ 669 */
539int scsw_tm_is_valid_fctl(union scsw *scsw) 670static inline int scsw_tm_is_valid_fctl(union scsw *scsw)
540{ 671{
541 /* Only valid if pmcw.dnv == 1*/ 672 /* Only valid if pmcw.dnv == 1*/
542 return 1; 673 return 1;
543} 674}
544EXPORT_SYMBOL(scsw_tm_is_valid_fctl);
545 675
546/** 676/**
547 * scsw_tm_is_valid_actl - check actl field validity 677 * scsw_tm_is_valid_actl - check actl field validity
@@ -550,12 +680,11 @@ EXPORT_SYMBOL(scsw_tm_is_valid_fctl);
550 * Return non-zero if the actl field of the specified transport mode scsw is 680 * Return non-zero if the actl field of the specified transport mode scsw is
551 * valid, zero otherwise. 681 * valid, zero otherwise.
552 */ 682 */
553int scsw_tm_is_valid_actl(union scsw *scsw) 683static inline int scsw_tm_is_valid_actl(union scsw *scsw)
554{ 684{
555 /* Only valid if pmcw.dnv == 1*/ 685 /* Only valid if pmcw.dnv == 1*/
556 return 1; 686 return 1;
557} 687}
558EXPORT_SYMBOL(scsw_tm_is_valid_actl);
559 688
560/** 689/**
561 * scsw_tm_is_valid_stctl - check stctl field validity 690 * scsw_tm_is_valid_stctl - check stctl field validity
@@ -564,12 +693,11 @@ EXPORT_SYMBOL(scsw_tm_is_valid_actl);
564 * Return non-zero if the stctl field of the specified transport mode scsw is 693 * Return non-zero if the stctl field of the specified transport mode scsw is
565 * valid, zero otherwise. 694 * valid, zero otherwise.
566 */ 695 */
567int scsw_tm_is_valid_stctl(union scsw *scsw) 696static inline int scsw_tm_is_valid_stctl(union scsw *scsw)
568{ 697{
569 /* Only valid if pmcw.dnv == 1*/ 698 /* Only valid if pmcw.dnv == 1*/
570 return 1; 699 return 1;
571} 700}
572EXPORT_SYMBOL(scsw_tm_is_valid_stctl);
573 701
574/** 702/**
575 * scsw_tm_is_valid_dstat - check dstat field validity 703 * scsw_tm_is_valid_dstat - check dstat field validity
@@ -578,12 +706,11 @@ EXPORT_SYMBOL(scsw_tm_is_valid_stctl);
578 * Return non-zero if the dstat field of the specified transport mode scsw is 706 * Return non-zero if the dstat field of the specified transport mode scsw is
579 * valid, zero otherwise. 707 * valid, zero otherwise.
580 */ 708 */
581int scsw_tm_is_valid_dstat(union scsw *scsw) 709static inline int scsw_tm_is_valid_dstat(union scsw *scsw)
582{ 710{
583 return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) && 711 return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
584 (scsw->tm.cc != 3); 712 (scsw->tm.cc != 3);
585} 713}
586EXPORT_SYMBOL(scsw_tm_is_valid_dstat);
587 714
588/** 715/**
589 * scsw_tm_is_valid_cstat - check cstat field validity 716 * scsw_tm_is_valid_cstat - check cstat field validity
@@ -592,12 +719,11 @@ EXPORT_SYMBOL(scsw_tm_is_valid_dstat);
592 * Return non-zero if the cstat field of the specified transport mode scsw is 719 * Return non-zero if the cstat field of the specified transport mode scsw is
593 * valid, zero otherwise. 720 * valid, zero otherwise.
594 */ 721 */
595int scsw_tm_is_valid_cstat(union scsw *scsw) 722static inline int scsw_tm_is_valid_cstat(union scsw *scsw)
596{ 723{
597 return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) && 724 return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
598 (scsw->tm.cc != 3); 725 (scsw->tm.cc != 3);
599} 726}
600EXPORT_SYMBOL(scsw_tm_is_valid_cstat);
601 727
602/** 728/**
603 * scsw_tm_is_valid_fcxs - check fcxs field validity 729 * scsw_tm_is_valid_fcxs - check fcxs field validity
@@ -606,11 +732,10 @@ EXPORT_SYMBOL(scsw_tm_is_valid_cstat);
606 * Return non-zero if the fcxs field of the specified transport mode scsw is 732 * Return non-zero if the fcxs field of the specified transport mode scsw is
607 * valid, zero otherwise. 733 * valid, zero otherwise.
608 */ 734 */
609int scsw_tm_is_valid_fcxs(union scsw *scsw) 735static inline int scsw_tm_is_valid_fcxs(union scsw *scsw)
610{ 736{
611 return 1; 737 return 1;
612} 738}
613EXPORT_SYMBOL(scsw_tm_is_valid_fcxs);
614 739
615/** 740/**
616 * scsw_tm_is_valid_schxs - check schxs field validity 741 * scsw_tm_is_valid_schxs - check schxs field validity
@@ -619,14 +744,13 @@ EXPORT_SYMBOL(scsw_tm_is_valid_fcxs);
619 * Return non-zero if the schxs field of the specified transport mode scsw is 744 * Return non-zero if the schxs field of the specified transport mode scsw is
620 * valid, zero otherwise. 745 * valid, zero otherwise.
621 */ 746 */
622int scsw_tm_is_valid_schxs(union scsw *scsw) 747static inline int scsw_tm_is_valid_schxs(union scsw *scsw)
623{ 748{
624 return (scsw->tm.cstat & (SCHN_STAT_PROG_CHECK | 749 return (scsw->tm.cstat & (SCHN_STAT_PROG_CHECK |
625 SCHN_STAT_INTF_CTRL_CHK | 750 SCHN_STAT_INTF_CTRL_CHK |
626 SCHN_STAT_PROT_CHECK | 751 SCHN_STAT_PROT_CHECK |
627 SCHN_STAT_CHN_DATA_CHK)); 752 SCHN_STAT_CHN_DATA_CHK));
628} 753}
629EXPORT_SYMBOL(scsw_tm_is_valid_schxs);
630 754
631/** 755/**
632 * scsw_is_valid_actl - check actl field validity 756 * scsw_is_valid_actl - check actl field validity
@@ -636,14 +760,13 @@ EXPORT_SYMBOL(scsw_tm_is_valid_schxs);
636 * regardless of whether it is a transport mode or command mode scsw. 760 * regardless of whether it is a transport mode or command mode scsw.
637 * Return zero if the field does not contain a valid value. 761 * Return zero if the field does not contain a valid value.
638 */ 762 */
639int scsw_is_valid_actl(union scsw *scsw) 763static inline int scsw_is_valid_actl(union scsw *scsw)
640{ 764{
641 if (scsw_is_tm(scsw)) 765 if (scsw_is_tm(scsw))
642 return scsw_tm_is_valid_actl(scsw); 766 return scsw_tm_is_valid_actl(scsw);
643 else 767 else
644 return scsw_cmd_is_valid_actl(scsw); 768 return scsw_cmd_is_valid_actl(scsw);
645} 769}
646EXPORT_SYMBOL(scsw_is_valid_actl);
647 770
648/** 771/**
649 * scsw_is_valid_cc - check cc field validity 772 * scsw_is_valid_cc - check cc field validity
@@ -653,14 +776,13 @@ EXPORT_SYMBOL(scsw_is_valid_actl);
653 * regardless of whether it is a transport mode or command mode scsw. 776 * regardless of whether it is a transport mode or command mode scsw.
654 * Return zero if the field does not contain a valid value. 777 * Return zero if the field does not contain a valid value.
655 */ 778 */
656int scsw_is_valid_cc(union scsw *scsw) 779static inline int scsw_is_valid_cc(union scsw *scsw)
657{ 780{
658 if (scsw_is_tm(scsw)) 781 if (scsw_is_tm(scsw))
659 return scsw_tm_is_valid_cc(scsw); 782 return scsw_tm_is_valid_cc(scsw);
660 else 783 else
661 return scsw_cmd_is_valid_cc(scsw); 784 return scsw_cmd_is_valid_cc(scsw);
662} 785}
663EXPORT_SYMBOL(scsw_is_valid_cc);
664 786
665/** 787/**
666 * scsw_is_valid_cstat - check cstat field validity 788 * scsw_is_valid_cstat - check cstat field validity
@@ -670,14 +792,13 @@ EXPORT_SYMBOL(scsw_is_valid_cc);
670 * regardless of whether it is a transport mode or command mode scsw. 792 * regardless of whether it is a transport mode or command mode scsw.
671 * Return zero if the field does not contain a valid value. 793 * Return zero if the field does not contain a valid value.
672 */ 794 */
673int scsw_is_valid_cstat(union scsw *scsw) 795static inline int scsw_is_valid_cstat(union scsw *scsw)
674{ 796{
675 if (scsw_is_tm(scsw)) 797 if (scsw_is_tm(scsw))
676 return scsw_tm_is_valid_cstat(scsw); 798 return scsw_tm_is_valid_cstat(scsw);
677 else 799 else
678 return scsw_cmd_is_valid_cstat(scsw); 800 return scsw_cmd_is_valid_cstat(scsw);
679} 801}
680EXPORT_SYMBOL(scsw_is_valid_cstat);
681 802
682/** 803/**
683 * scsw_is_valid_dstat - check dstat field validity 804 * scsw_is_valid_dstat - check dstat field validity
@@ -687,14 +808,13 @@ EXPORT_SYMBOL(scsw_is_valid_cstat);
687 * regardless of whether it is a transport mode or command mode scsw. 808 * regardless of whether it is a transport mode or command mode scsw.
688 * Return zero if the field does not contain a valid value. 809 * Return zero if the field does not contain a valid value.
689 */ 810 */
690int scsw_is_valid_dstat(union scsw *scsw) 811static inline int scsw_is_valid_dstat(union scsw *scsw)
691{ 812{
692 if (scsw_is_tm(scsw)) 813 if (scsw_is_tm(scsw))
693 return scsw_tm_is_valid_dstat(scsw); 814 return scsw_tm_is_valid_dstat(scsw);
694 else 815 else
695 return scsw_cmd_is_valid_dstat(scsw); 816 return scsw_cmd_is_valid_dstat(scsw);
696} 817}
697EXPORT_SYMBOL(scsw_is_valid_dstat);
698 818
699/** 819/**
700 * scsw_is_valid_ectl - check ectl field validity 820 * scsw_is_valid_ectl - check ectl field validity
@@ -704,14 +824,13 @@ EXPORT_SYMBOL(scsw_is_valid_dstat);
704 * regardless of whether it is a transport mode or command mode scsw. 824 * regardless of whether it is a transport mode or command mode scsw.
705 * Return zero if the field does not contain a valid value. 825 * Return zero if the field does not contain a valid value.
706 */ 826 */
707int scsw_is_valid_ectl(union scsw *scsw) 827static inline int scsw_is_valid_ectl(union scsw *scsw)
708{ 828{
709 if (scsw_is_tm(scsw)) 829 if (scsw_is_tm(scsw))
710 return scsw_tm_is_valid_ectl(scsw); 830 return scsw_tm_is_valid_ectl(scsw);
711 else 831 else
712 return scsw_cmd_is_valid_ectl(scsw); 832 return scsw_cmd_is_valid_ectl(scsw);
713} 833}
714EXPORT_SYMBOL(scsw_is_valid_ectl);
715 834
716/** 835/**
717 * scsw_is_valid_eswf - check eswf field validity 836 * scsw_is_valid_eswf - check eswf field validity
@@ -721,14 +840,13 @@ EXPORT_SYMBOL(scsw_is_valid_ectl);
721 * regardless of whether it is a transport mode or command mode scsw. 840 * regardless of whether it is a transport mode or command mode scsw.
722 * Return zero if the field does not contain a valid value. 841 * Return zero if the field does not contain a valid value.
723 */ 842 */
724int scsw_is_valid_eswf(union scsw *scsw) 843static inline int scsw_is_valid_eswf(union scsw *scsw)
725{ 844{
726 if (scsw_is_tm(scsw)) 845 if (scsw_is_tm(scsw))
727 return scsw_tm_is_valid_eswf(scsw); 846 return scsw_tm_is_valid_eswf(scsw);
728 else 847 else
729 return scsw_cmd_is_valid_eswf(scsw); 848 return scsw_cmd_is_valid_eswf(scsw);
730} 849}
731EXPORT_SYMBOL(scsw_is_valid_eswf);
732 850
733/** 851/**
734 * scsw_is_valid_fctl - check fctl field validity 852 * scsw_is_valid_fctl - check fctl field validity
@@ -738,14 +856,13 @@ EXPORT_SYMBOL(scsw_is_valid_eswf);
738 * regardless of whether it is a transport mode or command mode scsw. 856 * regardless of whether it is a transport mode or command mode scsw.
739 * Return zero if the field does not contain a valid value. 857 * Return zero if the field does not contain a valid value.
740 */ 858 */
741int scsw_is_valid_fctl(union scsw *scsw) 859static inline int scsw_is_valid_fctl(union scsw *scsw)
742{ 860{
743 if (scsw_is_tm(scsw)) 861 if (scsw_is_tm(scsw))
744 return scsw_tm_is_valid_fctl(scsw); 862 return scsw_tm_is_valid_fctl(scsw);
745 else 863 else
746 return scsw_cmd_is_valid_fctl(scsw); 864 return scsw_cmd_is_valid_fctl(scsw);
747} 865}
748EXPORT_SYMBOL(scsw_is_valid_fctl);
749 866
750/** 867/**
751 * scsw_is_valid_key - check key field validity 868 * scsw_is_valid_key - check key field validity
@@ -755,14 +872,13 @@ EXPORT_SYMBOL(scsw_is_valid_fctl);
755 * regardless of whether it is a transport mode or command mode scsw. 872 * regardless of whether it is a transport mode or command mode scsw.
756 * Return zero if the field does not contain a valid value. 873 * Return zero if the field does not contain a valid value.
757 */ 874 */
758int scsw_is_valid_key(union scsw *scsw) 875static inline int scsw_is_valid_key(union scsw *scsw)
759{ 876{
760 if (scsw_is_tm(scsw)) 877 if (scsw_is_tm(scsw))
761 return scsw_tm_is_valid_key(scsw); 878 return scsw_tm_is_valid_key(scsw);
762 else 879 else
763 return scsw_cmd_is_valid_key(scsw); 880 return scsw_cmd_is_valid_key(scsw);
764} 881}
765EXPORT_SYMBOL(scsw_is_valid_key);
766 882
767/** 883/**
768 * scsw_is_valid_pno - check pno field validity 884 * scsw_is_valid_pno - check pno field validity
@@ -772,14 +888,13 @@ EXPORT_SYMBOL(scsw_is_valid_key);
772 * regardless of whether it is a transport mode or command mode scsw. 888 * regardless of whether it is a transport mode or command mode scsw.
773 * Return zero if the field does not contain a valid value. 889 * Return zero if the field does not contain a valid value.
774 */ 890 */
775int scsw_is_valid_pno(union scsw *scsw) 891static inline int scsw_is_valid_pno(union scsw *scsw)
776{ 892{
777 if (scsw_is_tm(scsw)) 893 if (scsw_is_tm(scsw))
778 return scsw_tm_is_valid_pno(scsw); 894 return scsw_tm_is_valid_pno(scsw);
779 else 895 else
780 return scsw_cmd_is_valid_pno(scsw); 896 return scsw_cmd_is_valid_pno(scsw);
781} 897}
782EXPORT_SYMBOL(scsw_is_valid_pno);
783 898
784/** 899/**
785 * scsw_is_valid_stctl - check stctl field validity 900 * scsw_is_valid_stctl - check stctl field validity
@@ -789,14 +904,13 @@ EXPORT_SYMBOL(scsw_is_valid_pno);
789 * regardless of whether it is a transport mode or command mode scsw. 904 * regardless of whether it is a transport mode or command mode scsw.
790 * Return zero if the field does not contain a valid value. 905 * Return zero if the field does not contain a valid value.
791 */ 906 */
792int scsw_is_valid_stctl(union scsw *scsw) 907static inline int scsw_is_valid_stctl(union scsw *scsw)
793{ 908{
794 if (scsw_is_tm(scsw)) 909 if (scsw_is_tm(scsw))
795 return scsw_tm_is_valid_stctl(scsw); 910 return scsw_tm_is_valid_stctl(scsw);
796 else 911 else
797 return scsw_cmd_is_valid_stctl(scsw); 912 return scsw_cmd_is_valid_stctl(scsw);
798} 913}
799EXPORT_SYMBOL(scsw_is_valid_stctl);
800 914
801/** 915/**
802 * scsw_cmd_is_solicited - check for solicited scsw 916 * scsw_cmd_is_solicited - check for solicited scsw
@@ -805,12 +919,11 @@ EXPORT_SYMBOL(scsw_is_valid_stctl);
805 * Return non-zero if the command mode scsw indicates that the associated 919 * Return non-zero if the command mode scsw indicates that the associated
806 * status condition is solicited, zero if it is unsolicited. 920 * status condition is solicited, zero if it is unsolicited.
807 */ 921 */
808int scsw_cmd_is_solicited(union scsw *scsw) 922static inline int scsw_cmd_is_solicited(union scsw *scsw)
809{ 923{
810 return (scsw->cmd.cc != 0) || (scsw->cmd.stctl != 924 return (scsw->cmd.cc != 0) || (scsw->cmd.stctl !=
811 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)); 925 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS));
812} 926}
813EXPORT_SYMBOL(scsw_cmd_is_solicited);
814 927
815/** 928/**
816 * scsw_tm_is_solicited - check for solicited scsw 929 * scsw_tm_is_solicited - check for solicited scsw
@@ -819,12 +932,11 @@ EXPORT_SYMBOL(scsw_cmd_is_solicited);
819 * Return non-zero if the transport mode scsw indicates that the associated 932 * Return non-zero if the transport mode scsw indicates that the associated
820 * status condition is solicited, zero if it is unsolicited. 933 * status condition is solicited, zero if it is unsolicited.
821 */ 934 */
822int scsw_tm_is_solicited(union scsw *scsw) 935static inline int scsw_tm_is_solicited(union scsw *scsw)
823{ 936{
824 return (scsw->tm.cc != 0) || (scsw->tm.stctl != 937 return (scsw->tm.cc != 0) || (scsw->tm.stctl !=
825 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)); 938 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS));
826} 939}
827EXPORT_SYMBOL(scsw_tm_is_solicited);
828 940
829/** 941/**
830 * scsw_is_solicited - check for solicited scsw 942 * scsw_is_solicited - check for solicited scsw
@@ -833,11 +945,12 @@ EXPORT_SYMBOL(scsw_tm_is_solicited);
833 * Return non-zero if the transport or command mode scsw indicates that the 945 * Return non-zero if the transport or command mode scsw indicates that the
834 * associated status condition is solicited, zero if it is unsolicited. 946 * associated status condition is solicited, zero if it is unsolicited.
835 */ 947 */
836int scsw_is_solicited(union scsw *scsw) 948static inline int scsw_is_solicited(union scsw *scsw)
837{ 949{
838 if (scsw_is_tm(scsw)) 950 if (scsw_is_tm(scsw))
839 return scsw_tm_is_solicited(scsw); 951 return scsw_tm_is_solicited(scsw);
840 else 952 else
841 return scsw_cmd_is_solicited(scsw); 953 return scsw_cmd_is_solicited(scsw);
842} 954}
843EXPORT_SYMBOL(scsw_is_solicited); 955
956#endif /* _ASM_S390_SCSW_H_ */
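
With the move into a header, every scsw accessor becomes a static inline, so drivers keep using them unchanged while the cio code no longer has to export them. A typical call site looks roughly like the sketch below (the handler name is made up; the irb argument is the interrupt-response block a ccw driver receives in its interrupt handler, defined in asm/cio.h):

    static void my_ccw_irq_handler(struct ccw_device *cdev,
                                   unsigned long intparm, struct irb *irb)
    {
            /* the wrappers work for both command- and transport-mode SCSWs */
            if (!scsw_is_solicited(&irb->scsw))
                    return;
            if (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK)
                    ;       /* start sense / error recovery here */
    }
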
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 38b0fc221ed7..e37478e87286 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -8,7 +8,7 @@
8#ifndef _ASM_S390_SETUP_H 8#ifndef _ASM_S390_SETUP_H
9#define _ASM_S390_SETUP_H 9#define _ASM_S390_SETUP_H
10 10
11#define COMMAND_LINE_SIZE 1024 11#define COMMAND_LINE_SIZE 4096
12 12
13#define ARCH_COMMAND_LINE_SIZE 896 13#define ARCH_COMMAND_LINE_SIZE 896
14 14
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index 72137bc907ac..c991fe6473c9 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -51,32 +51,7 @@ extern void machine_power_off_smp(void);
51#define PROC_CHANGE_PENALTY 20 /* Schedule penalty */ 51#define PROC_CHANGE_PENALTY 20 /* Schedule penalty */
52 52
53#define raw_smp_processor_id() (S390_lowcore.cpu_nr) 53#define raw_smp_processor_id() (S390_lowcore.cpu_nr)
54 54#define cpu_logical_map(cpu) (cpu)
55/*
56 * returns 1 if cpu is in stopped/check stopped state or not operational
57 * returns 0 otherwise
58 */
59static inline int
60smp_cpu_not_running(int cpu)
61{
62 __u32 status;
63
64 switch (signal_processor_ps(&status, 0, cpu, sigp_sense)) {
65 case sigp_order_code_accepted:
66 case sigp_status_stored:
67 /* Check for stopped and check stop state */
68 if (status & 0x50)
69 return 1;
70 break;
71 case sigp_not_operational:
72 return 1;
73 default:
74 break;
75 }
76 return 0;
77}
78
79#define cpu_logical_map(cpu) (cpu)
80 55
81extern int __cpu_disable (void); 56extern int __cpu_disable (void);
82extern void __cpu_die (unsigned int cpu); 57extern void __cpu_die (unsigned int cpu);
@@ -91,11 +66,6 @@ extern void arch_send_call_function_ipi(cpumask_t mask);
91 66
92#endif 67#endif
93 68
94#ifndef CONFIG_SMP
95#define hard_smp_processor_id() 0
96#define smp_cpu_not_running(cpu) 1
97#endif
98
99#ifdef CONFIG_HOTPLUG_CPU 69#ifdef CONFIG_HOTPLUG_CPU
100extern int smp_rescan_cpus(void); 70extern int smp_rescan_cpus(void);
101#else 71#else
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index c9af0d19c7ab..41ce6861174e 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -191,4 +191,33 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
191#define _raw_read_relax(lock) cpu_relax() 191#define _raw_read_relax(lock) cpu_relax()
192#define _raw_write_relax(lock) cpu_relax() 192#define _raw_write_relax(lock) cpu_relax()
193 193
194#define __always_inline__spin_lock
195#define __always_inline__read_lock
196#define __always_inline__write_lock
197#define __always_inline__spin_lock_bh
198#define __always_inline__read_lock_bh
199#define __always_inline__write_lock_bh
200#define __always_inline__spin_lock_irq
201#define __always_inline__read_lock_irq
202#define __always_inline__write_lock_irq
203#define __always_inline__spin_lock_irqsave
204#define __always_inline__read_lock_irqsave
205#define __always_inline__write_lock_irqsave
206#define __always_inline__spin_trylock
207#define __always_inline__read_trylock
208#define __always_inline__write_trylock
209#define __always_inline__spin_trylock_bh
210#define __always_inline__spin_unlock
211#define __always_inline__read_unlock
212#define __always_inline__write_unlock
213#define __always_inline__spin_unlock_bh
214#define __always_inline__read_unlock_bh
215#define __always_inline__write_unlock_bh
216#define __always_inline__spin_unlock_irq
217#define __always_inline__read_unlock_irq
218#define __always_inline__write_unlock_irq
219#define __always_inline__spin_unlock_irqrestore
220#define __always_inline__read_unlock_irqrestore
221#define __always_inline__write_unlock_irqrestore
222
194#endif /* __ASM_SPINLOCK_H */ 223#endif /* __ASM_SPINLOCK_H */
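
The block of __always_inline__* defines is how an architecture opts in to the inlined-spinlock infrastructure: for each define present, the generic locking code maps the out-of-line _spin_lock()-style entry point onto its inline variant instead of emitting a function call. Roughly, the generic side follows the pattern sketched below (an approximation of the include/linux/spinlock_api_smp.h and kernel/spinlock.c convention of this kernel version, shown only for orientation):

    #ifdef __always_inline__spin_lock
    #define _spin_lock(lock) __spin_lock(lock)      /* resolves to the inline */
    #endif

    /* kernel/spinlock.c only builds the out-of-line body if no such
     * mapping exists: */
    #ifndef _spin_lock
    void __lockfunc _spin_lock(spinlock_t *lock)
    {
            __spin_lock(lock);
    }
    EXPORT_SYMBOL(_spin_lock);
    #endif
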
diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
index 4fb83c1cdb77..379661d2f81a 100644
--- a/arch/s390/include/asm/system.h
+++ b/arch/s390/include/asm/system.h
@@ -109,11 +109,7 @@ extern void pfault_fini(void);
109#define pfault_fini() do { } while (0) 109#define pfault_fini() do { } while (0)
110#endif /* CONFIG_PFAULT */ 110#endif /* CONFIG_PFAULT */
111 111
112#ifdef CONFIG_PAGE_STATES
113extern void cmma_init(void); 112extern void cmma_init(void);
114#else
115static inline void cmma_init(void) { }
116#endif
117 113
118#define finish_arch_switch(prev) do { \ 114#define finish_arch_switch(prev) do { \
119 set_fs(current->thread.mm_segment); \ 115 set_fs(current->thread.mm_segment); \
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index ba1cab9fc1f9..07eb61b2fb3a 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -92,7 +92,7 @@ static inline struct thread_info *current_thread_info(void)
92#define TIF_SYSCALL_TRACE 8 /* syscall trace active */ 92#define TIF_SYSCALL_TRACE 8 /* syscall trace active */
93#define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */ 93#define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */
94#define TIF_SECCOMP 10 /* secure computing */ 94#define TIF_SECCOMP 10 /* secure computing */
95#define TIF_SYSCALL_FTRACE 11 /* ftrace syscall instrumentation */ 95#define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
96#define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */ 96#define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
97#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling 97#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling
98 TIF_NEED_RESCHED */ 98 TIF_NEED_RESCHED */
@@ -111,7 +111,7 @@ static inline struct thread_info *current_thread_info(void)
111#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) 111#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
112#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) 112#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
113#define _TIF_SECCOMP (1<<TIF_SECCOMP) 113#define _TIF_SECCOMP (1<<TIF_SECCOMP)
114#define _TIF_SYSCALL_FTRACE (1<<TIF_SYSCALL_FTRACE) 114#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
115#define _TIF_USEDFPU (1<<TIF_USEDFPU) 115#define _TIF_USEDFPU (1<<TIF_USEDFPU)
116#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) 116#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
117#define _TIF_31BIT (1<<TIF_31BIT) 117#define _TIF_31BIT (1<<TIF_31BIT)
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index cc21e3e20fd7..24aa1cda20ad 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -90,4 +90,18 @@ unsigned long long monotonic_clock(void);
90 90
91extern u64 sched_clock_base_cc; 91extern u64 sched_clock_base_cc;
92 92
93/**
94 * get_clock_monotonic - returns current time in clock rate units
95 *
96 * The caller must ensure that preemption is disabled.
97 * The clock and sched_clock_base get changed via stop_machine.
98 * Therefore preemption must be disabled when calling this
99 * function, otherwise the returned value is not guaranteed to
100 * be monotonic.
101 */
102static inline unsigned long long get_clock_monotonic(void)
103{
104 return get_clock_xt() - sched_clock_base_cc;
105}
106
93#endif 107#endif
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index c75ed43b1a18..c7be8e10b87e 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -32,7 +32,7 @@ extra-y += head.o init_task.o vmlinux.lds
32 32
33obj-$(CONFIG_MODULES) += s390_ksyms.o module.o 33obj-$(CONFIG_MODULES) += s390_ksyms.o module.o
34obj-$(CONFIG_SMP) += smp.o topology.o 34obj-$(CONFIG_SMP) += smp.o topology.o
35 35obj-$(CONFIG_HIBERNATION) += suspend.o swsusp_asm64.o
36obj-$(CONFIG_AUDIT) += audit.o 36obj-$(CONFIG_AUDIT) += audit.o
37compat-obj-$(CONFIG_AUDIT) += compat_audit.o 37compat-obj-$(CONFIG_AUDIT) += compat_audit.o
38obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o \ 38obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o \
@@ -41,7 +41,7 @@ obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o \
41 41
42obj-$(CONFIG_STACKTRACE) += stacktrace.o 42obj-$(CONFIG_STACKTRACE) += stacktrace.o
43obj-$(CONFIG_KPROBES) += kprobes.o 43obj-$(CONFIG_KPROBES) += kprobes.o
44obj-$(CONFIG_FUNCTION_TRACER) += mcount.o 44obj-$(CONFIG_FUNCTION_TRACER) += $(if $(CONFIG_64BIT),mcount64.o,mcount.o)
45obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o 45obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
46obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o 46obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
47 47
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index cae14c499511..bf8b4ae7ff2d 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -6,6 +6,9 @@
6 * Heiko Carstens <heiko.carstens@de.ibm.com> 6 * Heiko Carstens <heiko.carstens@de.ibm.com>
7 */ 7 */
8 8
9#define KMSG_COMPONENT "setup"
10#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
11
9#include <linux/compiler.h> 12#include <linux/compiler.h>
10#include <linux/init.h> 13#include <linux/init.h>
11#include <linux/errno.h> 14#include <linux/errno.h>
@@ -16,6 +19,7 @@
16#include <linux/module.h> 19#include <linux/module.h>
17#include <linux/pfn.h> 20#include <linux/pfn.h>
18#include <linux/uaccess.h> 21#include <linux/uaccess.h>
22#include <linux/kernel.h>
19#include <asm/ebcdic.h> 23#include <asm/ebcdic.h>
20#include <asm/ipl.h> 24#include <asm/ipl.h>
21#include <asm/lowcore.h> 25#include <asm/lowcore.h>
@@ -35,8 +39,6 @@
35 39
36char kernel_nss_name[NSS_NAME_SIZE + 1]; 40char kernel_nss_name[NSS_NAME_SIZE + 1];
37 41
38static unsigned long machine_flags;
39
40static void __init setup_boot_command_line(void); 42static void __init setup_boot_command_line(void);
41 43
42/* 44/*
@@ -81,6 +83,8 @@ asm(
81 " br 14\n" 83 " br 14\n"
82 " .size savesys_ipl_nss, .-savesys_ipl_nss\n"); 84 " .size savesys_ipl_nss, .-savesys_ipl_nss\n");
83 85
86static __initdata char upper_command_line[COMMAND_LINE_SIZE];
87
84static noinline __init void create_kernel_nss(void) 88static noinline __init void create_kernel_nss(void)
85{ 89{
86 unsigned int i, stext_pfn, eshared_pfn, end_pfn, min_size; 90 unsigned int i, stext_pfn, eshared_pfn, end_pfn, min_size;
@@ -90,7 +94,6 @@ static noinline __init void create_kernel_nss(void)
90 int response; 94 int response;
91 size_t len; 95 size_t len;
92 char *savesys_ptr; 96 char *savesys_ptr;
93 char upper_command_line[COMMAND_LINE_SIZE];
94 char defsys_cmd[DEFSYS_CMD_SIZE]; 97 char defsys_cmd[DEFSYS_CMD_SIZE];
95 char savesys_cmd[SAVESYS_CMD_SIZE]; 98 char savesys_cmd[SAVESYS_CMD_SIZE];
96 99
@@ -141,6 +144,8 @@ static noinline __init void create_kernel_nss(void)
141 __cpcmd(defsys_cmd, NULL, 0, &response); 144 __cpcmd(defsys_cmd, NULL, 0, &response);
142 145
143 if (response != 0) { 146 if (response != 0) {
147 pr_err("Defining the Linux kernel NSS failed with rc=%d\n",
148 response);
144 kernel_nss_name[0] = '\0'; 149 kernel_nss_name[0] = '\0';
145 return; 150 return;
146 } 151 }
@@ -153,8 +158,11 @@ static noinline __init void create_kernel_nss(void)
153 * max SAVESYS_CMD_SIZE 158 * max SAVESYS_CMD_SIZE
154 * On error: response contains the numeric portion of cp error message. 159 * On error: response contains the numeric portion of cp error message.
155 * for SAVESYS it will be >= 263 160 * for SAVESYS it will be >= 263
161 * for missing privilege class, it will be 1
156 */ 162 */
157 if (response > SAVESYS_CMD_SIZE) { 163 if (response > SAVESYS_CMD_SIZE || response == 1) {
164 pr_err("Saving the Linux kernel NSS failed with rc=%d\n",
165 response);
158 kernel_nss_name[0] = '\0'; 166 kernel_nss_name[0] = '\0';
159 return; 167 return;
160 } 168 }
@@ -205,12 +213,9 @@ static noinline __init void detect_machine_type(void)
205 213
206 /* Running under KVM? If not we assume z/VM */ 214 /* Running under KVM? If not we assume z/VM */
207 if (!memcmp(vmms.vm[0].cpi, "\xd2\xe5\xd4", 3)) 215 if (!memcmp(vmms.vm[0].cpi, "\xd2\xe5\xd4", 3))
208 machine_flags |= MACHINE_FLAG_KVM; 216 S390_lowcore.machine_flags |= MACHINE_FLAG_KVM;
209 else 217 else
210 machine_flags |= MACHINE_FLAG_VM; 218 S390_lowcore.machine_flags |= MACHINE_FLAG_VM;
211
212 /* Store machine flags for setting up lowcore early */
213 S390_lowcore.machine_flags = machine_flags;
214} 219}
215 220
216static __init void early_pgm_check_handler(void) 221static __init void early_pgm_check_handler(void)
@@ -245,7 +250,7 @@ static noinline __init void setup_hpage(void)
245 facilities = stfl(); 250 facilities = stfl();
246 if (!(facilities & (1UL << 23)) || !(facilities & (1UL << 29))) 251 if (!(facilities & (1UL << 23)) || !(facilities & (1UL << 29)))
247 return; 252 return;
248 machine_flags |= MACHINE_FLAG_HPAGE; 253 S390_lowcore.machine_flags |= MACHINE_FLAG_HPAGE;
249 __ctl_set_bit(0, 23); 254 __ctl_set_bit(0, 23);
250#endif 255#endif
251} 256}
@@ -263,7 +268,7 @@ static __init void detect_mvpg(void)
263 EX_TABLE(0b,1b) 268 EX_TABLE(0b,1b)
264 : "=d" (rc) : "0" (-EOPNOTSUPP), "a" (0) : "memory", "cc", "0"); 269 : "=d" (rc) : "0" (-EOPNOTSUPP), "a" (0) : "memory", "cc", "0");
265 if (!rc) 270 if (!rc)
266 machine_flags |= MACHINE_FLAG_MVPG; 271 S390_lowcore.machine_flags |= MACHINE_FLAG_MVPG;
267#endif 272#endif
268} 273}
269 274
@@ -279,7 +284,7 @@ static __init void detect_ieee(void)
279 EX_TABLE(0b,1b) 284 EX_TABLE(0b,1b)
280 : "=d" (rc), "=d" (tmp): "0" (-EOPNOTSUPP) : "cc"); 285 : "=d" (rc), "=d" (tmp): "0" (-EOPNOTSUPP) : "cc");
281 if (!rc) 286 if (!rc)
282 machine_flags |= MACHINE_FLAG_IEEE; 287 S390_lowcore.machine_flags |= MACHINE_FLAG_IEEE;
283#endif 288#endif
284} 289}
285 290
@@ -298,7 +303,7 @@ static __init void detect_csp(void)
298 EX_TABLE(0b,1b) 303 EX_TABLE(0b,1b)
299 : "=d" (rc) : "0" (-EOPNOTSUPP) : "cc", "0", "1", "2"); 304 : "=d" (rc) : "0" (-EOPNOTSUPP) : "cc", "0", "1", "2");
300 if (!rc) 305 if (!rc)
301 machine_flags |= MACHINE_FLAG_CSP; 306 S390_lowcore.machine_flags |= MACHINE_FLAG_CSP;
302#endif 307#endif
303} 308}
304 309
@@ -315,7 +320,7 @@ static __init void detect_diag9c(void)
315 EX_TABLE(0b,1b) 320 EX_TABLE(0b,1b)
316 : "=d" (rc) : "0" (-EOPNOTSUPP), "d" (cpu_address) : "cc"); 321 : "=d" (rc) : "0" (-EOPNOTSUPP), "d" (cpu_address) : "cc");
317 if (!rc) 322 if (!rc)
318 machine_flags |= MACHINE_FLAG_DIAG9C; 323 S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG9C;
319} 324}
320 325
321static __init void detect_diag44(void) 326static __init void detect_diag44(void)
@@ -330,7 +335,7 @@ static __init void detect_diag44(void)
330 EX_TABLE(0b,1b) 335 EX_TABLE(0b,1b)
331 : "=d" (rc) : "0" (-EOPNOTSUPP) : "cc"); 336 : "=d" (rc) : "0" (-EOPNOTSUPP) : "cc");
332 if (!rc) 337 if (!rc)
333 machine_flags |= MACHINE_FLAG_DIAG44; 338 S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG44;
334#endif 339#endif
335} 340}
336 341
@@ -341,11 +346,11 @@ static __init void detect_machine_facilities(void)
341 346
342 facilities = stfl(); 347 facilities = stfl();
343 if (facilities & (1 << 28)) 348 if (facilities & (1 << 28))
344 machine_flags |= MACHINE_FLAG_IDTE; 349 S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE;
345 if (facilities & (1 << 23)) 350 if (facilities & (1 << 23))
346 machine_flags |= MACHINE_FLAG_PFMF; 351 S390_lowcore.machine_flags |= MACHINE_FLAG_PFMF;
347 if (facilities & (1 << 4)) 352 if (facilities & (1 << 4))
348 machine_flags |= MACHINE_FLAG_MVCOS; 353 S390_lowcore.machine_flags |= MACHINE_FLAG_MVCOS;
349#endif 354#endif
350} 355}
351 356
@@ -367,21 +372,35 @@ static __init void rescue_initrd(void)
367} 372}
368 373
369/* Set up boot command line */ 374/* Set up boot command line */
370static void __init setup_boot_command_line(void) 375static void __init append_to_cmdline(size_t (*ipl_data)(char *, size_t))
371{ 376{
372 char *parm = NULL; 377 char *parm, *delim;
378 size_t rc, len;
379
380 len = strlen(boot_command_line);
381
382 delim = boot_command_line + len; /* '\0' character position */
383 parm = boot_command_line + len + 1; /* append right after '\0' */
373 384
385 rc = ipl_data(parm, COMMAND_LINE_SIZE - len - 1);
386 if (rc) {
387 if (*parm == '=')
388 memmove(boot_command_line, parm + 1, rc);
389 else
390 *delim = ' '; /* replace '\0' with space */
391 }
392}
393
394static void __init setup_boot_command_line(void)
395{
374 /* copy arch command line */ 396 /* copy arch command line */
375 strlcpy(boot_command_line, COMMAND_LINE, ARCH_COMMAND_LINE_SIZE); 397 strlcpy(boot_command_line, COMMAND_LINE, ARCH_COMMAND_LINE_SIZE);
376 398
377 /* append IPL PARM data to the boot command line */ 399 /* append IPL PARM data to the boot command line */
378 if (MACHINE_IS_VM) { 400 if (MACHINE_IS_VM)
379 parm = boot_command_line + strlen(boot_command_line); 401 append_to_cmdline(append_ipl_vmparm);
380 *parm++ = ' '; 402
381 get_ipl_vmparm(parm); 403 append_to_cmdline(append_ipl_scpdata);
382 if (parm[0] == '=')
383 memmove(boot_command_line, parm + 1, strlen(parm));
384 }
385} 404}
386 405
387 406
@@ -413,7 +432,6 @@ void __init startup_init(void)
413 setup_hpage(); 432 setup_hpage();
414 sclp_facilities_detect(); 433 sclp_facilities_detect();
415 detect_memory_layout(memory_chunk); 434 detect_memory_layout(memory_chunk);
416 S390_lowcore.machine_flags = machine_flags;
417#ifdef CONFIG_DYNAMIC_FTRACE 435#ifdef CONFIG_DYNAMIC_FTRACE
418 S390_lowcore.ftrace_func = (unsigned long)ftrace_caller; 436 S390_lowcore.ftrace_func = (unsigned long)ftrace_caller;
419#endif 437#endif
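
A note on the append_to_cmdline() helper added above: the callback passed in (append_ipl_vmparm or append_ipl_scpdata) writes its data directly behind the terminating '\0' of the current command line and returns the number of bytes it produced. Data starting with '=' replaces the whole command line; anything else is appended by turning the old '\0' into a space. A minimal userspace sketch of that append-or-replace rule, using hypothetical names and a fixed-size buffer:

#include <stdio.h>
#include <string.h>

#define CMDLINE_SIZE 128

/* Hypothetical sketch of the rule used by append_to_cmdline():
 * IPL-provided data is normally appended after a space, but a
 * leading '=' replaces the whole command line. */
static void append_or_replace(char *cmdline, const char *ipl_data)
{
	size_t len = strlen(cmdline);

	if (ipl_data[0] == '=')
		memmove(cmdline, ipl_data + 1, strlen(ipl_data)); /* copies trailing '\0' */
	else
		snprintf(cmdline + len, CMDLINE_SIZE - len, " %s", ipl_data);
}

int main(void)
{
	char a[CMDLINE_SIZE] = "root=/dev/dasda1";
	char b[CMDLINE_SIZE] = "root=/dev/dasda1";

	append_or_replace(a, "cio_ignore=all");
	append_or_replace(b, "=init=/bin/sh");
	printf("%s\n%s\n", a, b); /* "root=/dev/dasda1 cio_ignore=all" and "init=/bin/sh" */
	return 0;
}
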
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index c4c80a22bc1f..f43d2ee54464 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -54,7 +54,7 @@ _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
54_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ 54_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
55 _TIF_MCCK_PENDING) 55 _TIF_MCCK_PENDING)
56_TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \ 56_TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
57 _TIF_SECCOMP>>8 | _TIF_SYSCALL_FTRACE>>8) 57 _TIF_SECCOMP>>8 | _TIF_SYSCALL_TRACEPOINT>>8)
58 58
59STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER 59STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
60STACK_SIZE = 1 << STACK_SHIFT 60STACK_SIZE = 1 << STACK_SHIFT
@@ -278,7 +278,8 @@ sysc_return:
278 bnz BASED(sysc_work) # there is work to do (signals etc.) 278 bnz BASED(sysc_work) # there is work to do (signals etc.)
279sysc_restore: 279sysc_restore:
280#ifdef CONFIG_TRACE_IRQFLAGS 280#ifdef CONFIG_TRACE_IRQFLAGS
281 la %r1,BASED(sysc_restore_trace_psw) 281 la %r1,BASED(sysc_restore_trace_psw_addr)
282 l %r1,0(%r1)
282 lpsw 0(%r1) 283 lpsw 0(%r1)
283sysc_restore_trace: 284sysc_restore_trace:
284 TRACE_IRQS_CHECK 285 TRACE_IRQS_CHECK
@@ -289,10 +290,15 @@ sysc_leave:
289sysc_done: 290sysc_done:
290 291
291#ifdef CONFIG_TRACE_IRQFLAGS 292#ifdef CONFIG_TRACE_IRQFLAGS
293sysc_restore_trace_psw_addr:
294 .long sysc_restore_trace_psw
295
296 .section .data,"aw",@progbits
292 .align 8 297 .align 8
293 .globl sysc_restore_trace_psw 298 .globl sysc_restore_trace_psw
294sysc_restore_trace_psw: 299sysc_restore_trace_psw:
295 .long 0, sysc_restore_trace + 0x80000000 300 .long 0, sysc_restore_trace + 0x80000000
301 .previous
296#endif 302#endif
297 303
298# 304#
@@ -606,7 +612,8 @@ io_return:
606 bnz BASED(io_work) # there is work to do (signals etc.) 612 bnz BASED(io_work) # there is work to do (signals etc.)
607io_restore: 613io_restore:
608#ifdef CONFIG_TRACE_IRQFLAGS 614#ifdef CONFIG_TRACE_IRQFLAGS
609 la %r1,BASED(io_restore_trace_psw) 615 la %r1,BASED(io_restore_trace_psw_addr)
616 l %r1,0(%r1)
610 lpsw 0(%r1) 617 lpsw 0(%r1)
611io_restore_trace: 618io_restore_trace:
612 TRACE_IRQS_CHECK 619 TRACE_IRQS_CHECK
@@ -617,10 +624,15 @@ io_leave:
617io_done: 624io_done:
618 625
619#ifdef CONFIG_TRACE_IRQFLAGS 626#ifdef CONFIG_TRACE_IRQFLAGS
627io_restore_trace_psw_addr:
628 .long io_restore_trace_psw
629
630 .section .data,"aw",@progbits
620 .align 8 631 .align 8
621 .globl io_restore_trace_psw 632 .globl io_restore_trace_psw
622io_restore_trace_psw: 633io_restore_trace_psw:
623 .long 0, io_restore_trace + 0x80000000 634 .long 0, io_restore_trace + 0x80000000
635 .previous
624#endif 636#endif
625 637
626# 638#
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index f6618e9e15ef..a6f7b20df616 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -57,7 +57,7 @@ _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
57_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ 57_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
58 _TIF_MCCK_PENDING) 58 _TIF_MCCK_PENDING)
59_TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \ 59_TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
60 _TIF_SECCOMP>>8 | _TIF_SYSCALL_FTRACE>>8) 60 _TIF_SECCOMP>>8 | _TIF_SYSCALL_TRACEPOINT>>8)
61 61
62#define BASED(name) name-system_call(%r13) 62#define BASED(name) name-system_call(%r13)
63 63
@@ -284,10 +284,12 @@ sysc_leave:
284sysc_done: 284sysc_done:
285 285
286#ifdef CONFIG_TRACE_IRQFLAGS 286#ifdef CONFIG_TRACE_IRQFLAGS
287 .section .data,"aw",@progbits
287 .align 8 288 .align 8
288 .globl sysc_restore_trace_psw 289 .globl sysc_restore_trace_psw
289sysc_restore_trace_psw: 290sysc_restore_trace_psw:
290 .quad 0, sysc_restore_trace 291 .quad 0, sysc_restore_trace
292 .previous
291#endif 293#endif
292 294
293# 295#
@@ -595,10 +597,12 @@ io_leave:
595io_done: 597io_done:
596 598
597#ifdef CONFIG_TRACE_IRQFLAGS 599#ifdef CONFIG_TRACE_IRQFLAGS
600 .section .data,"aw",@progbits
598 .align 8 601 .align 8
599 .globl io_restore_trace_psw 602 .globl io_restore_trace_psw
600io_restore_trace_psw: 603io_restore_trace_psw:
601 .quad 0, io_restore_trace 604 .quad 0, io_restore_trace
605 .previous
602#endif 606#endif
603 607
604# 608#
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 3e298e64f0db..57bdcb1e3cdf 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -220,6 +220,29 @@ struct syscall_metadata *syscall_nr_to_meta(int nr)
220 return syscalls_metadata[nr]; 220 return syscalls_metadata[nr];
221} 221}
222 222
223int syscall_name_to_nr(char *name)
224{
225 int i;
226
227 if (!syscalls_metadata)
228 return -1;
229 for (i = 0; i < NR_syscalls; i++)
230 if (syscalls_metadata[i])
231 if (!strcmp(syscalls_metadata[i]->name, name))
232 return i;
233 return -1;
234}
235
236void set_syscall_enter_id(int num, int id)
237{
238 syscalls_metadata[num]->enter_id = id;
239}
240
241void set_syscall_exit_id(int num, int id)
242{
243 syscalls_metadata[num]->exit_id = id;
244}
245
223static struct syscall_metadata *find_syscall_meta(unsigned long syscall) 246static struct syscall_metadata *find_syscall_meta(unsigned long syscall)
224{ 247{
225 struct syscall_metadata *start; 248 struct syscall_metadata *start;
@@ -237,24 +260,19 @@ static struct syscall_metadata *find_syscall_meta(unsigned long syscall)
237 return NULL; 260 return NULL;
238} 261}
239 262
240void arch_init_ftrace_syscalls(void) 263static int __init arch_init_ftrace_syscalls(void)
241{ 264{
242 struct syscall_metadata *meta; 265 struct syscall_metadata *meta;
243 int i; 266 int i;
244 static atomic_t refs;
245
246 if (atomic_inc_return(&refs) != 1)
247 goto out;
248 syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * NR_syscalls, 267 syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * NR_syscalls,
249 GFP_KERNEL); 268 GFP_KERNEL);
250 if (!syscalls_metadata) 269 if (!syscalls_metadata)
251 goto out; 270 return -ENOMEM;
252 for (i = 0; i < NR_syscalls; i++) { 271 for (i = 0; i < NR_syscalls; i++) {
253 meta = find_syscall_meta((unsigned long)sys_call_table[i]); 272 meta = find_syscall_meta((unsigned long)sys_call_table[i]);
254 syscalls_metadata[i] = meta; 273 syscalls_metadata[i] = meta;
255 } 274 }
256 return; 275 return 0;
257out:
258 atomic_dec(&refs);
259} 276}
277arch_initcall(arch_init_ftrace_syscalls);
260#endif 278#endif
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index ec6882348520..c52b4f7742fa 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -27,6 +27,7 @@
27#include <asm/asm-offsets.h> 27#include <asm/asm-offsets.h>
28#include <asm/thread_info.h> 28#include <asm/thread_info.h>
29#include <asm/page.h> 29#include <asm/page.h>
30#include <asm/cpu.h>
30 31
31#ifdef CONFIG_64BIT 32#ifdef CONFIG_64BIT
32#define ARCH_OFFSET 4 33#define ARCH_OFFSET 4
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S
index 2ced846065b7..602b508cd4c4 100644
--- a/arch/s390/kernel/head31.S
+++ b/arch/s390/kernel/head31.S
@@ -24,6 +24,7 @@ startup_continue:
24# Setup stack 24# Setup stack
25# 25#
26 l %r15,.Linittu-.LPG1(%r13) 26 l %r15,.Linittu-.LPG1(%r13)
27 st %r15,__LC_THREAD_INFO # cache thread info in lowcore
27 mvc __LC_CURRENT(4),__TI_task(%r15) 28 mvc __LC_CURRENT(4),__TI_task(%r15)
28 ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union+THREAD_SIZE 29 ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union+THREAD_SIZE
29 st %r15,__LC_KERNEL_STACK # set end of kernel stack 30 st %r15,__LC_KERNEL_STACK # set end of kernel stack
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 65667b2e65ce..6a250808092b 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -62,9 +62,9 @@ startup_continue:
62 clr %r11,%r12 62 clr %r11,%r12
63 je 5f # no more space in prefix array 63 je 5f # no more space in prefix array
644: 644:
65 ahi %r8,1 # next cpu (r8 += 1) 65 ahi %r8,1 # next cpu (r8 += 1)
66 cl %r8,.Llast_cpu-.LPG1(%r13) # is last possible cpu ? 66 chi %r8,MAX_CPU_ADDRESS # is last possible cpu ?
67 jl 1b # jump if not last cpu 67 jle 1b # jump if not last cpu
685: 685:
69 lhi %r1,2 # mode 2 = esame (dump) 69 lhi %r1,2 # mode 2 = esame (dump)
70 j 6f 70 j 6f
@@ -92,6 +92,7 @@ startup_continue:
92# Setup stack 92# Setup stack
93# 93#
94 larl %r15,init_thread_union 94 larl %r15,init_thread_union
95 stg %r15,__LC_THREAD_INFO # cache thread info in lowcore
95 lg %r14,__TI_task(%r15) # cache current in lowcore 96 lg %r14,__TI_task(%r15) # cache current in lowcore
96 stg %r14,__LC_CURRENT 97 stg %r14,__LC_CURRENT
97 aghi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union + THREAD_SIZE 98 aghi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union + THREAD_SIZE
@@ -129,8 +130,6 @@ startup_continue:
129#ifdef CONFIG_ZFCPDUMP 130#ifdef CONFIG_ZFCPDUMP
130.Lcurrent_cpu: 131.Lcurrent_cpu:
131 .long 0x0 132 .long 0x0
132.Llast_cpu:
133 .long 0x0000ffff
134.Lpref_arr_ptr: 133.Lpref_arr_ptr:
135 .long zfcpdump_prefix_array 134 .long zfcpdump_prefix_array
136#endif /* CONFIG_ZFCPDUMP */ 135#endif /* CONFIG_ZFCPDUMP */
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 371a2d88f4ac..ee57a42e6e93 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -272,17 +272,18 @@ static ssize_t ipl_type_show(struct kobject *kobj, struct kobj_attribute *attr,
272static struct kobj_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type); 272static struct kobj_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type);
273 273
274/* VM IPL PARM routines */ 274/* VM IPL PARM routines */
275static void reipl_get_ascii_vmparm(char *dest, 275size_t reipl_get_ascii_vmparm(char *dest, size_t size,
276 const struct ipl_parameter_block *ipb) 276 const struct ipl_parameter_block *ipb)
277{ 277{
278 int i; 278 int i;
279 int len = 0; 279 size_t len;
280 char has_lowercase = 0; 280 char has_lowercase = 0;
281 281
282 len = 0;
282 if ((ipb->ipl_info.ccw.vm_flags & DIAG308_VM_FLAGS_VP_VALID) && 283 if ((ipb->ipl_info.ccw.vm_flags & DIAG308_VM_FLAGS_VP_VALID) &&
283 (ipb->ipl_info.ccw.vm_parm_len > 0)) { 284 (ipb->ipl_info.ccw.vm_parm_len > 0)) {
284 285
285 len = ipb->ipl_info.ccw.vm_parm_len; 286 len = min_t(size_t, size - 1, ipb->ipl_info.ccw.vm_parm_len);
286 memcpy(dest, ipb->ipl_info.ccw.vm_parm, len); 287 memcpy(dest, ipb->ipl_info.ccw.vm_parm, len);
287 /* If at least one character is lowercase, we assume mixed 288 /* If at least one character is lowercase, we assume mixed
288 * case; otherwise we convert everything to lowercase. 289 * case; otherwise we convert everything to lowercase.
@@ -299,14 +300,20 @@ static void reipl_get_ascii_vmparm(char *dest,
299 EBCASC(dest, len); 300 EBCASC(dest, len);
300 } 301 }
301 dest[len] = 0; 302 dest[len] = 0;
303
304 return len;
302} 305}
303 306
304void get_ipl_vmparm(char *dest) 307size_t append_ipl_vmparm(char *dest, size_t size)
305{ 308{
309 size_t rc;
310
311 rc = 0;
306 if (diag308_set_works && (ipl_block.hdr.pbt == DIAG308_IPL_TYPE_CCW)) 312 if (diag308_set_works && (ipl_block.hdr.pbt == DIAG308_IPL_TYPE_CCW))
307 reipl_get_ascii_vmparm(dest, &ipl_block); 313 rc = reipl_get_ascii_vmparm(dest, size, &ipl_block);
308 else 314 else
309 dest[0] = 0; 315 dest[0] = 0;
316 return rc;
310} 317}
311 318
312static ssize_t ipl_vm_parm_show(struct kobject *kobj, 319static ssize_t ipl_vm_parm_show(struct kobject *kobj,
@@ -314,10 +321,65 @@ static ssize_t ipl_vm_parm_show(struct kobject *kobj,
314{ 321{
315 char parm[DIAG308_VMPARM_SIZE + 1] = {}; 322 char parm[DIAG308_VMPARM_SIZE + 1] = {};
316 323
317 get_ipl_vmparm(parm); 324 append_ipl_vmparm(parm, sizeof(parm));
318 return sprintf(page, "%s\n", parm); 325 return sprintf(page, "%s\n", parm);
319} 326}
320 327
328static size_t scpdata_length(const char* buf, size_t count)
329{
330 while (count) {
331 if (buf[count - 1] != '\0' && buf[count - 1] != ' ')
332 break;
333 count--;
334 }
335 return count;
336}
337
338size_t reipl_append_ascii_scpdata(char *dest, size_t size,
339 const struct ipl_parameter_block *ipb)
340{
341 size_t count;
342 size_t i;
343 int has_lowercase;
344
345 count = min(size - 1, scpdata_length(ipb->ipl_info.fcp.scp_data,
346 ipb->ipl_info.fcp.scp_data_len));
347 if (!count)
348 goto out;
349
350 has_lowercase = 0;
351 for (i = 0; i < count; i++) {
352 if (!isascii(ipb->ipl_info.fcp.scp_data[i])) {
353 count = 0;
354 goto out;
355 }
356 if (!has_lowercase && islower(ipb->ipl_info.fcp.scp_data[i]))
357 has_lowercase = 1;
358 }
359
360 if (has_lowercase)
361 memcpy(dest, ipb->ipl_info.fcp.scp_data, count);
362 else
363 for (i = 0; i < count; i++)
364 dest[i] = tolower(ipb->ipl_info.fcp.scp_data[i]);
365out:
366 dest[count] = '\0';
367 return count;
368}
369
370size_t append_ipl_scpdata(char *dest, size_t len)
371{
372 size_t rc;
373
374 rc = 0;
375 if (ipl_block.hdr.pbt == DIAG308_IPL_TYPE_FCP)
376 rc = reipl_append_ascii_scpdata(dest, len, &ipl_block);
377 else
378 dest[0] = 0;
379 return rc;
380}
381
382
321static struct kobj_attribute sys_ipl_vm_parm_attr = 383static struct kobj_attribute sys_ipl_vm_parm_attr =
322 __ATTR(parm, S_IRUGO, ipl_vm_parm_show, NULL); 384 __ATTR(parm, S_IRUGO, ipl_vm_parm_show, NULL);
323 385
@@ -553,7 +615,7 @@ static ssize_t reipl_generic_vmparm_show(struct ipl_parameter_block *ipb,
553{ 615{
554 char vmparm[DIAG308_VMPARM_SIZE + 1] = {}; 616 char vmparm[DIAG308_VMPARM_SIZE + 1] = {};
555 617
556 reipl_get_ascii_vmparm(vmparm, ipb); 618 reipl_get_ascii_vmparm(vmparm, sizeof(vmparm), ipb);
557 return sprintf(page, "%s\n", vmparm); 619 return sprintf(page, "%s\n", vmparm);
558} 620}
559 621
@@ -626,6 +688,59 @@ static struct kobj_attribute sys_reipl_ccw_vmparm_attr =
626 688
627/* FCP reipl device attributes */ 689/* FCP reipl device attributes */
628 690
691static ssize_t reipl_fcp_scpdata_read(struct kobject *kobj,
692 struct bin_attribute *attr,
693 char *buf, loff_t off, size_t count)
694{
695 size_t size = reipl_block_fcp->ipl_info.fcp.scp_data_len;
696 void *scp_data = reipl_block_fcp->ipl_info.fcp.scp_data;
697
698 return memory_read_from_buffer(buf, count, &off, scp_data, size);
699}
700
701static ssize_t reipl_fcp_scpdata_write(struct kobject *kobj,
702 struct bin_attribute *attr,
703 char *buf, loff_t off, size_t count)
704{
705 size_t padding;
706 size_t scpdata_len;
707
708 if (off < 0)
709 return -EINVAL;
710
711 if (off >= DIAG308_SCPDATA_SIZE)
712 return -ENOSPC;
713
714 if (count > DIAG308_SCPDATA_SIZE - off)
715 count = DIAG308_SCPDATA_SIZE - off;
716
717 memcpy(reipl_block_fcp->ipl_info.fcp.scp_data, buf + off, count);
718 scpdata_len = off + count;
719
720 if (scpdata_len % 8) {
721 padding = 8 - (scpdata_len % 8);
722 memset(reipl_block_fcp->ipl_info.fcp.scp_data + scpdata_len,
723 0, padding);
724 scpdata_len += padding;
725 }
726
727 reipl_block_fcp->ipl_info.fcp.scp_data_len = scpdata_len;
728 reipl_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN + scpdata_len;
729 reipl_block_fcp->hdr.blk0_len = IPL_PARM_BLK0_FCP_LEN + scpdata_len;
730
731 return count;
732}
733
734static struct bin_attribute sys_reipl_fcp_scp_data_attr = {
735 .attr = {
736 .name = "scp_data",
737 .mode = S_IRUGO | S_IWUSR,
738 },
739 .size = PAGE_SIZE,
740 .read = reipl_fcp_scpdata_read,
741 .write = reipl_fcp_scpdata_write,
742};
743
629DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%016llx\n", 744DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%016llx\n",
630 reipl_block_fcp->ipl_info.fcp.wwpn); 745 reipl_block_fcp->ipl_info.fcp.wwpn);
631DEFINE_IPL_ATTR_RW(reipl_fcp, lun, "0x%016llx\n", "%016llx\n", 746DEFINE_IPL_ATTR_RW(reipl_fcp, lun, "0x%016llx\n", "%016llx\n",
@@ -647,7 +762,6 @@ static struct attribute *reipl_fcp_attrs[] = {
647}; 762};
648 763
649static struct attribute_group reipl_fcp_attr_group = { 764static struct attribute_group reipl_fcp_attr_group = {
650 .name = IPL_FCP_STR,
651 .attrs = reipl_fcp_attrs, 765 .attrs = reipl_fcp_attrs,
652}; 766};
653 767
@@ -895,6 +1009,7 @@ static struct kobj_attribute reipl_type_attr =
895 __ATTR(reipl_type, 0644, reipl_type_show, reipl_type_store); 1009 __ATTR(reipl_type, 0644, reipl_type_show, reipl_type_store);
896 1010
897static struct kset *reipl_kset; 1011static struct kset *reipl_kset;
1012static struct kset *reipl_fcp_kset;
898 1013
899static void get_ipl_string(char *dst, struct ipl_parameter_block *ipb, 1014static void get_ipl_string(char *dst, struct ipl_parameter_block *ipb,
900 const enum ipl_method m) 1015 const enum ipl_method m)
@@ -906,7 +1021,7 @@ static void get_ipl_string(char *dst, struct ipl_parameter_block *ipb,
906 1021
907 reipl_get_ascii_loadparm(loadparm, ipb); 1022 reipl_get_ascii_loadparm(loadparm, ipb);
908 reipl_get_ascii_nss_name(nss_name, ipb); 1023 reipl_get_ascii_nss_name(nss_name, ipb);
909 reipl_get_ascii_vmparm(vmparm, ipb); 1024 reipl_get_ascii_vmparm(vmparm, sizeof(vmparm), ipb);
910 1025
911 switch (m) { 1026 switch (m) {
912 case REIPL_METHOD_CCW_VM: 1027 case REIPL_METHOD_CCW_VM:
@@ -1076,23 +1191,44 @@ static int __init reipl_fcp_init(void)
1076 int rc; 1191 int rc;
1077 1192
1078 if (!diag308_set_works) { 1193 if (!diag308_set_works) {
1079 if (ipl_info.type == IPL_TYPE_FCP) 1194 if (ipl_info.type == IPL_TYPE_FCP) {
1080 make_attrs_ro(reipl_fcp_attrs); 1195 make_attrs_ro(reipl_fcp_attrs);
1081 else 1196 sys_reipl_fcp_scp_data_attr.attr.mode = S_IRUGO;
1197 } else
1082 return 0; 1198 return 0;
1083 } 1199 }
1084 1200
1085 reipl_block_fcp = (void *) get_zeroed_page(GFP_KERNEL); 1201 reipl_block_fcp = (void *) get_zeroed_page(GFP_KERNEL);
1086 if (!reipl_block_fcp) 1202 if (!reipl_block_fcp)
1087 return -ENOMEM; 1203 return -ENOMEM;
1088 rc = sysfs_create_group(&reipl_kset->kobj, &reipl_fcp_attr_group); 1204
1205 /* sysfs: create fcp kset for mixing attr group and bin attrs */
1206 reipl_fcp_kset = kset_create_and_add(IPL_FCP_STR, NULL,
1207 &reipl_kset->kobj);
 1208	if (!reipl_fcp_kset) {
1209 free_page((unsigned long) reipl_block_fcp);
1210 return -ENOMEM;
1211 }
1212
1213 rc = sysfs_create_group(&reipl_fcp_kset->kobj, &reipl_fcp_attr_group);
1214 if (rc) {
1215 kset_unregister(reipl_fcp_kset);
1216 free_page((unsigned long) reipl_block_fcp);
1217 return rc;
1218 }
1219
1220 rc = sysfs_create_bin_file(&reipl_fcp_kset->kobj,
1221 &sys_reipl_fcp_scp_data_attr);
1089 if (rc) { 1222 if (rc) {
1090 free_page((unsigned long)reipl_block_fcp); 1223 sysfs_remove_group(&reipl_fcp_kset->kobj, &reipl_fcp_attr_group);
1224 kset_unregister(reipl_fcp_kset);
1225 free_page((unsigned long) reipl_block_fcp);
1091 return rc; 1226 return rc;
1092 } 1227 }
1093 if (ipl_info.type == IPL_TYPE_FCP) { 1228
1229 if (ipl_info.type == IPL_TYPE_FCP)
1094 memcpy(reipl_block_fcp, IPL_PARMBLOCK_START, PAGE_SIZE); 1230 memcpy(reipl_block_fcp, IPL_PARMBLOCK_START, PAGE_SIZE);
1095 } else { 1231 else {
1096 reipl_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN; 1232 reipl_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN;
1097 reipl_block_fcp->hdr.version = IPL_PARM_BLOCK_VERSION; 1233 reipl_block_fcp->hdr.version = IPL_PARM_BLOCK_VERSION;
1098 reipl_block_fcp->hdr.blk0_len = IPL_PARM_BLK0_FCP_LEN; 1234 reipl_block_fcp->hdr.blk0_len = IPL_PARM_BLK0_FCP_LEN;
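
One detail of the new scp_data write handler above: the stored length is rounded up to the next multiple of 8, the pad bytes are cleared, and the rounded length is what gets added to the parameter block header lengths. Writing, say, 13 bytes therefore results in scp_data_len == 16 with three zero bytes of padding. A stand-alone sketch of just that rounding step (hypothetical helper name):

#include <stdio.h>
#include <string.h>

/* Hypothetical sketch of the padding rule in reipl_fcp_scpdata_write():
 * round the SCP data length up to a multiple of 8, zeroing the pad. */
static size_t pad_scpdata_len(unsigned char *scp_data, size_t len)
{
	size_t padding;

	if (len % 8) {
		padding = 8 - (len % 8);
		memset(scp_data + len, 0, padding);
		len += padding;
	}
	return len;
}

int main(void)
{
	unsigned char buf[32] = "thirteen byte";	/* 13 bytes of payload */

	printf("%zu\n", pad_scpdata_len(buf, 13));	/* prints 16 */
	return 0;
}
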
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index 2a0a5e97ba8c..dfe015d7398c 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -11,111 +11,27 @@
11ftrace_stub: 11ftrace_stub:
12 br %r14 12 br %r14
13 13
14#ifdef CONFIG_64BIT
15
16#ifdef CONFIG_DYNAMIC_FTRACE
17
18 .globl _mcount 14 .globl _mcount
19_mcount: 15_mcount:
20 br %r14 16#ifdef CONFIG_DYNAMIC_FTRACE
21
22 .globl ftrace_caller
23ftrace_caller:
24 larl %r1,function_trace_stop
25 icm %r1,0xf,0(%r1)
26 bnzr %r14
27 stmg %r2,%r5,32(%r15)
28 stg %r14,112(%r15)
29 lgr %r1,%r15
30 aghi %r15,-160
31 stg %r1,__SF_BACKCHAIN(%r15)
32 lgr %r2,%r14
33 lg %r3,168(%r15)
34 larl %r14,ftrace_dyn_func
35 lg %r14,0(%r14)
36 basr %r14,%r14
37#ifdef CONFIG_FUNCTION_GRAPH_TRACER
38 .globl ftrace_graph_caller
39ftrace_graph_caller:
40 # This unconditional branch gets runtime patched. Change only if
41 # you know what you are doing. See ftrace_enable_graph_caller().
42 j 0f
43 lg %r2,272(%r15)
44 lg %r3,168(%r15)
45 brasl %r14,prepare_ftrace_return
46 stg %r2,168(%r15)
470:
48#endif
49 aghi %r15,160
50 lmg %r2,%r5,32(%r15)
51 lg %r14,112(%r15)
52 br %r14 17 br %r14
53 18
54 .data 19 .data
55 .globl ftrace_dyn_func 20 .globl ftrace_dyn_func
56ftrace_dyn_func: 21ftrace_dyn_func:
57 .quad ftrace_stub 22 .long ftrace_stub
58 .previous 23 .previous
59 24
60#else /* CONFIG_DYNAMIC_FTRACE */
61
62 .globl _mcount
63_mcount:
64 larl %r1,function_trace_stop
65 icm %r1,0xf,0(%r1)
66 bnzr %r14
67 stmg %r2,%r5,32(%r15)
68 stg %r14,112(%r15)
69 lgr %r1,%r15
70 aghi %r15,-160
71 stg %r1,__SF_BACKCHAIN(%r15)
72 lgr %r2,%r14
73 lg %r3,168(%r15)
74 larl %r14,ftrace_trace_function
75 lg %r14,0(%r14)
76 basr %r14,%r14
77#ifdef CONFIG_FUNCTION_GRAPH_TRACER
78 lg %r2,272(%r15)
79 lg %r3,168(%r15)
80 brasl %r14,prepare_ftrace_return
81 stg %r2,168(%r15)
82#endif
83 aghi %r15,160
84 lmg %r2,%r5,32(%r15)
85 lg %r14,112(%r15)
86 br %r14
87
88#endif /* CONFIG_DYNAMIC_FTRACE */
89
90#ifdef CONFIG_FUNCTION_GRAPH_TRACER
91
92 .globl return_to_handler
93return_to_handler:
94 stmg %r2,%r5,32(%r15)
95 lgr %r1,%r15
96 aghi %r15,-160
97 stg %r1,__SF_BACKCHAIN(%r15)
98 brasl %r14,ftrace_return_to_handler
99 aghi %r15,160
100 lgr %r14,%r2
101 lmg %r2,%r5,32(%r15)
102 br %r14
103
104#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
105
106#else /* CONFIG_64BIT */
107
108#ifdef CONFIG_DYNAMIC_FTRACE
109
110 .globl _mcount
111_mcount:
112 br %r14
113
114 .globl ftrace_caller 25 .globl ftrace_caller
115ftrace_caller: 26ftrace_caller:
27#endif
116 stm %r2,%r5,16(%r15) 28 stm %r2,%r5,16(%r15)
117 bras %r1,2f 29 bras %r1,2f
30#ifdef CONFIG_DYNAMIC_FTRACE
310: .long ftrace_dyn_func
32#else
1180: .long ftrace_trace_function 330: .long ftrace_trace_function
34#endif
1191: .long function_trace_stop 351: .long function_trace_stop
1202: l %r2,1b-0b(%r1) 362: l %r2,1b-0b(%r1)
121 icm %r2,0xf,0(%r2) 37 icm %r2,0xf,0(%r2)
@@ -131,53 +47,13 @@ ftrace_caller:
131 l %r14,0(%r14) 47 l %r14,0(%r14)
132 basr %r14,%r14 48 basr %r14,%r14
133#ifdef CONFIG_FUNCTION_GRAPH_TRACER 49#ifdef CONFIG_FUNCTION_GRAPH_TRACER
50#ifdef CONFIG_DYNAMIC_FTRACE
134 .globl ftrace_graph_caller 51 .globl ftrace_graph_caller
135ftrace_graph_caller: 52ftrace_graph_caller:
136 # This unconditional branch gets runtime patched. Change only if 53 # This unconditional branch gets runtime patched. Change only if
137 # you know what you are doing. See ftrace_enable_graph_caller(). 54 # you know what you are doing. See ftrace_enable_graph_caller().
138 j 1f 55 j 1f
139 bras %r1,0f
140 .long prepare_ftrace_return
1410: l %r2,152(%r15)
142 l %r4,0(%r1)
143 l %r3,100(%r15)
144 basr %r14,%r4
145 st %r2,100(%r15)
1461:
147#endif 56#endif
148 ahi %r15,96
149 l %r14,56(%r15)
1503: lm %r2,%r5,16(%r15)
151 br %r14
152
153 .data
154 .globl ftrace_dyn_func
155ftrace_dyn_func:
156 .long ftrace_stub
157 .previous
158
159#else /* CONFIG_DYNAMIC_FTRACE */
160
161 .globl _mcount
162_mcount:
163 stm %r2,%r5,16(%r15)
164 bras %r1,2f
1650: .long ftrace_trace_function
1661: .long function_trace_stop
1672: l %r2,1b-0b(%r1)
168 icm %r2,0xf,0(%r2)
169 jnz 3f
170 st %r14,56(%r15)
171 lr %r0,%r15
172 ahi %r15,-96
173 l %r3,100(%r15)
174 la %r2,0(%r14)
175 st %r0,__SF_BACKCHAIN(%r15)
176 la %r3,0(%r3)
177 l %r14,0b-0b(%r1)
178 l %r14,0(%r14)
179 basr %r14,%r14
180#ifdef CONFIG_FUNCTION_GRAPH_TRACER
181 bras %r1,0f 57 bras %r1,0f
182 .long prepare_ftrace_return 58 .long prepare_ftrace_return
1830: l %r2,152(%r15) 590: l %r2,152(%r15)
@@ -185,14 +61,13 @@ _mcount:
185 l %r3,100(%r15) 61 l %r3,100(%r15)
186 basr %r14,%r4 62 basr %r14,%r4
187 st %r2,100(%r15) 63 st %r2,100(%r15)
641:
188#endif 65#endif
189 ahi %r15,96 66 ahi %r15,96
190 l %r14,56(%r15) 67 l %r14,56(%r15)
1913: lm %r2,%r5,16(%r15) 683: lm %r2,%r5,16(%r15)
192 br %r14 69 br %r14
193 70
194#endif /* CONFIG_DYNAMIC_FTRACE */
195
196#ifdef CONFIG_FUNCTION_GRAPH_TRACER 71#ifdef CONFIG_FUNCTION_GRAPH_TRACER
197 72
198 .globl return_to_handler 73 .globl return_to_handler
@@ -211,6 +86,4 @@ return_to_handler:
211 lm %r2,%r5,16(%r15) 86 lm %r2,%r5,16(%r15)
212 br %r14 87 br %r14
213 88
214#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 89#endif
215
216#endif /* CONFIG_64BIT */
diff --git a/arch/s390/kernel/mcount64.S b/arch/s390/kernel/mcount64.S
new file mode 100644
index 000000000000..c37211c6092b
--- /dev/null
+++ b/arch/s390/kernel/mcount64.S
@@ -0,0 +1,78 @@
1/*
2 * Copyright IBM Corp. 2008,2009
3 *
4 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
5 *
6 */
7
8#include <asm/asm-offsets.h>
9
10 .globl ftrace_stub
11ftrace_stub:
12 br %r14
13
14 .globl _mcount
15_mcount:
16#ifdef CONFIG_DYNAMIC_FTRACE
17 br %r14
18
19 .data
20 .globl ftrace_dyn_func
21ftrace_dyn_func:
22 .quad ftrace_stub
23 .previous
24
25 .globl ftrace_caller
26ftrace_caller:
27#endif
28 larl %r1,function_trace_stop
29 icm %r1,0xf,0(%r1)
30 bnzr %r14
31 stmg %r2,%r5,32(%r15)
32 stg %r14,112(%r15)
33 lgr %r1,%r15
34 aghi %r15,-160
35 stg %r1,__SF_BACKCHAIN(%r15)
36 lgr %r2,%r14
37 lg %r3,168(%r15)
38#ifdef CONFIG_DYNAMIC_FTRACE
39 larl %r14,ftrace_dyn_func
40#else
41 larl %r14,ftrace_trace_function
42#endif
43 lg %r14,0(%r14)
44 basr %r14,%r14
45#ifdef CONFIG_FUNCTION_GRAPH_TRACER
46#ifdef CONFIG_DYNAMIC_FTRACE
47 .globl ftrace_graph_caller
48ftrace_graph_caller:
49 # This unconditional branch gets runtime patched. Change only if
50 # you know what you are doing. See ftrace_enable_graph_caller().
51 j 0f
52#endif
53 lg %r2,272(%r15)
54 lg %r3,168(%r15)
55 brasl %r14,prepare_ftrace_return
56 stg %r2,168(%r15)
570:
58#endif
59 aghi %r15,160
60 lmg %r2,%r5,32(%r15)
61 lg %r14,112(%r15)
62 br %r14
63
64#ifdef CONFIG_FUNCTION_GRAPH_TRACER
65
66 .globl return_to_handler
67return_to_handler:
68 stmg %r2,%r5,32(%r15)
69 lgr %r1,%r15
70 aghi %r15,-160
71 stg %r1,__SF_BACKCHAIN(%r15)
72 brasl %r14,ftrace_return_to_handler
73 aghi %r15,160
74 lgr %r14,%r2
75 lmg %r2,%r5,32(%r15)
76 br %r14
77
78#endif
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 43acd73105b7..f3ddd7ac06c5 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -51,6 +51,9 @@
51#include "compat_ptrace.h" 51#include "compat_ptrace.h"
52#endif 52#endif
53 53
54#define CREATE_TRACE_POINTS
55#include <trace/events/syscalls.h>
56
54enum s390_regset { 57enum s390_regset {
55 REGSET_GENERAL, 58 REGSET_GENERAL,
56 REGSET_FP, 59 REGSET_FP,
@@ -661,8 +664,8 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
661 ret = -1; 664 ret = -1;
662 } 665 }
663 666
664 if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE))) 667 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
665 ftrace_syscall_enter(regs); 668 trace_sys_enter(regs, regs->gprs[2]);
666 669
667 if (unlikely(current->audit_context)) 670 if (unlikely(current->audit_context))
668 audit_syscall_entry(is_compat_task() ? 671 audit_syscall_entry(is_compat_task() ?
@@ -679,8 +682,8 @@ asmlinkage void do_syscall_trace_exit(struct pt_regs *regs)
679 audit_syscall_exit(AUDITSC_RESULT(regs->gprs[2]), 682 audit_syscall_exit(AUDITSC_RESULT(regs->gprs[2]),
680 regs->gprs[2]); 683 regs->gprs[2]);
681 684
682 if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE))) 685 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
683 ftrace_syscall_exit(regs); 686 trace_sys_exit(regs, regs->gprs[2]);
684 687
685 if (test_thread_flag(TIF_SYSCALL_TRACE)) 688 if (test_thread_flag(TIF_SYSCALL_TRACE))
686 tracehook_report_syscall_exit(regs, 0); 689 tracehook_report_syscall_exit(regs, 0);
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index cbb897bc50bd..9ed13a1ed376 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -156,15 +156,11 @@ __setup("condev=", condev_setup);
156 156
157static void __init set_preferred_console(void) 157static void __init set_preferred_console(void)
158{ 158{
159 if (MACHINE_IS_KVM) { 159 if (MACHINE_IS_KVM)
160 add_preferred_console("hvc", 0, NULL); 160 add_preferred_console("hvc", 0, NULL);
161 s390_virtio_console_init(); 161 else if (CONSOLE_IS_3215 || CONSOLE_IS_SCLP)
162 return;
163 }
164
165 if (CONSOLE_IS_3215 || CONSOLE_IS_SCLP)
166 add_preferred_console("ttyS", 0, NULL); 162 add_preferred_console("ttyS", 0, NULL);
167 if (CONSOLE_IS_3270) 163 else if (CONSOLE_IS_3270)
168 add_preferred_console("tty3270", 0, NULL); 164 add_preferred_console("tty3270", 0, NULL);
169} 165}
170 166
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 062bd64e65fa..6b4fef877f9d 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -536,4 +536,6 @@ void do_notify_resume(struct pt_regs *regs)
536{ 536{
537 clear_thread_flag(TIF_NOTIFY_RESUME); 537 clear_thread_flag(TIF_NOTIFY_RESUME);
538 tracehook_notify_resume(regs); 538 tracehook_notify_resume(regs);
539 if (current->replacement_session_keyring)
540 key_replace_session_keyring();
539} 541}
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index be2cae083406..56c16876b919 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -49,6 +49,7 @@
49#include <asm/sclp.h> 49#include <asm/sclp.h>
50#include <asm/cputime.h> 50#include <asm/cputime.h>
51#include <asm/vdso.h> 51#include <asm/vdso.h>
52#include <asm/cpu.h>
52#include "entry.h" 53#include "entry.h"
53 54
54static struct task_struct *current_set[NR_CPUS]; 55static struct task_struct *current_set[NR_CPUS];
@@ -70,6 +71,23 @@ static DEFINE_PER_CPU(struct cpu, cpu_devices);
70 71
71static void smp_ext_bitcall(int, ec_bit_sig); 72static void smp_ext_bitcall(int, ec_bit_sig);
72 73
74static int cpu_stopped(int cpu)
75{
76 __u32 status;
77
78 switch (signal_processor_ps(&status, 0, cpu, sigp_sense)) {
79 case sigp_order_code_accepted:
80 case sigp_status_stored:
81 /* Check for stopped and check stop state */
82 if (status & 0x50)
83 return 1;
84 break;
85 default:
86 break;
87 }
88 return 0;
89}
90
73void smp_send_stop(void) 91void smp_send_stop(void)
74{ 92{
75 int cpu, rc; 93 int cpu, rc;
@@ -86,7 +104,7 @@ void smp_send_stop(void)
86 rc = signal_processor(cpu, sigp_stop); 104 rc = signal_processor(cpu, sigp_stop);
87 } while (rc == sigp_busy); 105 } while (rc == sigp_busy);
88 106
89 while (!smp_cpu_not_running(cpu)) 107 while (!cpu_stopped(cpu))
90 cpu_relax(); 108 cpu_relax();
91 } 109 }
92} 110}
@@ -269,19 +287,6 @@ static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { }
269 287
270#endif /* CONFIG_ZFCPDUMP */ 288#endif /* CONFIG_ZFCPDUMP */
271 289
272static int cpu_stopped(int cpu)
273{
274 __u32 status;
275
276 /* Check for stopped state */
277 if (signal_processor_ps(&status, 0, cpu, sigp_sense) ==
278 sigp_status_stored) {
279 if (status & 0x40)
280 return 1;
281 }
282 return 0;
283}
284
285static int cpu_known(int cpu_id) 290static int cpu_known(int cpu_id)
286{ 291{
287 int cpu; 292 int cpu;
@@ -300,7 +305,7 @@ static int smp_rescan_cpus_sigp(cpumask_t avail)
300 logical_cpu = cpumask_first(&avail); 305 logical_cpu = cpumask_first(&avail);
301 if (logical_cpu >= nr_cpu_ids) 306 if (logical_cpu >= nr_cpu_ids)
302 return 0; 307 return 0;
303 for (cpu_id = 0; cpu_id <= 65535; cpu_id++) { 308 for (cpu_id = 0; cpu_id <= MAX_CPU_ADDRESS; cpu_id++) {
304 if (cpu_known(cpu_id)) 309 if (cpu_known(cpu_id))
305 continue; 310 continue;
306 __cpu_logical_map[logical_cpu] = cpu_id; 311 __cpu_logical_map[logical_cpu] = cpu_id;
@@ -379,7 +384,7 @@ static void __init smp_detect_cpus(void)
379 /* Use sigp detection algorithm if sclp doesn't work. */ 384 /* Use sigp detection algorithm if sclp doesn't work. */
380 if (sclp_get_cpu_info(info)) { 385 if (sclp_get_cpu_info(info)) {
381 smp_use_sigp_detection = 1; 386 smp_use_sigp_detection = 1;
382 for (cpu = 0; cpu <= 65535; cpu++) { 387 for (cpu = 0; cpu <= MAX_CPU_ADDRESS; cpu++) {
383 if (cpu == boot_cpu_addr) 388 if (cpu == boot_cpu_addr)
384 continue; 389 continue;
385 __cpu_logical_map[CPU_INIT_NO] = cpu; 390 __cpu_logical_map[CPU_INIT_NO] = cpu;
@@ -635,7 +640,7 @@ int __cpu_disable(void)
635void __cpu_die(unsigned int cpu) 640void __cpu_die(unsigned int cpu)
636{ 641{
637 /* Wait until target cpu is down */ 642 /* Wait until target cpu is down */
638 while (!smp_cpu_not_running(cpu)) 643 while (!cpu_stopped(cpu))
639 cpu_relax(); 644 cpu_relax();
640 smp_free_lowcore(cpu); 645 smp_free_lowcore(cpu);
641 pr_info("Processor %d stopped\n", cpu); 646 pr_info("Processor %d stopped\n", cpu);
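
The 0x50 mask in the relocated cpu_stopped() above combines two SIGP sense status bits, 0x40 (stopped) and 0x10 (check stop), so a CPU now counts as down in either state; the removed helper only looked at sigp_status_stored and bit 0x40. A small sketch with hypothetical constant names:

#include <stdio.h>

/* Hypothetical names for the two SIGP sense status bits behind the
 * 0x50 mask: "stopped" (0x40) and "check stop" (0x10). */
#define STATUS_STOPPED		0x40
#define STATUS_CHECK_STOP	0x10

static int cpu_is_down(unsigned int status)
{
	return (status & (STATUS_STOPPED | STATUS_CHECK_STOP)) != 0;
}

int main(void)
{
	/* stopped, check-stopped, operating */
	printf("%d %d %d\n", cpu_is_down(0x40), cpu_is_down(0x10), cpu_is_down(0x00));
	return 0;
}
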
diff --git a/arch/s390/power/swsusp.c b/arch/s390/kernel/suspend.c
index bd1f5c6b0b8c..086bee970cae 100644
--- a/arch/s390/power/swsusp.c
+++ b/arch/s390/kernel/suspend.c
@@ -1,13 +1,44 @@
1/* 1/*
2 * Support for suspend and resume on s390 2 * Suspend support specific for s390.
3 * 3 *
4 * Copyright IBM Corp. 2009 4 * Copyright IBM Corp. 2009
5 * 5 *
6 * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com> 6 * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com>
7 *
8 */ 7 */
9 8
9#include <linux/suspend.h>
10#include <linux/reboot.h>
11#include <linux/pfn.h>
12#include <linux/mm.h>
13#include <asm/sections.h>
10#include <asm/system.h> 14#include <asm/system.h>
15#include <asm/ipl.h>
16
17/*
18 * References to section boundaries
19 */
20extern const void __nosave_begin, __nosave_end;
21
22/*
23 * check if given pfn is in the 'nosave' or in the read only NSS section
24 */
25int pfn_is_nosave(unsigned long pfn)
26{
27 unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
28 unsigned long nosave_end_pfn = PAGE_ALIGN(__pa(&__nosave_end))
29 >> PAGE_SHIFT;
30 unsigned long eshared_pfn = PFN_DOWN(__pa(&_eshared)) - 1;
31 unsigned long stext_pfn = PFN_DOWN(__pa(&_stext));
32
33 if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn)
34 return 1;
35 if (pfn >= stext_pfn && pfn <= eshared_pfn) {
36 if (ipl_info.type == IPL_TYPE_NSS)
37 return 1;
38 } else if ((tprot(pfn * PAGE_SIZE) && pfn > 0))
39 return 1;
40 return 0;
41}
11 42
12void save_processor_state(void) 43void save_processor_state(void)
13{ 44{
diff --git a/arch/s390/power/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S
index b26df5c5933e..7cd6b096f0d1 100644
--- a/arch/s390/power/swsusp_asm64.S
+++ b/arch/s390/kernel/swsusp_asm64.S
@@ -21,7 +21,7 @@
21 * This function runs with disabled interrupts. 21 * This function runs with disabled interrupts.
22 */ 22 */
23 .section .text 23 .section .text
24 .align 2 24 .align 4
25 .globl swsusp_arch_suspend 25 .globl swsusp_arch_suspend
26swsusp_arch_suspend: 26swsusp_arch_suspend:
27 stmg %r6,%r15,__SF_GPRS(%r15) 27 stmg %r6,%r15,__SF_GPRS(%r15)
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index d4c8e9c47c81..54e327e9af04 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -60,6 +60,7 @@
60#define TICK_SIZE tick 60#define TICK_SIZE tick
61 61
62u64 sched_clock_base_cc = -1; /* Force to data section. */ 62u64 sched_clock_base_cc = -1; /* Force to data section. */
63EXPORT_SYMBOL_GPL(sched_clock_base_cc);
63 64
64static DEFINE_PER_CPU(struct clock_event_device, comparators); 65static DEFINE_PER_CPU(struct clock_event_device, comparators);
65 66
@@ -68,7 +69,7 @@ static DEFINE_PER_CPU(struct clock_event_device, comparators);
68 */ 69 */
69unsigned long long notrace sched_clock(void) 70unsigned long long notrace sched_clock(void)
70{ 71{
71 return ((get_clock_xt() - sched_clock_base_cc) * 125) >> 9; 72 return (get_clock_monotonic() * 125) >> 9;
72} 73}
73 74
74/* 75/*
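
The (x * 125) >> 9 factor that sched_clock() now applies to get_clock_monotonic() converts TOD clock units to nanoseconds: the s390 TOD clock steps bit 51 once per microsecond, so the raw value counts 2^-12 microsecond units, and ns = value * 1000 / 4096 = value * 125 / 512. A tiny stand-alone check of that identity (hypothetical helper name):

#include <stdio.h>

/* Hypothetical helper mirroring the kernel's conversion: a TOD delta
 * is in units of 2^-12 microseconds, so ns = delta * 1000 / 4096,
 * computed as (delta * 125) >> 9. */
static unsigned long long tod_delta_to_ns(unsigned long long delta)
{
	return (delta * 125) >> 9;
}

int main(void)
{
	/* 4096 TOD units == 1 microsecond == 1000 ns */
	printf("%llu %llu\n", tod_delta_to_ns(4096ULL), tod_delta_to_ns(8192ULL));
	return 0;
}
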
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index a53db23ee092..7315f9e67e1d 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -52,55 +52,18 @@ SECTIONS
52 . = ALIGN(PAGE_SIZE); 52 . = ALIGN(PAGE_SIZE);
53 _eshared = .; /* End of shareable data */ 53 _eshared = .; /* End of shareable data */
54 54
55 . = ALIGN(16); /* Exception table */ 55 EXCEPTION_TABLE(16) :data
56 __ex_table : {
57 __start___ex_table = .;
58 *(__ex_table)
59 __stop___ex_table = .;
60 } :data
61
62 .data : { /* Data */
63 DATA_DATA
64 CONSTRUCTORS
65 }
66
67 . = ALIGN(PAGE_SIZE);
68 .data_nosave : {
69 __nosave_begin = .;
70 *(.data.nosave)
71 }
72 . = ALIGN(PAGE_SIZE);
73 __nosave_end = .;
74
75 . = ALIGN(PAGE_SIZE);
76 .data.page_aligned : {
77 *(.data.idt)
78 }
79 56
80 . = ALIGN(0x100); 57 RW_DATA_SECTION(0x100, PAGE_SIZE, THREAD_SIZE)
81 .data.cacheline_aligned : {
82 *(.data.cacheline_aligned)
83 }
84 58
85 . = ALIGN(0x100);
86 .data.read_mostly : {
87 *(.data.read_mostly)
88 }
89 _edata = .; /* End of data section */ 59 _edata = .; /* End of data section */
90 60
91 . = ALIGN(THREAD_SIZE); /* init_task */
92 .data.init_task : {
93 *(.data.init_task)
94 }
95
96 /* will be freed after init */ 61 /* will be freed after init */
97 . = ALIGN(PAGE_SIZE); /* Init code and data */ 62 . = ALIGN(PAGE_SIZE); /* Init code and data */
98 __init_begin = .; 63 __init_begin = .;
99 .init.text : { 64
100 _sinittext = .; 65 INIT_TEXT_SECTION(PAGE_SIZE)
101 INIT_TEXT 66
102 _einittext = .;
103 }
104 /* 67 /*
105 * .exit.text is discarded at runtime, not link time, 68 * .exit.text is discarded at runtime, not link time,
106 * to deal with references from __bug_table 69 * to deal with references from __bug_table
@@ -111,49 +74,13 @@ SECTIONS
111 74
112 /* early.c uses stsi, which requires page aligned data. */ 75 /* early.c uses stsi, which requires page aligned data. */
113 . = ALIGN(PAGE_SIZE); 76 . = ALIGN(PAGE_SIZE);
114 .init.data : { 77 INIT_DATA_SECTION(0x100)
115 INIT_DATA
116 }
117 . = ALIGN(0x100);
118 .init.setup : {
119 __setup_start = .;
120 *(.init.setup)
121 __setup_end = .;
122 }
123 .initcall.init : {
124 __initcall_start = .;
125 INITCALLS
126 __initcall_end = .;
127 }
128
129 .con_initcall.init : {
130 __con_initcall_start = .;
131 *(.con_initcall.init)
132 __con_initcall_end = .;
133 }
134 SECURITY_INIT
135
136#ifdef CONFIG_BLK_DEV_INITRD
137 . = ALIGN(0x100);
138 .init.ramfs : {
139 __initramfs_start = .;
140 *(.init.ramfs)
141 . = ALIGN(2);
142 __initramfs_end = .;
143 }
144#endif
145 78
146 PERCPU(PAGE_SIZE) 79 PERCPU(PAGE_SIZE)
147 . = ALIGN(PAGE_SIZE); 80 . = ALIGN(PAGE_SIZE);
148 __init_end = .; /* freed after init ends here */ 81 __init_end = .; /* freed after init ends here */
149 82
150 /* BSS */ 83 BSS_SECTION(0, 2, 0)
151 .bss : {
152 __bss_start = .;
153 *(.bss)
154 . = ALIGN(2);
155 __bss_stop = .;
156 }
157 84
158 _end = . ; 85 _end = . ;
159 86
diff --git a/arch/s390/mm/Makefile b/arch/s390/mm/Makefile
index db05661ac895..eec054484419 100644
--- a/arch/s390/mm/Makefile
+++ b/arch/s390/mm/Makefile
@@ -2,7 +2,7 @@
2# Makefile for the linux s390-specific parts of the memory manager. 2# Makefile for the linux s390-specific parts of the memory manager.
3# 3#
4 4
5obj-y := init.o fault.o extmem.o mmap.o vmem.o pgtable.o maccess.o 5obj-y := init.o fault.o extmem.o mmap.o vmem.o pgtable.o maccess.o \
6 page-states.o
6obj-$(CONFIG_CMM) += cmm.o 7obj-$(CONFIG_CMM) += cmm.o
7obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o 8obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
8obj-$(CONFIG_PAGE_STATES) += page-states.o
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index e5e119fe03b2..1abbadd497e1 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -10,6 +10,7 @@
10 * Copyright (C) 1995 Linus Torvalds 10 * Copyright (C) 1995 Linus Torvalds
11 */ 11 */
12 12
13#include <linux/perf_counter.h>
13#include <linux/signal.h> 14#include <linux/signal.h>
14#include <linux/sched.h> 15#include <linux/sched.h>
15#include <linux/kernel.h> 16#include <linux/kernel.h>
@@ -305,7 +306,7 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int write)
305 * interrupts again and then search the VMAs 306 * interrupts again and then search the VMAs
306 */ 307 */
307 local_irq_enable(); 308 local_irq_enable();
308 309 perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
309 down_read(&mm->mmap_sem); 310 down_read(&mm->mmap_sem);
310 311
311 si_code = SEGV_MAPERR; 312 si_code = SEGV_MAPERR;
@@ -363,11 +364,15 @@ good_area:
363 } 364 }
364 BUG(); 365 BUG();
365 } 366 }
366 if (fault & VM_FAULT_MAJOR) 367 if (fault & VM_FAULT_MAJOR) {
367 tsk->maj_flt++; 368 tsk->maj_flt++;
368 else 369 perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
370 regs, address);
371 } else {
369 tsk->min_flt++; 372 tsk->min_flt++;
370 373 perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
374 regs, address);
375 }
371 up_read(&mm->mmap_sem); 376 up_read(&mm->mmap_sem);
372 /* 377 /*
373 * The instruction that caused the program check will 378 * The instruction that caused the program check will
diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c
index fc0ad73ffd90..f92ec203ad92 100644
--- a/arch/s390/mm/page-states.c
+++ b/arch/s390/mm/page-states.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * arch/s390/mm/page-states.c
3 *
4 * Copyright IBM Corp. 2008 2 * Copyright IBM Corp. 2008
5 * 3 *
6 * Guest page hinting for unused pages. 4 * Guest page hinting for unused pages.
@@ -17,11 +15,12 @@
17#define ESSA_SET_STABLE 1 15#define ESSA_SET_STABLE 1
18#define ESSA_SET_UNUSED 2 16#define ESSA_SET_UNUSED 2
19 17
20static int cmma_flag; 18static int cmma_flag = 1;
21 19
22static int __init cmma(char *str) 20static int __init cmma(char *str)
23{ 21{
24 char *parm; 22 char *parm;
23
25 parm = strstrip(str); 24 parm = strstrip(str);
26 if (strcmp(parm, "yes") == 0 || strcmp(parm, "on") == 0) { 25 if (strcmp(parm, "yes") == 0 || strcmp(parm, "on") == 0) {
27 cmma_flag = 1; 26 cmma_flag = 1;
@@ -32,7 +31,6 @@ static int __init cmma(char *str)
32 return 1; 31 return 1;
33 return 0; 32 return 0;
34} 33}
35
36__setup("cmma=", cmma); 34__setup("cmma=", cmma);
37 35
38void __init cmma_init(void) 36void __init cmma_init(void)
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 565667207985..c70215247071 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -78,9 +78,9 @@ unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec)
78 } 78 }
79 page->index = page_to_phys(shadow); 79 page->index = page_to_phys(shadow);
80 } 80 }
81 spin_lock(&mm->page_table_lock); 81 spin_lock(&mm->context.list_lock);
82 list_add(&page->lru, &mm->context.crst_list); 82 list_add(&page->lru, &mm->context.crst_list);
83 spin_unlock(&mm->page_table_lock); 83 spin_unlock(&mm->context.list_lock);
84 return (unsigned long *) page_to_phys(page); 84 return (unsigned long *) page_to_phys(page);
85} 85}
86 86
@@ -89,9 +89,9 @@ void crst_table_free(struct mm_struct *mm, unsigned long *table)
89 unsigned long *shadow = get_shadow_table(table); 89 unsigned long *shadow = get_shadow_table(table);
90 struct page *page = virt_to_page(table); 90 struct page *page = virt_to_page(table);
91 91
92 spin_lock(&mm->page_table_lock); 92 spin_lock(&mm->context.list_lock);
93 list_del(&page->lru); 93 list_del(&page->lru);
94 spin_unlock(&mm->page_table_lock); 94 spin_unlock(&mm->context.list_lock);
95 if (shadow) 95 if (shadow)
96 free_pages((unsigned long) shadow, ALLOC_ORDER); 96 free_pages((unsigned long) shadow, ALLOC_ORDER);
97 free_pages((unsigned long) table, ALLOC_ORDER); 97 free_pages((unsigned long) table, ALLOC_ORDER);
@@ -182,7 +182,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
182 unsigned long bits; 182 unsigned long bits;
183 183
184 bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL; 184 bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL;
185 spin_lock(&mm->page_table_lock); 185 spin_lock(&mm->context.list_lock);
186 page = NULL; 186 page = NULL;
187 if (!list_empty(&mm->context.pgtable_list)) { 187 if (!list_empty(&mm->context.pgtable_list)) {
188 page = list_first_entry(&mm->context.pgtable_list, 188 page = list_first_entry(&mm->context.pgtable_list,
@@ -191,7 +191,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
191 page = NULL; 191 page = NULL;
192 } 192 }
193 if (!page) { 193 if (!page) {
194 spin_unlock(&mm->page_table_lock); 194 spin_unlock(&mm->context.list_lock);
195 page = alloc_page(GFP_KERNEL|__GFP_REPEAT); 195 page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
196 if (!page) 196 if (!page)
197 return NULL; 197 return NULL;
@@ -202,7 +202,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
202 clear_table_pgstes(table); 202 clear_table_pgstes(table);
203 else 203 else
204 clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE); 204 clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
205 spin_lock(&mm->page_table_lock); 205 spin_lock(&mm->context.list_lock);
206 list_add(&page->lru, &mm->context.pgtable_list); 206 list_add(&page->lru, &mm->context.pgtable_list);
207 } 207 }
208 table = (unsigned long *) page_to_phys(page); 208 table = (unsigned long *) page_to_phys(page);
@@ -213,7 +213,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
213 page->flags |= bits; 213 page->flags |= bits;
214 if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1)) 214 if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
215 list_move_tail(&page->lru, &mm->context.pgtable_list); 215 list_move_tail(&page->lru, &mm->context.pgtable_list);
216 spin_unlock(&mm->page_table_lock); 216 spin_unlock(&mm->context.list_lock);
217 return table; 217 return table;
218} 218}
219 219
@@ -225,7 +225,7 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
225 bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL; 225 bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL;
226 bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long); 226 bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
227 page = pfn_to_page(__pa(table) >> PAGE_SHIFT); 227 page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
228 spin_lock(&mm->page_table_lock); 228 spin_lock(&mm->context.list_lock);
229 page->flags ^= bits; 229 page->flags ^= bits;
230 if (page->flags & FRAG_MASK) { 230 if (page->flags & FRAG_MASK) {
231 /* Page now has some free pgtable fragments. */ 231 /* Page now has some free pgtable fragments. */
@@ -234,7 +234,7 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
234 } else 234 } else
235 /* All fragments of the 4K page have been freed. */ 235 /* All fragments of the 4K page have been freed. */
236 list_del(&page->lru); 236 list_del(&page->lru);
237 spin_unlock(&mm->page_table_lock); 237 spin_unlock(&mm->context.list_lock);
238 if (page) { 238 if (page) {
239 pgtable_page_dtor(page); 239 pgtable_page_dtor(page);
240 __free_page(page); 240 __free_page(page);
@@ -245,7 +245,7 @@ void disable_noexec(struct mm_struct *mm, struct task_struct *tsk)
245{ 245{
246 struct page *page; 246 struct page *page;
247 247
248 spin_lock(&mm->page_table_lock); 248 spin_lock(&mm->context.list_lock);
249 /* Free shadow region and segment tables. */ 249 /* Free shadow region and segment tables. */
250 list_for_each_entry(page, &mm->context.crst_list, lru) 250 list_for_each_entry(page, &mm->context.crst_list, lru)
251 if (page->index) { 251 if (page->index) {
@@ -255,7 +255,7 @@ void disable_noexec(struct mm_struct *mm, struct task_struct *tsk)
255 /* "Free" second halves of page tables. */ 255 /* "Free" second halves of page tables. */
256 list_for_each_entry(page, &mm->context.pgtable_list, lru) 256 list_for_each_entry(page, &mm->context.pgtable_list, lru)
257 page->flags &= ~SECOND_HALVES; 257 page->flags &= ~SECOND_HALVES;
258 spin_unlock(&mm->page_table_lock); 258 spin_unlock(&mm->context.list_lock);
259 mm->context.noexec = 0; 259 mm->context.noexec = 0;
260 update_mm(mm, tsk); 260 update_mm(mm, tsk);
261} 261}
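The s390 hunks above stop piggy-backing on mm->page_table_lock and give the crst_list/pgtable_list manipulations their own mm->context.list_lock (vmem_map_init() below gains the matching spin_lock_init() for init_mm). A minimal userspace model of the idea, a list head paired with its own narrow lock, might look like the following; the names and the pthread mutex are illustrative stand-ins, not kernel code:

#include <pthread.h>
#include <stdio.h>

struct node {
        struct node *next;
        int val;
};

struct context {
        pthread_mutex_t list_lock;      /* protects 'list' and nothing else */
        struct node *list;
};

static void ctx_add(struct context *ctx, struct node *n)
{
        pthread_mutex_lock(&ctx->list_lock);    /* narrow lock, as in the hunks above */
        n->next = ctx->list;
        ctx->list = n;
        pthread_mutex_unlock(&ctx->list_lock);
}

int main(void)
{
        struct context ctx = { .list = NULL };
        struct node n = { .next = NULL, .val = 1 };

        pthread_mutex_init(&ctx.list_lock, NULL);       /* plays the role of spin_lock_init() */
        ctx_add(&ctx, &n);
        printf("head val = %d\n", ctx.list->val);
        pthread_mutex_destroy(&ctx.list_lock);
        return 0;
}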
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index e4868bfc672f..5f91a38d7592 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -331,6 +331,7 @@ void __init vmem_map_init(void)
331 unsigned long start, end; 331 unsigned long start, end;
332 int i; 332 int i;
333 333
334 spin_lock_init(&init_mm.context.list_lock);
334 INIT_LIST_HEAD(&init_mm.context.crst_list); 335 INIT_LIST_HEAD(&init_mm.context.crst_list);
335 INIT_LIST_HEAD(&init_mm.context.pgtable_list); 336 INIT_LIST_HEAD(&init_mm.context.pgtable_list);
336 init_mm.context.noexec = 0; 337 init_mm.context.noexec = 0;
diff --git a/arch/s390/power/Makefile b/arch/s390/power/Makefile
deleted file mode 100644
index 973bb45a8fec..000000000000
--- a/arch/s390/power/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
1#
2# Makefile for s390 PM support
3#
4
5obj-$(CONFIG_HIBERNATION) += suspend.o
6obj-$(CONFIG_HIBERNATION) += swsusp.o
7obj-$(CONFIG_HIBERNATION) += swsusp_64.o
8obj-$(CONFIG_HIBERNATION) += swsusp_asm64.o
diff --git a/arch/s390/power/suspend.c b/arch/s390/power/suspend.c
deleted file mode 100644
index b3351eceebbe..000000000000
--- a/arch/s390/power/suspend.c
+++ /dev/null
@@ -1,40 +0,0 @@
1/*
2 * Suspend support specific for s390.
3 *
4 * Copyright IBM Corp. 2009
5 *
6 * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com>
7 */
8
9#include <linux/mm.h>
10#include <linux/suspend.h>
11#include <linux/reboot.h>
12#include <linux/pfn.h>
13#include <asm/sections.h>
14#include <asm/ipl.h>
15
16/*
17 * References to section boundaries
18 */
19extern const void __nosave_begin, __nosave_end;
20
21/*
22 * check if given pfn is in the 'nosave' or in the read only NSS section
23 */
24int pfn_is_nosave(unsigned long pfn)
25{
26 unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
27 unsigned long nosave_end_pfn = PAGE_ALIGN(__pa(&__nosave_end))
28 >> PAGE_SHIFT;
29 unsigned long eshared_pfn = PFN_DOWN(__pa(&_eshared)) - 1;
30 unsigned long stext_pfn = PFN_DOWN(__pa(&_stext));
31
32 if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn)
33 return 1;
34 if (pfn >= stext_pfn && pfn <= eshared_pfn) {
35 if (ipl_info.type == IPL_TYPE_NSS)
36 return 1;
37 } else if ((tprot(pfn * PAGE_SIZE) && pfn > 0))
38 return 1;
39 return 0;
40}
diff --git a/arch/s390/power/swsusp_64.c b/arch/s390/power/swsusp_64.c
deleted file mode 100644
index 9516a517d72f..000000000000
--- a/arch/s390/power/swsusp_64.c
+++ /dev/null
@@ -1,17 +0,0 @@
1/*
2 * Support for suspend and resume on s390
3 *
4 * Copyright IBM Corp. 2009
5 *
6 * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com>
7 *
8 */
9
10#include <asm/system.h>
11#include <linux/interrupt.h>
12
13void do_after_copyback(void)
14{
15 mb();
16}
17
diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c
index b5afbec1db59..04a21883f327 100644
--- a/arch/sh/kernel/signal_32.c
+++ b/arch/sh/kernel/signal_32.c
@@ -640,5 +640,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned int save_r0,
640 if (thread_info_flags & _TIF_NOTIFY_RESUME) { 640 if (thread_info_flags & _TIF_NOTIFY_RESUME) {
641 clear_thread_flag(TIF_NOTIFY_RESUME); 641 clear_thread_flag(TIF_NOTIFY_RESUME);
642 tracehook_notify_resume(regs); 642 tracehook_notify_resume(regs);
643 if (current->replacement_session_keyring)
644 key_replace_session_keyring();
643 } 645 }
644} 646}
diff --git a/arch/sh/kernel/signal_64.c b/arch/sh/kernel/signal_64.c
index 0663a0ee6021..9e5c9b1d7e98 100644
--- a/arch/sh/kernel/signal_64.c
+++ b/arch/sh/kernel/signal_64.c
@@ -772,5 +772,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned long thread_info
772 if (thread_info_flags & _TIF_NOTIFY_RESUME) { 772 if (thread_info_flags & _TIF_NOTIFY_RESUME) {
773 clear_thread_flag(TIF_NOTIFY_RESUME); 773 clear_thread_flag(TIF_NOTIFY_RESUME);
774 tracehook_notify_resume(regs); 774 tracehook_notify_resume(regs);
775 if (current->replacement_session_keyring)
776 key_replace_session_keyring();
775 } 777 }
776} 778}
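Both sh notify-resume paths now install a pending replacement session keyring on the way back to user space. A rough standalone model of that check-and-swap is sketched below; 'struct task' and the function names are hypothetical stand-ins for current and key_replace_session_keyring(), not the real keyring code:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct task {
        char *session_keyring;
        char *replacement_session_keyring;
};

static void key_replace_session_keyring(struct task *t)
{
        if (!t->replacement_session_keyring)
                return;
        free(t->session_keyring);
        t->session_keyring = t->replacement_session_keyring;
        t->replacement_session_keyring = NULL;
}

static void do_notify_resume(struct task *t)
{
        /* mirrors the two added lines: test the pointer, then swap it in */
        if (t->replacement_session_keyring)
                key_replace_session_keyring(t);
}

int main(void)
{
        struct task t = { strdup("old-keyring"), strdup("new-keyring") };

        do_notify_resume(&t);
        printf("session keyring now: %s\n", t.session_keyring);
        free(t.session_keyring);
        return 0;
}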
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 2aa7cd39b481..2bd5c287538a 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -26,6 +26,8 @@ config SPARC
26 select RTC_CLASS 26 select RTC_CLASS
27 select RTC_DRV_M48T59 27 select RTC_DRV_M48T59
28 select HAVE_PERF_COUNTERS 28 select HAVE_PERF_COUNTERS
29 select HAVE_DMA_ATTRS
30 select HAVE_DMA_API_DEBUG
29 31
30config SPARC32 32config SPARC32
31 def_bool !64BIT 33 def_bool !64BIT
diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
index 204e4bf64438..5a8c308e2b5c 100644
--- a/arch/sparc/include/asm/dma-mapping.h
+++ b/arch/sparc/include/asm/dma-mapping.h
@@ -3,6 +3,7 @@
3 3
4#include <linux/scatterlist.h> 4#include <linux/scatterlist.h>
5#include <linux/mm.h> 5#include <linux/mm.h>
6#include <linux/dma-debug.h>
6 7
7#define DMA_ERROR_CODE (~(dma_addr_t)0x0) 8#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
8 9
@@ -13,142 +14,40 @@ extern int dma_set_mask(struct device *dev, u64 dma_mask);
13#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) 14#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
14#define dma_is_consistent(d, h) (1) 15#define dma_is_consistent(d, h) (1)
15 16
16struct dma_ops { 17extern struct dma_map_ops *dma_ops, pci32_dma_ops;
17 void *(*alloc_coherent)(struct device *dev, size_t size, 18extern struct bus_type pci_bus_type;
18 dma_addr_t *dma_handle, gfp_t flag);
19 void (*free_coherent)(struct device *dev, size_t size,
20 void *cpu_addr, dma_addr_t dma_handle);
21 dma_addr_t (*map_page)(struct device *dev, struct page *page,
22 unsigned long offset, size_t size,
23 enum dma_data_direction direction);
24 void (*unmap_page)(struct device *dev, dma_addr_t dma_addr,
25 size_t size,
26 enum dma_data_direction direction);
27 int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
28 enum dma_data_direction direction);
29 void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
30 int nhwentries,
31 enum dma_data_direction direction);
32 void (*sync_single_for_cpu)(struct device *dev,
33 dma_addr_t dma_handle, size_t size,
34 enum dma_data_direction direction);
35 void (*sync_single_for_device)(struct device *dev,
36 dma_addr_t dma_handle, size_t size,
37 enum dma_data_direction direction);
38 void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
39 int nelems,
40 enum dma_data_direction direction);
41 void (*sync_sg_for_device)(struct device *dev,
42 struct scatterlist *sg, int nents,
43 enum dma_data_direction dir);
44};
45extern const struct dma_ops *dma_ops;
46 19
47static inline void *dma_alloc_coherent(struct device *dev, size_t size, 20static inline struct dma_map_ops *get_dma_ops(struct device *dev)
48 dma_addr_t *dma_handle, gfp_t flag)
49{
50 return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
51}
52
53static inline void dma_free_coherent(struct device *dev, size_t size,
54 void *cpu_addr, dma_addr_t dma_handle)
55{
56 dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
57}
58
59static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
60 size_t size,
61 enum dma_data_direction direction)
62{
63 return dma_ops->map_page(dev, virt_to_page(cpu_addr),
64 (unsigned long)cpu_addr & ~PAGE_MASK, size,
65 direction);
66}
67
68static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
69 size_t size,
70 enum dma_data_direction direction)
71{
72 dma_ops->unmap_page(dev, dma_addr, size, direction);
73}
74
75static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
76 unsigned long offset, size_t size,
77 enum dma_data_direction direction)
78{
79 return dma_ops->map_page(dev, page, offset, size, direction);
80}
81
82static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
83 size_t size,
84 enum dma_data_direction direction)
85{
86 dma_ops->unmap_page(dev, dma_address, size, direction);
87}
88
89static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
90 int nents, enum dma_data_direction direction)
91{
92 return dma_ops->map_sg(dev, sg, nents, direction);
93}
94
95static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
96 int nents, enum dma_data_direction direction)
97{ 21{
98 dma_ops->unmap_sg(dev, sg, nents, direction); 22#if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
99} 23 if (dev->bus == &pci_bus_type)
100 24 return &pci32_dma_ops;
101static inline void dma_sync_single_for_cpu(struct device *dev, 25#endif
102 dma_addr_t dma_handle, size_t size, 26 return dma_ops;
103 enum dma_data_direction direction)
104{
105 dma_ops->sync_single_for_cpu(dev, dma_handle, size, direction);
106} 27}
107 28
108static inline void dma_sync_single_for_device(struct device *dev, 29#include <asm-generic/dma-mapping-common.h>
109 dma_addr_t dma_handle,
110 size_t size,
111 enum dma_data_direction direction)
112{
113 if (dma_ops->sync_single_for_device)
114 dma_ops->sync_single_for_device(dev, dma_handle, size,
115 direction);
116}
117 30
118static inline void dma_sync_sg_for_cpu(struct device *dev, 31static inline void *dma_alloc_coherent(struct device *dev, size_t size,
119 struct scatterlist *sg, int nelems, 32 dma_addr_t *dma_handle, gfp_t flag)
120 enum dma_data_direction direction)
121{ 33{
122 dma_ops->sync_sg_for_cpu(dev, sg, nelems, direction); 34 struct dma_map_ops *ops = get_dma_ops(dev);
123} 35 void *cpu_addr;
124 36
125static inline void dma_sync_sg_for_device(struct device *dev, 37 cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
126 struct scatterlist *sg, int nelems, 38 debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
127 enum dma_data_direction direction) 39 return cpu_addr;
128{
129 if (dma_ops->sync_sg_for_device)
130 dma_ops->sync_sg_for_device(dev, sg, nelems, direction);
131} 40}
132 41
133static inline void dma_sync_single_range_for_cpu(struct device *dev, 42static inline void dma_free_coherent(struct device *dev, size_t size,
134 dma_addr_t dma_handle, 43 void *cpu_addr, dma_addr_t dma_handle)
135 unsigned long offset,
136 size_t size,
137 enum dma_data_direction dir)
138{ 44{
139 dma_sync_single_for_cpu(dev, dma_handle+offset, size, dir); 45 struct dma_map_ops *ops = get_dma_ops(dev);
140}
141 46
142static inline void dma_sync_single_range_for_device(struct device *dev, 47 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
143 dma_addr_t dma_handle, 48 ops->free_coherent(dev, size, cpu_addr, dma_handle);
144 unsigned long offset,
145 size_t size,
146 enum dma_data_direction dir)
147{
148 dma_sync_single_for_device(dev, dma_handle+offset, size, dir);
149} 49}
150 50
151
152static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) 51static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
153{ 52{
154 return (dma_addr == DMA_ERROR_CODE); 53 return (dma_addr == DMA_ERROR_CODE);
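The hand-rolled sparc struct dma_ops and its per-call inline wrappers are dropped in favour of the generic struct dma_map_ops plus asm-generic/dma-mapping-common.h; get_dma_ops() hands back pci32_dma_ops for PCI devices on sparc32 and the global dma_ops otherwise. A toy model of that dispatch is below; every type and name in it is made up, only the shape of the indirection matches:

#include <stdio.h>
#include <stddef.h>

struct device {
        int is_pci;
};

struct dma_map_ops {
        void *(*alloc_coherent)(struct device *dev, size_t size);
};

static void *sbus_alloc(struct device *dev, size_t size)
{
        (void)dev;
        printf("sbus path: alloc %zu bytes\n", size);
        return NULL;
}

static void *pci32_alloc(struct device *dev, size_t size)
{
        (void)dev;
        printf("pci32 path: alloc %zu bytes\n", size);
        return NULL;
}

static struct dma_map_ops sbus_ops  = { .alloc_coherent = sbus_alloc };
static struct dma_map_ops pci32_ops = { .alloc_coherent = pci32_alloc };

/* analogue of get_dma_ops(): PCI devices get pci32_ops, everything else the default */
static struct dma_map_ops *get_ops(struct device *dev)
{
        return dev->is_pci ? &pci32_ops : &sbus_ops;
}

/* analogue of the common wrapper that replaces the old hand-written inline */
static void *dma_alloc(struct device *dev, size_t size)
{
        return get_ops(dev)->alloc_coherent(dev, size);
}

int main(void)
{
        struct device sbus_dev = { 0 }, pci_dev = { 1 };

        dma_alloc(&sbus_dev, 64);
        dma_alloc(&pci_dev, 128);
        return 0;
}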
diff --git a/arch/sparc/include/asm/irq_64.h b/arch/sparc/include/asm/irq_64.h
index 1934f2cbf513..a0b443cb3c1f 100644
--- a/arch/sparc/include/asm/irq_64.h
+++ b/arch/sparc/include/asm/irq_64.h
@@ -89,8 +89,8 @@ static inline unsigned long get_softint(void)
89 return retval; 89 return retval;
90} 90}
91 91
92void __trigger_all_cpu_backtrace(void); 92void arch_trigger_all_cpu_backtrace(void);
93#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace() 93#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
94 94
95extern void *hardirq_stack[NR_CPUS]; 95extern void *hardirq_stack[NR_CPUS];
96extern void *softirq_stack[NR_CPUS]; 96extern void *softirq_stack[NR_CPUS];
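The backtrace hook is renamed to arch_trigger_all_cpu_backtrace(), and the macro now defines the name to itself so generic code can probe for an arch implementation with #ifdef. A small self-contained illustration of that idiom follows; the function body is a placeholder, not the sparc64 implementation:

#include <stdio.h>

static void arch_trigger_all_cpu_backtrace(void)
{
        printf("sending backtrace requests to all cpus\n");
}
#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace

int main(void)
{
#ifdef arch_trigger_all_cpu_backtrace   /* generic code can test for the arch hook */
        arch_trigger_all_cpu_backtrace();
#endif
        return 0;
}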
diff --git a/arch/sparc/include/asm/pci.h b/arch/sparc/include/asm/pci.h
index 6e14fd179335..d9c031f9910f 100644
--- a/arch/sparc/include/asm/pci.h
+++ b/arch/sparc/include/asm/pci.h
@@ -5,4 +5,7 @@
5#else 5#else
6#include <asm/pci_32.h> 6#include <asm/pci_32.h>
7#endif 7#endif
8
9#include <asm-generic/pci-dma-compat.h>
10
8#endif 11#endif
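Pulling in asm-generic/pci-dma-compat.h is what lets the long lists of pci_* prototypes and inlines be deleted from pci_32.h and pci_64.h below: the compat header expresses pci_map_single() and friends as thin wrappers over the dma_* API. A much simplified sketch of that shim, with invented types and an identity "mapping" standing in for the real thing:

#include <stdio.h>
#include <stddef.h>

typedef unsigned long dma_addr_t;       /* illustrative only */

struct pci_dev {
        void *dev;
};

/* stand-in for the generic API the compat header forwards to */
static dma_addr_t dma_map_single(void *dev, void *ptr, size_t size, int dir)
{
        (void)dev; (void)size; (void)dir;
        return (dma_addr_t)ptr;         /* pretend identity mapping */
}

/* the compat shim: old-style entry point expressed in terms of the new API */
static inline dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr,
                                        size_t size, int direction)
{
        return dma_map_single(pdev->dev, ptr, size, direction);
}

int main(void)
{
        struct pci_dev pdev = { NULL };
        char buf[16];

        printf("bus address: 0x%lx\n", pci_map_single(&pdev, buf, sizeof(buf), 0));
        return 0;
}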
diff --git a/arch/sparc/include/asm/pci_32.h b/arch/sparc/include/asm/pci_32.h
index b41c4c198159..ac0e8369fd97 100644
--- a/arch/sparc/include/asm/pci_32.h
+++ b/arch/sparc/include/asm/pci_32.h
@@ -31,42 +31,8 @@ static inline void pcibios_penalize_isa_irq(int irq, int active)
31 */ 31 */
32#define PCI_DMA_BUS_IS_PHYS (0) 32#define PCI_DMA_BUS_IS_PHYS (0)
33 33
34#include <asm/scatterlist.h>
35
36struct pci_dev; 34struct pci_dev;
37 35
38/* Allocate and map kernel buffer using consistent mode DMA for a device.
39 * hwdev should be valid struct pci_dev pointer for PCI devices.
40 */
41extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle);
42
43/* Free and unmap a consistent DMA buffer.
44 * cpu_addr is what was returned from pci_alloc_consistent,
45 * size must be the same as what as passed into pci_alloc_consistent,
46 * and likewise dma_addr must be the same as what *dma_addrp was set to.
47 *
48 * References to the memory and mappings assosciated with cpu_addr/dma_addr
49 * past this call are illegal.
50 */
51extern void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
52
53/* Map a single buffer of the indicated size for DMA in streaming mode.
54 * The 32-bit bus address to use is returned.
55 *
56 * Once the device is given the dma address, the device owns this memory
57 * until either pci_unmap_single or pci_dma_sync_single_for_cpu is performed.
58 */
59extern dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction);
60
61/* Unmap a single streaming mode DMA translation. The dma_addr and size
62 * must match what was provided for in a previous pci_map_single call. All
63 * other usages are undefined.
64 *
65 * After this call, reads by the cpu to the buffer are guaranteed to see
66 * whatever the device wrote there.
67 */
68extern void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction);
69
70/* pci_unmap_{single,page} is not a nop, thus... */ 36/* pci_unmap_{single,page} is not a nop, thus... */
71#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ 37#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
72 dma_addr_t ADDR_NAME; 38 dma_addr_t ADDR_NAME;
@@ -81,69 +47,6 @@ extern void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t
81#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ 47#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
82 (((PTR)->LEN_NAME) = (VAL)) 48 (((PTR)->LEN_NAME) = (VAL))
83 49
84/*
85 * Same as above, only with pages instead of mapped addresses.
86 */
87extern dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
88 unsigned long offset, size_t size, int direction);
89extern void pci_unmap_page(struct pci_dev *hwdev,
90 dma_addr_t dma_address, size_t size, int direction);
91
92/* Map a set of buffers described by scatterlist in streaming
93 * mode for DMA. This is the scather-gather version of the
94 * above pci_map_single interface. Here the scatter gather list
95 * elements are each tagged with the appropriate dma address
96 * and length. They are obtained via sg_dma_{address,length}(SG).
97 *
98 * NOTE: An implementation may be able to use a smaller number of
99 * DMA address/length pairs than there are SG table elements.
100 * (for example via virtual mapping capabilities)
101 * The routine returns the number of addr/length pairs actually
102 * used, at most nents.
103 *
104 * Device ownership issues as mentioned above for pci_map_single are
105 * the same here.
106 */
107extern int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction);
108
109/* Unmap a set of streaming mode DMA translations.
110 * Again, cpu read rules concerning calls here are the same as for
111 * pci_unmap_single() above.
112 */
113extern void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nhwents, int direction);
114
115/* Make physical memory consistent for a single
116 * streaming mode DMA translation after a transfer.
117 *
118 * If you perform a pci_map_single() but wish to interrogate the
119 * buffer using the cpu, yet do not wish to teardown the PCI dma
120 * mapping, you must call this function before doing so. At the
121 * next point you give the PCI dma address back to the card, you
122 * must first perform a pci_dma_sync_for_device, and then the device
123 * again owns the buffer.
124 */
125extern void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction);
126extern void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction);
127
128/* Make physical memory consistent for a set of streaming
129 * mode DMA translations after a transfer.
130 *
131 * The same as pci_dma_sync_single_* but for a scatter-gather list,
132 * same rules and usage.
133 */
134extern void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction);
135extern void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction);
136
137/* Return whether the given PCI device DMA address mask can
138 * be supported properly. For example, if your device can
139 * only drive the low 24-bits during PCI bus mastering, then
140 * you would pass 0x00ffffff as the mask to this function.
141 */
142static inline int pci_dma_supported(struct pci_dev *hwdev, u64 mask)
143{
144 return 1;
145}
146
147#ifdef CONFIG_PCI 50#ifdef CONFIG_PCI
148static inline void pci_dma_burst_advice(struct pci_dev *pdev, 51static inline void pci_dma_burst_advice(struct pci_dev *pdev,
149 enum pci_dma_burst_strategy *strat, 52 enum pci_dma_burst_strategy *strat,
@@ -154,14 +57,6 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev,
154} 57}
155#endif 58#endif
156 59
157#define PCI_DMA_ERROR_CODE (~(dma_addr_t)0x0)
158
159static inline int pci_dma_mapping_error(struct pci_dev *pdev,
160 dma_addr_t dma_addr)
161{
162 return (dma_addr == PCI_DMA_ERROR_CODE);
163}
164
165struct device_node; 60struct device_node;
166extern struct device_node *pci_device_to_OF_node(struct pci_dev *pdev); 61extern struct device_node *pci_device_to_OF_node(struct pci_dev *pdev);
167 62
diff --git a/arch/sparc/include/asm/pci_64.h b/arch/sparc/include/asm/pci_64.h
index 7a1e3566e59c..5cc9f6aa5494 100644
--- a/arch/sparc/include/asm/pci_64.h
+++ b/arch/sparc/include/asm/pci_64.h
@@ -35,37 +35,6 @@ static inline void pcibios_penalize_isa_irq(int irq, int active)
35 */ 35 */
36#define PCI_DMA_BUS_IS_PHYS (0) 36#define PCI_DMA_BUS_IS_PHYS (0)
37 37
38static inline void *pci_alloc_consistent(struct pci_dev *pdev, size_t size,
39 dma_addr_t *dma_handle)
40{
41 return dma_alloc_coherent(&pdev->dev, size, dma_handle, GFP_ATOMIC);
42}
43
44static inline void pci_free_consistent(struct pci_dev *pdev, size_t size,
45 void *vaddr, dma_addr_t dma_handle)
46{
47 return dma_free_coherent(&pdev->dev, size, vaddr, dma_handle);
48}
49
50static inline dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr,
51 size_t size, int direction)
52{
53 return dma_map_single(&pdev->dev, ptr, size,
54 (enum dma_data_direction) direction);
55}
56
57static inline void pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr,
58 size_t size, int direction)
59{
60 dma_unmap_single(&pdev->dev, dma_addr, size,
61 (enum dma_data_direction) direction);
62}
63
64#define pci_map_page(dev, page, off, size, dir) \
65 pci_map_single(dev, (page_address(page) + (off)), size, dir)
66#define pci_unmap_page(dev,addr,sz,dir) \
67 pci_unmap_single(dev,addr,sz,dir)
68
69/* pci_unmap_{single,page} is not a nop, thus... */ 38/* pci_unmap_{single,page} is not a nop, thus... */
70#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ 39#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
71 dma_addr_t ADDR_NAME; 40 dma_addr_t ADDR_NAME;
@@ -80,57 +49,6 @@ static inline void pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr,
80#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ 49#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
81 (((PTR)->LEN_NAME) = (VAL)) 50 (((PTR)->LEN_NAME) = (VAL))
82 51
83static inline int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg,
84 int nents, int direction)
85{
86 return dma_map_sg(&pdev->dev, sg, nents,
87 (enum dma_data_direction) direction);
88}
89
90static inline void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg,
91 int nents, int direction)
92{
93 dma_unmap_sg(&pdev->dev, sg, nents,
94 (enum dma_data_direction) direction);
95}
96
97static inline void pci_dma_sync_single_for_cpu(struct pci_dev *pdev,
98 dma_addr_t dma_handle,
99 size_t size, int direction)
100{
101 dma_sync_single_for_cpu(&pdev->dev, dma_handle, size,
102 (enum dma_data_direction) direction);
103}
104
105static inline void pci_dma_sync_single_for_device(struct pci_dev *pdev,
106 dma_addr_t dma_handle,
107 size_t size, int direction)
108{
109 /* No flushing needed to sync cpu writes to the device. */
110}
111
112static inline void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev,
113 struct scatterlist *sg,
114 int nents, int direction)
115{
116 dma_sync_sg_for_cpu(&pdev->dev, sg, nents,
117 (enum dma_data_direction) direction);
118}
119
120static inline void pci_dma_sync_sg_for_device(struct pci_dev *pdev,
121 struct scatterlist *sg,
122 int nelems, int direction)
123{
124 /* No flushing needed to sync cpu writes to the device. */
125}
126
127/* Return whether the given PCI device DMA address mask can
128 * be supported properly. For example, if your device can
129 * only drive the low 24-bits during PCI bus mastering, then
130 * you would pass 0x00ffffff as the mask to this function.
131 */
132extern int pci_dma_supported(struct pci_dev *hwdev, u64 mask);
133
134/* PCI IOMMU mapping bypass support. */ 52/* PCI IOMMU mapping bypass support. */
135 53
136/* PCI 64-bit addressing works for all slots on all controller 54/* PCI 64-bit addressing works for all slots on all controller
@@ -140,12 +58,6 @@ extern int pci_dma_supported(struct pci_dev *hwdev, u64 mask);
140#define PCI64_REQUIRED_MASK (~(dma64_addr_t)0) 58#define PCI64_REQUIRED_MASK (~(dma64_addr_t)0)
141#define PCI64_ADDR_BASE 0xfffc000000000000UL 59#define PCI64_ADDR_BASE 0xfffc000000000000UL
142 60
143static inline int pci_dma_mapping_error(struct pci_dev *pdev,
144 dma_addr_t dma_addr)
145{
146 return dma_mapping_error(&pdev->dev, dma_addr);
147}
148
149#ifdef CONFIG_PCI 61#ifdef CONFIG_PCI
150static inline void pci_dma_burst_advice(struct pci_dev *pdev, 62static inline void pci_dma_burst_advice(struct pci_dev *pdev,
151 enum pci_dma_burst_strategy *strat, 63 enum pci_dma_burst_strategy *strat,
diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h
index 46f91ab66a50..857630cff636 100644
--- a/arch/sparc/include/asm/spinlock_32.h
+++ b/arch/sparc/include/asm/spinlock_32.h
@@ -76,7 +76,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
76 * 76 *
77 * Unfortunately this scheme limits us to ~16,000,000 cpus. 77 * Unfortunately this scheme limits us to ~16,000,000 cpus.
78 */ 78 */
79static inline void __read_lock(raw_rwlock_t *rw) 79static inline void arch_read_lock(raw_rwlock_t *rw)
80{ 80{
81 register raw_rwlock_t *lp asm("g1"); 81 register raw_rwlock_t *lp asm("g1");
82 lp = rw; 82 lp = rw;
@@ -92,11 +92,11 @@ static inline void __read_lock(raw_rwlock_t *rw)
92#define __raw_read_lock(lock) \ 92#define __raw_read_lock(lock) \
93do { unsigned long flags; \ 93do { unsigned long flags; \
94 local_irq_save(flags); \ 94 local_irq_save(flags); \
95 __read_lock(lock); \ 95 arch_read_lock(lock); \
96 local_irq_restore(flags); \ 96 local_irq_restore(flags); \
97} while(0) 97} while(0)
98 98
99static inline void __read_unlock(raw_rwlock_t *rw) 99static inline void arch_read_unlock(raw_rwlock_t *rw)
100{ 100{
101 register raw_rwlock_t *lp asm("g1"); 101 register raw_rwlock_t *lp asm("g1");
102 lp = rw; 102 lp = rw;
@@ -112,7 +112,7 @@ static inline void __read_unlock(raw_rwlock_t *rw)
112#define __raw_read_unlock(lock) \ 112#define __raw_read_unlock(lock) \
113do { unsigned long flags; \ 113do { unsigned long flags; \
114 local_irq_save(flags); \ 114 local_irq_save(flags); \
115 __read_unlock(lock); \ 115 arch_read_unlock(lock); \
116 local_irq_restore(flags); \ 116 local_irq_restore(flags); \
117} while(0) 117} while(0)
118 118
@@ -150,7 +150,7 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
150 return (val == 0); 150 return (val == 0);
151} 151}
152 152
153static inline int __read_trylock(raw_rwlock_t *rw) 153static inline int arch_read_trylock(raw_rwlock_t *rw)
154{ 154{
155 register raw_rwlock_t *lp asm("g1"); 155 register raw_rwlock_t *lp asm("g1");
156 register int res asm("o0"); 156 register int res asm("o0");
@@ -169,7 +169,7 @@ static inline int __read_trylock(raw_rwlock_t *rw)
169({ unsigned long flags; \ 169({ unsigned long flags; \
170 int res; \ 170 int res; \
171 local_irq_save(flags); \ 171 local_irq_save(flags); \
172 res = __read_trylock(lock); \ 172 res = arch_read_trylock(lock); \
173 local_irq_restore(flags); \ 173 local_irq_restore(flags); \
174 res; \ 174 res; \
175}) 175})
diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
index f6b2b92ad8d2..43e514783582 100644
--- a/arch/sparc/include/asm/spinlock_64.h
+++ b/arch/sparc/include/asm/spinlock_64.h
@@ -92,7 +92,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
92 92
93/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */ 93/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
94 94
95static void inline __read_lock(raw_rwlock_t *lock) 95static void inline arch_read_lock(raw_rwlock_t *lock)
96{ 96{
97 unsigned long tmp1, tmp2; 97 unsigned long tmp1, tmp2;
98 98
@@ -115,7 +115,7 @@ static void inline __read_lock(raw_rwlock_t *lock)
115 : "memory"); 115 : "memory");
116} 116}
117 117
118static int inline __read_trylock(raw_rwlock_t *lock) 118static int inline arch_read_trylock(raw_rwlock_t *lock)
119{ 119{
120 int tmp1, tmp2; 120 int tmp1, tmp2;
121 121
@@ -136,7 +136,7 @@ static int inline __read_trylock(raw_rwlock_t *lock)
136 return tmp1; 136 return tmp1;
137} 137}
138 138
139static void inline __read_unlock(raw_rwlock_t *lock) 139static void inline arch_read_unlock(raw_rwlock_t *lock)
140{ 140{
141 unsigned long tmp1, tmp2; 141 unsigned long tmp1, tmp2;
142 142
@@ -152,7 +152,7 @@ static void inline __read_unlock(raw_rwlock_t *lock)
152 : "memory"); 152 : "memory");
153} 153}
154 154
155static void inline __write_lock(raw_rwlock_t *lock) 155static void inline arch_write_lock(raw_rwlock_t *lock)
156{ 156{
157 unsigned long mask, tmp1, tmp2; 157 unsigned long mask, tmp1, tmp2;
158 158
@@ -177,7 +177,7 @@ static void inline __write_lock(raw_rwlock_t *lock)
177 : "memory"); 177 : "memory");
178} 178}
179 179
180static void inline __write_unlock(raw_rwlock_t *lock) 180static void inline arch_write_unlock(raw_rwlock_t *lock)
181{ 181{
182 __asm__ __volatile__( 182 __asm__ __volatile__(
183" stw %%g0, [%0]" 183" stw %%g0, [%0]"
@@ -186,7 +186,7 @@ static void inline __write_unlock(raw_rwlock_t *lock)
186 : "memory"); 186 : "memory");
187} 187}
188 188
189static int inline __write_trylock(raw_rwlock_t *lock) 189static int inline arch_write_trylock(raw_rwlock_t *lock)
190{ 190{
191 unsigned long mask, tmp1, tmp2, result; 191 unsigned long mask, tmp1, tmp2, result;
192 192
@@ -210,14 +210,14 @@ static int inline __write_trylock(raw_rwlock_t *lock)
210 return result; 210 return result;
211} 211}
212 212
213#define __raw_read_lock(p) __read_lock(p) 213#define __raw_read_lock(p) arch_read_lock(p)
214#define __raw_read_lock_flags(p, f) __read_lock(p) 214#define __raw_read_lock_flags(p, f) arch_read_lock(p)
215#define __raw_read_trylock(p) __read_trylock(p) 215#define __raw_read_trylock(p) arch_read_trylock(p)
216#define __raw_read_unlock(p) __read_unlock(p) 216#define __raw_read_unlock(p) arch_read_unlock(p)
217#define __raw_write_lock(p) __write_lock(p) 217#define __raw_write_lock(p) arch_write_lock(p)
218#define __raw_write_lock_flags(p, f) __write_lock(p) 218#define __raw_write_lock_flags(p, f) arch_write_lock(p)
219#define __raw_write_unlock(p) __write_unlock(p) 219#define __raw_write_unlock(p) arch_write_unlock(p)
220#define __raw_write_trylock(p) __write_trylock(p) 220#define __raw_write_trylock(p) arch_write_trylock(p)
221 221
222#define __raw_read_can_lock(rw) (!((rw)->lock & 0x80000000UL)) 222#define __raw_read_can_lock(rw) (!((rw)->lock & 0x80000000UL))
223#define __raw_write_can_lock(rw) (!(rw)->lock) 223#define __raw_write_can_lock(rw) (!(rw)->lock)
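On sparc64 the read/write lock primitives move from the __read_*/__write_* names to arch_read_*/arch_write_*, and the __raw_* entry points become plain macro aliases over them. A compilable toy showing just that aliasing layer; the lock body here is a trivial counter, whereas the real primitives are inline assembly:

#include <stdio.h>

typedef struct {
        volatile int lock;
} raw_rwlock_t;

/* the arch primitive does the work (here a counter instead of asm) */
static inline void arch_read_lock(raw_rwlock_t *rw)
{
        rw->lock++;
}

static inline void arch_read_unlock(raw_rwlock_t *rw)
{
        rw->lock--;
}

/* the generic-facing names become plain aliases, as in the hunk above */
#define __raw_read_lock(p)      arch_read_lock(p)
#define __raw_read_unlock(p)    arch_read_unlock(p)

int main(void)
{
        raw_rwlock_t rw = { 0 };

        __raw_read_lock(&rw);
        printf("readers held: %d\n", rw.lock);
        __raw_read_unlock(&rw);
        return 0;
}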
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index f96dc5761f74..247cc620cee5 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -63,7 +63,7 @@ obj-$(CONFIG_SPARC64_SMP) += cpumap.o
63obj-$(CONFIG_SPARC32) += devres.o 63obj-$(CONFIG_SPARC32) += devres.o
64devres-y := ../../../kernel/irq/devres.o 64devres-y := ../../../kernel/irq/devres.o
65 65
66obj-$(CONFIG_SPARC32) += dma.o 66obj-y += dma.o
67 67
68obj-$(CONFIG_SPARC32_PCI) += pcic.o 68obj-$(CONFIG_SPARC32_PCI) += pcic.o
69 69
diff --git a/arch/sparc/kernel/dma.c b/arch/sparc/kernel/dma.c
index 524c32f97c55..e1ba8ee21b9a 100644
--- a/arch/sparc/kernel/dma.c
+++ b/arch/sparc/kernel/dma.c
@@ -1,178 +1,13 @@
1/* dma.c: PCI and SBUS DMA accessors for 32-bit sparc.
2 *
3 * Copyright (C) 2008 David S. Miller <davem@davemloft.net>
4 */
5
6#include <linux/kernel.h> 1#include <linux/kernel.h>
7#include <linux/module.h> 2#include <linux/module.h>
8#include <linux/dma-mapping.h> 3#include <linux/dma-mapping.h>
9#include <linux/scatterlist.h> 4#include <linux/dma-debug.h>
10#include <linux/mm.h>
11
12#ifdef CONFIG_PCI
13#include <linux/pci.h>
14#endif
15 5
16#include "dma.h" 6#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 15)
17 7
18int dma_supported(struct device *dev, u64 mask) 8static int __init dma_init(void)
19{ 9{
20#ifdef CONFIG_PCI 10 dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
21 if (dev->bus == &pci_bus_type)
22 return pci_dma_supported(to_pci_dev(dev), mask);
23#endif
24 return 0; 11 return 0;
25} 12}
26EXPORT_SYMBOL(dma_supported); 13fs_initcall(dma_init);
27
28int dma_set_mask(struct device *dev, u64 dma_mask)
29{
30#ifdef CONFIG_PCI
31 if (dev->bus == &pci_bus_type)
32 return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
33#endif
34 return -EOPNOTSUPP;
35}
36EXPORT_SYMBOL(dma_set_mask);
37
38static void *dma32_alloc_coherent(struct device *dev, size_t size,
39 dma_addr_t *dma_handle, gfp_t flag)
40{
41#ifdef CONFIG_PCI
42 if (dev->bus == &pci_bus_type)
43 return pci_alloc_consistent(to_pci_dev(dev), size, dma_handle);
44#endif
45 return sbus_alloc_consistent(dev, size, dma_handle);
46}
47
48static void dma32_free_coherent(struct device *dev, size_t size,
49 void *cpu_addr, dma_addr_t dma_handle)
50{
51#ifdef CONFIG_PCI
52 if (dev->bus == &pci_bus_type) {
53 pci_free_consistent(to_pci_dev(dev), size,
54 cpu_addr, dma_handle);
55 return;
56 }
57#endif
58 sbus_free_consistent(dev, size, cpu_addr, dma_handle);
59}
60
61static dma_addr_t dma32_map_page(struct device *dev, struct page *page,
62 unsigned long offset, size_t size,
63 enum dma_data_direction direction)
64{
65#ifdef CONFIG_PCI
66 if (dev->bus == &pci_bus_type)
67 return pci_map_page(to_pci_dev(dev), page, offset,
68 size, (int)direction);
69#endif
70 return sbus_map_single(dev, page_address(page) + offset,
71 size, (int)direction);
72}
73
74static void dma32_unmap_page(struct device *dev, dma_addr_t dma_address,
75 size_t size, enum dma_data_direction direction)
76{
77#ifdef CONFIG_PCI
78 if (dev->bus == &pci_bus_type) {
79 pci_unmap_page(to_pci_dev(dev), dma_address,
80 size, (int)direction);
81 return;
82 }
83#endif
84 sbus_unmap_single(dev, dma_address, size, (int)direction);
85}
86
87static int dma32_map_sg(struct device *dev, struct scatterlist *sg,
88 int nents, enum dma_data_direction direction)
89{
90#ifdef CONFIG_PCI
91 if (dev->bus == &pci_bus_type)
92 return pci_map_sg(to_pci_dev(dev), sg, nents, (int)direction);
93#endif
94 return sbus_map_sg(dev, sg, nents, direction);
95}
96
97void dma32_unmap_sg(struct device *dev, struct scatterlist *sg,
98 int nents, enum dma_data_direction direction)
99{
100#ifdef CONFIG_PCI
101 if (dev->bus == &pci_bus_type) {
102 pci_unmap_sg(to_pci_dev(dev), sg, nents, (int)direction);
103 return;
104 }
105#endif
106 sbus_unmap_sg(dev, sg, nents, (int)direction);
107}
108
109static void dma32_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
110 size_t size,
111 enum dma_data_direction direction)
112{
113#ifdef CONFIG_PCI
114 if (dev->bus == &pci_bus_type) {
115 pci_dma_sync_single_for_cpu(to_pci_dev(dev), dma_handle,
116 size, (int)direction);
117 return;
118 }
119#endif
120 sbus_dma_sync_single_for_cpu(dev, dma_handle, size, (int) direction);
121}
122
123static void dma32_sync_single_for_device(struct device *dev,
124 dma_addr_t dma_handle, size_t size,
125 enum dma_data_direction direction)
126{
127#ifdef CONFIG_PCI
128 if (dev->bus == &pci_bus_type) {
129 pci_dma_sync_single_for_device(to_pci_dev(dev), dma_handle,
130 size, (int)direction);
131 return;
132 }
133#endif
134 sbus_dma_sync_single_for_device(dev, dma_handle, size, (int) direction);
135}
136
137static void dma32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
138 int nelems, enum dma_data_direction direction)
139{
140#ifdef CONFIG_PCI
141 if (dev->bus == &pci_bus_type) {
142 pci_dma_sync_sg_for_cpu(to_pci_dev(dev), sg,
143 nelems, (int)direction);
144 return;
145 }
146#endif
147 BUG();
148}
149
150static void dma32_sync_sg_for_device(struct device *dev,
151 struct scatterlist *sg, int nelems,
152 enum dma_data_direction direction)
153{
154#ifdef CONFIG_PCI
155 if (dev->bus == &pci_bus_type) {
156 pci_dma_sync_sg_for_device(to_pci_dev(dev), sg,
157 nelems, (int)direction);
158 return;
159 }
160#endif
161 BUG();
162}
163
164static const struct dma_ops dma32_dma_ops = {
165 .alloc_coherent = dma32_alloc_coherent,
166 .free_coherent = dma32_free_coherent,
167 .map_page = dma32_map_page,
168 .unmap_page = dma32_unmap_page,
169 .map_sg = dma32_map_sg,
170 .unmap_sg = dma32_unmap_sg,
171 .sync_single_for_cpu = dma32_sync_single_for_cpu,
172 .sync_single_for_device = dma32_sync_single_for_device,
173 .sync_sg_for_cpu = dma32_sync_sg_for_cpu,
174 .sync_sg_for_device = dma32_sync_sg_for_device,
175};
176
177const struct dma_ops *dma_ops = &dma32_dma_ops;
178EXPORT_SYMBOL(dma_ops);
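With the ops tables moved into iommu.c and ioport.c, dma.c shrinks to a single early hook that sizes the dma-debug entry pool and registers it with fs_initcall(). A standalone approximation of what remains, with a stub in place of the real dma_debug_init() and main() standing in for the initcall machinery:

#include <stdio.h>

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 15)

static int dma_debug_entries;

/* stub for dma_debug_init(): just records the requested pool size */
static void dma_debug_init(int num_entries)
{
        dma_debug_entries = num_entries;
        printf("dma-debug: preallocated %d entries\n", dma_debug_entries);
}

static int dma_init(void)
{
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
        return 0;
}

int main(void)          /* fs_initcall(dma_init) is mimicked by calling it here */
{
        return dma_init();
}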
diff --git a/arch/sparc/kernel/dma.h b/arch/sparc/kernel/dma.h
deleted file mode 100644
index f8d8951adb53..000000000000
--- a/arch/sparc/kernel/dma.h
+++ /dev/null
@@ -1,14 +0,0 @@
1void *sbus_alloc_consistent(struct device *dev, long len, u32 *dma_addrp);
2void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba);
3dma_addr_t sbus_map_single(struct device *dev, void *va,
4 size_t len, int direction);
5void sbus_unmap_single(struct device *dev, dma_addr_t ba,
6 size_t n, int direction);
7int sbus_map_sg(struct device *dev, struct scatterlist *sg,
8 int n, int direction);
9void sbus_unmap_sg(struct device *dev, struct scatterlist *sg,
10 int n, int direction);
11void sbus_dma_sync_single_for_cpu(struct device *dev, dma_addr_t ba,
12 size_t size, int direction);
13void sbus_dma_sync_single_for_device(struct device *dev, dma_addr_t ba,
14 size_t size, int direction);
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index 0aeaefe696b9..7690cc219ecc 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -353,7 +353,8 @@ static void dma_4u_free_coherent(struct device *dev, size_t size,
353 353
354static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page, 354static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
355 unsigned long offset, size_t sz, 355 unsigned long offset, size_t sz,
356 enum dma_data_direction direction) 356 enum dma_data_direction direction,
357 struct dma_attrs *attrs)
357{ 358{
358 struct iommu *iommu; 359 struct iommu *iommu;
359 struct strbuf *strbuf; 360 struct strbuf *strbuf;
@@ -474,7 +475,8 @@ do_flush_sync:
474} 475}
475 476
476static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr, 477static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
477 size_t sz, enum dma_data_direction direction) 478 size_t sz, enum dma_data_direction direction,
479 struct dma_attrs *attrs)
478{ 480{
479 struct iommu *iommu; 481 struct iommu *iommu;
480 struct strbuf *strbuf; 482 struct strbuf *strbuf;
@@ -520,7 +522,8 @@ static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
520} 522}
521 523
522static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist, 524static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
523 int nelems, enum dma_data_direction direction) 525 int nelems, enum dma_data_direction direction,
526 struct dma_attrs *attrs)
524{ 527{
525 struct scatterlist *s, *outs, *segstart; 528 struct scatterlist *s, *outs, *segstart;
526 unsigned long flags, handle, prot, ctx; 529 unsigned long flags, handle, prot, ctx;
@@ -691,7 +694,8 @@ static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
691} 694}
692 695
693static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist, 696static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
694 int nelems, enum dma_data_direction direction) 697 int nelems, enum dma_data_direction direction,
698 struct dma_attrs *attrs)
695{ 699{
696 unsigned long flags, ctx; 700 unsigned long flags, ctx;
697 struct scatterlist *sg; 701 struct scatterlist *sg;
@@ -822,7 +826,7 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
822 spin_unlock_irqrestore(&iommu->lock, flags); 826 spin_unlock_irqrestore(&iommu->lock, flags);
823} 827}
824 828
825static const struct dma_ops sun4u_dma_ops = { 829static struct dma_map_ops sun4u_dma_ops = {
826 .alloc_coherent = dma_4u_alloc_coherent, 830 .alloc_coherent = dma_4u_alloc_coherent,
827 .free_coherent = dma_4u_free_coherent, 831 .free_coherent = dma_4u_free_coherent,
828 .map_page = dma_4u_map_page, 832 .map_page = dma_4u_map_page,
@@ -833,9 +837,11 @@ static const struct dma_ops sun4u_dma_ops = {
833 .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu, 837 .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
834}; 838};
835 839
836const struct dma_ops *dma_ops = &sun4u_dma_ops; 840struct dma_map_ops *dma_ops = &sun4u_dma_ops;
837EXPORT_SYMBOL(dma_ops); 841EXPORT_SYMBOL(dma_ops);
838 842
843extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
844
839int dma_supported(struct device *dev, u64 device_mask) 845int dma_supported(struct device *dev, u64 device_mask)
840{ 846{
841 struct iommu *iommu = dev->archdata.iommu; 847 struct iommu *iommu = dev->archdata.iommu;
@@ -849,7 +855,7 @@ int dma_supported(struct device *dev, u64 device_mask)
849 855
850#ifdef CONFIG_PCI 856#ifdef CONFIG_PCI
851 if (dev->bus == &pci_bus_type) 857 if (dev->bus == &pci_bus_type)
852 return pci_dma_supported(to_pci_dev(dev), device_mask); 858 return pci64_dma_supported(to_pci_dev(dev), device_mask);
853#endif 859#endif
854 860
855 return 0; 861 return 0;
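The sun4u callbacks gain a struct dma_attrs * argument so their signatures fit the generic dma_map_ops slots; as far as these hunks show, the attribute is merely accepted. A sketch of the changed map_page shape, every name in it a placeholder:

#include <stdio.h>
#include <stddef.h>

struct dma_attrs {
        unsigned long flags;
};

/* same parameter shape as the new hook: ..., direction, plus struct dma_attrs * */
static unsigned long map_page(void *dev, char *page, unsigned long offset,
                              size_t sz, int dir, struct dma_attrs *attrs)
{
        (void)dev; (void)sz; (void)dir; (void)attrs;    /* attrs accepted, not consumed */
        return (unsigned long)(page + offset);
}

int main(void)
{
        static char page[4096];

        printf("mapped at 0x%lx\n", map_page(NULL, page, 128, 64, 0, NULL));
        return 0;
}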
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index e71ce79d8c15..9f61fd8cbb7b 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -49,8 +49,6 @@
49#include <asm/iommu.h> 49#include <asm/iommu.h>
50#include <asm/io-unit.h> 50#include <asm/io-unit.h>
51 51
52#include "dma.h"
53
54#define mmu_inval_dma_area(p, l) /* Anton pulled it out for 2.4.0-xx */ 52#define mmu_inval_dma_area(p, l) /* Anton pulled it out for 2.4.0-xx */
55 53
56static struct resource *_sparc_find_resource(struct resource *r, 54static struct resource *_sparc_find_resource(struct resource *r,
@@ -247,7 +245,8 @@ EXPORT_SYMBOL(sbus_set_sbus64);
247 * Typically devices use them for control blocks. 245 * Typically devices use them for control blocks.
248 * CPU may access them without any explicit flushing. 246 * CPU may access them without any explicit flushing.
249 */ 247 */
250void *sbus_alloc_consistent(struct device *dev, long len, u32 *dma_addrp) 248static void *sbus_alloc_coherent(struct device *dev, size_t len,
249 dma_addr_t *dma_addrp, gfp_t gfp)
251{ 250{
252 struct of_device *op = to_of_device(dev); 251 struct of_device *op = to_of_device(dev);
253 unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK; 252 unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
@@ -300,7 +299,8 @@ err_nopages:
300 return NULL; 299 return NULL;
301} 300}
302 301
303void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba) 302static void sbus_free_coherent(struct device *dev, size_t n, void *p,
303 dma_addr_t ba)
304{ 304{
305 struct resource *res; 305 struct resource *res;
306 struct page *pgv; 306 struct page *pgv;
@@ -318,7 +318,7 @@ void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba)
318 318
319 n = (n + PAGE_SIZE-1) & PAGE_MASK; 319 n = (n + PAGE_SIZE-1) & PAGE_MASK;
320 if ((res->end-res->start)+1 != n) { 320 if ((res->end-res->start)+1 != n) {
321 printk("sbus_free_consistent: region 0x%lx asked 0x%lx\n", 321 printk("sbus_free_consistent: region 0x%lx asked 0x%zx\n",
322 (long)((res->end-res->start)+1), n); 322 (long)((res->end-res->start)+1), n);
323 return; 323 return;
324 } 324 }
@@ -338,8 +338,13 @@ void sbus_free_consistent(struct device *dev, long n, void *p, u32 ba)
338 * CPU view of this memory may be inconsistent with 338 * CPU view of this memory may be inconsistent with
339 * a device view and explicit flushing is necessary. 339 * a device view and explicit flushing is necessary.
340 */ 340 */
341dma_addr_t sbus_map_single(struct device *dev, void *va, size_t len, int direction) 341static dma_addr_t sbus_map_page(struct device *dev, struct page *page,
342 unsigned long offset, size_t len,
343 enum dma_data_direction dir,
344 struct dma_attrs *attrs)
342{ 345{
346 void *va = page_address(page) + offset;
347
343 /* XXX why are some lengths signed, others unsigned? */ 348 /* XXX why are some lengths signed, others unsigned? */
344 if (len <= 0) { 349 if (len <= 0) {
345 return 0; 350 return 0;
@@ -351,12 +356,14 @@ dma_addr_t sbus_map_single(struct device *dev, void *va, size_t len, int directi
351 return mmu_get_scsi_one(dev, va, len); 356 return mmu_get_scsi_one(dev, va, len);
352} 357}
353 358
354void sbus_unmap_single(struct device *dev, dma_addr_t ba, size_t n, int direction) 359static void sbus_unmap_page(struct device *dev, dma_addr_t ba, size_t n,
360 enum dma_data_direction dir, struct dma_attrs *attrs)
355{ 361{
356 mmu_release_scsi_one(dev, ba, n); 362 mmu_release_scsi_one(dev, ba, n);
357} 363}
358 364
359int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, int direction) 365static int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n,
366 enum dma_data_direction dir, struct dma_attrs *attrs)
360{ 367{
361 mmu_get_scsi_sgl(dev, sg, n); 368 mmu_get_scsi_sgl(dev, sg, n);
362 369
@@ -367,19 +374,38 @@ int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, int direction
367 return n; 374 return n;
368} 375}
369 376
370void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n, int direction) 377static void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n,
378 enum dma_data_direction dir, struct dma_attrs *attrs)
371{ 379{
372 mmu_release_scsi_sgl(dev, sg, n); 380 mmu_release_scsi_sgl(dev, sg, n);
373} 381}
374 382
375void sbus_dma_sync_single_for_cpu(struct device *dev, dma_addr_t ba, size_t size, int direction) 383static void sbus_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
384 int n, enum dma_data_direction dir)
376{ 385{
386 BUG();
377} 387}
378 388
379void sbus_dma_sync_single_for_device(struct device *dev, dma_addr_t ba, size_t size, int direction) 389static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
390 int n, enum dma_data_direction dir)
380{ 391{
392 BUG();
381} 393}
382 394
395struct dma_map_ops sbus_dma_ops = {
396 .alloc_coherent = sbus_alloc_coherent,
397 .free_coherent = sbus_free_coherent,
398 .map_page = sbus_map_page,
399 .unmap_page = sbus_unmap_page,
400 .map_sg = sbus_map_sg,
401 .unmap_sg = sbus_unmap_sg,
402 .sync_sg_for_cpu = sbus_sync_sg_for_cpu,
403 .sync_sg_for_device = sbus_sync_sg_for_device,
404};
405
406struct dma_map_ops *dma_ops = &sbus_dma_ops;
407EXPORT_SYMBOL(dma_ops);
408
383static int __init sparc_register_ioport(void) 409static int __init sparc_register_ioport(void)
384{ 410{
385 register_proc_sparc_ioport(); 411 register_proc_sparc_ioport();
@@ -396,7 +422,8 @@ arch_initcall(sparc_register_ioport);
396/* Allocate and map kernel buffer using consistent mode DMA for a device. 422/* Allocate and map kernel buffer using consistent mode DMA for a device.
397 * hwdev should be valid struct pci_dev pointer for PCI devices. 423 * hwdev should be valid struct pci_dev pointer for PCI devices.
398 */ 424 */
399void *pci_alloc_consistent(struct pci_dev *pdev, size_t len, dma_addr_t *pba) 425static void *pci32_alloc_coherent(struct device *dev, size_t len,
426 dma_addr_t *pba, gfp_t gfp)
400{ 427{
401 unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK; 428 unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
402 unsigned long va; 429 unsigned long va;
@@ -440,7 +467,6 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t len, dma_addr_t *pba)
440 *pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */ 467 *pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */
441 return (void *) res->start; 468 return (void *) res->start;
442} 469}
443EXPORT_SYMBOL(pci_alloc_consistent);
444 470
445/* Free and unmap a consistent DMA buffer. 471/* Free and unmap a consistent DMA buffer.
446 * cpu_addr is what was returned from pci_alloc_consistent, 472 * cpu_addr is what was returned from pci_alloc_consistent,
@@ -450,7 +476,8 @@ EXPORT_SYMBOL(pci_alloc_consistent);
450 * References to the memory and mappings associated with cpu_addr/dma_addr 476 * References to the memory and mappings associated with cpu_addr/dma_addr
451 * past this call are illegal. 477 * past this call are illegal.
452 */ 478 */
453void pci_free_consistent(struct pci_dev *pdev, size_t n, void *p, dma_addr_t ba) 479static void pci32_free_coherent(struct device *dev, size_t n, void *p,
480 dma_addr_t ba)
454{ 481{
455 struct resource *res; 482 struct resource *res;
456 unsigned long pgp; 483 unsigned long pgp;
@@ -482,60 +509,18 @@ void pci_free_consistent(struct pci_dev *pdev, size_t n, void *p, dma_addr_t ba)
482 509
483 free_pages(pgp, get_order(n)); 510 free_pages(pgp, get_order(n));
484} 511}
485EXPORT_SYMBOL(pci_free_consistent);
486
487/* Map a single buffer of the indicated size for DMA in streaming mode.
488 * The 32-bit bus address to use is returned.
489 *
490 * Once the device is given the dma address, the device owns this memory
491 * until either pci_unmap_single or pci_dma_sync_single_* is performed.
492 */
493dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
494 int direction)
495{
496 BUG_ON(direction == PCI_DMA_NONE);
497 /* IIep is write-through, not flushing. */
498 return virt_to_phys(ptr);
499}
500EXPORT_SYMBOL(pci_map_single);
501
502/* Unmap a single streaming mode DMA translation. The dma_addr and size
503 * must match what was provided for in a previous pci_map_single call. All
504 * other usages are undefined.
505 *
506 * After this call, reads by the cpu to the buffer are guaranteed to see
507 * whatever the device wrote there.
508 */
509void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t ba, size_t size,
510 int direction)
511{
512 BUG_ON(direction == PCI_DMA_NONE);
513 if (direction != PCI_DMA_TODEVICE) {
514 mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
515 (size + PAGE_SIZE-1) & PAGE_MASK);
516 }
517}
518EXPORT_SYMBOL(pci_unmap_single);
519 512
520/* 513/*
521 * Same as pci_map_single, but with pages. 514 * Same as pci_map_single, but with pages.
522 */ 515 */
523dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page, 516static dma_addr_t pci32_map_page(struct device *dev, struct page *page,
524 unsigned long offset, size_t size, int direction) 517 unsigned long offset, size_t size,
518 enum dma_data_direction dir,
519 struct dma_attrs *attrs)
525{ 520{
526 BUG_ON(direction == PCI_DMA_NONE);
527 /* IIep is write-through, not flushing. */ 521 /* IIep is write-through, not flushing. */
528 return page_to_phys(page) + offset; 522 return page_to_phys(page) + offset;
529} 523}
530EXPORT_SYMBOL(pci_map_page);
531
532void pci_unmap_page(struct pci_dev *hwdev,
533 dma_addr_t dma_address, size_t size, int direction)
534{
535 BUG_ON(direction == PCI_DMA_NONE);
536 /* mmu_inval_dma_area XXX */
537}
538EXPORT_SYMBOL(pci_unmap_page);
539 524
540/* Map a set of buffers described by scatterlist in streaming 525/* Map a set of buffers described by scatterlist in streaming
541 * mode for DMA. This is the scather-gather version of the 526 * mode for DMA. This is the scather-gather version of the
@@ -552,13 +537,13 @@ EXPORT_SYMBOL(pci_unmap_page);
552 * Device ownership issues as mentioned above for pci_map_single are 537 * Device ownership issues as mentioned above for pci_map_single are
553 * the same here. 538 * the same here.
554 */ 539 */
555int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, 540static int pci32_map_sg(struct device *device, struct scatterlist *sgl,
556 int direction) 541 int nents, enum dma_data_direction dir,
542 struct dma_attrs *attrs)
557{ 543{
558 struct scatterlist *sg; 544 struct scatterlist *sg;
559 int n; 545 int n;
560 546
561 BUG_ON(direction == PCI_DMA_NONE);
562 /* IIep is write-through, not flushing. */ 547 /* IIep is write-through, not flushing. */
563 for_each_sg(sgl, sg, nents, n) { 548 for_each_sg(sgl, sg, nents, n) {
564 BUG_ON(page_address(sg_page(sg)) == NULL); 549 BUG_ON(page_address(sg_page(sg)) == NULL);
@@ -567,20 +552,19 @@ int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
567 } 552 }
568 return nents; 553 return nents;
569} 554}
570EXPORT_SYMBOL(pci_map_sg);
571 555
572/* Unmap a set of streaming mode DMA translations. 556/* Unmap a set of streaming mode DMA translations.
573 * Again, cpu read rules concerning calls here are the same as for 557 * Again, cpu read rules concerning calls here are the same as for
574 * pci_unmap_single() above. 558 * pci_unmap_single() above.
575 */ 559 */
576void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, 560static void pci32_unmap_sg(struct device *dev, struct scatterlist *sgl,
577 int direction) 561 int nents, enum dma_data_direction dir,
562 struct dma_attrs *attrs)
578{ 563{
579 struct scatterlist *sg; 564 struct scatterlist *sg;
580 int n; 565 int n;
581 566
582 BUG_ON(direction == PCI_DMA_NONE); 567 if (dir != PCI_DMA_TODEVICE) {
583 if (direction != PCI_DMA_TODEVICE) {
584 for_each_sg(sgl, sg, nents, n) { 568 for_each_sg(sgl, sg, nents, n) {
585 BUG_ON(page_address(sg_page(sg)) == NULL); 569 BUG_ON(page_address(sg_page(sg)) == NULL);
586 mmu_inval_dma_area( 570 mmu_inval_dma_area(
@@ -589,7 +573,6 @@ void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
589 } 573 }
590 } 574 }
591} 575}
592EXPORT_SYMBOL(pci_unmap_sg);
593 576
594/* Make physical memory consistent for a single 577/* Make physical memory consistent for a single
595 * streaming mode DMA translation before or after a transfer. 578 * streaming mode DMA translation before or after a transfer.
@@ -601,25 +584,23 @@ EXPORT_SYMBOL(pci_unmap_sg);
601 * must first perform a pci_dma_sync_for_device, and then the 584 * must first perform a pci_dma_sync_for_device, and then the
602 * device again owns the buffer. 585 * device again owns the buffer.
603 */ 586 */
604void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction) 587static void pci32_sync_single_for_cpu(struct device *dev, dma_addr_t ba,
588 size_t size, enum dma_data_direction dir)
605{ 589{
606 BUG_ON(direction == PCI_DMA_NONE); 590 if (dir != PCI_DMA_TODEVICE) {
607 if (direction != PCI_DMA_TODEVICE) {
608 mmu_inval_dma_area((unsigned long)phys_to_virt(ba), 591 mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
609 (size + PAGE_SIZE-1) & PAGE_MASK); 592 (size + PAGE_SIZE-1) & PAGE_MASK);
610 } 593 }
611} 594}
612EXPORT_SYMBOL(pci_dma_sync_single_for_cpu);
613 595
614void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction) 596static void pci32_sync_single_for_device(struct device *dev, dma_addr_t ba,
597 size_t size, enum dma_data_direction dir)
615{ 598{
616 BUG_ON(direction == PCI_DMA_NONE); 599 if (dir != PCI_DMA_TODEVICE) {
617 if (direction != PCI_DMA_TODEVICE) {
618 mmu_inval_dma_area((unsigned long)phys_to_virt(ba), 600 mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
619 (size + PAGE_SIZE-1) & PAGE_MASK); 601 (size + PAGE_SIZE-1) & PAGE_MASK);
620 } 602 }
621} 603}
622EXPORT_SYMBOL(pci_dma_sync_single_for_device);
623 604
624/* Make physical memory consistent for a set of streaming 605/* Make physical memory consistent for a set of streaming
625 * mode DMA translations after a transfer. 606 * mode DMA translations after a transfer.
@@ -627,13 +608,13 @@ EXPORT_SYMBOL(pci_dma_sync_single_for_device);
627 * The same as pci_dma_sync_single_* but for a scatter-gather list, 608 * The same as pci_dma_sync_single_* but for a scatter-gather list,
628 * same rules and usage. 609 * same rules and usage.
629 */ 610 */
630void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction) 611static void pci32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
612 int nents, enum dma_data_direction dir)
631{ 613{
632 struct scatterlist *sg; 614 struct scatterlist *sg;
633 int n; 615 int n;
634 616
635 BUG_ON(direction == PCI_DMA_NONE); 617 if (dir != PCI_DMA_TODEVICE) {
636 if (direction != PCI_DMA_TODEVICE) {
637 for_each_sg(sgl, sg, nents, n) { 618 for_each_sg(sgl, sg, nents, n) {
638 BUG_ON(page_address(sg_page(sg)) == NULL); 619 BUG_ON(page_address(sg_page(sg)) == NULL);
639 mmu_inval_dma_area( 620 mmu_inval_dma_area(
@@ -642,15 +623,14 @@ void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int
642 } 623 }
643 } 624 }
644} 625}
645EXPORT_SYMBOL(pci_dma_sync_sg_for_cpu);
646 626
647void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction) 627static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *sgl,
628 int nents, enum dma_data_direction dir)
648{ 629{
649 struct scatterlist *sg; 630 struct scatterlist *sg;
650 int n; 631 int n;
651 632
652 BUG_ON(direction == PCI_DMA_NONE); 633 if (dir != PCI_DMA_TODEVICE) {
653 if (direction != PCI_DMA_TODEVICE) {
654 for_each_sg(sgl, sg, nents, n) { 634 for_each_sg(sgl, sg, nents, n) {
655 BUG_ON(page_address(sg_page(sg)) == NULL); 635 BUG_ON(page_address(sg_page(sg)) == NULL);
656 mmu_inval_dma_area( 636 mmu_inval_dma_area(
@@ -659,9 +639,49 @@ void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl,
659 } 639 }
660 } 640 }
661} 641}
662EXPORT_SYMBOL(pci_dma_sync_sg_for_device); 642
643struct dma_map_ops pci32_dma_ops = {
644 .alloc_coherent = pci32_alloc_coherent,
645 .free_coherent = pci32_free_coherent,
646 .map_page = pci32_map_page,
647 .map_sg = pci32_map_sg,
648 .unmap_sg = pci32_unmap_sg,
649 .sync_single_for_cpu = pci32_sync_single_for_cpu,
650 .sync_single_for_device = pci32_sync_single_for_device,
651 .sync_sg_for_cpu = pci32_sync_sg_for_cpu,
652 .sync_sg_for_device = pci32_sync_sg_for_device,
653};
654EXPORT_SYMBOL(pci32_dma_ops);
655
663#endif /* CONFIG_PCI */ 656#endif /* CONFIG_PCI */
664 657
658/*
659 * Return whether the given PCI device DMA address mask can be
660 * supported properly. For example, if your device can only drive the
661 * low 24-bits during PCI bus mastering, then you would pass
662 * 0x00ffffff as the mask to this function.
663 */
664int dma_supported(struct device *dev, u64 mask)
665{
666#ifdef CONFIG_PCI
667 if (dev->bus == &pci_bus_type)
668 return 1;
669#endif
670 return 0;
671}
672EXPORT_SYMBOL(dma_supported);
673
674int dma_set_mask(struct device *dev, u64 dma_mask)
675{
676#ifdef CONFIG_PCI
677 if (dev->bus == &pci_bus_type)
678 return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
679#endif
680 return -EOPNOTSUPP;
681}
682EXPORT_SYMBOL(dma_set_mask);
683
684
665#ifdef CONFIG_PROC_FS 685#ifdef CONFIG_PROC_FS
666 686
667static int sparc_io_proc_show(struct seq_file *m, void *v) 687static int sparc_io_proc_show(struct seq_file *m, void *v)
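[Editor's note] The hunks above replace the individually exported pci_dma_sync_* functions with a single dma_map_ops-style table (pci32_dma_ops) and add generic dma_supported()/dma_set_mask() helpers that dispatch on the bus type. A minimal user-space sketch of that dispatch pattern follows; the struct and function names (dma_ops_model, dev_model, pci32_sync_for_cpu) and the addresses are invented for illustration and are not part of the patch. Note how a callback left NULL in the table is simply skipped, which is what lets architectures drop empty stubs.

/*
 * Illustrative, user-space model of the sparc32 conversion above:
 * per-function exports are replaced by one ops table, and a missing
 * callback means "nothing to do". All names here are hypothetical.
 */
#include <stdio.h>
#include <stddef.h>

struct dev_model { unsigned long long dma_mask; };

struct dma_ops_model {
	void (*sync_single_for_cpu)(struct dev_model *dev,
				    unsigned long addr, size_t size);
	void (*sync_single_for_device)(struct dev_model *dev,
				       unsigned long addr, size_t size);
};

static void pci32_sync_for_cpu(struct dev_model *dev,
			       unsigned long addr, size_t size)
{
	printf("invalidate CPU view of %#lx (+%zu bytes)\n", addr, size);
}

/* Plays the role of pci32_dma_ops in the patch. */
static struct dma_ops_model pci32_ops = {
	.sync_single_for_cpu = pci32_sync_for_cpu,
	/* .sync_single_for_device left NULL: treated as a no-op below */
};

static void dma_sync_single_for_cpu(struct dma_ops_model *ops,
				    struct dev_model *dev,
				    unsigned long addr, size_t size)
{
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size);
}

int main(void)
{
	struct dev_model d = { .dma_mask = 0xffffffffULL };

	dma_sync_single_for_cpu(&pci32_ops, &d, 0x1000, 4096);
	return 0;
}
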
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index f0ee79055409..8daab33fc17d 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -886,7 +886,7 @@ void notrace init_irqwork_curcpu(void)
886 * Therefore you cannot make any OBP calls, not even prom_printf, 886 * Therefore you cannot make any OBP calls, not even prom_printf,
887 * from these two routines. 887 * from these two routines.
888 */ 888 */
889static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask) 889static void __cpuinit notrace register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask)
890{ 890{
891 unsigned long num_entries = (qmask + 1) / 64; 891 unsigned long num_entries = (qmask + 1) / 64;
892 unsigned long status; 892 unsigned long status;
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
index 391a6ed9a184..378eb53e0776 100644
--- a/arch/sparc/kernel/nmi.c
+++ b/arch/sparc/kernel/nmi.c
@@ -113,7 +113,7 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
113 } 113 }
114 if (!touched && __get_cpu_var(last_irq_sum) == sum) { 114 if (!touched && __get_cpu_var(last_irq_sum) == sum) {
115 local_inc(&__get_cpu_var(alert_counter)); 115 local_inc(&__get_cpu_var(alert_counter));
116 if (local_read(&__get_cpu_var(alert_counter)) == 5 * nmi_hz) 116 if (local_read(&__get_cpu_var(alert_counter)) == 30 * nmi_hz)
117 die_nmi("BUG: NMI Watchdog detected LOCKUP", 117 die_nmi("BUG: NMI Watchdog detected LOCKUP",
118 regs, panic_on_timeout); 118 regs, panic_on_timeout);
119 } else { 119 } else {
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index 57859ad23547..c68648662802 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -1039,7 +1039,7 @@ static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit)
1039 pci_dev_put(ali_isa_bridge); 1039 pci_dev_put(ali_isa_bridge);
1040} 1040}
1041 1041
1042int pci_dma_supported(struct pci_dev *pdev, u64 device_mask) 1042int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask)
1043{ 1043{
1044 u64 dma_addr_mask; 1044 u64 dma_addr_mask;
1045 1045
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index 2485eaa23101..23c33ff9c31e 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -232,7 +232,8 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
232 232
233static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page, 233static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
234 unsigned long offset, size_t sz, 234 unsigned long offset, size_t sz,
235 enum dma_data_direction direction) 235 enum dma_data_direction direction,
236 struct dma_attrs *attrs)
236{ 237{
237 struct iommu *iommu; 238 struct iommu *iommu;
238 unsigned long flags, npages, oaddr; 239 unsigned long flags, npages, oaddr;
@@ -296,7 +297,8 @@ iommu_map_fail:
296} 297}
297 298
298static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr, 299static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
299 size_t sz, enum dma_data_direction direction) 300 size_t sz, enum dma_data_direction direction,
301 struct dma_attrs *attrs)
300{ 302{
301 struct pci_pbm_info *pbm; 303 struct pci_pbm_info *pbm;
302 struct iommu *iommu; 304 struct iommu *iommu;
@@ -336,7 +338,8 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
336} 338}
337 339
338static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, 340static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
339 int nelems, enum dma_data_direction direction) 341 int nelems, enum dma_data_direction direction,
342 struct dma_attrs *attrs)
340{ 343{
341 struct scatterlist *s, *outs, *segstart; 344 struct scatterlist *s, *outs, *segstart;
342 unsigned long flags, handle, prot; 345 unsigned long flags, handle, prot;
@@ -478,7 +481,8 @@ iommu_map_failed:
478} 481}
479 482
480static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist, 483static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
481 int nelems, enum dma_data_direction direction) 484 int nelems, enum dma_data_direction direction,
485 struct dma_attrs *attrs)
482{ 486{
483 struct pci_pbm_info *pbm; 487 struct pci_pbm_info *pbm;
484 struct scatterlist *sg; 488 struct scatterlist *sg;
@@ -521,29 +525,13 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
521 spin_unlock_irqrestore(&iommu->lock, flags); 525 spin_unlock_irqrestore(&iommu->lock, flags);
522} 526}
523 527
524static void dma_4v_sync_single_for_cpu(struct device *dev, 528static struct dma_map_ops sun4v_dma_ops = {
525 dma_addr_t bus_addr, size_t sz,
526 enum dma_data_direction direction)
527{
528 /* Nothing to do... */
529}
530
531static void dma_4v_sync_sg_for_cpu(struct device *dev,
532 struct scatterlist *sglist, int nelems,
533 enum dma_data_direction direction)
534{
535 /* Nothing to do... */
536}
537
538static const struct dma_ops sun4v_dma_ops = {
539 .alloc_coherent = dma_4v_alloc_coherent, 529 .alloc_coherent = dma_4v_alloc_coherent,
540 .free_coherent = dma_4v_free_coherent, 530 .free_coherent = dma_4v_free_coherent,
541 .map_page = dma_4v_map_page, 531 .map_page = dma_4v_map_page,
542 .unmap_page = dma_4v_unmap_page, 532 .unmap_page = dma_4v_unmap_page,
543 .map_sg = dma_4v_map_sg, 533 .map_sg = dma_4v_map_sg,
544 .unmap_sg = dma_4v_unmap_sg, 534 .unmap_sg = dma_4v_unmap_sg,
545 .sync_single_for_cpu = dma_4v_sync_single_for_cpu,
546 .sync_sg_for_cpu = dma_4v_sync_sg_for_cpu,
547}; 535};
548 536
549static void __devinit pci_sun4v_scan_bus(struct pci_pbm_info *pbm, 537static void __devinit pci_sun4v_scan_bus(struct pci_pbm_info *pbm,
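[Editor's note] The dma_4v_* callbacks above now take a struct dma_attrs * so per-mapping attributes can be threaded from callers down to the implementation, and the empty sync stubs disappear because absent callbacks are skipped by the common code. A small sketch of threading an optional attribute argument is below; the attribute bit and every identifier are made up for the example and do not come from the patch.

/*
 * Minimal, user-space sketch of passing an optional attribute argument
 * through a mapping callback, as the new dma_attrs parameter does.
 */
#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

struct attrs_model { unsigned long bits; };
#define ATTR_SKIP_CPU_SYNC (1UL << 0)   /* hypothetical attribute */

static bool attrs_test(const struct attrs_model *attrs, unsigned long bit)
{
	return attrs && (attrs->bits & bit);   /* NULL means "no attributes" */
}

static unsigned long map_page_model(unsigned long phys, size_t size,
				    const struct attrs_model *attrs)
{
	if (!attrs_test(attrs, ATTR_SKIP_CPU_SYNC))
		printf("sync %zu bytes at %#lx before mapping\n", size, phys);
	return phys;          /* identity "bus address" for the sketch */
}

int main(void)
{
	struct attrs_model a = { .bits = ATTR_SKIP_CPU_SYNC };

	map_page_model(0x2000, 4096, NULL);  /* default behaviour */
	map_page_model(0x3000, 4096, &a);    /* caller opts out of the sync */
	return 0;
}
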
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index 4041f94e7724..18d67854a1b8 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -251,7 +251,7 @@ static void __global_reg_poll(struct global_reg_snapshot *gp)
251 } 251 }
252} 252}
253 253
254void __trigger_all_cpu_backtrace(void) 254void arch_trigger_all_cpu_backtrace(void)
255{ 255{
256 struct thread_info *tp = current_thread_info(); 256 struct thread_info *tp = current_thread_info();
257 struct pt_regs *regs = get_irq_regs(); 257 struct pt_regs *regs = get_irq_regs();
@@ -304,7 +304,7 @@ void __trigger_all_cpu_backtrace(void)
304 304
305static void sysrq_handle_globreg(int key, struct tty_struct *tty) 305static void sysrq_handle_globreg(int key, struct tty_struct *tty)
306{ 306{
307 __trigger_all_cpu_backtrace(); 307 arch_trigger_all_cpu_backtrace();
308} 308}
309 309
310static struct sysrq_key_op sparc_globalreg_op = { 310static struct sysrq_key_op sparc_globalreg_op = {
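[Editor's note] The rename to arch_trigger_all_cpu_backtrace() lets generic code (sysrq, NMI watchdog) call one well-known hook while architectures that implement it advertise the fact with a macro, as the x86 nmi.h hunk later in this diff does. The sketch below only illustrates that define-if-implemented pattern; the printf body is invented and stands in for the real register-snapshot work.

/*
 * Sketch of the override pattern: an architecture that implements the
 * hook defines the macro to itself, so generic code can fall back to a
 * stub when the macro is absent.
 */
#include <stdio.h>

#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
static void arch_trigger_all_cpu_backtrace(void)
{
	printf("capture global registers on every CPU\n");
}

#ifndef arch_trigger_all_cpu_backtrace
static void arch_trigger_all_cpu_backtrace(void) { /* no arch support */ }
#endif

int main(void)
{
	arch_trigger_all_cpu_backtrace();   /* what sysrq-l ends up calling */
	return 0;
}
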
diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
index 181d069a2d44..7ce1a1005b1d 100644
--- a/arch/sparc/kernel/signal_32.c
+++ b/arch/sparc/kernel/signal_32.c
@@ -590,6 +590,8 @@ void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0,
590 if (thread_info_flags & _TIF_NOTIFY_RESUME) { 590 if (thread_info_flags & _TIF_NOTIFY_RESUME) {
591 clear_thread_flag(TIF_NOTIFY_RESUME); 591 clear_thread_flag(TIF_NOTIFY_RESUME);
592 tracehook_notify_resume(regs); 592 tracehook_notify_resume(regs);
593 if (current->replacement_session_keyring)
594 key_replace_session_keyring();
593 } 595 }
594} 596}
595 597
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
index ec82d76dc6f2..647afbda7ae1 100644
--- a/arch/sparc/kernel/signal_64.c
+++ b/arch/sparc/kernel/signal_64.c
@@ -613,5 +613,8 @@ void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, unsigned long
613 if (thread_info_flags & _TIF_NOTIFY_RESUME) { 613 if (thread_info_flags & _TIF_NOTIFY_RESUME) {
614 clear_thread_flag(TIF_NOTIFY_RESUME); 614 clear_thread_flag(TIF_NOTIFY_RESUME);
615 tracehook_notify_resume(regs); 615 tracehook_notify_resume(regs);
616 if (current->replacement_session_keyring)
617 key_replace_session_keyring();
616 } 618 }
617} 619}
620
diff --git a/arch/sparc/prom/misc_64.c b/arch/sparc/prom/misc_64.c
index eedffb4fec2d..39fc6af21b7c 100644
--- a/arch/sparc/prom/misc_64.c
+++ b/arch/sparc/prom/misc_64.c
@@ -88,7 +88,7 @@ void prom_cmdline(void)
88/* Drop into the prom, but completely terminate the program. 88/* Drop into the prom, but completely terminate the program.
89 * No chance of continuing. 89 * No chance of continuing.
90 */ 90 */
91void prom_halt(void) 91void notrace prom_halt(void)
92{ 92{
93#ifdef CONFIG_SUN_LDOMS 93#ifdef CONFIG_SUN_LDOMS
94 if (ldom_domaining_enabled) 94 if (ldom_domaining_enabled)
diff --git a/arch/sparc/prom/printf.c b/arch/sparc/prom/printf.c
index 660943ee4c2a..ca869266b9f3 100644
--- a/arch/sparc/prom/printf.c
+++ b/arch/sparc/prom/printf.c
@@ -14,14 +14,14 @@
14 */ 14 */
15 15
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/compiler.h>
17 18
18#include <asm/openprom.h> 19#include <asm/openprom.h>
19#include <asm/oplib.h> 20#include <asm/oplib.h>
20 21
21static char ppbuf[1024]; 22static char ppbuf[1024];
22 23
23void 24void notrace prom_write(const char *buf, unsigned int n)
24prom_write(const char *buf, unsigned int n)
25{ 25{
26 char ch; 26 char ch;
27 27
@@ -33,8 +33,7 @@ prom_write(const char *buf, unsigned int n)
33 } 33 }
34} 34}
35 35
36void 36void notrace prom_printf(const char *fmt, ...)
37prom_printf(const char *fmt, ...)
38{ 37{
39 va_list args; 38 va_list args;
40 int i; 39 int i;
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 13ffa5df37d7..fc20fdc0f7f2 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -38,7 +38,7 @@ config X86
38 select HAVE_FUNCTION_GRAPH_FP_TEST 38 select HAVE_FUNCTION_GRAPH_FP_TEST
39 select HAVE_FUNCTION_TRACE_MCOUNT_TEST 39 select HAVE_FUNCTION_TRACE_MCOUNT_TEST
40 select HAVE_FTRACE_NMI_ENTER if DYNAMIC_FTRACE 40 select HAVE_FTRACE_NMI_ENTER if DYNAMIC_FTRACE
41 select HAVE_FTRACE_SYSCALLS 41 select HAVE_SYSCALL_TRACEPOINTS
42 select HAVE_KVM 42 select HAVE_KVM
43 select HAVE_ARCH_KGDB 43 select HAVE_ARCH_KGDB
44 select HAVE_ARCH_TRACEHOOK 44 select HAVE_ARCH_TRACEHOOK
@@ -586,7 +586,6 @@ config GART_IOMMU
586 bool "GART IOMMU support" if EMBEDDED 586 bool "GART IOMMU support" if EMBEDDED
587 default y 587 default y
588 select SWIOTLB 588 select SWIOTLB
589 select AGP
590 depends on X86_64 && PCI 589 depends on X86_64 && PCI
591 ---help--- 590 ---help---
592 Support for full DMA access of devices with 32bit memory access only 591 Support for full DMA access of devices with 32bit memory access only
diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig
index edb992ebef92..d28fad19654a 100644
--- a/arch/x86/configs/i386_defconfig
+++ b/arch/x86/configs/i386_defconfig
@@ -2355,7 +2355,7 @@ CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
2355CONFIG_HAVE_DYNAMIC_FTRACE=y 2355CONFIG_HAVE_DYNAMIC_FTRACE=y
2356CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y 2356CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
2357CONFIG_HAVE_HW_BRANCH_TRACER=y 2357CONFIG_HAVE_HW_BRANCH_TRACER=y
2358CONFIG_HAVE_FTRACE_SYSCALLS=y 2358CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
2359CONFIG_RING_BUFFER=y 2359CONFIG_RING_BUFFER=y
2360CONFIG_TRACING=y 2360CONFIG_TRACING=y
2361CONFIG_TRACING_SUPPORT=y 2361CONFIG_TRACING_SUPPORT=y
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
index cee1dd2e69b2..6c86acd847a4 100644
--- a/arch/x86/configs/x86_64_defconfig
+++ b/arch/x86/configs/x86_64_defconfig
@@ -2329,7 +2329,7 @@ CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
2329CONFIG_HAVE_DYNAMIC_FTRACE=y 2329CONFIG_HAVE_DYNAMIC_FTRACE=y
2330CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y 2330CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
2331CONFIG_HAVE_HW_BRANCH_TRACER=y 2331CONFIG_HAVE_HW_BRANCH_TRACER=y
2332CONFIG_HAVE_FTRACE_SYSCALLS=y 2332CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
2333CONFIG_RING_BUFFER=y 2333CONFIG_RING_BUFFER=y
2334CONFIG_TRACING=y 2334CONFIG_TRACING=y
2335CONFIG_TRACING_SUPPORT=y 2335CONFIG_TRACING_SUPPORT=y
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index c580c5ec1cad..d3ec8d588d4b 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -636,7 +636,7 @@ static int __init aesni_init(void)
636 int err; 636 int err;
637 637
638 if (!cpu_has_aes) { 638 if (!cpu_has_aes) {
639 printk(KERN_ERR "Intel AES-NI instructions are not detected.\n"); 639 printk(KERN_INFO "Intel AES-NI instructions are not detected.\n");
640 return -ENODEV; 640 return -ENODEV;
641 } 641 }
642 if ((err = crypto_register_alg(&aesni_alg))) 642 if ((err = crypto_register_alg(&aesni_alg)))
diff --git a/arch/x86/include/asm/amd_iommu.h b/arch/x86/include/asm/amd_iommu.h
index bdf96f119f06..ac95995b7bad 100644
--- a/arch/x86/include/asm/amd_iommu.h
+++ b/arch/x86/include/asm/amd_iommu.h
@@ -25,6 +25,7 @@
25#ifdef CONFIG_AMD_IOMMU 25#ifdef CONFIG_AMD_IOMMU
26extern int amd_iommu_init(void); 26extern int amd_iommu_init(void);
27extern int amd_iommu_init_dma_ops(void); 27extern int amd_iommu_init_dma_ops(void);
28extern int amd_iommu_init_passthrough(void);
28extern void amd_iommu_detect(void); 29extern void amd_iommu_detect(void);
29extern irqreturn_t amd_iommu_int_handler(int irq, void *data); 30extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
30extern void amd_iommu_flush_all_domains(void); 31extern void amd_iommu_flush_all_domains(void);
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h
index 0c878caaa0a2..2a2cc7a78a81 100644
--- a/arch/x86/include/asm/amd_iommu_types.h
+++ b/arch/x86/include/asm/amd_iommu_types.h
@@ -143,22 +143,29 @@
143#define EVT_BUFFER_SIZE 8192 /* 512 entries */ 143#define EVT_BUFFER_SIZE 8192 /* 512 entries */
144#define EVT_LEN_MASK (0x9ULL << 56) 144#define EVT_LEN_MASK (0x9ULL << 56)
145 145
146#define PAGE_MODE_NONE 0x00
146#define PAGE_MODE_1_LEVEL 0x01 147#define PAGE_MODE_1_LEVEL 0x01
147#define PAGE_MODE_2_LEVEL 0x02 148#define PAGE_MODE_2_LEVEL 0x02
148#define PAGE_MODE_3_LEVEL 0x03 149#define PAGE_MODE_3_LEVEL 0x03
149 150#define PAGE_MODE_4_LEVEL 0x04
150#define IOMMU_PDE_NL_0 0x000ULL 151#define PAGE_MODE_5_LEVEL 0x05
151#define IOMMU_PDE_NL_1 0x200ULL 152#define PAGE_MODE_6_LEVEL 0x06
152#define IOMMU_PDE_NL_2 0x400ULL 153
153#define IOMMU_PDE_NL_3 0x600ULL 154#define PM_LEVEL_SHIFT(x) (12 + ((x) * 9))
154 155#define PM_LEVEL_SIZE(x) (((x) < 6) ? \
155#define IOMMU_PTE_L2_INDEX(address) (((address) >> 30) & 0x1ffULL) 156 ((1ULL << PM_LEVEL_SHIFT((x))) - 1): \
156#define IOMMU_PTE_L1_INDEX(address) (((address) >> 21) & 0x1ffULL) 157 (0xffffffffffffffffULL))
157#define IOMMU_PTE_L0_INDEX(address) (((address) >> 12) & 0x1ffULL) 158#define PM_LEVEL_INDEX(x, a) (((a) >> PM_LEVEL_SHIFT((x))) & 0x1ffULL)
158 159#define PM_LEVEL_ENC(x) (((x) << 9) & 0xe00ULL)
159#define IOMMU_MAP_SIZE_L1 (1ULL << 21) 160#define PM_LEVEL_PDE(x, a) ((a) | PM_LEVEL_ENC((x)) | \
160#define IOMMU_MAP_SIZE_L2 (1ULL << 30) 161 IOMMU_PTE_P | IOMMU_PTE_IR | IOMMU_PTE_IW)
161#define IOMMU_MAP_SIZE_L3 (1ULL << 39) 162#define PM_PTE_LEVEL(pte) (((pte) >> 9) & 0x7ULL)
163
164#define PM_MAP_4k 0
165#define PM_ADDR_MASK 0x000ffffffffff000ULL
166#define PM_MAP_MASK(lvl) (PM_ADDR_MASK & \
167 (~((1ULL << (12 + ((lvl) * 9))) - 1)))
168#define PM_ALIGNED(lvl, addr) ((PM_MAP_MASK(lvl) & (addr)) == (addr))
162 169
163#define IOMMU_PTE_P (1ULL << 0) 170#define IOMMU_PTE_P (1ULL << 0)
164#define IOMMU_PTE_TV (1ULL << 1) 171#define IOMMU_PTE_TV (1ULL << 1)
@@ -167,11 +174,6 @@
167#define IOMMU_PTE_IR (1ULL << 61) 174#define IOMMU_PTE_IR (1ULL << 61)
168#define IOMMU_PTE_IW (1ULL << 62) 175#define IOMMU_PTE_IW (1ULL << 62)
169 176
170#define IOMMU_L1_PDE(address) \
171 ((address) | IOMMU_PDE_NL_1 | IOMMU_PTE_P | IOMMU_PTE_IR | IOMMU_PTE_IW)
172#define IOMMU_L2_PDE(address) \
173 ((address) | IOMMU_PDE_NL_2 | IOMMU_PTE_P | IOMMU_PTE_IR | IOMMU_PTE_IW)
174
175#define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL) 177#define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL)
176#define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_P) 178#define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_P)
177#define IOMMU_PTE_PAGE(pte) (phys_to_virt((pte) & IOMMU_PAGE_MASK)) 179#define IOMMU_PTE_PAGE(pte) (phys_to_virt((pte) & IOMMU_PAGE_MASK))
@@ -194,11 +196,14 @@
194#define PD_DMA_OPS_MASK (1UL << 0) /* domain used for dma_ops */ 196#define PD_DMA_OPS_MASK (1UL << 0) /* domain used for dma_ops */
195#define PD_DEFAULT_MASK (1UL << 1) /* domain is a default dma_ops 197#define PD_DEFAULT_MASK (1UL << 1) /* domain is a default dma_ops
196 domain for an IOMMU */ 198 domain for an IOMMU */
199#define PD_PASSTHROUGH_MASK (1UL << 2) /* domain has no page
200 translation */
201
197extern bool amd_iommu_dump; 202extern bool amd_iommu_dump;
198#define DUMP_printk(format, arg...) \ 203#define DUMP_printk(format, arg...) \
199 do { \ 204 do { \
200 if (amd_iommu_dump) \ 205 if (amd_iommu_dump) \
201 printk(KERN_INFO "AMD IOMMU: " format, ## arg); \ 206 printk(KERN_INFO "AMD-Vi: " format, ## arg); \
202 } while(0); 207 } while(0);
203 208
204/* 209/*
@@ -226,6 +231,7 @@ struct protection_domain {
226 int mode; /* paging mode (0-6 levels) */ 231 int mode; /* paging mode (0-6 levels) */
227 u64 *pt_root; /* page table root pointer */ 232 u64 *pt_root; /* page table root pointer */
228 unsigned long flags; /* flags to find out type of domain */ 233 unsigned long flags; /* flags to find out type of domain */
234 bool updated; /* complete domain flush required */
229 unsigned dev_cnt; /* devices assigned to this domain */ 235 unsigned dev_cnt; /* devices assigned to this domain */
230 void *priv; /* private data */ 236 void *priv; /* private data */
231}; 237};
@@ -337,6 +343,9 @@ struct amd_iommu {
337 /* if one, we need to send a completion wait command */ 343 /* if one, we need to send a completion wait command */
338 bool need_sync; 344 bool need_sync;
339 345
346 /* becomes true if a command buffer reset is running */
347 bool reset_in_progress;
348
340 /* default dma_ops domain for that IOMMU */ 349 /* default dma_ops domain for that IOMMU */
341 struct dma_ops_domain *default_dom; 350 struct dma_ops_domain *default_dom;
342}; 351};
@@ -457,4 +466,7 @@ static inline void amd_iommu_stats_init(void) { }
457 466
458#endif /* CONFIG_AMD_IOMMU_STATS */ 467#endif /* CONFIG_AMD_IOMMU_STATS */
459 468
469/* some function prototypes */
470extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu);
471
460#endif /* _ASM_X86_AMD_IOMMU_TYPES_H */ 472#endif /* _ASM_X86_AMD_IOMMU_TYPES_H */
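[Editor's note] The new PM_LEVEL_* macros generalize the old fixed three-level L0/L1/L2 index macros: each page-table level covers 9 bits of the I/O virtual address, starting 12 bits (the 4 KiB page offset) above the bottom. The stand-alone program below just exercises that arithmetic; the example address is arbitrary and the program is not part of the patch.

/*
 * Stand-alone illustration of the PM_LEVEL_* arithmetic introduced above.
 */
#include <stdio.h>
#include <stdint.h>

#define PM_LEVEL_SHIFT(x)    (12 + ((x) * 9))
#define PM_LEVEL_INDEX(x, a) (((a) >> PM_LEVEL_SHIFT(x)) & 0x1ffULL)

int main(void)
{
	uint64_t addr = 0x0000123456789000ULL;   /* arbitrary example address */
	int level;

	for (level = 5; level >= 0; level--)
		printf("level %d: shift %2d index %3llu\n",
		       level, PM_LEVEL_SHIFT(level),
		       (unsigned long long)PM_LEVEL_INDEX(level, addr));
	return 0;
}
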
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 1c3f9435f1c9..0ee770d23d0e 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -55,6 +55,24 @@ extern int dma_set_mask(struct device *dev, u64 mask);
55extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, 55extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
56 dma_addr_t *dma_addr, gfp_t flag); 56 dma_addr_t *dma_addr, gfp_t flag);
57 57
58static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
59{
60 if (!dev->dma_mask)
61 return 0;
62
63 return addr + size <= *dev->dma_mask;
64}
65
66static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
67{
68 return paddr;
69}
70
71static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
72{
73 return daddr;
74}
75
58static inline void 76static inline void
59dma_cache_sync(struct device *dev, void *vaddr, size_t size, 77dma_cache_sync(struct device *dev, void *vaddr, size_t size,
60 enum dma_data_direction dir) 78 enum dma_data_direction dir)
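[Editor's note] The dma_capable()/phys_to_dma()/dma_to_phys() helpers added above let common code (e.g. swiotlb) decide whether a buffer is directly reachable by a device or must be bounced. A user-space model of that check is below, assuming a 1:1 physical/DMA address mapping as in the x86 stubs; the 24-bit mask and addresses are chosen only for the example.

/*
 * Model of the dma_capable() check: an address is usable for DMA only
 * if the whole buffer fits under the device's DMA mask.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

struct dev_model { uint64_t dma_mask; };

static bool dma_capable_model(const struct dev_model *dev,
			      uint64_t addr, size_t size)
{
	return addr + size <= dev->dma_mask;
}

int main(void)
{
	struct dev_model d = { .dma_mask = 0x00ffffffULL };  /* 24-bit device */

	printf("%d\n", dma_capable_model(&d, 0x00800000ULL, 4096)); /* 1: fits   */
	printf("%d\n", dma_capable_model(&d, 0x01000000ULL, 4096)); /* 0: bounce */
	return 0;
}
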
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index bd2c6511c887..db24c2278be0 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -28,13 +28,6 @@
28 28
29#endif 29#endif
30 30
31/* FIXME: I don't want to stay hardcoded */
32#ifdef CONFIG_X86_64
33# define FTRACE_SYSCALL_MAX 296
34#else
35# define FTRACE_SYSCALL_MAX 333
36#endif
37
38#ifdef CONFIG_FUNCTION_TRACER 31#ifdef CONFIG_FUNCTION_TRACER
39#define MCOUNT_ADDR ((long)(mcount)) 32#define MCOUNT_ADDR ((long)(mcount))
40#define MCOUNT_INSN_SIZE 5 /* sizeof mcount call */ 33#define MCOUNT_INSN_SIZE 5 /* sizeof mcount call */
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
index c86e5ed4af51..e63cf7d441e1 100644
--- a/arch/x86/include/asm/nmi.h
+++ b/arch/x86/include/asm/nmi.h
@@ -45,8 +45,8 @@ extern int proc_nmi_enabled(struct ctl_table *, int , struct file *,
45 void __user *, size_t *, loff_t *); 45 void __user *, size_t *, loff_t *);
46extern int unknown_nmi_panic; 46extern int unknown_nmi_panic;
47 47
48void __trigger_all_cpu_backtrace(void); 48void arch_trigger_all_cpu_backtrace(void);
49#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace() 49#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
50 50
51static inline void localise_nmi_watchdog(void) 51static inline void localise_nmi_watchdog(void)
52{ 52{
diff --git a/arch/x86/include/asm/perf_counter.h b/arch/x86/include/asm/perf_counter.h
index fa64e401589d..e7b7c938ae27 100644
--- a/arch/x86/include/asm/perf_counter.h
+++ b/arch/x86/include/asm/perf_counter.h
@@ -84,6 +84,16 @@ union cpuid10_edx {
84#define MSR_ARCH_PERFMON_FIXED_CTR2 0x30b 84#define MSR_ARCH_PERFMON_FIXED_CTR2 0x30b
85#define X86_PMC_IDX_FIXED_BUS_CYCLES (X86_PMC_IDX_FIXED + 2) 85#define X86_PMC_IDX_FIXED_BUS_CYCLES (X86_PMC_IDX_FIXED + 2)
86 86
87/*
88 * We model BTS tracing as another fixed-mode PMC.
89 *
90 * We choose a value in the middle of the fixed counter range, since lower
91 * values are used by actual fixed counters and higher values are used
92 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
93 */
94#define X86_PMC_IDX_FIXED_BTS (X86_PMC_IDX_FIXED + 16)
95
96
87#ifdef CONFIG_PERF_COUNTERS 97#ifdef CONFIG_PERF_COUNTERS
88extern void init_hw_perf_counters(void); 98extern void init_hw_perf_counters(void);
89extern void perf_counters_lapic_init(void); 99extern void perf_counters_lapic_init(void);
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index fad7d40b75f8..6f7786aea4fc 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -95,7 +95,7 @@ struct thread_info {
95#define TIF_DEBUGCTLMSR 25 /* uses thread_struct.debugctlmsr */ 95#define TIF_DEBUGCTLMSR 25 /* uses thread_struct.debugctlmsr */
96#define TIF_DS_AREA_MSR 26 /* uses thread_struct.ds_area_msr */ 96#define TIF_DS_AREA_MSR 26 /* uses thread_struct.ds_area_msr */
97#define TIF_LAZY_MMU_UPDATES 27 /* task is updating the mmu lazily */ 97#define TIF_LAZY_MMU_UPDATES 27 /* task is updating the mmu lazily */
98#define TIF_SYSCALL_FTRACE 28 /* for ftrace syscall instrumentation */ 98#define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
99 99
100#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) 100#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
101#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) 101#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
@@ -118,17 +118,17 @@ struct thread_info {
118#define _TIF_DEBUGCTLMSR (1 << TIF_DEBUGCTLMSR) 118#define _TIF_DEBUGCTLMSR (1 << TIF_DEBUGCTLMSR)
119#define _TIF_DS_AREA_MSR (1 << TIF_DS_AREA_MSR) 119#define _TIF_DS_AREA_MSR (1 << TIF_DS_AREA_MSR)
120#define _TIF_LAZY_MMU_UPDATES (1 << TIF_LAZY_MMU_UPDATES) 120#define _TIF_LAZY_MMU_UPDATES (1 << TIF_LAZY_MMU_UPDATES)
121#define _TIF_SYSCALL_FTRACE (1 << TIF_SYSCALL_FTRACE) 121#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
122 122
123/* work to do in syscall_trace_enter() */ 123/* work to do in syscall_trace_enter() */
124#define _TIF_WORK_SYSCALL_ENTRY \ 124#define _TIF_WORK_SYSCALL_ENTRY \
125 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_FTRACE | \ 125 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
126 _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | _TIF_SINGLESTEP) 126 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)
127 127
128/* work to do in syscall_trace_leave() */ 128/* work to do in syscall_trace_leave() */
129#define _TIF_WORK_SYSCALL_EXIT \ 129#define _TIF_WORK_SYSCALL_EXIT \
130 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \ 130 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
131 _TIF_SYSCALL_FTRACE) 131 _TIF_SYSCALL_TRACEPOINT)
132 132
133/* work to do on interrupt/exception return */ 133/* work to do on interrupt/exception return */
134#define _TIF_WORK_MASK \ 134#define _TIF_WORK_MASK \
@@ -137,7 +137,8 @@ struct thread_info {
137 _TIF_SINGLESTEP|_TIF_SECCOMP|_TIF_SYSCALL_EMU)) 137 _TIF_SINGLESTEP|_TIF_SECCOMP|_TIF_SYSCALL_EMU))
138 138
139/* work to do on any return to user space */ 139/* work to do on any return to user space */
140#define _TIF_ALLWORK_MASK ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_FTRACE) 140#define _TIF_ALLWORK_MASK \
141 ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT)
141 142
142/* Only used for 64 bit */ 143/* Only used for 64 bit */
143#define _TIF_DO_NOTIFY_MASK \ 144#define _TIF_DO_NOTIFY_MASK \
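[Editor's note] The TIF_SYSCALL_FTRACE to TIF_SYSCALL_TRACEPOINT rename above also moves the bit into the syscall entry/exit work masks under its new name. The sketch below shows how such an aggregate mask is consulted; the bit numbers are illustrative and do not reflect the real x86 thread_info layout.

/*
 * Sketch of the syscall-entry work-mask test; values are illustrative.
 */
#include <stdio.h>

#define TIF_SYSCALL_TRACE       0
#define TIF_SYSCALL_AUDIT       1
#define TIF_SYSCALL_TRACEPOINT  2

#define _TIF_SYSCALL_TRACE      (1 << TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT      (1 << TIF_SYSCALL_AUDIT)
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)

#define _TIF_WORK_SYSCALL_ENTRY \
	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)

int main(void)
{
	unsigned long flags = _TIF_SYSCALL_TRACEPOINT;  /* tracepoints enabled */

	if (flags & _TIF_WORK_SYSCALL_ENTRY)
		printf("take the slow path and emit the syscall tracepoint\n");
	return 0;
}
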
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 066ef590d7e0..26d06e052a18 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -129,25 +129,34 @@ extern unsigned long node_remap_size[];
129#endif 129#endif
130 130
131/* sched_domains SD_NODE_INIT for NUMA machines */ 131/* sched_domains SD_NODE_INIT for NUMA machines */
132#define SD_NODE_INIT (struct sched_domain) { \ 132#define SD_NODE_INIT (struct sched_domain) { \
133 .min_interval = 8, \ 133 .min_interval = 8, \
134 .max_interval = 32, \ 134 .max_interval = 32, \
135 .busy_factor = 32, \ 135 .busy_factor = 32, \
136 .imbalance_pct = 125, \ 136 .imbalance_pct = 125, \
137 .cache_nice_tries = SD_CACHE_NICE_TRIES, \ 137 .cache_nice_tries = SD_CACHE_NICE_TRIES, \
138 .busy_idx = 3, \ 138 .busy_idx = 3, \
139 .idle_idx = SD_IDLE_IDX, \ 139 .idle_idx = SD_IDLE_IDX, \
140 .newidle_idx = SD_NEWIDLE_IDX, \ 140 .newidle_idx = SD_NEWIDLE_IDX, \
141 .wake_idx = 1, \ 141 .wake_idx = 1, \
142 .forkexec_idx = SD_FORKEXEC_IDX, \ 142 .forkexec_idx = SD_FORKEXEC_IDX, \
143 .flags = SD_LOAD_BALANCE \ 143 \
144 | SD_BALANCE_EXEC \ 144 .flags = 1*SD_LOAD_BALANCE \
145 | SD_BALANCE_FORK \ 145 | 1*SD_BALANCE_NEWIDLE \
146 | SD_WAKE_AFFINE \ 146 | 1*SD_BALANCE_EXEC \
147 | SD_WAKE_BALANCE \ 147 | 1*SD_BALANCE_FORK \
148 | SD_SERIALIZE, \ 148 | 0*SD_WAKE_IDLE \
149 .last_balance = jiffies, \ 149 | 1*SD_WAKE_AFFINE \
150 .balance_interval = 1, \ 150 | 1*SD_WAKE_BALANCE \
151 | 0*SD_SHARE_CPUPOWER \
152 | 0*SD_POWERSAVINGS_BALANCE \
153 | 0*SD_SHARE_PKG_RESOURCES \
154 | 1*SD_SERIALIZE \
155 | 1*SD_WAKE_IDLE_FAR \
156 | 0*SD_PREFER_SIBLING \
157 , \
158 .last_balance = jiffies, \
159 .balance_interval = 1, \
151} 160}
152 161
153#ifdef CONFIG_X86_64_ACPI_NUMA 162#ifdef CONFIG_X86_64_ACPI_NUMA
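[Editor's note] The rewritten SD_NODE_INIT flags use the 1*FLAG / 0*FLAG style so every scheduler-domain flag stays visible in the initializer and a 0 multiplier documents "deliberately off" rather than silently omitting the flag. A tiny demonstration of the idiom follows; the flag values are arbitrary stand-ins.

/*
 * Demonstration of the 1*FLAG / 0*FLAG initializer idiom.
 */
#include <stdio.h>

#define SD_LOAD_BALANCE   (1 << 0)
#define SD_SERIALIZE      (1 << 1)
#define SD_SHARE_CPUPOWER (1 << 2)

int main(void)
{
	unsigned int flags = 1*SD_LOAD_BALANCE
			   | 1*SD_SERIALIZE
			   | 0*SD_SHARE_CPUPOWER;   /* explicitly disabled */

	printf("flags = %#x\n", flags);
	return 0;
}
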
diff --git a/arch/x86/include/asm/unistd_32.h b/arch/x86/include/asm/unistd_32.h
index 732a30706153..8deaada61bc8 100644
--- a/arch/x86/include/asm/unistd_32.h
+++ b/arch/x86/include/asm/unistd_32.h
@@ -345,6 +345,8 @@
345 345
346#ifdef __KERNEL__ 346#ifdef __KERNEL__
347 347
348#define NR_syscalls 337
349
348#define __ARCH_WANT_IPC_PARSE_VERSION 350#define __ARCH_WANT_IPC_PARSE_VERSION
349#define __ARCH_WANT_OLD_READDIR 351#define __ARCH_WANT_OLD_READDIR
350#define __ARCH_WANT_OLD_STAT 352#define __ARCH_WANT_OLD_STAT
diff --git a/arch/x86/include/asm/unistd_64.h b/arch/x86/include/asm/unistd_64.h
index 900e1617e672..b9f3c60de5f7 100644
--- a/arch/x86/include/asm/unistd_64.h
+++ b/arch/x86/include/asm/unistd_64.h
@@ -688,6 +688,12 @@ __SYSCALL(__NR_perf_counter_open, sys_perf_counter_open)
688#endif /* __NO_STUBS */ 688#endif /* __NO_STUBS */
689 689
690#ifdef __KERNEL__ 690#ifdef __KERNEL__
691
692#ifndef COMPILE_OFFSETS
693#include <asm/asm-offsets.h>
694#define NR_syscalls (__NR_syscall_max + 1)
695#endif
696
691/* 697/*
692 * "Conditional" syscalls 698 * "Conditional" syscalls
693 * 699 *
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 6c99f5037801..98f230f6a28d 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -41,9 +41,13 @@ static DEFINE_RWLOCK(amd_iommu_devtable_lock);
41static LIST_HEAD(iommu_pd_list); 41static LIST_HEAD(iommu_pd_list);
42static DEFINE_SPINLOCK(iommu_pd_list_lock); 42static DEFINE_SPINLOCK(iommu_pd_list_lock);
43 43
44#ifdef CONFIG_IOMMU_API 44/*
45 * Domain for untranslated devices - only allocated
46 * if iommu=pt passed on kernel cmd line.
47 */
48static struct protection_domain *pt_domain;
49
45static struct iommu_ops amd_iommu_ops; 50static struct iommu_ops amd_iommu_ops;
46#endif
47 51
48/* 52/*
49 * general struct to manage commands send to an IOMMU 53 * general struct to manage commands send to an IOMMU
@@ -55,16 +59,16 @@ struct iommu_cmd {
55static int dma_ops_unity_map(struct dma_ops_domain *dma_dom, 59static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
56 struct unity_map_entry *e); 60 struct unity_map_entry *e);
57static struct dma_ops_domain *find_protection_domain(u16 devid); 61static struct dma_ops_domain *find_protection_domain(u16 devid);
58static u64* alloc_pte(struct protection_domain *dom, 62static u64 *alloc_pte(struct protection_domain *domain,
59 unsigned long address, u64 63 unsigned long address, int end_lvl,
60 **pte_page, gfp_t gfp); 64 u64 **pte_page, gfp_t gfp);
61static void dma_ops_reserve_addresses(struct dma_ops_domain *dom, 65static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
62 unsigned long start_page, 66 unsigned long start_page,
63 unsigned int pages); 67 unsigned int pages);
64 68static void reset_iommu_command_buffer(struct amd_iommu *iommu);
65#ifndef BUS_NOTIFY_UNBOUND_DRIVER 69static u64 *fetch_pte(struct protection_domain *domain,
66#define BUS_NOTIFY_UNBOUND_DRIVER 0x0005 70 unsigned long address, int map_size);
67#endif 71static void update_domain(struct protection_domain *domain);
68 72
69#ifdef CONFIG_AMD_IOMMU_STATS 73#ifdef CONFIG_AMD_IOMMU_STATS
70 74
@@ -138,7 +142,25 @@ static int iommu_has_npcache(struct amd_iommu *iommu)
138 * 142 *
139 ****************************************************************************/ 143 ****************************************************************************/
140 144
141static void iommu_print_event(void *__evt) 145static void dump_dte_entry(u16 devid)
146{
147 int i;
148
149 for (i = 0; i < 8; ++i)
150 pr_err("AMD-Vi: DTE[%d]: %08x\n", i,
151 amd_iommu_dev_table[devid].data[i]);
152}
153
154static void dump_command(unsigned long phys_addr)
155{
156 struct iommu_cmd *cmd = phys_to_virt(phys_addr);
157 int i;
158
159 for (i = 0; i < 4; ++i)
160 pr_err("AMD-Vi: CMD[%d]: %08x\n", i, cmd->data[i]);
161}
162
163static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
142{ 164{
143 u32 *event = __evt; 165 u32 *event = __evt;
144 int type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK; 166 int type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
@@ -147,7 +169,7 @@ static void iommu_print_event(void *__evt)
147 int flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK; 169 int flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
148 u64 address = (u64)(((u64)event[3]) << 32) | event[2]; 170 u64 address = (u64)(((u64)event[3]) << 32) | event[2];
149 171
150 printk(KERN_ERR "AMD IOMMU: Event logged ["); 172 printk(KERN_ERR "AMD-Vi: Event logged [");
151 173
152 switch (type) { 174 switch (type) {
153 case EVENT_TYPE_ILL_DEV: 175 case EVENT_TYPE_ILL_DEV:
@@ -155,6 +177,7 @@ static void iommu_print_event(void *__evt)
155 "address=0x%016llx flags=0x%04x]\n", 177 "address=0x%016llx flags=0x%04x]\n",
156 PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid), 178 PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
157 address, flags); 179 address, flags);
180 dump_dte_entry(devid);
158 break; 181 break;
159 case EVENT_TYPE_IO_FAULT: 182 case EVENT_TYPE_IO_FAULT:
160 printk("IO_PAGE_FAULT device=%02x:%02x.%x " 183 printk("IO_PAGE_FAULT device=%02x:%02x.%x "
@@ -176,6 +199,8 @@ static void iommu_print_event(void *__evt)
176 break; 199 break;
177 case EVENT_TYPE_ILL_CMD: 200 case EVENT_TYPE_ILL_CMD:
178 printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address); 201 printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address);
202 reset_iommu_command_buffer(iommu);
203 dump_command(address);
179 break; 204 break;
180 case EVENT_TYPE_CMD_HARD_ERR: 205 case EVENT_TYPE_CMD_HARD_ERR:
181 printk("COMMAND_HARDWARE_ERROR address=0x%016llx " 206 printk("COMMAND_HARDWARE_ERROR address=0x%016llx "
@@ -209,7 +234,7 @@ static void iommu_poll_events(struct amd_iommu *iommu)
209 tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET); 234 tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
210 235
211 while (head != tail) { 236 while (head != tail) {
212 iommu_print_event(iommu->evt_buf + head); 237 iommu_print_event(iommu, iommu->evt_buf + head);
213 head = (head + EVENT_ENTRY_SIZE) % iommu->evt_buf_size; 238 head = (head + EVENT_ENTRY_SIZE) % iommu->evt_buf_size;
214 } 239 }
215 240
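[Editor's note] iommu_print_event() now receives the IOMMU it belongs to so that dump_dte_entry()/dump_command() and the command-buffer reset can be driven from the event handler; the surrounding loop drains the event ring by advancing the head pointer modulo the buffer size. A minimal model of that ring drain is below; the entry size, buffer size and pretend hardware state are fabricated for the example.

/*
 * Minimal model of the event-ring drain in iommu_poll_events().
 */
#include <stdio.h>

#define ENTRY_SIZE 16
#define BUF_SIZE   (8 * ENTRY_SIZE)

int main(void)
{
	unsigned int head = 2 * ENTRY_SIZE;   /* pretend hardware state */
	unsigned int tail = 5 * ENTRY_SIZE;

	while (head != tail) {
		printf("handle event at offset %u\n", head);
		head = (head + ENTRY_SIZE) % BUF_SIZE;
	}
	return 0;
}
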
@@ -296,8 +321,11 @@ static void __iommu_wait_for_completion(struct amd_iommu *iommu)
296 status &= ~MMIO_STATUS_COM_WAIT_INT_MASK; 321 status &= ~MMIO_STATUS_COM_WAIT_INT_MASK;
297 writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET); 322 writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET);
298 323
299 if (unlikely(i == EXIT_LOOP_COUNT)) 324 if (unlikely(i == EXIT_LOOP_COUNT)) {
300 panic("AMD IOMMU: Completion wait loop failed\n"); 325 spin_unlock(&iommu->lock);
326 reset_iommu_command_buffer(iommu);
327 spin_lock(&iommu->lock);
328 }
301} 329}
302 330
303/* 331/*
@@ -445,47 +473,78 @@ static void iommu_flush_tlb_pde(struct amd_iommu *iommu, u16 domid)
445} 473}
446 474
447/* 475/*
476 * This function flushes one domain on one IOMMU
477 */
478static void flush_domain_on_iommu(struct amd_iommu *iommu, u16 domid)
479{
480 struct iommu_cmd cmd;
481 unsigned long flags;
482
483 __iommu_build_inv_iommu_pages(&cmd, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
484 domid, 1, 1);
485
486 spin_lock_irqsave(&iommu->lock, flags);
487 __iommu_queue_command(iommu, &cmd);
488 __iommu_completion_wait(iommu);
489 __iommu_wait_for_completion(iommu);
490 spin_unlock_irqrestore(&iommu->lock, flags);
491}
492
493static void flush_all_domains_on_iommu(struct amd_iommu *iommu)
494{
495 int i;
496
497 for (i = 1; i < MAX_DOMAIN_ID; ++i) {
498 if (!test_bit(i, amd_iommu_pd_alloc_bitmap))
499 continue;
500 flush_domain_on_iommu(iommu, i);
501 }
502
503}
504
505/*
448 * This function is used to flush the IO/TLB for a given protection domain 506 * This function is used to flush the IO/TLB for a given protection domain
449 * on every IOMMU in the system 507 * on every IOMMU in the system
450 */ 508 */
451static void iommu_flush_domain(u16 domid) 509static void iommu_flush_domain(u16 domid)
452{ 510{
453 unsigned long flags;
454 struct amd_iommu *iommu; 511 struct amd_iommu *iommu;
455 struct iommu_cmd cmd;
456 512
457 INC_STATS_COUNTER(domain_flush_all); 513 INC_STATS_COUNTER(domain_flush_all);
458 514
459 __iommu_build_inv_iommu_pages(&cmd, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 515 for_each_iommu(iommu)
460 domid, 1, 1); 516 flush_domain_on_iommu(iommu, domid);
461
462 for_each_iommu(iommu) {
463 spin_lock_irqsave(&iommu->lock, flags);
464 __iommu_queue_command(iommu, &cmd);
465 __iommu_completion_wait(iommu);
466 __iommu_wait_for_completion(iommu);
467 spin_unlock_irqrestore(&iommu->lock, flags);
468 }
469} 517}
470 518
471void amd_iommu_flush_all_domains(void) 519void amd_iommu_flush_all_domains(void)
472{ 520{
521 struct amd_iommu *iommu;
522
523 for_each_iommu(iommu)
524 flush_all_domains_on_iommu(iommu);
525}
526
527static void flush_all_devices_for_iommu(struct amd_iommu *iommu)
528{
473 int i; 529 int i;
474 530
475 for (i = 1; i < MAX_DOMAIN_ID; ++i) { 531 for (i = 0; i <= amd_iommu_last_bdf; ++i) {
476 if (!test_bit(i, amd_iommu_pd_alloc_bitmap)) 532 if (iommu != amd_iommu_rlookup_table[i])
477 continue; 533 continue;
478 iommu_flush_domain(i); 534
535 iommu_queue_inv_dev_entry(iommu, i);
536 iommu_completion_wait(iommu);
479 } 537 }
480} 538}
481 539
482void amd_iommu_flush_all_devices(void) 540static void flush_devices_by_domain(struct protection_domain *domain)
483{ 541{
484 struct amd_iommu *iommu; 542 struct amd_iommu *iommu;
485 int i; 543 int i;
486 544
487 for (i = 0; i <= amd_iommu_last_bdf; ++i) { 545 for (i = 0; i <= amd_iommu_last_bdf; ++i) {
488 if (amd_iommu_pd_table[i] == NULL) 546 if ((domain == NULL && amd_iommu_pd_table[i] == NULL) ||
547 (amd_iommu_pd_table[i] != domain))
489 continue; 548 continue;
490 549
491 iommu = amd_iommu_rlookup_table[i]; 550 iommu = amd_iommu_rlookup_table[i];
@@ -497,6 +556,27 @@ void amd_iommu_flush_all_devices(void)
497 } 556 }
498} 557}
499 558
559static void reset_iommu_command_buffer(struct amd_iommu *iommu)
560{
561 pr_err("AMD-Vi: Resetting IOMMU command buffer\n");
562
563 if (iommu->reset_in_progress)
564 panic("AMD-Vi: ILLEGAL_COMMAND_ERROR while resetting command buffer\n");
565
566 iommu->reset_in_progress = true;
567
568 amd_iommu_reset_cmd_buffer(iommu);
569 flush_all_devices_for_iommu(iommu);
570 flush_all_domains_on_iommu(iommu);
571
572 iommu->reset_in_progress = false;
573}
574
575void amd_iommu_flush_all_devices(void)
576{
577 flush_devices_by_domain(NULL);
578}
579
500/**************************************************************************** 580/****************************************************************************
501 * 581 *
502 * The functions below are used the create the page table mappings for 582 * The functions below are used the create the page table mappings for
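[Editor's note] flush_all_domains_on_iommu() above walks the bitmap of allocated protection-domain IDs and flushes only the domains that are actually in use. The sketch below models that bitmap walk with a plain C stand-in for test_bit(); the bitmap value and domain count are invented for the example.

/*
 * Sketch of the "flush every allocated domain" loop.
 */
#include <stdio.h>
#include <stdbool.h>

#define MAX_DOMAIN_ID 64

static unsigned long pd_alloc_bitmap = 0x16;  /* domains 1, 2 and 4 in use */

static bool test_bit_model(int nr, unsigned long map)
{
	return (map >> nr) & 1UL;
}

int main(void)
{
	int i;

	for (i = 1; i < MAX_DOMAIN_ID; ++i) {
		if (!test_bit_model(i, pd_alloc_bitmap))
			continue;
		printf("flush IO/TLB for domain %d\n", i);
	}
	return 0;
}
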
@@ -514,18 +594,21 @@ void amd_iommu_flush_all_devices(void)
514static int iommu_map_page(struct protection_domain *dom, 594static int iommu_map_page(struct protection_domain *dom,
515 unsigned long bus_addr, 595 unsigned long bus_addr,
516 unsigned long phys_addr, 596 unsigned long phys_addr,
517 int prot) 597 int prot,
598 int map_size)
518{ 599{
519 u64 __pte, *pte; 600 u64 __pte, *pte;
520 601
521 bus_addr = PAGE_ALIGN(bus_addr); 602 bus_addr = PAGE_ALIGN(bus_addr);
522 phys_addr = PAGE_ALIGN(phys_addr); 603 phys_addr = PAGE_ALIGN(phys_addr);
523 604
524 /* only support 512GB address spaces for now */ 605 BUG_ON(!PM_ALIGNED(map_size, bus_addr));
525 if (bus_addr > IOMMU_MAP_SIZE_L3 || !(prot & IOMMU_PROT_MASK)) 606 BUG_ON(!PM_ALIGNED(map_size, phys_addr));
607
608 if (!(prot & IOMMU_PROT_MASK))
526 return -EINVAL; 609 return -EINVAL;
527 610
528 pte = alloc_pte(dom, bus_addr, NULL, GFP_KERNEL); 611 pte = alloc_pte(dom, bus_addr, map_size, NULL, GFP_KERNEL);
529 612
530 if (IOMMU_PTE_PRESENT(*pte)) 613 if (IOMMU_PTE_PRESENT(*pte))
531 return -EBUSY; 614 return -EBUSY;
@@ -538,29 +621,18 @@ static int iommu_map_page(struct protection_domain *dom,
538 621
539 *pte = __pte; 622 *pte = __pte;
540 623
624 update_domain(dom);
625
541 return 0; 626 return 0;
542} 627}
543 628
544static void iommu_unmap_page(struct protection_domain *dom, 629static void iommu_unmap_page(struct protection_domain *dom,
545 unsigned long bus_addr) 630 unsigned long bus_addr, int map_size)
546{ 631{
547 u64 *pte; 632 u64 *pte = fetch_pte(dom, bus_addr, map_size);
548
549 pte = &dom->pt_root[IOMMU_PTE_L2_INDEX(bus_addr)];
550
551 if (!IOMMU_PTE_PRESENT(*pte))
552 return;
553
554 pte = IOMMU_PTE_PAGE(*pte);
555 pte = &pte[IOMMU_PTE_L1_INDEX(bus_addr)];
556 633
557 if (!IOMMU_PTE_PRESENT(*pte)) 634 if (pte)
558 return; 635 *pte = 0;
559
560 pte = IOMMU_PTE_PAGE(*pte);
561 pte = &pte[IOMMU_PTE_L1_INDEX(bus_addr)];
562
563 *pte = 0;
564} 636}
565 637
566/* 638/*
@@ -615,7 +687,8 @@ static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
615 687
616 for (addr = e->address_start; addr < e->address_end; 688 for (addr = e->address_start; addr < e->address_end;
617 addr += PAGE_SIZE) { 689 addr += PAGE_SIZE) {
618 ret = iommu_map_page(&dma_dom->domain, addr, addr, e->prot); 690 ret = iommu_map_page(&dma_dom->domain, addr, addr, e->prot,
691 PM_MAP_4k);
619 if (ret) 692 if (ret)
620 return ret; 693 return ret;
621 /* 694 /*
@@ -670,24 +743,29 @@ static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom,
670 * This function checks if there is a PTE for a given dma address. If 743 * This function checks if there is a PTE for a given dma address. If
671 * there is one, it returns the pointer to it. 744 * there is one, it returns the pointer to it.
672 */ 745 */
673static u64* fetch_pte(struct protection_domain *domain, 746static u64 *fetch_pte(struct protection_domain *domain,
674 unsigned long address) 747 unsigned long address, int map_size)
675{ 748{
749 int level;
676 u64 *pte; 750 u64 *pte;
677 751
678 pte = &domain->pt_root[IOMMU_PTE_L2_INDEX(address)]; 752 level = domain->mode - 1;
753 pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
679 754
680 if (!IOMMU_PTE_PRESENT(*pte)) 755 while (level > map_size) {
681 return NULL; 756 if (!IOMMU_PTE_PRESENT(*pte))
757 return NULL;
682 758
683 pte = IOMMU_PTE_PAGE(*pte); 759 level -= 1;
684 pte = &pte[IOMMU_PTE_L1_INDEX(address)];
685 760
686 if (!IOMMU_PTE_PRESENT(*pte)) 761 pte = IOMMU_PTE_PAGE(*pte);
687 return NULL; 762 pte = &pte[PM_LEVEL_INDEX(level, address)];
688 763
689 pte = IOMMU_PTE_PAGE(*pte); 764 if ((PM_PTE_LEVEL(*pte) == 0) && level != map_size) {
690 pte = &pte[IOMMU_PTE_L0_INDEX(address)]; 765 pte = NULL;
766 break;
767 }
768 }
691 769
692 return pte; 770 return pte;
693} 771}
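[Editor's note] fetch_pte() above replaces the fixed three-level lookup with a variable-depth walk: it starts at the level implied by the domain's paging mode and descends until it reaches the requested level or a missing entry. The self-contained model below builds a tiny two-level table from fabricated data to show the descent; unlike the real code, it masks only the present bit out of an entry (a comment notes the difference), and every value is illustrative.

/*
 * Self-contained model of the variable-depth page-table walk.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define PTE_PRESENT       (1ULL << 0)
#define LEVEL_SHIFT(x)    (12 + (x) * 9)
#define LEVEL_INDEX(x, a) (((a) >> LEVEL_SHIFT(x)) & 0x1ffULL)
/* The real code masks out the full low 12 bits; stripping only the
 * present bit keeps this user-space model independent of allocation
 * alignment. */
#define PTE_TO_TABLE(p)   ((uint64_t *)(uintptr_t)((p) & ~PTE_PRESENT))

static uint64_t *fetch_pte_model(uint64_t *root, int mode,
				 uint64_t addr, int end_lvl)
{
	int level = mode - 1;
	uint64_t *pte = &root[LEVEL_INDEX(level, addr)];

	while (level > end_lvl) {
		if (!(*pte & PTE_PRESENT))
			return NULL;
		level -= 1;
		pte = &PTE_TO_TABLE(*pte)[LEVEL_INDEX(level, addr)];
	}
	return pte;
}

int main(void)
{
	uint64_t *l1 = calloc(512, sizeof(*l1));   /* top level (mode 2) */
	uint64_t *l0 = calloc(512, sizeof(*l0));   /* leaf level         */
	uint64_t addr = (3ULL << LEVEL_SHIFT(1)) | (7ULL << 12);
	uint64_t *pte;

	l1[LEVEL_INDEX(1, addr)] = (uint64_t)(uintptr_t)l0 | PTE_PRESENT;
	l0[LEVEL_INDEX(0, addr)] = 0xabc000ULL | PTE_PRESENT;

	pte = fetch_pte_model(l1, 2, addr, 0);
	printf("pte = %#llx\n", pte ? (unsigned long long)*pte : 0ULL);

	free(l0);
	free(l1);
	return 0;
}
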
@@ -727,7 +805,7 @@ static int alloc_new_range(struct amd_iommu *iommu,
727 u64 *pte, *pte_page; 805 u64 *pte, *pte_page;
728 806
729 for (i = 0; i < num_ptes; ++i) { 807 for (i = 0; i < num_ptes; ++i) {
730 pte = alloc_pte(&dma_dom->domain, address, 808 pte = alloc_pte(&dma_dom->domain, address, PM_MAP_4k,
731 &pte_page, gfp); 809 &pte_page, gfp);
732 if (!pte) 810 if (!pte)
733 goto out_free; 811 goto out_free;
@@ -760,16 +838,20 @@ static int alloc_new_range(struct amd_iommu *iommu,
760 for (i = dma_dom->aperture[index]->offset; 838 for (i = dma_dom->aperture[index]->offset;
761 i < dma_dom->aperture_size; 839 i < dma_dom->aperture_size;
762 i += PAGE_SIZE) { 840 i += PAGE_SIZE) {
763 u64 *pte = fetch_pte(&dma_dom->domain, i); 841 u64 *pte = fetch_pte(&dma_dom->domain, i, PM_MAP_4k);
764 if (!pte || !IOMMU_PTE_PRESENT(*pte)) 842 if (!pte || !IOMMU_PTE_PRESENT(*pte))
765 continue; 843 continue;
766 844
767 dma_ops_reserve_addresses(dma_dom, i << PAGE_SHIFT, 1); 845 dma_ops_reserve_addresses(dma_dom, i << PAGE_SHIFT, 1);
768 } 846 }
769 847
848 update_domain(&dma_dom->domain);
849
770 return 0; 850 return 0;
771 851
772out_free: 852out_free:
853 update_domain(&dma_dom->domain);
854
773 free_page((unsigned long)dma_dom->aperture[index]->bitmap); 855 free_page((unsigned long)dma_dom->aperture[index]->bitmap);
774 856
775 kfree(dma_dom->aperture[index]); 857 kfree(dma_dom->aperture[index]);
@@ -1009,7 +1091,7 @@ static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu)
1009 dma_dom->domain.id = domain_id_alloc(); 1091 dma_dom->domain.id = domain_id_alloc();
1010 if (dma_dom->domain.id == 0) 1092 if (dma_dom->domain.id == 0)
1011 goto free_dma_dom; 1093 goto free_dma_dom;
1012 dma_dom->domain.mode = PAGE_MODE_3_LEVEL; 1094 dma_dom->domain.mode = PAGE_MODE_2_LEVEL;
1013 dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL); 1095 dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL);
1014 dma_dom->domain.flags = PD_DMA_OPS_MASK; 1096 dma_dom->domain.flags = PD_DMA_OPS_MASK;
1015 dma_dom->domain.priv = dma_dom; 1097 dma_dom->domain.priv = dma_dom;
@@ -1063,6 +1145,41 @@ static struct protection_domain *domain_for_device(u16 devid)
1063 return dom; 1145 return dom;
1064} 1146}
1065 1147
1148static void set_dte_entry(u16 devid, struct protection_domain *domain)
1149{
1150 u64 pte_root = virt_to_phys(domain->pt_root);
1151
1152 pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
1153 << DEV_ENTRY_MODE_SHIFT;
1154 pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;
1155
1156 amd_iommu_dev_table[devid].data[2] = domain->id;
1157 amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root);
1158 amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root);
1159
1160 amd_iommu_pd_table[devid] = domain;
1161}
1162
1163/*
1164 * If a device is not yet associated with a domain, this function does
1165 * assigns it visible for the hardware
1166 */
1167static void __attach_device(struct amd_iommu *iommu,
1168 struct protection_domain *domain,
1169 u16 devid)
1170{
1171 /* lock domain */
1172 spin_lock(&domain->lock);
1173
1174 /* update DTE entry */
1175 set_dte_entry(devid, domain);
1176
1177 domain->dev_cnt += 1;
1178
1179 /* ready */
1180 spin_unlock(&domain->lock);
1181}
1182
1066/* 1183/*
1067 * If a device is not yet associated with a domain, this function does 1184 * If a device is not yet associated with a domain, this function does
1068 * assigns it visible for the hardware 1185 * assigns it visible for the hardware
@@ -1072,27 +1189,16 @@ static void attach_device(struct amd_iommu *iommu,
1072 u16 devid) 1189 u16 devid)
1073{ 1190{
1074 unsigned long flags; 1191 unsigned long flags;
1075 u64 pte_root = virt_to_phys(domain->pt_root);
1076
1077 domain->dev_cnt += 1;
1078
1079 pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
1080 << DEV_ENTRY_MODE_SHIFT;
1081 pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;
1082 1192
1083 write_lock_irqsave(&amd_iommu_devtable_lock, flags); 1193 write_lock_irqsave(&amd_iommu_devtable_lock, flags);
1084 amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root); 1194 __attach_device(iommu, domain, devid);
1085 amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root);
1086 amd_iommu_dev_table[devid].data[2] = domain->id;
1087
1088 amd_iommu_pd_table[devid] = domain;
1089 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); 1195 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
1090 1196
1091 /* 1197 /*
1092 * We might boot into a crash-kernel here. The crashed kernel 1198 * We might boot into a crash-kernel here. The crashed kernel
1093 * left the caches in the IOMMU dirty. So we have to flush 1199 * left the caches in the IOMMU dirty. So we have to flush
1094 * here to evict all dirty stuff. 1200 * here to evict all dirty stuff.
1095 */ 1201 */
1096 iommu_queue_inv_dev_entry(iommu, devid); 1202 iommu_queue_inv_dev_entry(iommu, devid);
1097 iommu_flush_tlb_pde(iommu, domain->id); 1203 iommu_flush_tlb_pde(iommu, domain->id);
1098} 1204}
@@ -1119,6 +1225,15 @@ static void __detach_device(struct protection_domain *domain, u16 devid)
1119 1225
1120 /* ready */ 1226 /* ready */
1121 spin_unlock(&domain->lock); 1227 spin_unlock(&domain->lock);
1228
1229 /*
1230 * If we run in passthrough mode the device must be assigned to the
1231 * passthrough domain if it is detached from any other domain
1232 */
1233 if (iommu_pass_through) {
1234 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
1235 __attach_device(iommu, pt_domain, devid);
1236 }
1122} 1237}
1123 1238
1124/* 1239/*
@@ -1164,6 +1279,8 @@ static int device_change_notifier(struct notifier_block *nb,
1164 case BUS_NOTIFY_UNBOUND_DRIVER: 1279 case BUS_NOTIFY_UNBOUND_DRIVER:
1165 if (!domain) 1280 if (!domain)
1166 goto out; 1281 goto out;
1282 if (iommu_pass_through)
1283 break;
1167 detach_device(domain, devid); 1284 detach_device(domain, devid);
1168 break; 1285 break;
1169 case BUS_NOTIFY_ADD_DEVICE: 1286 case BUS_NOTIFY_ADD_DEVICE:
@@ -1292,39 +1409,91 @@ static int get_device_resources(struct device *dev,
1292 return 1; 1409 return 1;
1293} 1410}
1294 1411
1412static void update_device_table(struct protection_domain *domain)
1413{
1414 unsigned long flags;
1415 int i;
1416
1417 for (i = 0; i <= amd_iommu_last_bdf; ++i) {
1418 if (amd_iommu_pd_table[i] != domain)
1419 continue;
1420 write_lock_irqsave(&amd_iommu_devtable_lock, flags);
1421 set_dte_entry(i, domain);
1422 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
1423 }
1424}
1425
1426static void update_domain(struct protection_domain *domain)
1427{
1428 if (!domain->updated)
1429 return;
1430
1431 update_device_table(domain);
1432 flush_devices_by_domain(domain);
1433 iommu_flush_domain(domain->id);
1434
1435 domain->updated = false;
1436}
1437
1295/* 1438/*
1296 * If the pte_page is not yet allocated this function is called 1439 * This function is used to add another level to an IO page table. Adding
1440 * another level increases the size of the address space by 9 bits to a size up
1441 * to 64 bits.
1297 */ 1442 */
1298static u64* alloc_pte(struct protection_domain *dom, 1443static bool increase_address_space(struct protection_domain *domain,
1299 unsigned long address, u64 **pte_page, gfp_t gfp) 1444 gfp_t gfp)
1445{
1446 u64 *pte;
1447
1448 if (domain->mode == PAGE_MODE_6_LEVEL)
1449 /* address space already 64 bit large */
1450 return false;
1451
1452 pte = (void *)get_zeroed_page(gfp);
1453 if (!pte)
1454 return false;
1455
1456 *pte = PM_LEVEL_PDE(domain->mode,
1457 virt_to_phys(domain->pt_root));
1458 domain->pt_root = pte;
1459 domain->mode += 1;
1460 domain->updated = true;
1461
1462 return true;
1463}
1464
1465static u64 *alloc_pte(struct protection_domain *domain,
1466 unsigned long address,
1467 int end_lvl,
1468 u64 **pte_page,
1469 gfp_t gfp)
1300{ 1470{
1301 u64 *pte, *page; 1471 u64 *pte, *page;
1472 int level;
1302 1473
1303 pte = &dom->pt_root[IOMMU_PTE_L2_INDEX(address)]; 1474 while (address > PM_LEVEL_SIZE(domain->mode))
1475 increase_address_space(domain, gfp);
1304 1476
1305 if (!IOMMU_PTE_PRESENT(*pte)) { 1477 level = domain->mode - 1;
1306 page = (u64 *)get_zeroed_page(gfp); 1478 pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
1307 if (!page)
1308 return NULL;
1309 *pte = IOMMU_L2_PDE(virt_to_phys(page));
1310 }
1311 1479
1312 pte = IOMMU_PTE_PAGE(*pte); 1480 while (level > end_lvl) {
1313 pte = &pte[IOMMU_PTE_L1_INDEX(address)]; 1481 if (!IOMMU_PTE_PRESENT(*pte)) {
1482 page = (u64 *)get_zeroed_page(gfp);
1483 if (!page)
1484 return NULL;
1485 *pte = PM_LEVEL_PDE(level, virt_to_phys(page));
1486 }
1314 1487
1315 if (!IOMMU_PTE_PRESENT(*pte)) { 1488 level -= 1;
1316 page = (u64 *)get_zeroed_page(gfp);
1317 if (!page)
1318 return NULL;
1319 *pte = IOMMU_L1_PDE(virt_to_phys(page));
1320 }
1321 1489
1322 pte = IOMMU_PTE_PAGE(*pte); 1490 pte = IOMMU_PTE_PAGE(*pte);
1323 1491
1324 if (pte_page) 1492 if (pte_page && level == end_lvl)
1325 *pte_page = pte; 1493 *pte_page = pte;
1326 1494
1327 pte = &pte[IOMMU_PTE_L0_INDEX(address)]; 1495 pte = &pte[PM_LEVEL_INDEX(level, address)];
1496 }
1328 1497
1329 return pte; 1498 return pte;
1330} 1499}
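[Editor's note] increase_address_space() above grows the I/O page table on demand: when an address exceeds the space covered by the current paging mode, a new top-level table is allocated whose first entry points at the old root, and the depth increases by one; alloc_pte() then walks down allocating intermediate levels as needed. The simplified model below exercises that growth loop; allocation handling and all values are reduced for the example and the present/permission bits of the real PM_LEVEL_PDE() encoding are omitted.

/*
 * Model of growing a page table by adding levels until an address fits.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define LEVEL_SHIFT(x) (12 + (x) * 9)
#define LEVEL_SIZE(x)  ((x) < 6 ? (1ULL << LEVEL_SHIFT(x)) - 1 : ~0ULL)

struct domain_model {
	int mode;          /* number of levels currently in use */
	uint64_t *pt_root;
};

static int increase_address_space_model(struct domain_model *dom)
{
	uint64_t *pte;

	if (dom->mode == 6)            /* already covers the full 64 bits */
		return 0;

	pte = calloc(512, sizeof(*pte));
	if (!pte)
		return 0;

	pte[0] = (uint64_t)(uintptr_t)dom->pt_root;  /* old root becomes entry 0 */
	dom->pt_root = pte;
	dom->mode += 1;
	return 1;
}

int main(void)
{
	struct domain_model dom = { .mode = 2,
				    .pt_root = calloc(512, sizeof(uint64_t)) };
	uint64_t addr = 1ULL << 40;    /* well beyond a 2-level space */

	while (addr > LEVEL_SIZE(dom.mode))
		if (!increase_address_space_model(&dom))
			break;

	printf("grew to %d levels\n", dom.mode);
	return 0;
}
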
@@ -1344,10 +1513,13 @@ static u64* dma_ops_get_pte(struct dma_ops_domain *dom,
1344 1513
1345 pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)]; 1514 pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
1346 if (!pte) { 1515 if (!pte) {
1347 pte = alloc_pte(&dom->domain, address, &pte_page, GFP_ATOMIC); 1516 pte = alloc_pte(&dom->domain, address, PM_MAP_4k, &pte_page,
1517 GFP_ATOMIC);
1348 aperture->pte_pages[APERTURE_PAGE_INDEX(address)] = pte_page; 1518 aperture->pte_pages[APERTURE_PAGE_INDEX(address)] = pte_page;
1349 } else 1519 } else
1350 pte += IOMMU_PTE_L0_INDEX(address); 1520 pte += PM_LEVEL_INDEX(0, address);
1521
1522 update_domain(&dom->domain);
1351 1523
1352 return pte; 1524 return pte;
1353} 1525}
@@ -1409,7 +1581,7 @@ static void dma_ops_domain_unmap(struct amd_iommu *iommu,
1409 if (!pte) 1581 if (!pte)
1410 return; 1582 return;
1411 1583
1412 pte += IOMMU_PTE_L0_INDEX(address); 1584 pte += PM_LEVEL_INDEX(0, address);
1413 1585
1414 WARN_ON(!*pte); 1586 WARN_ON(!*pte);
1415 1587
@@ -1988,19 +2160,47 @@ static void cleanup_domain(struct protection_domain *domain)
1988 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); 2160 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
1989} 2161}
1990 2162
1991static int amd_iommu_domain_init(struct iommu_domain *dom) 2163static void protection_domain_free(struct protection_domain *domain)
2164{
2165 if (!domain)
2166 return;
2167
2168 if (domain->id)
2169 domain_id_free(domain->id);
2170
2171 kfree(domain);
2172}
2173
2174static struct protection_domain *protection_domain_alloc(void)
1992{ 2175{
1993 struct protection_domain *domain; 2176 struct protection_domain *domain;
1994 2177
1995 domain = kzalloc(sizeof(*domain), GFP_KERNEL); 2178 domain = kzalloc(sizeof(*domain), GFP_KERNEL);
1996 if (!domain) 2179 if (!domain)
1997 return -ENOMEM; 2180 return NULL;
1998 2181
1999 spin_lock_init(&domain->lock); 2182 spin_lock_init(&domain->lock);
2000 domain->mode = PAGE_MODE_3_LEVEL;
2001 domain->id = domain_id_alloc(); 2183 domain->id = domain_id_alloc();
2002 if (!domain->id) 2184 if (!domain->id)
2185 goto out_err;
2186
2187 return domain;
2188
2189out_err:
2190 kfree(domain);
2191
2192 return NULL;
2193}
2194
2195static int amd_iommu_domain_init(struct iommu_domain *dom)
2196{
2197 struct protection_domain *domain;
2198
2199 domain = protection_domain_alloc();
2200 if (!domain)
2003 goto out_free; 2201 goto out_free;
2202
2203 domain->mode = PAGE_MODE_3_LEVEL;
2004 domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL); 2204 domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
2005 if (!domain->pt_root) 2205 if (!domain->pt_root)
2006 goto out_free; 2206 goto out_free;
@@ -2010,7 +2210,7 @@ static int amd_iommu_domain_init(struct iommu_domain *dom)
2010 return 0; 2210 return 0;
2011 2211
2012out_free: 2212out_free:
2013 kfree(domain); 2213 protection_domain_free(domain);
2014 2214
2015 return -ENOMEM; 2215 return -ENOMEM;
2016} 2216}
@@ -2115,7 +2315,7 @@ static int amd_iommu_map_range(struct iommu_domain *dom,
2115 paddr &= PAGE_MASK; 2315 paddr &= PAGE_MASK;
2116 2316
2117 for (i = 0; i < npages; ++i) { 2317 for (i = 0; i < npages; ++i) {
2118 ret = iommu_map_page(domain, iova, paddr, prot); 2318 ret = iommu_map_page(domain, iova, paddr, prot, PM_MAP_4k);
2119 if (ret) 2319 if (ret)
2120 return ret; 2320 return ret;
2121 2321
@@ -2136,7 +2336,7 @@ static void amd_iommu_unmap_range(struct iommu_domain *dom,
2136 iova &= PAGE_MASK; 2336 iova &= PAGE_MASK;
2137 2337
2138 for (i = 0; i < npages; ++i) { 2338 for (i = 0; i < npages; ++i) {
2139 iommu_unmap_page(domain, iova); 2339 iommu_unmap_page(domain, iova, PM_MAP_4k);
2140 iova += PAGE_SIZE; 2340 iova += PAGE_SIZE;
2141 } 2341 }
2142 2342
@@ -2151,21 +2351,9 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
2151 phys_addr_t paddr; 2351 phys_addr_t paddr;
2152 u64 *pte; 2352 u64 *pte;
2153 2353
2154 pte = &domain->pt_root[IOMMU_PTE_L2_INDEX(iova)]; 2354 pte = fetch_pte(domain, iova, PM_MAP_4k);
2155
2156 if (!IOMMU_PTE_PRESENT(*pte))
2157 return 0;
2158
2159 pte = IOMMU_PTE_PAGE(*pte);
2160 pte = &pte[IOMMU_PTE_L1_INDEX(iova)];
2161
2162 if (!IOMMU_PTE_PRESENT(*pte))
2163 return 0;
2164
2165 pte = IOMMU_PTE_PAGE(*pte);
2166 pte = &pte[IOMMU_PTE_L0_INDEX(iova)];
2167 2355
2168 if (!IOMMU_PTE_PRESENT(*pte)) 2356 if (!pte || !IOMMU_PTE_PRESENT(*pte))
2169 return 0; 2357 return 0;
2170 2358
2171 paddr = *pte & IOMMU_PAGE_MASK; 2359 paddr = *pte & IOMMU_PAGE_MASK;
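[Editor's note] With fetch_pte() doing the walk, amd_iommu_iova_to_phys() reduces to "look up the PTE, mask out the page frame, re-apply the page offset". The sketch below shows that final composition; the fixed PTE value stands in for the result of the walk and is invented for the example.

/*
 * Sketch of composing a physical address from a looked-up PTE.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_MASK   (~0xfffULL)
#define PTE_PRESENT (1ULL << 0)

int main(void)
{
	uint64_t iova = 0x12345678ULL;
	uint64_t pte  = 0x00000000abcde000ULL | PTE_PRESENT; /* from the walk */
	uint64_t paddr;

	if (!(pte & PTE_PRESENT))
		return 1;

	paddr = (pte & PAGE_MASK) | (iova & ~PAGE_MASK);
	printf("iova %#llx -> phys %#llx\n",
	       (unsigned long long)iova, (unsigned long long)paddr);
	return 0;
}
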
@@ -2191,3 +2379,46 @@ static struct iommu_ops amd_iommu_ops = {
2191 .domain_has_cap = amd_iommu_domain_has_cap, 2379 .domain_has_cap = amd_iommu_domain_has_cap,
2192}; 2380};
2193 2381
2382/*****************************************************************************
2383 *
 2384 * The next functions do a basic initialization of the IOMMU for passthrough
2385 * mode
2386 *
2387 * In passthrough mode the IOMMU is initialized and enabled but not used for
2388 * DMA-API translation.
2389 *
2390 *****************************************************************************/
2391
2392int __init amd_iommu_init_passthrough(void)
2393{
2394 struct pci_dev *dev = NULL;
2395 u16 devid, devid2;
2396
 2397 /* allocate passthrough domain */
2398 pt_domain = protection_domain_alloc();
2399 if (!pt_domain)
2400 return -ENOMEM;
2401
2402 pt_domain->mode |= PAGE_MODE_NONE;
2403
2404 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
2405 struct amd_iommu *iommu;
2406
2407 devid = calc_devid(dev->bus->number, dev->devfn);
2408 if (devid > amd_iommu_last_bdf)
2409 continue;
2410
2411 devid2 = amd_iommu_alias_table[devid];
2412
2413 iommu = amd_iommu_rlookup_table[devid2];
2414 if (!iommu)
2415 continue;
2416
2417 __attach_device(iommu, pt_domain, devid);
2418 __attach_device(iommu, pt_domain, devid2);
2419 }
2420
2421 pr_info("AMD-Vi: Initialized for Passthrough Mode\n");
2422
2423 return 0;
2424}
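A note on the loop above: everything is keyed by the 16-bit PCI device id, bus << 8 | devfn, and its alias entry. The following stand-alone C sketch mirrors that lookup; the table names and the example device are made-up stand-ins for amd_iommu_alias_table and amd_iommu_rlookup_table, not the driver's real structures.

#include <stdio.h>
#include <stdint.h>

#define MAX_BDF 0x10000

static uint16_t alias_table[MAX_BDF];	/* stand-in for amd_iommu_alias_table */
static int      has_iommu[MAX_BDF];	/* stand-in for amd_iommu_rlookup_table */

static uint16_t calc_devid(uint8_t bus, uint8_t devfn)
{
	return ((uint16_t)bus << 8) | devfn;	/* same packing the driver uses */
}

int main(void)
{
	uint16_t devid = calc_devid(0x01, 0x00);	/* pretend 01:00.0 exists */
	uint16_t alias;

	alias_table[devid] = devid;			/* device is its own alias */
	has_iommu[devid]   = 1;				/* covered by some IOMMU */

	alias = alias_table[devid];
	if (has_iommu[alias])
		printf("attach devid %#06x (alias %#06x) to pt_domain\n",
		       devid, alias);
	return 0;
}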
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index c1b17e97252e..b4b61d462dcc 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -252,7 +252,7 @@ static void __init iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
252/* Function to enable the hardware */ 252/* Function to enable the hardware */
253static void iommu_enable(struct amd_iommu *iommu) 253static void iommu_enable(struct amd_iommu *iommu)
254{ 254{
255 printk(KERN_INFO "AMD IOMMU: Enabling IOMMU at %s cap 0x%hx\n", 255 printk(KERN_INFO "AMD-Vi: Enabling IOMMU at %s cap 0x%hx\n",
256 dev_name(&iommu->dev->dev), iommu->cap_ptr); 256 dev_name(&iommu->dev->dev), iommu->cap_ptr);
257 257
258 iommu_feature_enable(iommu, CONTROL_IOMMU_EN); 258 iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
@@ -435,6 +435,20 @@ static u8 * __init alloc_command_buffer(struct amd_iommu *iommu)
435} 435}
436 436
437/* 437/*
438 * This function resets the command buffer if the IOMMU stopped fetching
439 * commands from it.
440 */
441void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
442{
443 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
444
445 writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
446 writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
447
448 iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
449}
450
451/*
438 * This function writes the command buffer address to the hardware and 452 * This function writes the command buffer address to the hardware and
439 * enables it. 453 * enables it.
440 */ 454 */
@@ -450,11 +464,7 @@ static void iommu_enable_command_buffer(struct amd_iommu *iommu)
450 memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET, 464 memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
451 &entry, sizeof(entry)); 465 &entry, sizeof(entry));
452 466
453 /* set head and tail to zero manually */ 467 amd_iommu_reset_cmd_buffer(iommu);
454 writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
455 writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
456
457 iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
458} 468}
459 469
460static void __init free_command_buffer(struct amd_iommu *iommu) 470static void __init free_command_buffer(struct amd_iommu *iommu)
@@ -858,7 +868,7 @@ static int __init init_iommu_all(struct acpi_table_header *table)
858 switch (*p) { 868 switch (*p) {
859 case ACPI_IVHD_TYPE: 869 case ACPI_IVHD_TYPE:
860 870
861 DUMP_printk("IOMMU: device: %02x:%02x.%01x cap: %04x " 871 DUMP_printk("device: %02x:%02x.%01x cap: %04x "
862 "seg: %d flags: %01x info %04x\n", 872 "seg: %d flags: %01x info %04x\n",
863 PCI_BUS(h->devid), PCI_SLOT(h->devid), 873 PCI_BUS(h->devid), PCI_SLOT(h->devid),
864 PCI_FUNC(h->devid), h->cap_ptr, 874 PCI_FUNC(h->devid), h->cap_ptr,
@@ -902,7 +912,7 @@ static int __init iommu_setup_msi(struct amd_iommu *iommu)
902 912
903 r = request_irq(iommu->dev->irq, amd_iommu_int_handler, 913 r = request_irq(iommu->dev->irq, amd_iommu_int_handler,
904 IRQF_SAMPLE_RANDOM, 914 IRQF_SAMPLE_RANDOM,
905 "AMD IOMMU", 915 "AMD-Vi",
906 NULL); 916 NULL);
907 917
908 if (r) { 918 if (r) {
@@ -1150,7 +1160,7 @@ int __init amd_iommu_init(void)
1150 1160
1151 1161
1152 if (no_iommu) { 1162 if (no_iommu) {
1153 printk(KERN_INFO "AMD IOMMU disabled by kernel command line\n"); 1163 printk(KERN_INFO "AMD-Vi disabled by kernel command line\n");
1154 return 0; 1164 return 0;
1155 } 1165 }
1156 1166
@@ -1242,22 +1252,28 @@ int __init amd_iommu_init(void)
1242 if (ret) 1252 if (ret)
1243 goto free; 1253 goto free;
1244 1254
1245 ret = amd_iommu_init_dma_ops(); 1255 if (iommu_pass_through)
1256 ret = amd_iommu_init_passthrough();
1257 else
1258 ret = amd_iommu_init_dma_ops();
1246 if (ret) 1259 if (ret)
1247 goto free; 1260 goto free;
1248 1261
1249 enable_iommus(); 1262 enable_iommus();
1250 1263
1251 printk(KERN_INFO "AMD IOMMU: device isolation "); 1264 if (iommu_pass_through)
1265 goto out;
1266
1267 printk(KERN_INFO "AMD-Vi: device isolation ");
1252 if (amd_iommu_isolate) 1268 if (amd_iommu_isolate)
1253 printk("enabled\n"); 1269 printk("enabled\n");
1254 else 1270 else
1255 printk("disabled\n"); 1271 printk("disabled\n");
1256 1272
1257 if (amd_iommu_unmap_flush) 1273 if (amd_iommu_unmap_flush)
1258 printk(KERN_INFO "AMD IOMMU: IO/TLB flush on unmap enabled\n"); 1274 printk(KERN_INFO "AMD-Vi: IO/TLB flush on unmap enabled\n");
1259 else 1275 else
1260 printk(KERN_INFO "AMD IOMMU: Lazy IO/TLB flushing enabled\n"); 1276 printk(KERN_INFO "AMD-Vi: Lazy IO/TLB flushing enabled\n");
1261 1277
1262out: 1278out:
1263 return ret; 1279 return ret;
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index 676debfc1702..128111d8ffe0 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -20,6 +20,7 @@
20#include <linux/bitops.h> 20#include <linux/bitops.h>
21#include <linux/ioport.h> 21#include <linux/ioport.h>
22#include <linux/suspend.h> 22#include <linux/suspend.h>
23#include <linux/kmemleak.h>
23#include <asm/e820.h> 24#include <asm/e820.h>
24#include <asm/io.h> 25#include <asm/io.h>
25#include <asm/iommu.h> 26#include <asm/iommu.h>
@@ -94,6 +95,11 @@ static u32 __init allocate_aperture(void)
94 * code for safe 95 * code for safe
95 */ 96 */
96 p = __alloc_bootmem_nopanic(aper_size, aper_size, 512ULL<<20); 97 p = __alloc_bootmem_nopanic(aper_size, aper_size, 512ULL<<20);
98 /*
99 * Kmemleak should not scan this block as it may not be mapped via the
100 * kernel direct mapping.
101 */
102 kmemleak_ignore(p);
97 if (!p || __pa(p)+aper_size > 0xffffffff) { 103 if (!p || __pa(p)+aper_size > 0xffffffff) {
98 printk(KERN_ERR 104 printk(KERN_ERR
99 "Cannot allocate aperture memory hole (%p,%uK)\n", 105 "Cannot allocate aperture memory hole (%p,%uK)\n",
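The kmemleak_ignore() call above is the general pattern for early allocations that may fall outside the kernel direct mapping; the same two lines reappear in pci-dma.c further down. A hedged kernel-style sketch of that pattern follows — the helper name alloc_unscanned_hole() is invented for illustration, while the two API calls are the ones used in the hunk.

#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/kmemleak.h>

static void * __init alloc_unscanned_hole(unsigned long size, unsigned long align)
{
	void *p = __alloc_bootmem_nopanic(size, align, 512ULL << 20);

	/* block may not be covered by the direct mapping, keep kmemleak away */
	if (p)
		kmemleak_ignore(p);

	return p;
}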
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c
index b3025b43b63a..db7220220d09 100644
--- a/arch/x86/kernel/apic/nmi.c
+++ b/arch/x86/kernel/apic/nmi.c
@@ -39,7 +39,7 @@
39int unknown_nmi_panic; 39int unknown_nmi_panic;
40int nmi_watchdog_enabled; 40int nmi_watchdog_enabled;
41 41
42static cpumask_var_t backtrace_mask; 42static cpumask_t backtrace_mask __read_mostly;
43 43
44/* nmi_active: 44/* nmi_active:
45 * >0: the lapic NMI watchdog is active, but can be disabled 45 * >0: the lapic NMI watchdog is active, but can be disabled
@@ -138,7 +138,6 @@ int __init check_nmi_watchdog(void)
138 if (!prev_nmi_count) 138 if (!prev_nmi_count)
139 goto error; 139 goto error;
140 140
141 alloc_cpumask_var(&backtrace_mask, GFP_KERNEL|__GFP_ZERO);
142 printk(KERN_INFO "Testing NMI watchdog ... "); 141 printk(KERN_INFO "Testing NMI watchdog ... ");
143 142
144#ifdef CONFIG_SMP 143#ifdef CONFIG_SMP
@@ -415,14 +414,17 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
415 } 414 }
416 415
417 /* We can be called before check_nmi_watchdog, hence NULL check. */ 416 /* We can be called before check_nmi_watchdog, hence NULL check. */
418 if (backtrace_mask != NULL && cpumask_test_cpu(cpu, backtrace_mask)) { 417 if (cpumask_test_cpu(cpu, &backtrace_mask)) {
419 static DEFINE_SPINLOCK(lock); /* Serialise the printks */ 418 static DEFINE_SPINLOCK(lock); /* Serialise the printks */
420 419
421 spin_lock(&lock); 420 spin_lock(&lock);
422 printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu); 421 printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
422 show_regs(regs);
423 dump_stack(); 423 dump_stack();
424 spin_unlock(&lock); 424 spin_unlock(&lock);
425 cpumask_clear_cpu(cpu, backtrace_mask); 425 cpumask_clear_cpu(cpu, &backtrace_mask);
426
427 rc = 1;
426 } 428 }
427 429
428 /* Could check oops_in_progress here too, but it's safer not to */ 430 /* Could check oops_in_progress here too, but it's safer not to */
@@ -552,14 +554,18 @@ int do_nmi_callback(struct pt_regs *regs, int cpu)
552 return 0; 554 return 0;
553} 555}
554 556
555void __trigger_all_cpu_backtrace(void) 557void arch_trigger_all_cpu_backtrace(void)
556{ 558{
557 int i; 559 int i;
558 560
559 cpumask_copy(backtrace_mask, cpu_online_mask); 561 cpumask_copy(&backtrace_mask, cpu_online_mask);
562
563 printk(KERN_INFO "sending NMI to all CPUs:\n");
564 apic->send_IPI_all(NMI_VECTOR);
565
560 /* Wait for up to 10 seconds for all CPUs to do the backtrace */ 566 /* Wait for up to 10 seconds for all CPUs to do the backtrace */
561 for (i = 0; i < 10 * 1000; i++) { 567 for (i = 0; i < 10 * 1000; i++) {
562 if (cpumask_empty(backtrace_mask)) 568 if (cpumask_empty(&backtrace_mask))
563 break; 569 break;
564 mdelay(1); 570 mdelay(1);
565 } 571 }
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index 898ecc47e129..4a6aeedcd965 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -3,6 +3,7 @@
3 * This code generates raw asm output which is post-processed to extract 3 * This code generates raw asm output which is post-processed to extract
4 * and format the required data. 4 * and format the required data.
5 */ 5 */
6#define COMPILE_OFFSETS
6 7
7#include <linux/crypto.h> 8#include <linux/crypto.h>
8#include <linux/sched.h> 9#include <linux/sched.h>
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 900332b800f8..f9cd0849bd42 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -6,6 +6,7 @@
6 * Copyright (C) 2009 Jaswinder Singh Rajput 6 * Copyright (C) 2009 Jaswinder Singh Rajput
7 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter 7 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
8 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> 8 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
9 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
9 * 10 *
10 * For licencing details see kernel-base/COPYING 11 * For licencing details see kernel-base/COPYING
11 */ 12 */
@@ -20,6 +21,7 @@
20#include <linux/sched.h> 21#include <linux/sched.h>
21#include <linux/uaccess.h> 22#include <linux/uaccess.h>
22#include <linux/highmem.h> 23#include <linux/highmem.h>
24#include <linux/cpu.h>
23 25
24#include <asm/apic.h> 26#include <asm/apic.h>
25#include <asm/stacktrace.h> 27#include <asm/stacktrace.h>
@@ -27,12 +29,52 @@
27 29
28static u64 perf_counter_mask __read_mostly; 30static u64 perf_counter_mask __read_mostly;
29 31
32/* The maximal number of PEBS counters: */
33#define MAX_PEBS_COUNTERS 4
34
35/* The size of a BTS record in bytes: */
36#define BTS_RECORD_SIZE 24
37
38/* The size of a per-cpu BTS buffer in bytes: */
39#define BTS_BUFFER_SIZE (BTS_RECORD_SIZE * 1024)
40
41/* The BTS overflow threshold in bytes from the end of the buffer: */
42#define BTS_OVFL_TH (BTS_RECORD_SIZE * 64)
43
44
45/*
46 * Bits in the debugctlmsr controlling branch tracing.
47 */
48#define X86_DEBUGCTL_TR (1 << 6)
49#define X86_DEBUGCTL_BTS (1 << 7)
50#define X86_DEBUGCTL_BTINT (1 << 8)
51#define X86_DEBUGCTL_BTS_OFF_OS (1 << 9)
52#define X86_DEBUGCTL_BTS_OFF_USR (1 << 10)
53
54/*
55 * A debug store configuration.
56 *
57 * We only support architectures that use 64bit fields.
58 */
59struct debug_store {
60 u64 bts_buffer_base;
61 u64 bts_index;
62 u64 bts_absolute_maximum;
63 u64 bts_interrupt_threshold;
64 u64 pebs_buffer_base;
65 u64 pebs_index;
66 u64 pebs_absolute_maximum;
67 u64 pebs_interrupt_threshold;
68 u64 pebs_counter_reset[MAX_PEBS_COUNTERS];
69};
70
30struct cpu_hw_counters { 71struct cpu_hw_counters {
31 struct perf_counter *counters[X86_PMC_IDX_MAX]; 72 struct perf_counter *counters[X86_PMC_IDX_MAX];
32 unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; 73 unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
33 unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; 74 unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
34 unsigned long interrupts; 75 unsigned long interrupts;
35 int enabled; 76 int enabled;
77 struct debug_store *ds;
36}; 78};
37 79
38/* 80/*
@@ -58,6 +100,8 @@ struct x86_pmu {
58 int apic; 100 int apic;
59 u64 max_period; 101 u64 max_period;
60 u64 intel_ctrl; 102 u64 intel_ctrl;
103 void (*enable_bts)(u64 config);
104 void (*disable_bts)(void);
61}; 105};
62 106
63static struct x86_pmu x86_pmu __read_mostly; 107static struct x86_pmu x86_pmu __read_mostly;
@@ -577,6 +621,9 @@ x86_perf_counter_update(struct perf_counter *counter,
577 u64 prev_raw_count, new_raw_count; 621 u64 prev_raw_count, new_raw_count;
578 s64 delta; 622 s64 delta;
579 623
624 if (idx == X86_PMC_IDX_FIXED_BTS)
625 return 0;
626
580 /* 627 /*
 581 * Careful: an NMI might modify the previous counter value. 628 * Careful: an NMI might modify the previous counter value.
582 * 629 *
@@ -666,10 +713,110 @@ static void release_pmc_hardware(void)
666#endif 713#endif
667} 714}
668 715
716static inline bool bts_available(void)
717{
718 return x86_pmu.enable_bts != NULL;
719}
720
721static inline void init_debug_store_on_cpu(int cpu)
722{
723 struct debug_store *ds = per_cpu(cpu_hw_counters, cpu).ds;
724
725 if (!ds)
726 return;
727
728 wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
729 (u32)((u64)(unsigned long)ds),
730 (u32)((u64)(unsigned long)ds >> 32));
731}
732
733static inline void fini_debug_store_on_cpu(int cpu)
734{
735 if (!per_cpu(cpu_hw_counters, cpu).ds)
736 return;
737
738 wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
739}
740
741static void release_bts_hardware(void)
742{
743 int cpu;
744
745 if (!bts_available())
746 return;
747
748 get_online_cpus();
749
750 for_each_online_cpu(cpu)
751 fini_debug_store_on_cpu(cpu);
752
753 for_each_possible_cpu(cpu) {
754 struct debug_store *ds = per_cpu(cpu_hw_counters, cpu).ds;
755
756 if (!ds)
757 continue;
758
759 per_cpu(cpu_hw_counters, cpu).ds = NULL;
760
761 kfree((void *)(unsigned long)ds->bts_buffer_base);
762 kfree(ds);
763 }
764
765 put_online_cpus();
766}
767
768static int reserve_bts_hardware(void)
769{
770 int cpu, err = 0;
771
772 if (!bts_available())
773 return 0;
774
775 get_online_cpus();
776
777 for_each_possible_cpu(cpu) {
778 struct debug_store *ds;
779 void *buffer;
780
781 err = -ENOMEM;
782 buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL);
783 if (unlikely(!buffer))
784 break;
785
786 ds = kzalloc(sizeof(*ds), GFP_KERNEL);
787 if (unlikely(!ds)) {
788 kfree(buffer);
789 break;
790 }
791
792 ds->bts_buffer_base = (u64)(unsigned long)buffer;
793 ds->bts_index = ds->bts_buffer_base;
794 ds->bts_absolute_maximum =
795 ds->bts_buffer_base + BTS_BUFFER_SIZE;
796 ds->bts_interrupt_threshold =
797 ds->bts_absolute_maximum - BTS_OVFL_TH;
798
799 per_cpu(cpu_hw_counters, cpu).ds = ds;
800 err = 0;
801 }
802
803 if (err)
804 release_bts_hardware();
805 else {
806 for_each_online_cpu(cpu)
807 init_debug_store_on_cpu(cpu);
808 }
809
810 put_online_cpus();
811
812 return err;
813}
814
669static void hw_perf_counter_destroy(struct perf_counter *counter) 815static void hw_perf_counter_destroy(struct perf_counter *counter)
670{ 816{
671 if (atomic_dec_and_mutex_lock(&active_counters, &pmc_reserve_mutex)) { 817 if (atomic_dec_and_mutex_lock(&active_counters, &pmc_reserve_mutex)) {
672 release_pmc_hardware(); 818 release_pmc_hardware();
819 release_bts_hardware();
673 mutex_unlock(&pmc_reserve_mutex); 820 mutex_unlock(&pmc_reserve_mutex);
674 } 821 }
675} 822}
@@ -712,6 +859,42 @@ set_ext_hw_attr(struct hw_perf_counter *hwc, struct perf_counter_attr *attr)
712 return 0; 859 return 0;
713} 860}
714 861
862static void intel_pmu_enable_bts(u64 config)
863{
864 unsigned long debugctlmsr;
865
866 debugctlmsr = get_debugctlmsr();
867
868 debugctlmsr |= X86_DEBUGCTL_TR;
869 debugctlmsr |= X86_DEBUGCTL_BTS;
870 debugctlmsr |= X86_DEBUGCTL_BTINT;
871
872 if (!(config & ARCH_PERFMON_EVENTSEL_OS))
873 debugctlmsr |= X86_DEBUGCTL_BTS_OFF_OS;
874
875 if (!(config & ARCH_PERFMON_EVENTSEL_USR))
876 debugctlmsr |= X86_DEBUGCTL_BTS_OFF_USR;
877
878 update_debugctlmsr(debugctlmsr);
879}
880
881static void intel_pmu_disable_bts(void)
882{
883 struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
884 unsigned long debugctlmsr;
885
886 if (!cpuc->ds)
887 return;
888
889 debugctlmsr = get_debugctlmsr();
890
891 debugctlmsr &=
892 ~(X86_DEBUGCTL_TR | X86_DEBUGCTL_BTS | X86_DEBUGCTL_BTINT |
893 X86_DEBUGCTL_BTS_OFF_OS | X86_DEBUGCTL_BTS_OFF_USR);
894
895 update_debugctlmsr(debugctlmsr);
896}
897
715/* 898/*
716 * Setup the hardware configuration for a given attr_type 899 * Setup the hardware configuration for a given attr_type
717 */ 900 */
@@ -728,9 +911,13 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
728 err = 0; 911 err = 0;
729 if (!atomic_inc_not_zero(&active_counters)) { 912 if (!atomic_inc_not_zero(&active_counters)) {
730 mutex_lock(&pmc_reserve_mutex); 913 mutex_lock(&pmc_reserve_mutex);
731 if (atomic_read(&active_counters) == 0 && !reserve_pmc_hardware()) 914 if (atomic_read(&active_counters) == 0) {
732 err = -EBUSY; 915 if (!reserve_pmc_hardware())
733 else 916 err = -EBUSY;
917 else
918 err = reserve_bts_hardware();
919 }
920 if (!err)
734 atomic_inc(&active_counters); 921 atomic_inc(&active_counters);
735 mutex_unlock(&pmc_reserve_mutex); 922 mutex_unlock(&pmc_reserve_mutex);
736 } 923 }
@@ -793,6 +980,20 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
793 if (config == -1LL) 980 if (config == -1LL)
794 return -EINVAL; 981 return -EINVAL;
795 982
983 /*
984 * Branch tracing:
985 */
986 if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
987 (hwc->sample_period == 1)) {
988 /* BTS is not supported by this architecture. */
989 if (!bts_available())
990 return -EOPNOTSUPP;
991
992 /* BTS is currently only allowed for user-mode. */
993 if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
994 return -EOPNOTSUPP;
995 }
996
796 hwc->config |= config; 997 hwc->config |= config;
797 998
798 return 0; 999 return 0;
@@ -817,7 +1018,18 @@ static void p6_pmu_disable_all(void)
817 1018
818static void intel_pmu_disable_all(void) 1019static void intel_pmu_disable_all(void)
819{ 1020{
1021 struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
1022
1023 if (!cpuc->enabled)
1024 return;
1025
1026 cpuc->enabled = 0;
1027 barrier();
1028
820 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0); 1029 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
1030
1031 if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
1032 intel_pmu_disable_bts();
821} 1033}
822 1034
823static void amd_pmu_disable_all(void) 1035static void amd_pmu_disable_all(void)
@@ -875,7 +1087,25 @@ static void p6_pmu_enable_all(void)
875 1087
876static void intel_pmu_enable_all(void) 1088static void intel_pmu_enable_all(void)
877{ 1089{
1090 struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
1091
1092 if (cpuc->enabled)
1093 return;
1094
1095 cpuc->enabled = 1;
1096 barrier();
1097
878 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl); 1098 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
1099
1100 if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
1101 struct perf_counter *counter =
1102 cpuc->counters[X86_PMC_IDX_FIXED_BTS];
1103
1104 if (WARN_ON_ONCE(!counter))
1105 return;
1106
1107 intel_pmu_enable_bts(counter->hw.config);
1108 }
879} 1109}
880 1110
881static void amd_pmu_enable_all(void) 1111static void amd_pmu_enable_all(void)
@@ -962,6 +1192,11 @@ p6_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
962static inline void 1192static inline void
963intel_pmu_disable_counter(struct hw_perf_counter *hwc, int idx) 1193intel_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
964{ 1194{
1195 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
1196 intel_pmu_disable_bts();
1197 return;
1198 }
1199
965 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { 1200 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
966 intel_pmu_disable_fixed(hwc, idx); 1201 intel_pmu_disable_fixed(hwc, idx);
967 return; 1202 return;
@@ -990,6 +1225,9 @@ x86_perf_counter_set_period(struct perf_counter *counter,
990 s64 period = hwc->sample_period; 1225 s64 period = hwc->sample_period;
991 int err, ret = 0; 1226 int err, ret = 0;
992 1227
1228 if (idx == X86_PMC_IDX_FIXED_BTS)
1229 return 0;
1230
 993 /* 1231 /*
 994 * If we are way outside a reasonable range then just skip forward: 1232 * If we are way outside a reasonable range then just skip forward:
 995 */ 1233 */
@@ -1072,6 +1310,14 @@ static void p6_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
1072 1310
1073static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx) 1311static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
1074{ 1312{
1313 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
1314 if (!__get_cpu_var(cpu_hw_counters).enabled)
1315 return;
1316
1317 intel_pmu_enable_bts(hwc->config);
1318 return;
1319 }
1320
1075 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { 1321 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
1076 intel_pmu_enable_fixed(hwc, idx); 1322 intel_pmu_enable_fixed(hwc, idx);
1077 return; 1323 return;
@@ -1093,11 +1339,16 @@ fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
1093{ 1339{
1094 unsigned int event; 1340 unsigned int event;
1095 1341
1342 event = hwc->config & ARCH_PERFMON_EVENT_MASK;
1343
1344 if (unlikely((event ==
1345 x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) &&
1346 (hwc->sample_period == 1)))
1347 return X86_PMC_IDX_FIXED_BTS;
1348
1096 if (!x86_pmu.num_counters_fixed) 1349 if (!x86_pmu.num_counters_fixed)
1097 return -1; 1350 return -1;
1098 1351
1099 event = hwc->config & ARCH_PERFMON_EVENT_MASK;
1100
1101 if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS))) 1352 if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS)))
1102 return X86_PMC_IDX_FIXED_INSTRUCTIONS; 1353 return X86_PMC_IDX_FIXED_INSTRUCTIONS;
1103 if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES))) 1354 if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES)))
@@ -1118,7 +1369,15 @@ static int x86_pmu_enable(struct perf_counter *counter)
1118 int idx; 1369 int idx;
1119 1370
1120 idx = fixed_mode_idx(counter, hwc); 1371 idx = fixed_mode_idx(counter, hwc);
1121 if (idx >= 0) { 1372 if (idx == X86_PMC_IDX_FIXED_BTS) {
1373 /* BTS is already occupied. */
1374 if (test_and_set_bit(idx, cpuc->used_mask))
1375 return -EAGAIN;
1376
1377 hwc->config_base = 0;
1378 hwc->counter_base = 0;
1379 hwc->idx = idx;
1380 } else if (idx >= 0) {
1122 /* 1381 /*
1123 * Try to get the fixed counter, if that is already taken 1382 * Try to get the fixed counter, if that is already taken
1124 * then try to get a generic counter: 1383 * then try to get a generic counter:
@@ -1229,6 +1488,44 @@ void perf_counter_print_debug(void)
1229 local_irq_restore(flags); 1488 local_irq_restore(flags);
1230} 1489}
1231 1490
1491static void intel_pmu_drain_bts_buffer(struct cpu_hw_counters *cpuc,
1492 struct perf_sample_data *data)
1493{
1494 struct debug_store *ds = cpuc->ds;
1495 struct bts_record {
1496 u64 from;
1497 u64 to;
1498 u64 flags;
1499 };
1500 struct perf_counter *counter = cpuc->counters[X86_PMC_IDX_FIXED_BTS];
1501 unsigned long orig_ip = data->regs->ip;
1502 struct bts_record *at, *top;
1503
1504 if (!counter)
1505 return;
1506
1507 if (!ds)
1508 return;
1509
1510 at = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
1511 top = (struct bts_record *)(unsigned long)ds->bts_index;
1512
1513 ds->bts_index = ds->bts_buffer_base;
1514
1515 for (; at < top; at++) {
1516 data->regs->ip = at->from;
1517 data->addr = at->to;
1518
1519 perf_counter_output(counter, 1, data);
1520 }
1521
1522 data->regs->ip = orig_ip;
1523 data->addr = 0;
1524
1525 /* There's new data available. */
1526 counter->pending_kill = POLL_IN;
1527}
1528
1232static void x86_pmu_disable(struct perf_counter *counter) 1529static void x86_pmu_disable(struct perf_counter *counter)
1233{ 1530{
1234 struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 1531 struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
@@ -1253,6 +1550,15 @@ static void x86_pmu_disable(struct perf_counter *counter)
1253 * that we are disabling: 1550 * that we are disabling:
1254 */ 1551 */
1255 x86_perf_counter_update(counter, hwc, idx); 1552 x86_perf_counter_update(counter, hwc, idx);
1553
1554 /* Drain the remaining BTS records. */
1555 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
1556 struct perf_sample_data data;
1557 struct pt_regs regs;
1558
1559 data.regs = &regs;
1560 intel_pmu_drain_bts_buffer(cpuc, &data);
1561 }
1256 cpuc->counters[idx] = NULL; 1562 cpuc->counters[idx] = NULL;
1257 clear_bit(idx, cpuc->used_mask); 1563 clear_bit(idx, cpuc->used_mask);
1258 1564
@@ -1280,6 +1586,7 @@ static int intel_pmu_save_and_restart(struct perf_counter *counter)
1280 1586
1281static void intel_pmu_reset(void) 1587static void intel_pmu_reset(void)
1282{ 1588{
1589 struct debug_store *ds = __get_cpu_var(cpu_hw_counters).ds;
1283 unsigned long flags; 1590 unsigned long flags;
1284 int idx; 1591 int idx;
1285 1592
@@ -1297,6 +1604,8 @@ static void intel_pmu_reset(void)
1297 for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) { 1604 for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
1298 checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull); 1605 checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
1299 } 1606 }
1607 if (ds)
1608 ds->bts_index = ds->bts_buffer_base;
1300 1609
1301 local_irq_restore(flags); 1610 local_irq_restore(flags);
1302} 1611}
@@ -1362,6 +1671,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
1362 cpuc = &__get_cpu_var(cpu_hw_counters); 1671 cpuc = &__get_cpu_var(cpu_hw_counters);
1363 1672
1364 perf_disable(); 1673 perf_disable();
1674 intel_pmu_drain_bts_buffer(cpuc, &data);
1365 status = intel_pmu_get_status(); 1675 status = intel_pmu_get_status();
1366 if (!status) { 1676 if (!status) {
1367 perf_enable(); 1677 perf_enable();
@@ -1571,6 +1881,8 @@ static struct x86_pmu intel_pmu = {
1571 * the generic counter period: 1881 * the generic counter period:
1572 */ 1882 */
1573 .max_period = (1ULL << 31) - 1, 1883 .max_period = (1ULL << 31) - 1,
1884 .enable_bts = intel_pmu_enable_bts,
1885 .disable_bts = intel_pmu_disable_bts,
1574}; 1886};
1575 1887
1576static struct x86_pmu amd_pmu = { 1888static struct x86_pmu amd_pmu = {
@@ -1962,3 +2274,8 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
1962 2274
1963 return entry; 2275 return entry;
1964} 2276}
2277
2278void hw_perf_counter_setup_online(int cpu)
2279{
2280 init_debug_store_on_cpu(cpu);
2281}
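intel_pmu_drain_bts_buffer() above treats the area between bts_buffer_base and bts_index as an array of 24-byte from/to/flags records. A small user-space sketch of the same walk, assuming only the record layout defined in the patch:

#include <stdint.h>
#include <stdio.h>

struct bts_record {			/* same layout as in the patch */
	uint64_t from;
	uint64_t to;
	uint64_t flags;
};

static void drain_bts(const struct bts_record *base, const struct bts_record *top)
{
	const struct bts_record *at;

	/* base corresponds to ds->bts_buffer_base, top to ds->bts_index */
	for (at = base; at < top; at++)
		printf("branch %#llx -> %#llx\n",
		       (unsigned long long)at->from,
		       (unsigned long long)at->to);
}

int main(void)
{
	struct bts_record buf[2] = {
		{ 0x400123, 0x400456, 0 },
		{ 0x400789, 0x4007aa, 0 },
	};

	drain_bts(buf, buf + 2);
	return 0;
}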
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index d94e1ea3b9fe..9dbb527e1652 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -417,10 +417,6 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
417 unsigned long return_hooker = (unsigned long) 417 unsigned long return_hooker = (unsigned long)
418 &return_to_handler; 418 &return_to_handler;
419 419
420 /* Nmi's are currently unsupported */
421 if (unlikely(in_nmi()))
422 return;
423
424 if (unlikely(atomic_read(&current->tracing_graph_pause))) 420 if (unlikely(atomic_read(&current->tracing_graph_pause)))
425 return; 421 return;
426 422
@@ -498,37 +494,56 @@ static struct syscall_metadata *find_syscall_meta(unsigned long *syscall)
498 494
499struct syscall_metadata *syscall_nr_to_meta(int nr) 495struct syscall_metadata *syscall_nr_to_meta(int nr)
500{ 496{
501 if (!syscalls_metadata || nr >= FTRACE_SYSCALL_MAX || nr < 0) 497 if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
502 return NULL; 498 return NULL;
503 499
504 return syscalls_metadata[nr]; 500 return syscalls_metadata[nr];
505} 501}
506 502
507void arch_init_ftrace_syscalls(void) 503int syscall_name_to_nr(char *name)
504{
505 int i;
506
507 if (!syscalls_metadata)
508 return -1;
509
510 for (i = 0; i < NR_syscalls; i++) {
511 if (syscalls_metadata[i]) {
512 if (!strcmp(syscalls_metadata[i]->name, name))
513 return i;
514 }
515 }
516 return -1;
517}
518
519void set_syscall_enter_id(int num, int id)
520{
521 syscalls_metadata[num]->enter_id = id;
522}
523
524void set_syscall_exit_id(int num, int id)
525{
526 syscalls_metadata[num]->exit_id = id;
527}
528
529static int __init arch_init_ftrace_syscalls(void)
508{ 530{
509 int i; 531 int i;
510 struct syscall_metadata *meta; 532 struct syscall_metadata *meta;
511 unsigned long **psys_syscall_table = &sys_call_table; 533 unsigned long **psys_syscall_table = &sys_call_table;
512 static atomic_t refs;
513
514 if (atomic_inc_return(&refs) != 1)
515 goto end;
516 534
517 syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * 535 syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
518 FTRACE_SYSCALL_MAX, GFP_KERNEL); 536 NR_syscalls, GFP_KERNEL);
519 if (!syscalls_metadata) { 537 if (!syscalls_metadata) {
520 WARN_ON(1); 538 WARN_ON(1);
521 return; 539 return -ENOMEM;
522 } 540 }
523 541
524 for (i = 0; i < FTRACE_SYSCALL_MAX; i++) { 542 for (i = 0; i < NR_syscalls; i++) {
525 meta = find_syscall_meta(psys_syscall_table[i]); 543 meta = find_syscall_meta(psys_syscall_table[i]);
526 syscalls_metadata[i] = meta; 544 syscalls_metadata[i] = meta;
527 } 545 }
528 return; 546 return 0;
529
530 /* Paranoid: avoid overflow */
531end:
532 atomic_dec(&refs);
533} 547}
548arch_initcall(arch_init_ftrace_syscalls);
534#endif 549#endif
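The three new helpers let the generic syscall tracer translate an event name into a syscall number and remember the trace event ids it allocates. A hedged sketch of a possible caller — register_one_syscall_event() and its arguments are illustrative, only the three helpers are taken from the patch:

#include <linux/errno.h>

/* illustrative only: how a tracer might wire up one syscall's events */
static int register_one_syscall_event(char *name, int enter_id, int exit_id)
{
	int nr = syscall_name_to_nr(name);

	if (nr < 0)
		return -ENOSYS;		/* no metadata recorded for this entry */

	set_syscall_enter_id(nr, enter_id);
	set_syscall_exit_id(nr, exit_id);

	return nr;
}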
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 1a041bcf506b..d71c8655905b 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -3,6 +3,7 @@
3#include <linux/dmar.h> 3#include <linux/dmar.h>
4#include <linux/bootmem.h> 4#include <linux/bootmem.h>
5#include <linux/pci.h> 5#include <linux/pci.h>
6#include <linux/kmemleak.h>
6 7
7#include <asm/proto.h> 8#include <asm/proto.h>
8#include <asm/dma.h> 9#include <asm/dma.h>
@@ -32,7 +33,14 @@ int no_iommu __read_mostly;
32/* Set this to 1 if there is a HW IOMMU in the system */ 33/* Set this to 1 if there is a HW IOMMU in the system */
33int iommu_detected __read_mostly = 0; 34int iommu_detected __read_mostly = 0;
34 35
35int iommu_pass_through; 36/*
37 * This variable becomes 1 if iommu=pt is passed on the kernel command line.
38 * If this variable is 1, IOMMU implementations do no DMA ranslation for
39 * devices and allow every device to access to whole physical memory. This is
40 * useful if a user want to use an IOMMU only for KVM device assignment to
41 * guests and not for driver dma translation.
42 */
43int iommu_pass_through __read_mostly;
36 44
37dma_addr_t bad_dma_address __read_mostly = 0; 45dma_addr_t bad_dma_address __read_mostly = 0;
38EXPORT_SYMBOL(bad_dma_address); 46EXPORT_SYMBOL(bad_dma_address);
@@ -88,6 +96,11 @@ void __init dma32_reserve_bootmem(void)
88 size = roundup(dma32_bootmem_size, align); 96 size = roundup(dma32_bootmem_size, align);
89 dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align, 97 dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
90 512ULL<<20); 98 512ULL<<20);
99 /*
100 * Kmemleak should not scan this block as it may not be mapped via the
101 * kernel direct mapping.
102 */
103 kmemleak_ignore(dma32_bootmem_ptr);
91 if (dma32_bootmem_ptr) 104 if (dma32_bootmem_ptr)
92 dma32_bootmem_size = size; 105 dma32_bootmem_size = size;
93 else 106 else
@@ -147,7 +160,7 @@ again:
147 return NULL; 160 return NULL;
148 161
149 addr = page_to_phys(page); 162 addr = page_to_phys(page);
150 if (!is_buffer_dma_capable(dma_mask, addr, size)) { 163 if (addr + size > dma_mask) {
151 __free_pages(page, get_order(size)); 164 __free_pages(page, get_order(size));
152 165
153 if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) { 166 if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) {
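This check, and the dma_capable() conversions in the next two files, reduce to the same test: the end of the buffer must still fit under the device's DMA mask. A stand-alone sketch of that test (not the kernel's implementation):

#include <stdbool.h>
#include <stdint.h>

static bool buffer_dma_capable(uint64_t dma_mask, uint64_t addr, uint64_t size)
{
	/* the whole buffer [addr, addr + size) must lie under the mask */
	return addr + size <= dma_mask;
}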
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index d2e56b8f48e7..98a827ee9ed7 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -190,14 +190,13 @@ static void iommu_full(struct device *dev, size_t size, int dir)
190static inline int 190static inline int
191need_iommu(struct device *dev, unsigned long addr, size_t size) 191need_iommu(struct device *dev, unsigned long addr, size_t size)
192{ 192{
193 return force_iommu || 193 return force_iommu || !dma_capable(dev, addr, size);
194 !is_buffer_dma_capable(*dev->dma_mask, addr, size);
195} 194}
196 195
197static inline int 196static inline int
198nonforced_iommu(struct device *dev, unsigned long addr, size_t size) 197nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
199{ 198{
200 return !is_buffer_dma_capable(*dev->dma_mask, addr, size); 199 return !dma_capable(dev, addr, size);
201} 200}
202 201
203/* Map a single continuous physical area into the IOMMU. 202/* Map a single continuous physical area into the IOMMU.
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
index 71d412a09f30..a3933d4330cd 100644
--- a/arch/x86/kernel/pci-nommu.c
+++ b/arch/x86/kernel/pci-nommu.c
@@ -14,7 +14,7 @@
14static int 14static int
15check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size) 15check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
16{ 16{
17 if (hwdev && !is_buffer_dma_capable(*hwdev->dma_mask, bus, size)) { 17 if (hwdev && !dma_capable(hwdev, bus, size)) {
18 if (*hwdev->dma_mask >= DMA_BIT_MASK(32)) 18 if (*hwdev->dma_mask >= DMA_BIT_MASK(32))
19 printk(KERN_ERR 19 printk(KERN_ERR
20 "nommu_%s: overflow %Lx+%zu of device mask %Lx\n", 20 "nommu_%s: overflow %Lx+%zu of device mask %Lx\n",
@@ -79,12 +79,29 @@ static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr,
79 free_pages((unsigned long)vaddr, get_order(size)); 79 free_pages((unsigned long)vaddr, get_order(size));
80} 80}
81 81
82static void nommu_sync_single_for_device(struct device *dev,
83 dma_addr_t addr, size_t size,
84 enum dma_data_direction dir)
85{
86 flush_write_buffers();
87}
88
89
90static void nommu_sync_sg_for_device(struct device *dev,
91 struct scatterlist *sg, int nelems,
92 enum dma_data_direction dir)
93{
94 flush_write_buffers();
95}
96
82struct dma_map_ops nommu_dma_ops = { 97struct dma_map_ops nommu_dma_ops = {
83 .alloc_coherent = dma_generic_alloc_coherent, 98 .alloc_coherent = dma_generic_alloc_coherent,
84 .free_coherent = nommu_free_coherent, 99 .free_coherent = nommu_free_coherent,
85 .map_sg = nommu_map_sg, 100 .map_sg = nommu_map_sg,
86 .map_page = nommu_map_page, 101 .map_page = nommu_map_page,
87 .is_phys = 1, 102 .sync_single_for_device = nommu_sync_single_for_device,
103 .sync_sg_for_device = nommu_sync_sg_for_device,
104 .is_phys = 1,
88}; 105};
89 106
90void __init no_iommu_init(void) 107void __init no_iommu_init(void)
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
index 6af96ee44200..e8a35016115f 100644
--- a/arch/x86/kernel/pci-swiotlb.c
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -13,31 +13,6 @@
13 13
14int swiotlb __read_mostly; 14int swiotlb __read_mostly;
15 15
16void * __init swiotlb_alloc_boot(size_t size, unsigned long nslabs)
17{
18 return alloc_bootmem_low_pages(size);
19}
20
21void *swiotlb_alloc(unsigned order, unsigned long nslabs)
22{
23 return (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order);
24}
25
26dma_addr_t swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr)
27{
28 return paddr;
29}
30
31phys_addr_t swiotlb_bus_to_phys(struct device *hwdev, dma_addr_t baddr)
32{
33 return baddr;
34}
35
36int __weak swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size)
37{
38 return 0;
39}
40
41static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size, 16static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
42 dma_addr_t *dma_handle, gfp_t flags) 17 dma_addr_t *dma_handle, gfp_t flags)
43{ 18{
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 09ecbde91c13..8d7d5c9c1be3 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -35,10 +35,11 @@
35#include <asm/proto.h> 35#include <asm/proto.h>
36#include <asm/ds.h> 36#include <asm/ds.h>
37 37
38#include <trace/syscall.h>
39
40#include "tls.h" 38#include "tls.h"
41 39
40#define CREATE_TRACE_POINTS
41#include <trace/events/syscalls.h>
42
42enum x86_regset { 43enum x86_regset {
43 REGSET_GENERAL, 44 REGSET_GENERAL,
44 REGSET_FP, 45 REGSET_FP,
@@ -1497,8 +1498,8 @@ asmregparm long syscall_trace_enter(struct pt_regs *regs)
1497 tracehook_report_syscall_entry(regs)) 1498 tracehook_report_syscall_entry(regs))
1498 ret = -1L; 1499 ret = -1L;
1499 1500
1500 if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE))) 1501 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
1501 ftrace_syscall_enter(regs); 1502 trace_sys_enter(regs, regs->orig_ax);
1502 1503
1503 if (unlikely(current->audit_context)) { 1504 if (unlikely(current->audit_context)) {
1504 if (IS_IA32) 1505 if (IS_IA32)
@@ -1523,8 +1524,8 @@ asmregparm void syscall_trace_leave(struct pt_regs *regs)
1523 if (unlikely(current->audit_context)) 1524 if (unlikely(current->audit_context))
1524 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax); 1525 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
1525 1526
1526 if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE))) 1527 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
1527 ftrace_syscall_exit(regs); 1528 trace_sys_exit(regs, regs->ax);
1528 1529
1529 if (test_thread_flag(TIF_SYSCALL_TRACE)) 1530 if (test_thread_flag(TIF_SYSCALL_TRACE))
1530 tracehook_report_syscall_exit(regs, 0); 1531 tracehook_report_syscall_exit(regs, 0);
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 4c578751e94e..81e58238c4ce 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -869,6 +869,8 @@ do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
869 if (thread_info_flags & _TIF_NOTIFY_RESUME) { 869 if (thread_info_flags & _TIF_NOTIFY_RESUME) {
870 clear_thread_flag(TIF_NOTIFY_RESUME); 870 clear_thread_flag(TIF_NOTIFY_RESUME);
871 tracehook_notify_resume(regs); 871 tracehook_notify_resume(regs);
872 if (current->replacement_session_keyring)
873 key_replace_session_keyring();
872 } 874 }
873 875
874#ifdef CONFIG_X86_32 876#ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index 6bc211accf08..45e00eb09c3a 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -18,9 +18,9 @@
18#include <asm/ia32.h> 18#include <asm/ia32.h>
19#include <asm/syscalls.h> 19#include <asm/syscalls.h>
20 20
21asmlinkage long sys_mmap(unsigned long addr, unsigned long len, 21SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
22 unsigned long prot, unsigned long flags, 22 unsigned long, prot, unsigned long, flags,
23 unsigned long fd, unsigned long off) 23 unsigned long, fd, unsigned long, off)
24{ 24{
25 long error; 25 long error;
26 struct file *file; 26 struct file *file;
@@ -226,7 +226,7 @@ bottomup:
226} 226}
227 227
228 228
229asmlinkage long sys_uname(struct new_utsname __user *name) 229SYSCALL_DEFINE1(uname, struct new_utsname __user *, name)
230{ 230{
231 int err; 231 int err;
232 down_read(&uts_sem); 232 down_read(&uts_sem);
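Switching sys_mmap() and sys_uname() to the SYSCALL_DEFINEn wrappers gives them the per-syscall metadata the new tracepoints rely on. A minimal sketch of the same wrapper style on a hypothetical syscall; example_nop is made up and not part of the patch:

#include <linux/syscalls.h>
#include <linux/errno.h>

/* hypothetical example, shown only to illustrate the wrapper style */
SYSCALL_DEFINE1(example_nop, unsigned int, flags)
{
	return flags ? -EINVAL : 0;
}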
diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
index 2c55ed098654..528bf954eb74 100644
--- a/arch/x86/mm/kmemcheck/kmemcheck.c
+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
@@ -331,6 +331,20 @@ static void kmemcheck_read_strict(struct pt_regs *regs,
331 kmemcheck_shadow_set(shadow, size); 331 kmemcheck_shadow_set(shadow, size);
332} 332}
333 333
334bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size)
335{
336 enum kmemcheck_shadow status;
337 void *shadow;
338
339 shadow = kmemcheck_shadow_lookup(addr);
340 if (!shadow)
341 return true;
342
343 status = kmemcheck_shadow_test(shadow, size);
344
345 return status == KMEMCHECK_SHADOW_INITIALIZED;
346}
347
334/* Access may cross page boundary */ 348/* Access may cross page boundary */
335static void kmemcheck_read(struct pt_regs *regs, 349static void kmemcheck_read(struct pt_regs *regs,
336 unsigned long addr, unsigned int size) 350 unsigned long addr, unsigned int size)
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 89b9a5cd63da..cb88b1a0bd5f 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -1,11 +1,14 @@
1/** 1/**
2 * @file nmi_int.c 2 * @file nmi_int.c
3 * 3 *
4 * @remark Copyright 2002-2008 OProfile authors 4 * @remark Copyright 2002-2009 OProfile authors
5 * @remark Read the file COPYING 5 * @remark Read the file COPYING
6 * 6 *
7 * @author John Levon <levon@movementarian.org> 7 * @author John Levon <levon@movementarian.org>
8 * @author Robert Richter <robert.richter@amd.com> 8 * @author Robert Richter <robert.richter@amd.com>
9 * @author Barry Kasindorf <barry.kasindorf@amd.com>
10 * @author Jason Yeh <jason.yeh@amd.com>
11 * @author Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
9 */ 12 */
10 13
11#include <linux/init.h> 14#include <linux/init.h>
@@ -24,13 +27,35 @@
24#include "op_counter.h" 27#include "op_counter.h"
25#include "op_x86_model.h" 28#include "op_x86_model.h"
26 29
27static struct op_x86_model_spec const *model; 30static struct op_x86_model_spec *model;
28static DEFINE_PER_CPU(struct op_msrs, cpu_msrs); 31static DEFINE_PER_CPU(struct op_msrs, cpu_msrs);
29static DEFINE_PER_CPU(unsigned long, saved_lvtpc); 32static DEFINE_PER_CPU(unsigned long, saved_lvtpc);
30 33
31/* 0 == registered but off, 1 == registered and on */ 34/* 0 == registered but off, 1 == registered and on */
32static int nmi_enabled = 0; 35static int nmi_enabled = 0;
33 36
37struct op_counter_config counter_config[OP_MAX_COUNTER];
38
39/* common functions */
40
41u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
42 struct op_counter_config *counter_config)
43{
44 u64 val = 0;
45 u16 event = (u16)counter_config->event;
46
47 val |= ARCH_PERFMON_EVENTSEL_INT;
48 val |= counter_config->user ? ARCH_PERFMON_EVENTSEL_USR : 0;
49 val |= counter_config->kernel ? ARCH_PERFMON_EVENTSEL_OS : 0;
50 val |= (counter_config->unit_mask & 0xFF) << 8;
51 event &= model->event_mask ? model->event_mask : 0xFF;
52 val |= event & 0xFF;
53 val |= (event & 0x0F00) << 24;
54
55 return val;
56}
57
58
34static int profile_exceptions_notify(struct notifier_block *self, 59static int profile_exceptions_notify(struct notifier_block *self,
35 unsigned long val, void *data) 60 unsigned long val, void *data)
36{ 61{
@@ -52,36 +77,214 @@ static int profile_exceptions_notify(struct notifier_block *self,
52 77
53static void nmi_cpu_save_registers(struct op_msrs *msrs) 78static void nmi_cpu_save_registers(struct op_msrs *msrs)
54{ 79{
55 unsigned int const nr_ctrs = model->num_counters;
56 unsigned int const nr_ctrls = model->num_controls;
57 struct op_msr *counters = msrs->counters; 80 struct op_msr *counters = msrs->counters;
58 struct op_msr *controls = msrs->controls; 81 struct op_msr *controls = msrs->controls;
59 unsigned int i; 82 unsigned int i;
60 83
61 for (i = 0; i < nr_ctrs; ++i) { 84 for (i = 0; i < model->num_counters; ++i) {
62 if (counters[i].addr) { 85 if (counters[i].addr)
63 rdmsr(counters[i].addr, 86 rdmsrl(counters[i].addr, counters[i].saved);
64 counters[i].saved.low, 87 }
65 counters[i].saved.high); 88
66 } 89 for (i = 0; i < model->num_controls; ++i) {
90 if (controls[i].addr)
91 rdmsrl(controls[i].addr, controls[i].saved);
92 }
93}
94
95static void nmi_cpu_start(void *dummy)
96{
97 struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
98 model->start(msrs);
99}
100
101static int nmi_start(void)
102{
103 on_each_cpu(nmi_cpu_start, NULL, 1);
104 return 0;
105}
106
107static void nmi_cpu_stop(void *dummy)
108{
109 struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
110 model->stop(msrs);
111}
112
113static void nmi_stop(void)
114{
115 on_each_cpu(nmi_cpu_stop, NULL, 1);
116}
117
118#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
119
120static DEFINE_PER_CPU(int, switch_index);
121
122static inline int has_mux(void)
123{
124 return !!model->switch_ctrl;
125}
126
127inline int op_x86_phys_to_virt(int phys)
128{
129 return __get_cpu_var(switch_index) + phys;
130}
131
132inline int op_x86_virt_to_phys(int virt)
133{
134 return virt % model->num_counters;
135}
136
137static void nmi_shutdown_mux(void)
138{
139 int i;
140
141 if (!has_mux())
142 return;
143
144 for_each_possible_cpu(i) {
145 kfree(per_cpu(cpu_msrs, i).multiplex);
146 per_cpu(cpu_msrs, i).multiplex = NULL;
147 per_cpu(switch_index, i) = 0;
67 } 148 }
149}
150
151static int nmi_setup_mux(void)
152{
153 size_t multiplex_size =
154 sizeof(struct op_msr) * model->num_virt_counters;
155 int i;
156
157 if (!has_mux())
158 return 1;
159
160 for_each_possible_cpu(i) {
161 per_cpu(cpu_msrs, i).multiplex =
162 kmalloc(multiplex_size, GFP_KERNEL);
163 if (!per_cpu(cpu_msrs, i).multiplex)
164 return 0;
165 }
166
167 return 1;
168}
169
170static void nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs)
171{
172 int i;
173 struct op_msr *multiplex = msrs->multiplex;
174
175 if (!has_mux())
176 return;
68 177
69 for (i = 0; i < nr_ctrls; ++i) { 178 for (i = 0; i < model->num_virt_counters; ++i) {
70 if (controls[i].addr) { 179 if (counter_config[i].enabled) {
71 rdmsr(controls[i].addr, 180 multiplex[i].saved = -(u64)counter_config[i].count;
72 controls[i].saved.low, 181 } else {
73 controls[i].saved.high); 182 multiplex[i].addr = 0;
183 multiplex[i].saved = 0;
74 } 184 }
75 } 185 }
186
187 per_cpu(switch_index, cpu) = 0;
188}
189
190static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs)
191{
192 struct op_msr *multiplex = msrs->multiplex;
193 int i;
194
195 for (i = 0; i < model->num_counters; ++i) {
196 int virt = op_x86_phys_to_virt(i);
197 if (multiplex[virt].addr)
198 rdmsrl(multiplex[virt].addr, multiplex[virt].saved);
199 }
200}
201
202static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs)
203{
204 struct op_msr *multiplex = msrs->multiplex;
205 int i;
206
207 for (i = 0; i < model->num_counters; ++i) {
208 int virt = op_x86_phys_to_virt(i);
209 if (multiplex[virt].addr)
210 wrmsrl(multiplex[virt].addr, multiplex[virt].saved);
211 }
76} 212}
77 213
78static void nmi_save_registers(void *dummy) 214static void nmi_cpu_switch(void *dummy)
79{ 215{
80 int cpu = smp_processor_id(); 216 int cpu = smp_processor_id();
217 int si = per_cpu(switch_index, cpu);
81 struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu); 218 struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
82 nmi_cpu_save_registers(msrs); 219
220 nmi_cpu_stop(NULL);
221 nmi_cpu_save_mpx_registers(msrs);
222
223 /* move to next set */
224 si += model->num_counters;
225 if ((si > model->num_virt_counters) || (counter_config[si].count == 0))
226 per_cpu(switch_index, cpu) = 0;
227 else
228 per_cpu(switch_index, cpu) = si;
229
230 model->switch_ctrl(model, msrs);
231 nmi_cpu_restore_mpx_registers(msrs);
232
233 nmi_cpu_start(NULL);
234}
235
236
237/*
238 * Quick check to see if multiplexing is necessary.
239 * The check should be sufficient since counters are used
 240 * in order.
241 */
242static int nmi_multiplex_on(void)
243{
244 return counter_config[model->num_counters].count ? 0 : -EINVAL;
245}
246
247static int nmi_switch_event(void)
248{
249 if (!has_mux())
250 return -ENOSYS; /* not implemented */
251 if (nmi_multiplex_on() < 0)
252 return -EINVAL; /* not necessary */
253
254 on_each_cpu(nmi_cpu_switch, NULL, 1);
255
256 return 0;
257}
258
259static inline void mux_init(struct oprofile_operations *ops)
260{
261 if (has_mux())
262 ops->switch_events = nmi_switch_event;
263}
264
265static void mux_clone(int cpu)
266{
267 if (!has_mux())
268 return;
269
270 memcpy(per_cpu(cpu_msrs, cpu).multiplex,
271 per_cpu(cpu_msrs, 0).multiplex,
272 sizeof(struct op_msr) * model->num_virt_counters);
83} 273}
84 274
275#else
276
277inline int op_x86_phys_to_virt(int phys) { return phys; }
278inline int op_x86_virt_to_phys(int virt) { return virt; }
279static inline void nmi_shutdown_mux(void) { }
280static inline int nmi_setup_mux(void) { return 1; }
281static inline void
282nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs) { }
283static inline void mux_init(struct oprofile_operations *ops) { }
284static void mux_clone(int cpu) { }
285
286#endif
287
85static void free_msrs(void) 288static void free_msrs(void)
86{ 289{
87 int i; 290 int i;
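The multiplexing code above time-slices more virtual counters than the hardware provides: op_x86_phys_to_virt() adds the per-cpu switch_index, and nmi_cpu_switch() advances that index by num_counters each round. A stand-alone sketch of just that index arithmetic, with made-up counter counts and a simplified wrap condition:

#include <stdio.h>

#define NUM_COUNTERS      4	/* physical counters, made-up value */
#define NUM_VIRT_COUNTERS 8	/* virtual counters exposed to oprofile */

static int switch_index;	/* per-cpu in the real driver */

static int phys_to_virt(int phys) { return switch_index + phys; }
static int virt_to_phys(int virt) { return virt % NUM_COUNTERS; }

static void next_round(void)
{
	int si = switch_index + NUM_COUNTERS;

	/* wrap when the next slice would run past the configured set */
	switch_index = (si >= NUM_VIRT_COUNTERS) ? 0 : si;
}

int main(void)
{
	int round;

	for (round = 0; round < 3; round++) {
		printf("round %d: phys 0 -> virt %d (maps back to phys %d)\n",
		       round, phys_to_virt(0), virt_to_phys(phys_to_virt(0)));
		next_round();
	}
	return 0;
}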
@@ -95,38 +298,32 @@ static void free_msrs(void)
95 298
96static int allocate_msrs(void) 299static int allocate_msrs(void)
97{ 300{
98 int success = 1;
99 size_t controls_size = sizeof(struct op_msr) * model->num_controls; 301 size_t controls_size = sizeof(struct op_msr) * model->num_controls;
100 size_t counters_size = sizeof(struct op_msr) * model->num_counters; 302 size_t counters_size = sizeof(struct op_msr) * model->num_counters;
101 303
102 int i; 304 int i;
103 for_each_possible_cpu(i) { 305 for_each_possible_cpu(i) {
104 per_cpu(cpu_msrs, i).counters = kmalloc(counters_size, 306 per_cpu(cpu_msrs, i).counters = kmalloc(counters_size,
105 GFP_KERNEL); 307 GFP_KERNEL);
106 if (!per_cpu(cpu_msrs, i).counters) { 308 if (!per_cpu(cpu_msrs, i).counters)
107 success = 0; 309 return 0;
108 break;
109 }
110 per_cpu(cpu_msrs, i).controls = kmalloc(controls_size, 310 per_cpu(cpu_msrs, i).controls = kmalloc(controls_size,
111 GFP_KERNEL); 311 GFP_KERNEL);
112 if (!per_cpu(cpu_msrs, i).controls) { 312 if (!per_cpu(cpu_msrs, i).controls)
113 success = 0; 313 return 0;
114 break;
115 }
116 } 314 }
117 315
118 if (!success) 316 return 1;
119 free_msrs();
120
121 return success;
122} 317}
123 318
124static void nmi_cpu_setup(void *dummy) 319static void nmi_cpu_setup(void *dummy)
125{ 320{
126 int cpu = smp_processor_id(); 321 int cpu = smp_processor_id();
127 struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu); 322 struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
323 nmi_cpu_save_registers(msrs);
128 spin_lock(&oprofilefs_lock); 324 spin_lock(&oprofilefs_lock);
129 model->setup_ctrs(msrs); 325 model->setup_ctrs(model, msrs);
326 nmi_cpu_setup_mux(cpu, msrs);
130 spin_unlock(&oprofilefs_lock); 327 spin_unlock(&oprofilefs_lock);
131 per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC); 328 per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC);
132 apic_write(APIC_LVTPC, APIC_DM_NMI); 329 apic_write(APIC_LVTPC, APIC_DM_NMI);
@@ -144,11 +341,15 @@ static int nmi_setup(void)
144 int cpu; 341 int cpu;
145 342
146 if (!allocate_msrs()) 343 if (!allocate_msrs())
147 return -ENOMEM; 344 err = -ENOMEM;
345 else if (!nmi_setup_mux())
346 err = -ENOMEM;
347 else
348 err = register_die_notifier(&profile_exceptions_nb);
148 349
149 err = register_die_notifier(&profile_exceptions_nb);
150 if (err) { 350 if (err) {
151 free_msrs(); 351 free_msrs();
352 nmi_shutdown_mux();
152 return err; 353 return err;
153 } 354 }
154 355
@@ -159,45 +360,38 @@ static int nmi_setup(void)
159 /* Assume saved/restored counters are the same on all CPUs */ 360 /* Assume saved/restored counters are the same on all CPUs */
160 model->fill_in_addresses(&per_cpu(cpu_msrs, 0)); 361 model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
161 for_each_possible_cpu(cpu) { 362 for_each_possible_cpu(cpu) {
162 if (cpu != 0) { 363 if (!cpu)
163 memcpy(per_cpu(cpu_msrs, cpu).counters, 364 continue;
164 per_cpu(cpu_msrs, 0).counters, 365
165 sizeof(struct op_msr) * model->num_counters); 366 memcpy(per_cpu(cpu_msrs, cpu).counters,
166 367 per_cpu(cpu_msrs, 0).counters,
167 memcpy(per_cpu(cpu_msrs, cpu).controls, 368 sizeof(struct op_msr) * model->num_counters);
168 per_cpu(cpu_msrs, 0).controls, 369
169 sizeof(struct op_msr) * model->num_controls); 370 memcpy(per_cpu(cpu_msrs, cpu).controls,
170 } 371 per_cpu(cpu_msrs, 0).controls,
372 sizeof(struct op_msr) * model->num_controls);
171 373
374 mux_clone(cpu);
172 } 375 }
173 on_each_cpu(nmi_save_registers, NULL, 1);
174 on_each_cpu(nmi_cpu_setup, NULL, 1); 376 on_each_cpu(nmi_cpu_setup, NULL, 1);
175 nmi_enabled = 1; 377 nmi_enabled = 1;
176 return 0; 378 return 0;
177} 379}
178 380
179static void nmi_restore_registers(struct op_msrs *msrs) 381static void nmi_cpu_restore_registers(struct op_msrs *msrs)
180{ 382{
181 unsigned int const nr_ctrs = model->num_counters;
182 unsigned int const nr_ctrls = model->num_controls;
183 struct op_msr *counters = msrs->counters; 383 struct op_msr *counters = msrs->counters;
184 struct op_msr *controls = msrs->controls; 384 struct op_msr *controls = msrs->controls;
185 unsigned int i; 385 unsigned int i;
186 386
187 for (i = 0; i < nr_ctrls; ++i) { 387 for (i = 0; i < model->num_controls; ++i) {
188 if (controls[i].addr) { 388 if (controls[i].addr)
189 wrmsr(controls[i].addr, 389 wrmsrl(controls[i].addr, controls[i].saved);
190 controls[i].saved.low,
191 controls[i].saved.high);
192 }
193 } 390 }
194 391
195 for (i = 0; i < nr_ctrs; ++i) { 392 for (i = 0; i < model->num_counters; ++i) {
196 if (counters[i].addr) { 393 if (counters[i].addr)
197 wrmsr(counters[i].addr, 394 wrmsrl(counters[i].addr, counters[i].saved);
198 counters[i].saved.low,
199 counters[i].saved.high);
200 }
201 } 395 }
202} 396}
203 397
@@ -205,7 +399,7 @@ static void nmi_cpu_shutdown(void *dummy)
205{ 399{
206 unsigned int v; 400 unsigned int v;
207 int cpu = smp_processor_id(); 401 int cpu = smp_processor_id();
208 struct op_msrs *msrs = &__get_cpu_var(cpu_msrs); 402 struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
209 403
210 /* restoring APIC_LVTPC can trigger an apic error because the delivery 404 /* restoring APIC_LVTPC can trigger an apic error because the delivery
211 * mode and vector nr combination can be illegal. That's by design: on 405 * mode and vector nr combination can be illegal. That's by design: on
@@ -216,7 +410,7 @@ static void nmi_cpu_shutdown(void *dummy)
216 apic_write(APIC_LVTERR, v | APIC_LVT_MASKED); 410 apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
217 apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu)); 411 apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu));
218 apic_write(APIC_LVTERR, v); 412 apic_write(APIC_LVTERR, v);
219 nmi_restore_registers(msrs); 413 nmi_cpu_restore_registers(msrs);
220} 414}
221 415
222static void nmi_shutdown(void) 416static void nmi_shutdown(void)
@@ -226,42 +420,18 @@ static void nmi_shutdown(void)
226 nmi_enabled = 0; 420 nmi_enabled = 0;
227 on_each_cpu(nmi_cpu_shutdown, NULL, 1); 421 on_each_cpu(nmi_cpu_shutdown, NULL, 1);
228 unregister_die_notifier(&profile_exceptions_nb); 422 unregister_die_notifier(&profile_exceptions_nb);
423 nmi_shutdown_mux();
229 msrs = &get_cpu_var(cpu_msrs); 424 msrs = &get_cpu_var(cpu_msrs);
230 model->shutdown(msrs); 425 model->shutdown(msrs);
231 free_msrs(); 426 free_msrs();
232 put_cpu_var(cpu_msrs); 427 put_cpu_var(cpu_msrs);
233} 428}
234 429
235static void nmi_cpu_start(void *dummy)
236{
237 struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
238 model->start(msrs);
239}
240
241static int nmi_start(void)
242{
243 on_each_cpu(nmi_cpu_start, NULL, 1);
244 return 0;
245}
246
247static void nmi_cpu_stop(void *dummy)
248{
249 struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
250 model->stop(msrs);
251}
252
253static void nmi_stop(void)
254{
255 on_each_cpu(nmi_cpu_stop, NULL, 1);
256}
257
258struct op_counter_config counter_config[OP_MAX_COUNTER];
259
260static int nmi_create_files(struct super_block *sb, struct dentry *root) 430static int nmi_create_files(struct super_block *sb, struct dentry *root)
261{ 431{
262 unsigned int i; 432 unsigned int i;
263 433
264 for (i = 0; i < model->num_counters; ++i) { 434 for (i = 0; i < model->num_virt_counters; ++i) {
265 struct dentry *dir; 435 struct dentry *dir;
266 char buf[4]; 436 char buf[4];
267 437
@@ -270,7 +440,7 @@ static int nmi_create_files(struct super_block *sb, struct dentry *root)
270 * NOTE: assumes 1:1 mapping here (that counters are organized 440 * NOTE: assumes 1:1 mapping here (that counters are organized
271 * sequentially in their struct assignment). 441 * sequentially in their struct assignment).
272 */ 442 */
273 if (unlikely(!avail_to_resrv_perfctr_nmi_bit(i))) 443 if (!avail_to_resrv_perfctr_nmi_bit(op_x86_virt_to_phys(i)))
274 continue; 444 continue;
275 445
276 snprintf(buf, sizeof(buf), "%d", i); 446 snprintf(buf, sizeof(buf), "%d", i);
@@ -402,6 +572,7 @@ module_param_call(cpu_type, force_cpu_type, NULL, NULL, 0);
402static int __init ppro_init(char **cpu_type) 572static int __init ppro_init(char **cpu_type)
403{ 573{
404 __u8 cpu_model = boot_cpu_data.x86_model; 574 __u8 cpu_model = boot_cpu_data.x86_model;
575 struct op_x86_model_spec *spec = &op_ppro_spec; /* default */
405 576
406 if (force_arch_perfmon && cpu_has_arch_perfmon) 577 if (force_arch_perfmon && cpu_has_arch_perfmon)
407 return 0; 578 return 0;
@@ -428,7 +599,7 @@ static int __init ppro_init(char **cpu_type)
428 *cpu_type = "i386/core_2"; 599 *cpu_type = "i386/core_2";
429 break; 600 break;
430 case 26: 601 case 26:
431 arch_perfmon_setup_counters(); 602 spec = &op_arch_perfmon_spec;
432 *cpu_type = "i386/core_i7"; 603 *cpu_type = "i386/core_i7";
433 break; 604 break;
434 case 28: 605 case 28:
@@ -439,17 +610,7 @@ static int __init ppro_init(char **cpu_type)
439 return 0; 610 return 0;
440 } 611 }
441 612
442 model = &op_ppro_spec; 613 model = spec;
443 return 1;
444}
445
446static int __init arch_perfmon_init(char **cpu_type)
447{
448 if (!cpu_has_arch_perfmon)
449 return 0;
450 *cpu_type = "i386/arch_perfmon";
451 model = &op_arch_perfmon_spec;
452 arch_perfmon_setup_counters();
453 return 1; 614 return 1;
454} 615}
455 616
@@ -471,27 +632,26 @@ int __init op_nmi_init(struct oprofile_operations *ops)
471 /* Needs to be at least an Athlon (or hammer in 32bit mode) */ 632 /* Needs to be at least an Athlon (or hammer in 32bit mode) */
472 633
473 switch (family) { 634 switch (family) {
474 default:
475 return -ENODEV;
476 case 6: 635 case 6:
477 model = &op_amd_spec;
478 cpu_type = "i386/athlon"; 636 cpu_type = "i386/athlon";
479 break; 637 break;
480 case 0xf: 638 case 0xf:
481 model = &op_amd_spec; 639 /*
482 /* Actually it could be i386/hammer too, but give 640 * Actually it could be i386/hammer too, but
483 user space a consistent name. */ 641 * give user space a consistent name.
642 */
484 cpu_type = "x86-64/hammer"; 643 cpu_type = "x86-64/hammer";
485 break; 644 break;
486 case 0x10: 645 case 0x10:
487 model = &op_amd_spec;
488 cpu_type = "x86-64/family10"; 646 cpu_type = "x86-64/family10";
489 break; 647 break;
490 case 0x11: 648 case 0x11:
491 model = &op_amd_spec;
492 cpu_type = "x86-64/family11h"; 649 cpu_type = "x86-64/family11h";
493 break; 650 break;
651 default:
652 return -ENODEV;
494 } 653 }
654 model = &op_amd_spec;
495 break; 655 break;
496 656
497 case X86_VENDOR_INTEL: 657 case X86_VENDOR_INTEL:
@@ -510,8 +670,15 @@ int __init op_nmi_init(struct oprofile_operations *ops)
510 break; 670 break;
511 } 671 }
512 672
513 if (!cpu_type && !arch_perfmon_init(&cpu_type)) 673 if (cpu_type)
674 break;
675
676 if (!cpu_has_arch_perfmon)
514 return -ENODEV; 677 return -ENODEV;
678
679 /* use arch perfmon as fallback */
680 cpu_type = "i386/arch_perfmon";
681 model = &op_arch_perfmon_spec;
515 break; 682 break;
516 683
517 default: 684 default:
@@ -522,18 +689,23 @@ int __init op_nmi_init(struct oprofile_operations *ops)
522 register_cpu_notifier(&oprofile_cpu_nb); 689 register_cpu_notifier(&oprofile_cpu_nb);
523#endif 690#endif
524 /* default values, can be overwritten by model */ 691 /* default values, can be overwritten by model */
525 ops->create_files = nmi_create_files; 692 ops->create_files = nmi_create_files;
526 ops->setup = nmi_setup; 693 ops->setup = nmi_setup;
527 ops->shutdown = nmi_shutdown; 694 ops->shutdown = nmi_shutdown;
528 ops->start = nmi_start; 695 ops->start = nmi_start;
529 ops->stop = nmi_stop; 696 ops->stop = nmi_stop;
530 ops->cpu_type = cpu_type; 697 ops->cpu_type = cpu_type;
531 698
532 if (model->init) 699 if (model->init)
533 ret = model->init(ops); 700 ret = model->init(ops);
534 if (ret) 701 if (ret)
535 return ret; 702 return ret;
536 703
704 if (!model->num_virt_counters)
705 model->num_virt_counters = model->num_counters;
706
707 mux_init(ops);
708
537 init_sysfs(); 709 init_sysfs();
538 using_nmi = 1; 710 using_nmi = 1;
539 printk(KERN_INFO "oprofile: using NMI interrupt.\n"); 711 printk(KERN_INFO "oprofile: using NMI interrupt.\n");
diff --git a/arch/x86/oprofile/op_counter.h b/arch/x86/oprofile/op_counter.h
index 91b6a116165e..e28398df0df2 100644
--- a/arch/x86/oprofile/op_counter.h
+++ b/arch/x86/oprofile/op_counter.h
@@ -10,7 +10,7 @@
10#ifndef OP_COUNTER_H 10#ifndef OP_COUNTER_H
11#define OP_COUNTER_H 11#define OP_COUNTER_H
12 12
13#define OP_MAX_COUNTER 8 13#define OP_MAX_COUNTER 32
14 14
15/* Per-perfctr configuration as set via 15/* Per-perfctr configuration as set via
16 * oprofilefs. 16 * oprofilefs.
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
index 8fdf06e4edf9..39686c29f03a 100644
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -9,12 +9,15 @@
9 * @author Philippe Elie 9 * @author Philippe Elie
10 * @author Graydon Hoare 10 * @author Graydon Hoare
11 * @author Robert Richter <robert.richter@amd.com> 11 * @author Robert Richter <robert.richter@amd.com>
12 * @author Barry Kasindorf 12 * @author Barry Kasindorf <barry.kasindorf@amd.com>
13 * @author Jason Yeh <jason.yeh@amd.com>
14 * @author Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
13 */ 15 */
14 16
15#include <linux/oprofile.h> 17#include <linux/oprofile.h>
16#include <linux/device.h> 18#include <linux/device.h>
17#include <linux/pci.h> 19#include <linux/pci.h>
20#include <linux/percpu.h>
18 21
19#include <asm/ptrace.h> 22#include <asm/ptrace.h>
20#include <asm/msr.h> 23#include <asm/msr.h>
@@ -25,43 +28,36 @@
25 28
26#define NUM_COUNTERS 4 29#define NUM_COUNTERS 4
27#define NUM_CONTROLS 4 30#define NUM_CONTROLS 4
31#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
32#define NUM_VIRT_COUNTERS 32
33#define NUM_VIRT_CONTROLS 32
34#else
35#define NUM_VIRT_COUNTERS NUM_COUNTERS
36#define NUM_VIRT_CONTROLS NUM_CONTROLS
37#endif
38
39#define OP_EVENT_MASK 0x0FFF
40#define OP_CTR_OVERFLOW (1ULL<<31)
28 41
29#define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0) 42#define MSR_AMD_EVENTSEL_RESERVED ((0xFFFFFCF0ULL<<32)|(1ULL<<21))
30#define CTR_READ(l, h, msrs, c) do {rdmsr(msrs->counters[(c)].addr, (l), (h)); } while (0) 43
31#define CTR_WRITE(l, msrs, c) do {wrmsr(msrs->counters[(c)].addr, -(unsigned int)(l), -1); } while (0) 44static unsigned long reset_value[NUM_VIRT_COUNTERS];
32#define CTR_OVERFLOWED(n) (!((n) & (1U<<31)))
33
34#define CTRL_IS_RESERVED(msrs, c) (msrs->controls[(c)].addr ? 1 : 0)
35#define CTRL_READ(l, h, msrs, c) do {rdmsr(msrs->controls[(c)].addr, (l), (h)); } while (0)
36#define CTRL_WRITE(l, h, msrs, c) do {wrmsr(msrs->controls[(c)].addr, (l), (h)); } while (0)
37#define CTRL_SET_ACTIVE(n) (n |= (1<<22))
38#define CTRL_SET_INACTIVE(n) (n &= ~(1<<22))
39#define CTRL_CLEAR_LO(x) (x &= (1<<21))
40#define CTRL_CLEAR_HI(x) (x &= 0xfffffcf0)
41#define CTRL_SET_ENABLE(val) (val |= 1<<20)
42#define CTRL_SET_USR(val, u) (val |= ((u & 1) << 16))
43#define CTRL_SET_KERN(val, k) (val |= ((k & 1) << 17))
44#define CTRL_SET_UM(val, m) (val |= (m << 8))
45#define CTRL_SET_EVENT_LOW(val, e) (val |= (e & 0xff))
46#define CTRL_SET_EVENT_HIGH(val, e) (val |= ((e >> 8) & 0xf))
47#define CTRL_SET_HOST_ONLY(val, h) (val |= ((h & 1) << 9))
48#define CTRL_SET_GUEST_ONLY(val, h) (val |= ((h & 1) << 8))
49
50static unsigned long reset_value[NUM_COUNTERS];
51 45
52#ifdef CONFIG_OPROFILE_IBS 46#ifdef CONFIG_OPROFILE_IBS
53 47
54/* IbsFetchCtl bits/masks */ 48/* IbsFetchCtl bits/masks */
55#define IBS_FETCH_HIGH_VALID_BIT (1UL << 17) /* bit 49 */ 49#define IBS_FETCH_RAND_EN (1ULL<<57)
56#define IBS_FETCH_HIGH_ENABLE (1UL << 16) /* bit 48 */ 50#define IBS_FETCH_VAL (1ULL<<49)
57#define IBS_FETCH_LOW_MAX_CNT_MASK 0x0000FFFFUL /* MaxCnt mask */ 51#define IBS_FETCH_ENABLE (1ULL<<48)
52#define IBS_FETCH_CNT_MASK 0xFFFF0000ULL
58 53
59/*IbsOpCtl bits */ 54/*IbsOpCtl bits */
60#define IBS_OP_LOW_VALID_BIT (1ULL<<18) /* bit 18 */ 55#define IBS_OP_CNT_CTL (1ULL<<19)
61#define IBS_OP_LOW_ENABLE (1ULL<<17) /* bit 17 */ 56#define IBS_OP_VAL (1ULL<<18)
57#define IBS_OP_ENABLE (1ULL<<17)
62 58
63#define IBS_FETCH_SIZE 6 59#define IBS_FETCH_SIZE 6
64#define IBS_OP_SIZE 12 60#define IBS_OP_SIZE 12
65 61
66static int has_ibs; /* AMD Family10h and later */ 62static int has_ibs; /* AMD Family10h and later */
67 63
@@ -78,6 +74,45 @@ static struct op_ibs_config ibs_config;
78 74
79#endif 75#endif
80 76
77#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
78
79static void op_mux_fill_in_addresses(struct op_msrs * const msrs)
80{
81 int i;
82
83 for (i = 0; i < NUM_VIRT_COUNTERS; i++) {
84 int hw_counter = op_x86_virt_to_phys(i);
85 if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
86 msrs->multiplex[i].addr = MSR_K7_PERFCTR0 + hw_counter;
87 else
88 msrs->multiplex[i].addr = 0;
89 }
90}
91
92static void op_mux_switch_ctrl(struct op_x86_model_spec const *model,
93 struct op_msrs const * const msrs)
94{
95 u64 val;
96 int i;
97
98 /* enable active counters */
99 for (i = 0; i < NUM_COUNTERS; ++i) {
100 int virt = op_x86_phys_to_virt(i);
101 if (!counter_config[virt].enabled)
102 continue;
103 rdmsrl(msrs->controls[i].addr, val);
104 val &= model->reserved;
105 val |= op_x86_get_ctrl(model, &counter_config[virt]);
106 wrmsrl(msrs->controls[i].addr, val);
107 }
108}
109
110#else
111
112static inline void op_mux_fill_in_addresses(struct op_msrs * const msrs) { }
113
114#endif
115
81/* functions for op_amd_spec */ 116/* functions for op_amd_spec */
82 117
83static void op_amd_fill_in_addresses(struct op_msrs * const msrs) 118static void op_amd_fill_in_addresses(struct op_msrs * const msrs)
@@ -97,150 +132,174 @@ static void op_amd_fill_in_addresses(struct op_msrs * const msrs)
97 else 132 else
98 msrs->controls[i].addr = 0; 133 msrs->controls[i].addr = 0;
99 } 134 }
100}
101 135
136 op_mux_fill_in_addresses(msrs);
137}
102 138
103static void op_amd_setup_ctrs(struct op_msrs const * const msrs) 139static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
140 struct op_msrs const * const msrs)
104{ 141{
105 unsigned int low, high; 142 u64 val;
106 int i; 143 int i;
107 144
145 /* setup reset_value */
146 for (i = 0; i < NUM_VIRT_COUNTERS; ++i) {
147 if (counter_config[i].enabled)
148 reset_value[i] = counter_config[i].count;
149 else
150 reset_value[i] = 0;
151 }
152
108 /* clear all counters */ 153 /* clear all counters */
109 for (i = 0 ; i < NUM_CONTROLS; ++i) { 154 for (i = 0; i < NUM_CONTROLS; ++i) {
110 if (unlikely(!CTRL_IS_RESERVED(msrs, i))) 155 if (unlikely(!msrs->controls[i].addr))
111 continue; 156 continue;
112 CTRL_READ(low, high, msrs, i); 157 rdmsrl(msrs->controls[i].addr, val);
113 CTRL_CLEAR_LO(low); 158 val &= model->reserved;
114 CTRL_CLEAR_HI(high); 159 wrmsrl(msrs->controls[i].addr, val);
115 CTRL_WRITE(low, high, msrs, i);
116 } 160 }
117 161
118 /* avoid a false detection of ctr overflows in NMI handler */ 162 /* avoid a false detection of ctr overflows in NMI handler */
119 for (i = 0; i < NUM_COUNTERS; ++i) { 163 for (i = 0; i < NUM_COUNTERS; ++i) {
120 if (unlikely(!CTR_IS_RESERVED(msrs, i))) 164 if (unlikely(!msrs->counters[i].addr))
121 continue; 165 continue;
122 CTR_WRITE(1, msrs, i); 166 wrmsrl(msrs->counters[i].addr, -1LL);
123 } 167 }
124 168
125 /* enable active counters */ 169 /* enable active counters */
126 for (i = 0; i < NUM_COUNTERS; ++i) { 170 for (i = 0; i < NUM_COUNTERS; ++i) {
127 if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs, i))) { 171 int virt = op_x86_phys_to_virt(i);
128 reset_value[i] = counter_config[i].count; 172 if (!counter_config[virt].enabled)
173 continue;
174 if (!msrs->counters[i].addr)
175 continue;
129 176
130 CTR_WRITE(counter_config[i].count, msrs, i); 177 /* setup counter registers */
131 178 wrmsrl(msrs->counters[i].addr, -(u64)reset_value[virt]);
132 CTRL_READ(low, high, msrs, i); 179
133 CTRL_CLEAR_LO(low); 180 /* setup control registers */
134 CTRL_CLEAR_HI(high); 181 rdmsrl(msrs->controls[i].addr, val);
135 CTRL_SET_ENABLE(low); 182 val &= model->reserved;
136 CTRL_SET_USR(low, counter_config[i].user); 183 val |= op_x86_get_ctrl(model, &counter_config[virt]);
137 CTRL_SET_KERN(low, counter_config[i].kernel); 184 wrmsrl(msrs->controls[i].addr, val);
138 CTRL_SET_UM(low, counter_config[i].unit_mask);
139 CTRL_SET_EVENT_LOW(low, counter_config[i].event);
140 CTRL_SET_EVENT_HIGH(high, counter_config[i].event);
141 CTRL_SET_HOST_ONLY(high, 0);
142 CTRL_SET_GUEST_ONLY(high, 0);
143
144 CTRL_WRITE(low, high, msrs, i);
145 } else {
146 reset_value[i] = 0;
147 }
148 } 185 }
149} 186}
150 187
151#ifdef CONFIG_OPROFILE_IBS 188#ifdef CONFIG_OPROFILE_IBS
152 189
153static inline int 190static inline void
154op_amd_handle_ibs(struct pt_regs * const regs, 191op_amd_handle_ibs(struct pt_regs * const regs,
155 struct op_msrs const * const msrs) 192 struct op_msrs const * const msrs)
156{ 193{
157 u32 low, high; 194 u64 val, ctl;
158 u64 msr;
159 struct op_entry entry; 195 struct op_entry entry;
160 196
161 if (!has_ibs) 197 if (!has_ibs)
162 return 1; 198 return;
163 199
164 if (ibs_config.fetch_enabled) { 200 if (ibs_config.fetch_enabled) {
165 rdmsr(MSR_AMD64_IBSFETCHCTL, low, high); 201 rdmsrl(MSR_AMD64_IBSFETCHCTL, ctl);
166 if (high & IBS_FETCH_HIGH_VALID_BIT) { 202 if (ctl & IBS_FETCH_VAL) {
167 rdmsrl(MSR_AMD64_IBSFETCHLINAD, msr); 203 rdmsrl(MSR_AMD64_IBSFETCHLINAD, val);
168 oprofile_write_reserve(&entry, regs, msr, 204 oprofile_write_reserve(&entry, regs, val,
169 IBS_FETCH_CODE, IBS_FETCH_SIZE); 205 IBS_FETCH_CODE, IBS_FETCH_SIZE);
170 oprofile_add_data(&entry, (u32)msr); 206 oprofile_add_data64(&entry, val);
171 oprofile_add_data(&entry, (u32)(msr >> 32)); 207 oprofile_add_data64(&entry, ctl);
172 oprofile_add_data(&entry, low); 208 rdmsrl(MSR_AMD64_IBSFETCHPHYSAD, val);
173 oprofile_add_data(&entry, high); 209 oprofile_add_data64(&entry, val);
174 rdmsrl(MSR_AMD64_IBSFETCHPHYSAD, msr);
175 oprofile_add_data(&entry, (u32)msr);
176 oprofile_add_data(&entry, (u32)(msr >> 32));
177 oprofile_write_commit(&entry); 210 oprofile_write_commit(&entry);
178 211
179 /* reenable the IRQ */ 212 /* reenable the IRQ */
180 high &= ~IBS_FETCH_HIGH_VALID_BIT; 213 ctl &= ~(IBS_FETCH_VAL | IBS_FETCH_CNT_MASK);
181 high |= IBS_FETCH_HIGH_ENABLE; 214 ctl |= IBS_FETCH_ENABLE;
182 low &= IBS_FETCH_LOW_MAX_CNT_MASK; 215 wrmsrl(MSR_AMD64_IBSFETCHCTL, ctl);
183 wrmsr(MSR_AMD64_IBSFETCHCTL, low, high);
184 } 216 }
185 } 217 }
186 218
187 if (ibs_config.op_enabled) { 219 if (ibs_config.op_enabled) {
188 rdmsr(MSR_AMD64_IBSOPCTL, low, high); 220 rdmsrl(MSR_AMD64_IBSOPCTL, ctl);
189 if (low & IBS_OP_LOW_VALID_BIT) { 221 if (ctl & IBS_OP_VAL) {
190 rdmsrl(MSR_AMD64_IBSOPRIP, msr); 222 rdmsrl(MSR_AMD64_IBSOPRIP, val);
191 oprofile_write_reserve(&entry, regs, msr, 223 oprofile_write_reserve(&entry, regs, val,
192 IBS_OP_CODE, IBS_OP_SIZE); 224 IBS_OP_CODE, IBS_OP_SIZE);
193 oprofile_add_data(&entry, (u32)msr); 225 oprofile_add_data64(&entry, val);
194 oprofile_add_data(&entry, (u32)(msr >> 32)); 226 rdmsrl(MSR_AMD64_IBSOPDATA, val);
195 rdmsrl(MSR_AMD64_IBSOPDATA, msr); 227 oprofile_add_data64(&entry, val);
196 oprofile_add_data(&entry, (u32)msr); 228 rdmsrl(MSR_AMD64_IBSOPDATA2, val);
197 oprofile_add_data(&entry, (u32)(msr >> 32)); 229 oprofile_add_data64(&entry, val);
198 rdmsrl(MSR_AMD64_IBSOPDATA2, msr); 230 rdmsrl(MSR_AMD64_IBSOPDATA3, val);
199 oprofile_add_data(&entry, (u32)msr); 231 oprofile_add_data64(&entry, val);
200 oprofile_add_data(&entry, (u32)(msr >> 32)); 232 rdmsrl(MSR_AMD64_IBSDCLINAD, val);
201 rdmsrl(MSR_AMD64_IBSOPDATA3, msr); 233 oprofile_add_data64(&entry, val);
202 oprofile_add_data(&entry, (u32)msr); 234 rdmsrl(MSR_AMD64_IBSDCPHYSAD, val);
203 oprofile_add_data(&entry, (u32)(msr >> 32)); 235 oprofile_add_data64(&entry, val);
204 rdmsrl(MSR_AMD64_IBSDCLINAD, msr);
205 oprofile_add_data(&entry, (u32)msr);
206 oprofile_add_data(&entry, (u32)(msr >> 32));
207 rdmsrl(MSR_AMD64_IBSDCPHYSAD, msr);
208 oprofile_add_data(&entry, (u32)msr);
209 oprofile_add_data(&entry, (u32)(msr >> 32));
210 oprofile_write_commit(&entry); 236 oprofile_write_commit(&entry);
211 237
212 /* reenable the IRQ */ 238 /* reenable the IRQ */
213 high = 0; 239 ctl &= ~IBS_OP_VAL & 0xFFFFFFFF;
214 low &= ~IBS_OP_LOW_VALID_BIT; 240 ctl |= IBS_OP_ENABLE;
215 low |= IBS_OP_LOW_ENABLE; 241 wrmsrl(MSR_AMD64_IBSOPCTL, ctl);
216 wrmsr(MSR_AMD64_IBSOPCTL, low, high);
217 } 242 }
218 } 243 }
244}
219 245
220 return 1; 246static inline void op_amd_start_ibs(void)
247{
248 u64 val;
249 if (has_ibs && ibs_config.fetch_enabled) {
250 val = (ibs_config.max_cnt_fetch >> 4) & 0xFFFF;
251 val |= ibs_config.rand_en ? IBS_FETCH_RAND_EN : 0;
252 val |= IBS_FETCH_ENABLE;
253 wrmsrl(MSR_AMD64_IBSFETCHCTL, val);
254 }
255
256 if (has_ibs && ibs_config.op_enabled) {
257 val = (ibs_config.max_cnt_op >> 4) & 0xFFFF;
258 val |= ibs_config.dispatched_ops ? IBS_OP_CNT_CTL : 0;
259 val |= IBS_OP_ENABLE;
260 wrmsrl(MSR_AMD64_IBSOPCTL, val);
261 }
262}
263
264static void op_amd_stop_ibs(void)
265{
266 if (has_ibs && ibs_config.fetch_enabled)
267 /* clear max count and enable */
268 wrmsrl(MSR_AMD64_IBSFETCHCTL, 0);
269
270 if (has_ibs && ibs_config.op_enabled)
271 /* clear max count and enable */
272 wrmsrl(MSR_AMD64_IBSOPCTL, 0);
221} 273}
222 274
275#else
276
277static inline void op_amd_handle_ibs(struct pt_regs * const regs,
278 struct op_msrs const * const msrs) { }
279static inline void op_amd_start_ibs(void) { }
280static inline void op_amd_stop_ibs(void) { }
281
223#endif 282#endif
224 283
225static int op_amd_check_ctrs(struct pt_regs * const regs, 284static int op_amd_check_ctrs(struct pt_regs * const regs,
226 struct op_msrs const * const msrs) 285 struct op_msrs const * const msrs)
227{ 286{
228 unsigned int low, high; 287 u64 val;
229 int i; 288 int i;
230 289
231 for (i = 0 ; i < NUM_COUNTERS; ++i) { 290 for (i = 0; i < NUM_COUNTERS; ++i) {
232 if (!reset_value[i]) 291 int virt = op_x86_phys_to_virt(i);
292 if (!reset_value[virt])
233 continue; 293 continue;
234 CTR_READ(low, high, msrs, i); 294 rdmsrl(msrs->counters[i].addr, val);
235 if (CTR_OVERFLOWED(low)) { 295 /* bit is clear if overflowed: */
236 oprofile_add_sample(regs, i); 296 if (val & OP_CTR_OVERFLOW)
237 CTR_WRITE(reset_value[i], msrs, i); 297 continue;
238 } 298 oprofile_add_sample(regs, virt);
299 wrmsrl(msrs->counters[i].addr, -(u64)reset_value[virt]);
239 } 300 }
240 301
241#ifdef CONFIG_OPROFILE_IBS
242 op_amd_handle_ibs(regs, msrs); 302 op_amd_handle_ibs(regs, msrs);
243#endif
244 303
245 /* See op_model_ppro.c */ 304 /* See op_model_ppro.c */
246 return 1; 305 return 1;
@@ -248,79 +307,50 @@ static int op_amd_check_ctrs(struct pt_regs * const regs,
248 307
249static void op_amd_start(struct op_msrs const * const msrs) 308static void op_amd_start(struct op_msrs const * const msrs)
250{ 309{
251 unsigned int low, high; 310 u64 val;
252 int i; 311 int i;
253 for (i = 0 ; i < NUM_COUNTERS ; ++i) {
254 if (reset_value[i]) {
255 CTRL_READ(low, high, msrs, i);
256 CTRL_SET_ACTIVE(low);
257 CTRL_WRITE(low, high, msrs, i);
258 }
259 }
260 312
261#ifdef CONFIG_OPROFILE_IBS 313 for (i = 0; i < NUM_COUNTERS; ++i) {
262 if (has_ibs && ibs_config.fetch_enabled) { 314 if (!reset_value[op_x86_phys_to_virt(i)])
263 low = (ibs_config.max_cnt_fetch >> 4) & 0xFFFF; 315 continue;
264 high = ((ibs_config.rand_en & 0x1) << 25) /* bit 57 */ 316 rdmsrl(msrs->controls[i].addr, val);
265 + IBS_FETCH_HIGH_ENABLE; 317 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
266 wrmsr(MSR_AMD64_IBSFETCHCTL, low, high); 318 wrmsrl(msrs->controls[i].addr, val);
267 } 319 }
268 320
269 if (has_ibs && ibs_config.op_enabled) { 321 op_amd_start_ibs();
270 low = ((ibs_config.max_cnt_op >> 4) & 0xFFFF)
271 + ((ibs_config.dispatched_ops & 0x1) << 19) /* bit 19 */
272 + IBS_OP_LOW_ENABLE;
273 high = 0;
274 wrmsr(MSR_AMD64_IBSOPCTL, low, high);
275 }
276#endif
277} 322}
278 323
279
280static void op_amd_stop(struct op_msrs const * const msrs) 324static void op_amd_stop(struct op_msrs const * const msrs)
281{ 325{
282 unsigned int low, high; 326 u64 val;
283 int i; 327 int i;
284 328
285 /* 329 /*
286 * Subtle: stop on all counters to avoid race with setting our 330 * Subtle: stop on all counters to avoid race with setting our
287 * pm callback 331 * pm callback
288 */ 332 */
289 for (i = 0 ; i < NUM_COUNTERS ; ++i) { 333 for (i = 0; i < NUM_COUNTERS; ++i) {
290 if (!reset_value[i]) 334 if (!reset_value[op_x86_phys_to_virt(i)])
291 continue; 335 continue;
292 CTRL_READ(low, high, msrs, i); 336 rdmsrl(msrs->controls[i].addr, val);
293 CTRL_SET_INACTIVE(low); 337 val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
294 CTRL_WRITE(low, high, msrs, i); 338 wrmsrl(msrs->controls[i].addr, val);
295 }
296
297#ifdef CONFIG_OPROFILE_IBS
298 if (has_ibs && ibs_config.fetch_enabled) {
299 /* clear max count and enable */
300 low = 0;
301 high = 0;
302 wrmsr(MSR_AMD64_IBSFETCHCTL, low, high);
303 } 339 }
304 340
305 if (has_ibs && ibs_config.op_enabled) { 341 op_amd_stop_ibs();
306 /* clear max count and enable */
307 low = 0;
308 high = 0;
309 wrmsr(MSR_AMD64_IBSOPCTL, low, high);
310 }
311#endif
312} 342}
313 343
314static void op_amd_shutdown(struct op_msrs const * const msrs) 344static void op_amd_shutdown(struct op_msrs const * const msrs)
315{ 345{
316 int i; 346 int i;
317 347
318 for (i = 0 ; i < NUM_COUNTERS ; ++i) { 348 for (i = 0; i < NUM_COUNTERS; ++i) {
319 if (CTR_IS_RESERVED(msrs, i)) 349 if (msrs->counters[i].addr)
320 release_perfctr_nmi(MSR_K7_PERFCTR0 + i); 350 release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
321 } 351 }
322 for (i = 0 ; i < NUM_CONTROLS ; ++i) { 352 for (i = 0; i < NUM_CONTROLS; ++i) {
323 if (CTRL_IS_RESERVED(msrs, i)) 353 if (msrs->controls[i].addr)
324 release_evntsel_nmi(MSR_K7_EVNTSEL0 + i); 354 release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
325 } 355 }
326} 356}
@@ -490,15 +520,21 @@ static void op_amd_exit(void) {}
490 520
491#endif /* CONFIG_OPROFILE_IBS */ 521#endif /* CONFIG_OPROFILE_IBS */
492 522
493struct op_x86_model_spec const op_amd_spec = { 523struct op_x86_model_spec op_amd_spec = {
494 .init = op_amd_init,
495 .exit = op_amd_exit,
496 .num_counters = NUM_COUNTERS, 524 .num_counters = NUM_COUNTERS,
497 .num_controls = NUM_CONTROLS, 525 .num_controls = NUM_CONTROLS,
526 .num_virt_counters = NUM_VIRT_COUNTERS,
527 .reserved = MSR_AMD_EVENTSEL_RESERVED,
528 .event_mask = OP_EVENT_MASK,
529 .init = op_amd_init,
530 .exit = op_amd_exit,
498 .fill_in_addresses = &op_amd_fill_in_addresses, 531 .fill_in_addresses = &op_amd_fill_in_addresses,
499 .setup_ctrs = &op_amd_setup_ctrs, 532 .setup_ctrs = &op_amd_setup_ctrs,
500 .check_ctrs = &op_amd_check_ctrs, 533 .check_ctrs = &op_amd_check_ctrs,
501 .start = &op_amd_start, 534 .start = &op_amd_start,
502 .stop = &op_amd_stop, 535 .stop = &op_amd_stop,
503 .shutdown = &op_amd_shutdown 536 .shutdown = &op_amd_shutdown,
537#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
538 .switch_ctrl = &op_mux_switch_ctrl,
539#endif
504}; 540};
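
The counter writes in op_amd_setup_ctrs() and op_amd_check_ctrs() above rely on one arithmetic trick: the MSR is loaded with the two's complement of the sample period, so the hardware reaches zero (and raises the overflow NMI) after exactly that many events, and the handler only has to test whether bit 31 has gone clear. A minimal user-space sketch of that arithmetic, for illustration only (OP_CTR_OVERFLOW is copied from the hunk above, everything else is assumed):

#include <stdint.h>
#include <stdio.h>

#define OP_CTR_OVERFLOW	(1ULL << 31)	/* same bit the NMI handler tests */

int main(void)
{
	uint64_t count = 100000;		/* desired sample period        */
	uint64_t ctr = (uint64_t)-count;	/* value written via wrmsrl()   */

	printf("armed:      bit31=%d\n", (int)((ctr >> 31) & 1));	/* 1 */

	/* the PMU increments the counter once per event */
	for (uint64_t i = 0; i < count; i++)
		ctr++;

	/* after 'count' events the counter wraps to 0; bit 31 is now clear,
	 * which is the "overflowed" condition checked in op_amd_check_ctrs() */
	printf("overflowed: %s\n", (ctr & OP_CTR_OVERFLOW) ? "no" : "yes");
	return 0;
}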
diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c
index 819b131fd752..ac6b354becdf 100644
--- a/arch/x86/oprofile/op_model_p4.c
+++ b/arch/x86/oprofile/op_model_p4.c
@@ -32,6 +32,8 @@
32#define NUM_CCCRS_HT2 9 32#define NUM_CCCRS_HT2 9
33#define NUM_CONTROLS_HT2 (NUM_ESCRS_HT2 + NUM_CCCRS_HT2) 33#define NUM_CONTROLS_HT2 (NUM_ESCRS_HT2 + NUM_CCCRS_HT2)
34 34
35#define OP_CTR_OVERFLOW (1ULL<<31)
36
35static unsigned int num_counters = NUM_COUNTERS_NON_HT; 37static unsigned int num_counters = NUM_COUNTERS_NON_HT;
36static unsigned int num_controls = NUM_CONTROLS_NON_HT; 38static unsigned int num_controls = NUM_CONTROLS_NON_HT;
37 39
@@ -350,8 +352,6 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = {
350#define ESCR_SET_OS_1(escr, os) ((escr) |= (((os) & 1) << 1)) 352#define ESCR_SET_OS_1(escr, os) ((escr) |= (((os) & 1) << 1))
351#define ESCR_SET_EVENT_SELECT(escr, sel) ((escr) |= (((sel) & 0x3f) << 25)) 353#define ESCR_SET_EVENT_SELECT(escr, sel) ((escr) |= (((sel) & 0x3f) << 25))
352#define ESCR_SET_EVENT_MASK(escr, mask) ((escr) |= (((mask) & 0xffff) << 9)) 354#define ESCR_SET_EVENT_MASK(escr, mask) ((escr) |= (((mask) & 0xffff) << 9))
353#define ESCR_READ(escr, high, ev, i) do {rdmsr(ev->bindings[(i)].escr_address, (escr), (high)); } while (0)
354#define ESCR_WRITE(escr, high, ev, i) do {wrmsr(ev->bindings[(i)].escr_address, (escr), (high)); } while (0)
355 355
356#define CCCR_RESERVED_BITS 0x38030FFF 356#define CCCR_RESERVED_BITS 0x38030FFF
357#define CCCR_CLEAR(cccr) ((cccr) &= CCCR_RESERVED_BITS) 357#define CCCR_CLEAR(cccr) ((cccr) &= CCCR_RESERVED_BITS)
@@ -361,17 +361,9 @@ static struct p4_event_binding p4_events[NUM_EVENTS] = {
361#define CCCR_SET_PMI_OVF_1(cccr) ((cccr) |= (1<<27)) 361#define CCCR_SET_PMI_OVF_1(cccr) ((cccr) |= (1<<27))
362#define CCCR_SET_ENABLE(cccr) ((cccr) |= (1<<12)) 362#define CCCR_SET_ENABLE(cccr) ((cccr) |= (1<<12))
363#define CCCR_SET_DISABLE(cccr) ((cccr) &= ~(1<<12)) 363#define CCCR_SET_DISABLE(cccr) ((cccr) &= ~(1<<12))
364#define CCCR_READ(low, high, i) do {rdmsr(p4_counters[(i)].cccr_address, (low), (high)); } while (0)
365#define CCCR_WRITE(low, high, i) do {wrmsr(p4_counters[(i)].cccr_address, (low), (high)); } while (0)
366#define CCCR_OVF_P(cccr) ((cccr) & (1U<<31)) 364#define CCCR_OVF_P(cccr) ((cccr) & (1U<<31))
367#define CCCR_CLEAR_OVF(cccr) ((cccr) &= (~(1U<<31))) 365#define CCCR_CLEAR_OVF(cccr) ((cccr) &= (~(1U<<31)))
368 366
369#define CTRL_IS_RESERVED(msrs, c) (msrs->controls[(c)].addr ? 1 : 0)
370#define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0)
371#define CTR_READ(l, h, i) do {rdmsr(p4_counters[(i)].counter_address, (l), (h)); } while (0)
372#define CTR_WRITE(l, i) do {wrmsr(p4_counters[(i)].counter_address, -(u32)(l), -1); } while (0)
373#define CTR_OVERFLOW_P(ctr) (!((ctr) & 0x80000000))
374
375 367
376/* this assigns a "stagger" to the current CPU, which is used throughout 368/* this assigns a "stagger" to the current CPU, which is used throughout
377 the code in this module as an extra array offset, to select the "even" 369 the code in this module as an extra array offset, to select the "even"
@@ -515,7 +507,7 @@ static void pmc_setup_one_p4_counter(unsigned int ctr)
515 if (ev->bindings[i].virt_counter & counter_bit) { 507 if (ev->bindings[i].virt_counter & counter_bit) {
516 508
517 /* modify ESCR */ 509 /* modify ESCR */
518 ESCR_READ(escr, high, ev, i); 510 rdmsr(ev->bindings[i].escr_address, escr, high);
519 ESCR_CLEAR(escr); 511 ESCR_CLEAR(escr);
520 if (stag == 0) { 512 if (stag == 0) {
521 ESCR_SET_USR_0(escr, counter_config[ctr].user); 513 ESCR_SET_USR_0(escr, counter_config[ctr].user);
@@ -526,10 +518,11 @@ static void pmc_setup_one_p4_counter(unsigned int ctr)
526 } 518 }
527 ESCR_SET_EVENT_SELECT(escr, ev->event_select); 519 ESCR_SET_EVENT_SELECT(escr, ev->event_select);
528 ESCR_SET_EVENT_MASK(escr, counter_config[ctr].unit_mask); 520 ESCR_SET_EVENT_MASK(escr, counter_config[ctr].unit_mask);
529 ESCR_WRITE(escr, high, ev, i); 521 wrmsr(ev->bindings[i].escr_address, escr, high);
530 522
531 /* modify CCCR */ 523 /* modify CCCR */
532 CCCR_READ(cccr, high, VIRT_CTR(stag, ctr)); 524 rdmsr(p4_counters[VIRT_CTR(stag, ctr)].cccr_address,
525 cccr, high);
533 CCCR_CLEAR(cccr); 526 CCCR_CLEAR(cccr);
534 CCCR_SET_REQUIRED_BITS(cccr); 527 CCCR_SET_REQUIRED_BITS(cccr);
535 CCCR_SET_ESCR_SELECT(cccr, ev->escr_select); 528 CCCR_SET_ESCR_SELECT(cccr, ev->escr_select);
@@ -537,7 +530,8 @@ static void pmc_setup_one_p4_counter(unsigned int ctr)
537 CCCR_SET_PMI_OVF_0(cccr); 530 CCCR_SET_PMI_OVF_0(cccr);
538 else 531 else
539 CCCR_SET_PMI_OVF_1(cccr); 532 CCCR_SET_PMI_OVF_1(cccr);
540 CCCR_WRITE(cccr, high, VIRT_CTR(stag, ctr)); 533 wrmsr(p4_counters[VIRT_CTR(stag, ctr)].cccr_address,
534 cccr, high);
541 return; 535 return;
542 } 536 }
543 } 537 }
@@ -548,7 +542,8 @@ static void pmc_setup_one_p4_counter(unsigned int ctr)
548} 542}
549 543
550 544
551static void p4_setup_ctrs(struct op_msrs const * const msrs) 545static void p4_setup_ctrs(struct op_x86_model_spec const *model,
546 struct op_msrs const * const msrs)
552{ 547{
553 unsigned int i; 548 unsigned int i;
554 unsigned int low, high; 549 unsigned int low, high;
@@ -563,8 +558,8 @@ static void p4_setup_ctrs(struct op_msrs const * const msrs)
563 } 558 }
564 559
565 /* clear the cccrs we will use */ 560 /* clear the cccrs we will use */
566 for (i = 0 ; i < num_counters ; i++) { 561 for (i = 0; i < num_counters; i++) {
567 if (unlikely(!CTRL_IS_RESERVED(msrs, i))) 562 if (unlikely(!msrs->controls[i].addr))
568 continue; 563 continue;
569 rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high); 564 rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high);
570 CCCR_CLEAR(low); 565 CCCR_CLEAR(low);
@@ -574,17 +569,18 @@ static void p4_setup_ctrs(struct op_msrs const * const msrs)
574 569
575 /* clear all escrs (including those outside our concern) */ 570 /* clear all escrs (including those outside our concern) */
576 for (i = num_counters; i < num_controls; i++) { 571 for (i = num_counters; i < num_controls; i++) {
577 if (unlikely(!CTRL_IS_RESERVED(msrs, i))) 572 if (unlikely(!msrs->controls[i].addr))
578 continue; 573 continue;
579 wrmsr(msrs->controls[i].addr, 0, 0); 574 wrmsr(msrs->controls[i].addr, 0, 0);
580 } 575 }
581 576
582 /* setup all counters */ 577 /* setup all counters */
583 for (i = 0 ; i < num_counters ; ++i) { 578 for (i = 0; i < num_counters; ++i) {
584 if ((counter_config[i].enabled) && (CTRL_IS_RESERVED(msrs, i))) { 579 if (counter_config[i].enabled && msrs->controls[i].addr) {
585 reset_value[i] = counter_config[i].count; 580 reset_value[i] = counter_config[i].count;
586 pmc_setup_one_p4_counter(i); 581 pmc_setup_one_p4_counter(i);
587 CTR_WRITE(counter_config[i].count, VIRT_CTR(stag, i)); 582 wrmsrl(p4_counters[VIRT_CTR(stag, i)].counter_address,
583 -(u64)counter_config[i].count);
588 } else { 584 } else {
589 reset_value[i] = 0; 585 reset_value[i] = 0;
590 } 586 }
@@ -624,14 +620,16 @@ static int p4_check_ctrs(struct pt_regs * const regs,
624 620
625 real = VIRT_CTR(stag, i); 621 real = VIRT_CTR(stag, i);
626 622
627 CCCR_READ(low, high, real); 623 rdmsr(p4_counters[real].cccr_address, low, high);
628 CTR_READ(ctr, high, real); 624 rdmsr(p4_counters[real].counter_address, ctr, high);
629 if (CCCR_OVF_P(low) || CTR_OVERFLOW_P(ctr)) { 625 if (CCCR_OVF_P(low) || !(ctr & OP_CTR_OVERFLOW)) {
630 oprofile_add_sample(regs, i); 626 oprofile_add_sample(regs, i);
631 CTR_WRITE(reset_value[i], real); 627 wrmsrl(p4_counters[real].counter_address,
628 -(u64)reset_value[i]);
632 CCCR_CLEAR_OVF(low); 629 CCCR_CLEAR_OVF(low);
633 CCCR_WRITE(low, high, real); 630 wrmsr(p4_counters[real].cccr_address, low, high);
634 CTR_WRITE(reset_value[i], real); 631 wrmsrl(p4_counters[real].counter_address,
632 -(u64)reset_value[i]);
635 } 633 }
636 } 634 }
637 635
@@ -653,9 +651,9 @@ static void p4_start(struct op_msrs const * const msrs)
653 for (i = 0; i < num_counters; ++i) { 651 for (i = 0; i < num_counters; ++i) {
654 if (!reset_value[i]) 652 if (!reset_value[i])
655 continue; 653 continue;
656 CCCR_READ(low, high, VIRT_CTR(stag, i)); 654 rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high);
657 CCCR_SET_ENABLE(low); 655 CCCR_SET_ENABLE(low);
658 CCCR_WRITE(low, high, VIRT_CTR(stag, i)); 656 wrmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high);
659 } 657 }
660} 658}
661 659
@@ -670,9 +668,9 @@ static void p4_stop(struct op_msrs const * const msrs)
670 for (i = 0; i < num_counters; ++i) { 668 for (i = 0; i < num_counters; ++i) {
671 if (!reset_value[i]) 669 if (!reset_value[i])
672 continue; 670 continue;
673 CCCR_READ(low, high, VIRT_CTR(stag, i)); 671 rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high);
674 CCCR_SET_DISABLE(low); 672 CCCR_SET_DISABLE(low);
675 CCCR_WRITE(low, high, VIRT_CTR(stag, i)); 673 wrmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high);
676 } 674 }
677} 675}
678 676
@@ -680,8 +678,8 @@ static void p4_shutdown(struct op_msrs const * const msrs)
680{ 678{
681 int i; 679 int i;
682 680
683 for (i = 0 ; i < num_counters ; ++i) { 681 for (i = 0; i < num_counters; ++i) {
684 if (CTR_IS_RESERVED(msrs, i)) 682 if (msrs->counters[i].addr)
685 release_perfctr_nmi(msrs->counters[i].addr); 683 release_perfctr_nmi(msrs->counters[i].addr);
686 } 684 }
687 /* 685 /*
@@ -689,15 +687,15 @@ static void p4_shutdown(struct op_msrs const * const msrs)
689 * conjunction with the counter registers (hence the starting offset). 687 * conjunction with the counter registers (hence the starting offset).
690 * This saves a few bits. 688 * This saves a few bits.
691 */ 689 */
692 for (i = num_counters ; i < num_controls ; ++i) { 690 for (i = num_counters; i < num_controls; ++i) {
693 if (CTRL_IS_RESERVED(msrs, i)) 691 if (msrs->controls[i].addr)
694 release_evntsel_nmi(msrs->controls[i].addr); 692 release_evntsel_nmi(msrs->controls[i].addr);
695 } 693 }
696} 694}
697 695
698 696
699#ifdef CONFIG_SMP 697#ifdef CONFIG_SMP
700struct op_x86_model_spec const op_p4_ht2_spec = { 698struct op_x86_model_spec op_p4_ht2_spec = {
701 .num_counters = NUM_COUNTERS_HT2, 699 .num_counters = NUM_COUNTERS_HT2,
702 .num_controls = NUM_CONTROLS_HT2, 700 .num_controls = NUM_CONTROLS_HT2,
703 .fill_in_addresses = &p4_fill_in_addresses, 701 .fill_in_addresses = &p4_fill_in_addresses,
@@ -709,7 +707,7 @@ struct op_x86_model_spec const op_p4_ht2_spec = {
709}; 707};
710#endif 708#endif
711 709
712struct op_x86_model_spec const op_p4_spec = { 710struct op_x86_model_spec op_p4_spec = {
713 .num_counters = NUM_COUNTERS_NON_HT, 711 .num_counters = NUM_COUNTERS_NON_HT,
714 .num_controls = NUM_CONTROLS_NON_HT, 712 .num_controls = NUM_CONTROLS_NON_HT,
715 .fill_in_addresses = &p4_fill_in_addresses, 713 .fill_in_addresses = &p4_fill_in_addresses,
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
index 4da7230b3d17..4899215999de 100644
--- a/arch/x86/oprofile/op_model_ppro.c
+++ b/arch/x86/oprofile/op_model_ppro.c
@@ -10,6 +10,7 @@
10 * @author Philippe Elie 10 * @author Philippe Elie
11 * @author Graydon Hoare 11 * @author Graydon Hoare
12 * @author Andi Kleen 12 * @author Andi Kleen
13 * @author Robert Richter <robert.richter@amd.com>
13 */ 14 */
14 15
15#include <linux/oprofile.h> 16#include <linux/oprofile.h>
@@ -18,7 +19,6 @@
18#include <asm/msr.h> 19#include <asm/msr.h>
19#include <asm/apic.h> 20#include <asm/apic.h>
20#include <asm/nmi.h> 21#include <asm/nmi.h>
21#include <asm/perf_counter.h>
22 22
23#include "op_x86_model.h" 23#include "op_x86_model.h"
24#include "op_counter.h" 24#include "op_counter.h"
@@ -26,20 +26,7 @@
26static int num_counters = 2; 26static int num_counters = 2;
27static int counter_width = 32; 27static int counter_width = 32;
28 28
29#define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0) 29#define MSR_PPRO_EVENTSEL_RESERVED ((0xFFFFFFFFULL<<32)|(1ULL<<21))
30#define CTR_OVERFLOWED(n) (!((n) & (1ULL<<(counter_width-1))))
31
32#define CTRL_IS_RESERVED(msrs, c) (msrs->controls[(c)].addr ? 1 : 0)
33#define CTRL_READ(l, h, msrs, c) do {rdmsr((msrs->controls[(c)].addr), (l), (h)); } while (0)
34#define CTRL_WRITE(l, h, msrs, c) do {wrmsr((msrs->controls[(c)].addr), (l), (h)); } while (0)
35#define CTRL_SET_ACTIVE(n) (n |= (1<<22))
36#define CTRL_SET_INACTIVE(n) (n &= ~(1<<22))
37#define CTRL_CLEAR(x) (x &= (1<<21))
38#define CTRL_SET_ENABLE(val) (val |= 1<<20)
39#define CTRL_SET_USR(val, u) (val |= ((u & 1) << 16))
40#define CTRL_SET_KERN(val, k) (val |= ((k & 1) << 17))
41#define CTRL_SET_UM(val, m) (val |= (m << 8))
42#define CTRL_SET_EVENT(val, e) (val |= e)
43 30
44static u64 *reset_value; 31static u64 *reset_value;
45 32
@@ -63,9 +50,10 @@ static void ppro_fill_in_addresses(struct op_msrs * const msrs)
63} 50}
64 51
65 52
66static void ppro_setup_ctrs(struct op_msrs const * const msrs) 53static void ppro_setup_ctrs(struct op_x86_model_spec const *model,
54 struct op_msrs const * const msrs)
67{ 55{
68 unsigned int low, high; 56 u64 val;
69 int i; 57 int i;
70 58
71 if (!reset_value) { 59 if (!reset_value) {
@@ -93,36 +81,30 @@ static void ppro_setup_ctrs(struct op_msrs const * const msrs)
93 } 81 }
94 82
95 /* clear all counters */ 83 /* clear all counters */
96 for (i = 0 ; i < num_counters; ++i) { 84 for (i = 0; i < num_counters; ++i) {
97 if (unlikely(!CTRL_IS_RESERVED(msrs, i))) 85 if (unlikely(!msrs->controls[i].addr))
98 continue; 86 continue;
99 CTRL_READ(low, high, msrs, i); 87 rdmsrl(msrs->controls[i].addr, val);
100 CTRL_CLEAR(low); 88 val &= model->reserved;
101 CTRL_WRITE(low, high, msrs, i); 89 wrmsrl(msrs->controls[i].addr, val);
102 } 90 }
103 91
104 /* avoid a false detection of ctr overflows in NMI handler */ 92 /* avoid a false detection of ctr overflows in NMI handler */
105 for (i = 0; i < num_counters; ++i) { 93 for (i = 0; i < num_counters; ++i) {
106 if (unlikely(!CTR_IS_RESERVED(msrs, i))) 94 if (unlikely(!msrs->counters[i].addr))
107 continue; 95 continue;
108 wrmsrl(msrs->counters[i].addr, -1LL); 96 wrmsrl(msrs->counters[i].addr, -1LL);
109 } 97 }
110 98
111 /* enable active counters */ 99 /* enable active counters */
112 for (i = 0; i < num_counters; ++i) { 100 for (i = 0; i < num_counters; ++i) {
113 if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs, i))) { 101 if (counter_config[i].enabled && msrs->counters[i].addr) {
114 reset_value[i] = counter_config[i].count; 102 reset_value[i] = counter_config[i].count;
115
116 wrmsrl(msrs->counters[i].addr, -reset_value[i]); 103 wrmsrl(msrs->counters[i].addr, -reset_value[i]);
117 104 rdmsrl(msrs->controls[i].addr, val);
118 CTRL_READ(low, high, msrs, i); 105 val &= model->reserved;
119 CTRL_CLEAR(low); 106 val |= op_x86_get_ctrl(model, &counter_config[i]);
120 CTRL_SET_ENABLE(low); 107 wrmsrl(msrs->controls[i].addr, val);
121 CTRL_SET_USR(low, counter_config[i].user);
122 CTRL_SET_KERN(low, counter_config[i].kernel);
123 CTRL_SET_UM(low, counter_config[i].unit_mask);
124 CTRL_SET_EVENT(low, counter_config[i].event);
125 CTRL_WRITE(low, high, msrs, i);
126 } else { 108 } else {
127 reset_value[i] = 0; 109 reset_value[i] = 0;
128 } 110 }
@@ -143,14 +125,14 @@ static int ppro_check_ctrs(struct pt_regs * const regs,
143 if (unlikely(!reset_value)) 125 if (unlikely(!reset_value))
144 goto out; 126 goto out;
145 127
146 for (i = 0 ; i < num_counters; ++i) { 128 for (i = 0; i < num_counters; ++i) {
147 if (!reset_value[i]) 129 if (!reset_value[i])
148 continue; 130 continue;
149 rdmsrl(msrs->counters[i].addr, val); 131 rdmsrl(msrs->counters[i].addr, val);
150 if (CTR_OVERFLOWED(val)) { 132 if (val & (1ULL << (counter_width - 1)))
151 oprofile_add_sample(regs, i); 133 continue;
152 wrmsrl(msrs->counters[i].addr, -reset_value[i]); 134 oprofile_add_sample(regs, i);
153 } 135 wrmsrl(msrs->counters[i].addr, -reset_value[i]);
154 } 136 }
155 137
156out: 138out:
@@ -171,16 +153,16 @@ out:
171 153
172static void ppro_start(struct op_msrs const * const msrs) 154static void ppro_start(struct op_msrs const * const msrs)
173{ 155{
174 unsigned int low, high; 156 u64 val;
175 int i; 157 int i;
176 158
177 if (!reset_value) 159 if (!reset_value)
178 return; 160 return;
179 for (i = 0; i < num_counters; ++i) { 161 for (i = 0; i < num_counters; ++i) {
180 if (reset_value[i]) { 162 if (reset_value[i]) {
181 CTRL_READ(low, high, msrs, i); 163 rdmsrl(msrs->controls[i].addr, val);
182 CTRL_SET_ACTIVE(low); 164 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
183 CTRL_WRITE(low, high, msrs, i); 165 wrmsrl(msrs->controls[i].addr, val);
184 } 166 }
185 } 167 }
186} 168}
@@ -188,7 +170,7 @@ static void ppro_start(struct op_msrs const * const msrs)
188 170
189static void ppro_stop(struct op_msrs const * const msrs) 171static void ppro_stop(struct op_msrs const * const msrs)
190{ 172{
191 unsigned int low, high; 173 u64 val;
192 int i; 174 int i;
193 175
194 if (!reset_value) 176 if (!reset_value)
@@ -196,9 +178,9 @@ static void ppro_stop(struct op_msrs const * const msrs)
196 for (i = 0; i < num_counters; ++i) { 178 for (i = 0; i < num_counters; ++i) {
197 if (!reset_value[i]) 179 if (!reset_value[i])
198 continue; 180 continue;
199 CTRL_READ(low, high, msrs, i); 181 rdmsrl(msrs->controls[i].addr, val);
200 CTRL_SET_INACTIVE(low); 182 val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
201 CTRL_WRITE(low, high, msrs, i); 183 wrmsrl(msrs->controls[i].addr, val);
202 } 184 }
203} 185}
204 186
@@ -206,12 +188,12 @@ static void ppro_shutdown(struct op_msrs const * const msrs)
206{ 188{
207 int i; 189 int i;
208 190
209 for (i = 0 ; i < num_counters ; ++i) { 191 for (i = 0; i < num_counters; ++i) {
210 if (CTR_IS_RESERVED(msrs, i)) 192 if (msrs->counters[i].addr)
211 release_perfctr_nmi(MSR_P6_PERFCTR0 + i); 193 release_perfctr_nmi(MSR_P6_PERFCTR0 + i);
212 } 194 }
213 for (i = 0 ; i < num_counters ; ++i) { 195 for (i = 0; i < num_counters; ++i) {
214 if (CTRL_IS_RESERVED(msrs, i)) 196 if (msrs->controls[i].addr)
215 release_evntsel_nmi(MSR_P6_EVNTSEL0 + i); 197 release_evntsel_nmi(MSR_P6_EVNTSEL0 + i);
216 } 198 }
217 if (reset_value) { 199 if (reset_value) {
@@ -222,8 +204,9 @@ static void ppro_shutdown(struct op_msrs const * const msrs)
222 204
223 205
224struct op_x86_model_spec op_ppro_spec = { 206struct op_x86_model_spec op_ppro_spec = {
225 .num_counters = 2, /* can be overriden */ 207 .num_counters = 2,
226 .num_controls = 2, /* dito */ 208 .num_controls = 2,
209 .reserved = MSR_PPRO_EVENTSEL_RESERVED,
227 .fill_in_addresses = &ppro_fill_in_addresses, 210 .fill_in_addresses = &ppro_fill_in_addresses,
228 .setup_ctrs = &ppro_setup_ctrs, 211 .setup_ctrs = &ppro_setup_ctrs,
229 .check_ctrs = &ppro_check_ctrs, 212 .check_ctrs = &ppro_check_ctrs,
@@ -241,7 +224,7 @@ struct op_x86_model_spec op_ppro_spec = {
241 * the specific CPU. 224 * the specific CPU.
242 */ 225 */
243 226
244void arch_perfmon_setup_counters(void) 227static void arch_perfmon_setup_counters(void)
245{ 228{
246 union cpuid10_eax eax; 229 union cpuid10_eax eax;
247 230
@@ -259,11 +242,17 @@ void arch_perfmon_setup_counters(void)
259 242
260 op_arch_perfmon_spec.num_counters = num_counters; 243 op_arch_perfmon_spec.num_counters = num_counters;
261 op_arch_perfmon_spec.num_controls = num_counters; 244 op_arch_perfmon_spec.num_controls = num_counters;
262 op_ppro_spec.num_counters = num_counters; 245}
263 op_ppro_spec.num_controls = num_counters; 246
247static int arch_perfmon_init(struct oprofile_operations *ignore)
248{
249 arch_perfmon_setup_counters();
250 return 0;
264} 251}
265 252
266struct op_x86_model_spec op_arch_perfmon_spec = { 253struct op_x86_model_spec op_arch_perfmon_spec = {
254 .reserved = MSR_PPRO_EVENTSEL_RESERVED,
255 .init = &arch_perfmon_init,
267 /* num_counters/num_controls filled in at runtime */ 256 /* num_counters/num_controls filled in at runtime */
268 .fill_in_addresses = &ppro_fill_in_addresses, 257 .fill_in_addresses = &ppro_fill_in_addresses,
269 /* user space does the cpuid check for available events */ 258 /* user space does the cpuid check for available events */
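
Both the AMD and ppro setup paths now program the event-select MSR the same way: read it, keep only the bits named in model->reserved, then OR in the value returned by op_x86_get_ctrl(). That helper is added in nmi_int.c and is not visible in the hunks quoted here; the sketch below is only a plausible reconstruction from its call sites and the conventional EVNTSEL layout, so the ARCH_PERFMON_EVENTSEL_* names and the field positions should be read as assumptions rather than text from this patch.

/* sketch; assumes the op_x86_model.h, op_counter.h and <asm/perf_counter.h> context */
u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
		    struct op_counter_config *counter_config)
{
	u64 val = 0;
	u16 event = (u16)counter_config->event;

	val |= ARCH_PERFMON_EVENTSEL_INT;		/* interrupt on overflow   */
	val |= counter_config->user ? ARCH_PERFMON_EVENTSEL_USR : 0;
	val |= counter_config->kernel ? ARCH_PERFMON_EVENTSEL_OS : 0;
	val |= (counter_config->unit_mask & 0xFF) << 8;
	event &= model->event_mask ? model->event_mask : 0xFF;
	val |= event & 0xFF;				/* event select, low bits  */
	val |= (u64)(event & 0x0F00) << 24;		/* extended event bits     */

	return val;
}

Masking the old register value with MSR_AMD_EVENTSEL_RESERVED or MSR_PPRO_EVENTSEL_RESERVED preserves whatever the driver does not own, which is what makes the per-model CTRL_CLEAR*/CTRL_SET_* macro families removable.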
diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
index 825e79064d64..b83776180c7f 100644
--- a/arch/x86/oprofile/op_x86_model.h
+++ b/arch/x86/oprofile/op_x86_model.h
@@ -6,51 +6,66 @@
6 * @remark Read the file COPYING 6 * @remark Read the file COPYING
7 * 7 *
8 * @author Graydon Hoare 8 * @author Graydon Hoare
9 * @author Robert Richter <robert.richter@amd.com>
9 */ 10 */
10 11
11#ifndef OP_X86_MODEL_H 12#ifndef OP_X86_MODEL_H
12#define OP_X86_MODEL_H 13#define OP_X86_MODEL_H
13 14
14struct op_saved_msr { 15#include <asm/types.h>
15 unsigned int high; 16#include <asm/perf_counter.h>
16 unsigned int low;
17};
18 17
19struct op_msr { 18struct op_msr {
20 unsigned long addr; 19 unsigned long addr;
21 struct op_saved_msr saved; 20 u64 saved;
22}; 21};
23 22
24struct op_msrs { 23struct op_msrs {
25 struct op_msr *counters; 24 struct op_msr *counters;
26 struct op_msr *controls; 25 struct op_msr *controls;
26 struct op_msr *multiplex;
27}; 27};
28 28
29struct pt_regs; 29struct pt_regs;
30 30
31struct oprofile_operations;
32
31/* The model vtable abstracts the differences between 33/* The model vtable abstracts the differences between
32 * various x86 CPU models' perfctr support. 34 * various x86 CPU models' perfctr support.
33 */ 35 */
34struct op_x86_model_spec { 36struct op_x86_model_spec {
35 int (*init)(struct oprofile_operations *ops); 37 unsigned int num_counters;
36 void (*exit)(void); 38 unsigned int num_controls;
37 unsigned int num_counters; 39 unsigned int num_virt_counters;
38 unsigned int num_controls; 40 u64 reserved;
39 void (*fill_in_addresses)(struct op_msrs * const msrs); 41 u16 event_mask;
40 void (*setup_ctrs)(struct op_msrs const * const msrs); 42 int (*init)(struct oprofile_operations *ops);
41 int (*check_ctrs)(struct pt_regs * const regs, 43 void (*exit)(void);
42 struct op_msrs const * const msrs); 44 void (*fill_in_addresses)(struct op_msrs * const msrs);
43 void (*start)(struct op_msrs const * const msrs); 45 void (*setup_ctrs)(struct op_x86_model_spec const *model,
44 void (*stop)(struct op_msrs const * const msrs); 46 struct op_msrs const * const msrs);
45 void (*shutdown)(struct op_msrs const * const msrs); 47 int (*check_ctrs)(struct pt_regs * const regs,
48 struct op_msrs const * const msrs);
49 void (*start)(struct op_msrs const * const msrs);
50 void (*stop)(struct op_msrs const * const msrs);
51 void (*shutdown)(struct op_msrs const * const msrs);
52#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
53 void (*switch_ctrl)(struct op_x86_model_spec const *model,
54 struct op_msrs const * const msrs);
55#endif
46}; 56};
47 57
58struct op_counter_config;
59
60extern u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
61 struct op_counter_config *counter_config);
62extern int op_x86_phys_to_virt(int phys);
63extern int op_x86_virt_to_phys(int virt);
64
48extern struct op_x86_model_spec op_ppro_spec; 65extern struct op_x86_model_spec op_ppro_spec;
49extern struct op_x86_model_spec const op_p4_spec; 66extern struct op_x86_model_spec op_p4_spec;
50extern struct op_x86_model_spec const op_p4_ht2_spec; 67extern struct op_x86_model_spec op_p4_ht2_spec;
51extern struct op_x86_model_spec const op_amd_spec; 68extern struct op_x86_model_spec op_amd_spec;
52extern struct op_x86_model_spec op_arch_perfmon_spec; 69extern struct op_x86_model_spec op_arch_perfmon_spec;
53 70
54extern void arch_perfmon_setup_counters(void);
55
56#endif /* OP_X86_MODEL_H */ 71#endif /* OP_X86_MODEL_H */
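
op_x86_phys_to_virt() and op_x86_virt_to_phys() bridge the few hardware counters (num_counters) and the up to 32 multiplexed virtual counters (num_virt_counters, OP_MAX_COUNTER). Their bodies live in nmi_int.c and are outside the hunks shown here; a minimal sketch of a per-CPU round-robin mapping, with illustrative names and under the assumption that a rotation offset is kept per CPU, could look like:

#include <linux/percpu.h>

/* rotation offset of the event group currently on the hardware (sketch);
 * 'model' is the selected struct op_x86_model_spec, as in nmi_int.c */
static DEFINE_PER_CPU(int, switch_index);

int op_x86_phys_to_virt(int phys)
{
	return __get_cpu_var(switch_index) + phys;
}

int op_x86_virt_to_phys(int virt)
{
	return virt % model->num_counters;
}

Presumably the multiplexing timer advances switch_index by num_counters each interval, wrapping at num_virt_counters, and then calls the model's switch_ctrl() hook (op_mux_switch_ctrl() in the AMD model above) to reprogram the control MSRs for the newly selected group.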
diff --git a/arch/x86/pci/direct.c b/arch/x86/pci/direct.c
index bd13c3e4c6db..347d882b3bb3 100644
--- a/arch/x86/pci/direct.c
+++ b/arch/x86/pci/direct.c
@@ -192,13 +192,14 @@ struct pci_raw_ops pci_direct_conf2 = {
192static int __init pci_sanity_check(struct pci_raw_ops *o) 192static int __init pci_sanity_check(struct pci_raw_ops *o)
193{ 193{
194 u32 x = 0; 194 u32 x = 0;
195 int devfn; 195 int year, devfn;
196 196
197 if (pci_probe & PCI_NO_CHECKS) 197 if (pci_probe & PCI_NO_CHECKS)
198 return 1; 198 return 1;
199 /* Assume Type 1 works for newer systems. 199 /* Assume Type 1 works for newer systems.
200 This handles machines that don't have anything on PCI Bus 0. */ 200 This handles machines that don't have anything on PCI Bus 0. */
201 if (dmi_get_year(DMI_BIOS_DATE) >= 2001) 201 dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL);
202 if (year >= 2001)
202 return 1; 203 return 1;
203 204
204 for (devfn = 0; devfn < 0x100; devfn++) { 205 for (devfn = 0; devfn < 0x100; devfn++) {
diff --git a/block/blk-core.c b/block/blk-core.c
index e3299a77a0d8..e695634882a6 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -501,6 +501,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
501 (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; 501 (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
502 q->backing_dev_info.state = 0; 502 q->backing_dev_info.state = 0;
503 q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY; 503 q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
504 q->backing_dev_info.name = "block";
504 505
505 err = bdi_init(&q->backing_dev_info); 506 err = bdi_init(&q->backing_dev_info);
506 if (err) { 507 if (err) {
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 418d63619680..d3aa2aadb3e0 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -133,7 +133,7 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
133 return -EINVAL; 133 return -EINVAL;
134 134
135 spin_lock_irq(q->queue_lock); 135 spin_lock_irq(q->queue_lock);
136 blk_queue_max_sectors(q, max_sectors_kb << 1); 136 q->limits.max_sectors = max_sectors_kb << 1;
137 spin_unlock_irq(q->queue_lock); 137 spin_unlock_irq(q->queue_lock);
138 138
139 return ret; 139 return ret;
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 4dfdd03e708f..26b5dd0cb564 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -23,11 +23,13 @@ comment "Crypto core or helper"
23 23
24config CRYPTO_FIPS 24config CRYPTO_FIPS
25 bool "FIPS 200 compliance" 25 bool "FIPS 200 compliance"
26 depends on CRYPTO_ANSI_CPRNG
26 help 27 help
27 This options enables the fips boot option which is 28 This options enables the fips boot option which is
28 required if you want to system to operate in a FIPS 200 29 required if you want to system to operate in a FIPS 200
29 certification. You should say no unless you know what 30 certification. You should say no unless you know what
30 this is. 31 this is. Note that CRYPTO_ANSI_CPRNG is requred if this
32 option is selected
31 33
32config CRYPTO_ALGAPI 34config CRYPTO_ALGAPI
33 tristate 35 tristate
@@ -156,7 +158,7 @@ config CRYPTO_GCM
156 tristate "GCM/GMAC support" 158 tristate "GCM/GMAC support"
157 select CRYPTO_CTR 159 select CRYPTO_CTR
158 select CRYPTO_AEAD 160 select CRYPTO_AEAD
159 select CRYPTO_GF128MUL 161 select CRYPTO_GHASH
160 help 162 help
161 Support for Galois/Counter Mode (GCM) and Galois Message 163 Support for Galois/Counter Mode (GCM) and Galois Message
162 Authentication Code (GMAC). Required for IPSec. 164 Authentication Code (GMAC). Required for IPSec.
@@ -267,6 +269,18 @@ config CRYPTO_XCBC
267 http://csrc.nist.gov/encryption/modes/proposedmodes/ 269 http://csrc.nist.gov/encryption/modes/proposedmodes/
268 xcbc-mac/xcbc-mac-spec.pdf 270 xcbc-mac/xcbc-mac-spec.pdf
269 271
272config CRYPTO_VMAC
273 tristate "VMAC support"
274 depends on EXPERIMENTAL
275 select CRYPTO_HASH
276 select CRYPTO_MANAGER
277 help
278 VMAC is a message authentication algorithm designed for
279 very high speed on 64-bit architectures.
280
281 See also:
282 <http://fastcrypto.org/vmac>
283
270comment "Digest" 284comment "Digest"
271 285
272config CRYPTO_CRC32C 286config CRYPTO_CRC32C
@@ -289,6 +303,13 @@ config CRYPTO_CRC32C_INTEL
289 gain performance compared with software implementation. 303 gain performance compared with software implementation.
290 Module will be crc32c-intel. 304 Module will be crc32c-intel.
291 305
306config CRYPTO_GHASH
307 tristate "GHASH digest algorithm"
308 select CRYPTO_SHASH
309 select CRYPTO_GF128MUL
310 help
311 GHASH is a message digest algorithm for GCM (Galois/Counter Mode).
312
292config CRYPTO_MD4 313config CRYPTO_MD4
293 tristate "MD4 digest algorithm" 314 tristate "MD4 digest algorithm"
294 select CRYPTO_HASH 315 select CRYPTO_HASH
@@ -780,13 +801,14 @@ comment "Random Number Generation"
780 801
781config CRYPTO_ANSI_CPRNG 802config CRYPTO_ANSI_CPRNG
782 tristate "Pseudo Random Number Generation for Cryptographic modules" 803 tristate "Pseudo Random Number Generation for Cryptographic modules"
804 default m
783 select CRYPTO_AES 805 select CRYPTO_AES
784 select CRYPTO_RNG 806 select CRYPTO_RNG
785 select CRYPTO_FIPS
786 help 807 help
787 This option enables the generic pseudo random number generator 808 This option enables the generic pseudo random number generator
788 for cryptographic modules. Uses the Algorithm specified in 809 for cryptographic modules. Uses the Algorithm specified in
789 ANSI X9.31 A.2.4 810 ANSI X9.31 A.2.4. Note that this option must be enabled if CRYPTO_FIPS
 811 is selected.
790 812
791source "drivers/crypto/Kconfig" 813source "drivers/crypto/Kconfig"
792 814
diff --git a/crypto/Makefile b/crypto/Makefile
index 673d9f7c1bda..9e8f61908cb5 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -3,7 +3,7 @@
3# 3#
4 4
5obj-$(CONFIG_CRYPTO) += crypto.o 5obj-$(CONFIG_CRYPTO) += crypto.o
6crypto-objs := api.o cipher.o digest.o compress.o 6crypto-objs := api.o cipher.o compress.o
7 7
8obj-$(CONFIG_CRYPTO_WORKQUEUE) += crypto_wq.o 8obj-$(CONFIG_CRYPTO_WORKQUEUE) += crypto_wq.o
9 9
@@ -22,7 +22,6 @@ obj-$(CONFIG_CRYPTO_BLKCIPHER2) += chainiv.o
22obj-$(CONFIG_CRYPTO_BLKCIPHER2) += eseqiv.o 22obj-$(CONFIG_CRYPTO_BLKCIPHER2) += eseqiv.o
23obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o 23obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o
24 24
25crypto_hash-objs := hash.o
26crypto_hash-objs += ahash.o 25crypto_hash-objs += ahash.o
27crypto_hash-objs += shash.o 26crypto_hash-objs += shash.o
28obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o 27obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o
@@ -33,6 +32,7 @@ cryptomgr-objs := algboss.o testmgr.o
33 32
34obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o 33obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o
35obj-$(CONFIG_CRYPTO_HMAC) += hmac.o 34obj-$(CONFIG_CRYPTO_HMAC) += hmac.o
35obj-$(CONFIG_CRYPTO_VMAC) += vmac.o
36obj-$(CONFIG_CRYPTO_XCBC) += xcbc.o 36obj-$(CONFIG_CRYPTO_XCBC) += xcbc.o
37obj-$(CONFIG_CRYPTO_NULL) += crypto_null.o 37obj-$(CONFIG_CRYPTO_NULL) += crypto_null.o
38obj-$(CONFIG_CRYPTO_MD4) += md4.o 38obj-$(CONFIG_CRYPTO_MD4) += md4.o
@@ -83,6 +83,7 @@ obj-$(CONFIG_CRYPTO_RNG2) += rng.o
83obj-$(CONFIG_CRYPTO_RNG2) += krng.o 83obj-$(CONFIG_CRYPTO_RNG2) += krng.o
84obj-$(CONFIG_CRYPTO_ANSI_CPRNG) += ansi_cprng.o 84obj-$(CONFIG_CRYPTO_ANSI_CPRNG) += ansi_cprng.o
85obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o 85obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o
86obj-$(CONFIG_CRYPTO_GHASH) += ghash-generic.o
86 87
87# 88#
88# generic algorithms and the async_tx api 89# generic algorithms and the async_tx api
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index e11ce37c7104..f6f08336df5d 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -14,6 +14,7 @@
14 */ 14 */
15 15
16#include <crypto/internal/skcipher.h> 16#include <crypto/internal/skcipher.h>
17#include <linux/cpumask.h>
17#include <linux/err.h> 18#include <linux/err.h>
18#include <linux/init.h> 19#include <linux/init.h>
19#include <linux/kernel.h> 20#include <linux/kernel.h>
@@ -25,6 +26,8 @@
25 26
26#include "internal.h" 27#include "internal.h"
27 28
29static const char *skcipher_default_geniv __read_mostly;
30
28static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key, 31static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
29 unsigned int keylen) 32 unsigned int keylen)
30{ 33{
@@ -180,7 +183,14 @@ EXPORT_SYMBOL_GPL(crypto_givcipher_type);
180 183
181const char *crypto_default_geniv(const struct crypto_alg *alg) 184const char *crypto_default_geniv(const struct crypto_alg *alg)
182{ 185{
183 return alg->cra_flags & CRYPTO_ALG_ASYNC ? "eseqiv" : "chainiv"; 186 if (((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
187 CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
188 alg->cra_ablkcipher.ivsize) !=
189 alg->cra_blocksize)
190 return "chainiv";
191
192 return alg->cra_flags & CRYPTO_ALG_ASYNC ?
193 "eseqiv" : skcipher_default_geniv;
184} 194}
185 195
186static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask) 196static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask)
@@ -201,8 +211,9 @@ static int crypto_givcipher_default(struct crypto_alg *alg, u32 type, u32 mask)
201 int err; 211 int err;
202 212
203 larval = crypto_larval_lookup(alg->cra_driver_name, 213 larval = crypto_larval_lookup(alg->cra_driver_name,
214 (type & ~CRYPTO_ALG_TYPE_MASK) |
204 CRYPTO_ALG_TYPE_GIVCIPHER, 215 CRYPTO_ALG_TYPE_GIVCIPHER,
205 CRYPTO_ALG_TYPE_MASK); 216 mask | CRYPTO_ALG_TYPE_MASK);
206 err = PTR_ERR(larval); 217 err = PTR_ERR(larval);
207 if (IS_ERR(larval)) 218 if (IS_ERR(larval))
208 goto out; 219 goto out;
@@ -360,3 +371,17 @@ err:
360 return ERR_PTR(err); 371 return ERR_PTR(err);
361} 372}
362EXPORT_SYMBOL_GPL(crypto_alloc_ablkcipher); 373EXPORT_SYMBOL_GPL(crypto_alloc_ablkcipher);
374
375static int __init skcipher_module_init(void)
376{
377 skcipher_default_geniv = num_possible_cpus() > 1 ?
378 "eseqiv" : "chainiv";
379 return 0;
380}
381
382static void skcipher_module_exit(void)
383{
384}
385
386module_init(skcipher_module_init);
387module_exit(skcipher_module_exit);
diff --git a/crypto/aes_generic.c b/crypto/aes_generic.c
index b8b66ec3883b..e78b7ee44a74 100644
--- a/crypto/aes_generic.c
+++ b/crypto/aes_generic.c
@@ -1174,7 +1174,7 @@ EXPORT_SYMBOL_GPL(crypto_il_tab);
1174 ctx->key_enc[6 * i + 11] = t; \ 1174 ctx->key_enc[6 * i + 11] = t; \
1175} while (0) 1175} while (0)
1176 1176
1177#define loop8(i) do { \ 1177#define loop8tophalf(i) do { \
1178 t = ror32(t, 8); \ 1178 t = ror32(t, 8); \
1179 t = ls_box(t) ^ rco_tab[i]; \ 1179 t = ls_box(t) ^ rco_tab[i]; \
1180 t ^= ctx->key_enc[8 * i]; \ 1180 t ^= ctx->key_enc[8 * i]; \
@@ -1185,6 +1185,10 @@ EXPORT_SYMBOL_GPL(crypto_il_tab);
1185 ctx->key_enc[8 * i + 10] = t; \ 1185 ctx->key_enc[8 * i + 10] = t; \
1186 t ^= ctx->key_enc[8 * i + 3]; \ 1186 t ^= ctx->key_enc[8 * i + 3]; \
1187 ctx->key_enc[8 * i + 11] = t; \ 1187 ctx->key_enc[8 * i + 11] = t; \
1188} while (0)
1189
1190#define loop8(i) do { \
1191 loop8tophalf(i); \
1188 t = ctx->key_enc[8 * i + 4] ^ ls_box(t); \ 1192 t = ctx->key_enc[8 * i + 4] ^ ls_box(t); \
1189 ctx->key_enc[8 * i + 12] = t; \ 1193 ctx->key_enc[8 * i + 12] = t; \
1190 t ^= ctx->key_enc[8 * i + 5]; \ 1194 t ^= ctx->key_enc[8 * i + 5]; \
@@ -1245,8 +1249,9 @@ int crypto_aes_expand_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
1245 ctx->key_enc[5] = le32_to_cpu(key[5]); 1249 ctx->key_enc[5] = le32_to_cpu(key[5]);
1246 ctx->key_enc[6] = le32_to_cpu(key[6]); 1250 ctx->key_enc[6] = le32_to_cpu(key[6]);
1247 t = ctx->key_enc[7] = le32_to_cpu(key[7]); 1251 t = ctx->key_enc[7] = le32_to_cpu(key[7]);
1248 for (i = 0; i < 7; ++i) 1252 for (i = 0; i < 6; ++i)
1249 loop8(i); 1253 loop8(i);
1254 loop8tophalf(i);
1250 break; 1255 break;
1251 } 1256 }
1252 1257
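
The aes_generic.c hunk splits the final AES-256 expansion step because a full loop8() emits eight round-key words, while the 60-word schedule has only four slots left after the sixth iteration. A small self-contained check of that arithmetic (array bounds follow FIPS-197; nothing here is kernel code):

/* Self-contained check of the AES-256 key-schedule bounds behind the
 * loop8()/loop8tophalf() split. The expanded key holds 4*(Nr+1) = 60
 * 32-bit words; each full loop8(i) fills words 8*i+8 .. 8*i+15. */
#include <assert.h>
#include <stdio.h>

int main(void)
{
        const int nr = 14;                 /* rounds for AES-256 */
        const int words = 4 * (nr + 1);    /* 60 expanded-key words */

        /* Old code: seven full iterations; i == 6 writes words 56..63,
         * four words past the 60-word schedule. */
        assert(8 * 6 + 15 > words - 1);

        /* New code: six full iterations (last write is word 55), then the
         * top half of iteration 6 fills exactly words 56..59. */
        assert(8 * 5 + 15 == 55);
        assert(8 * 6 + 11 == words - 1);

        printf("AES-256 schedule: %d words, last index %d\n", words, words - 1);
        return 0;
}
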
diff --git a/crypto/ahash.c b/crypto/ahash.c
index f3476374f764..33a4ff45f842 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -24,6 +24,19 @@
24 24
25#include "internal.h" 25#include "internal.h"
26 26
27struct ahash_request_priv {
28 crypto_completion_t complete;
29 void *data;
30 u8 *result;
31 void *ubuf[] CRYPTO_MINALIGN_ATTR;
32};
33
34static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
35{
36 return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
37 halg);
38}
39
27static int hash_walk_next(struct crypto_hash_walk *walk) 40static int hash_walk_next(struct crypto_hash_walk *walk)
28{ 41{
29 unsigned int alignmask = walk->alignmask; 42 unsigned int alignmask = walk->alignmask;
@@ -132,36 +145,34 @@ int crypto_hash_walk_first_compat(struct hash_desc *hdesc,
132static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key, 145static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
133 unsigned int keylen) 146 unsigned int keylen)
134{ 147{
135 struct ahash_alg *ahash = crypto_ahash_alg(tfm);
136 unsigned long alignmask = crypto_ahash_alignmask(tfm); 148 unsigned long alignmask = crypto_ahash_alignmask(tfm);
137 int ret; 149 int ret;
138 u8 *buffer, *alignbuffer; 150 u8 *buffer, *alignbuffer;
139 unsigned long absize; 151 unsigned long absize;
140 152
141 absize = keylen + alignmask; 153 absize = keylen + alignmask;
142 buffer = kmalloc(absize, GFP_ATOMIC); 154 buffer = kmalloc(absize, GFP_KERNEL);
143 if (!buffer) 155 if (!buffer)
144 return -ENOMEM; 156 return -ENOMEM;
145 157
146 alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); 158 alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
147 memcpy(alignbuffer, key, keylen); 159 memcpy(alignbuffer, key, keylen);
148 ret = ahash->setkey(tfm, alignbuffer, keylen); 160 ret = tfm->setkey(tfm, alignbuffer, keylen);
149 memset(alignbuffer, 0, keylen); 161 kzfree(buffer);
150 kfree(buffer);
151 return ret; 162 return ret;
152} 163}
153 164
154static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key, 165int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
155 unsigned int keylen) 166 unsigned int keylen)
156{ 167{
157 struct ahash_alg *ahash = crypto_ahash_alg(tfm);
158 unsigned long alignmask = crypto_ahash_alignmask(tfm); 168 unsigned long alignmask = crypto_ahash_alignmask(tfm);
159 169
160 if ((unsigned long)key & alignmask) 170 if ((unsigned long)key & alignmask)
161 return ahash_setkey_unaligned(tfm, key, keylen); 171 return ahash_setkey_unaligned(tfm, key, keylen);
162 172
163 return ahash->setkey(tfm, key, keylen); 173 return tfm->setkey(tfm, key, keylen);
164} 174}
175EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
165 176
166static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key, 177static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
167 unsigned int keylen) 178 unsigned int keylen)
@@ -169,44 +180,221 @@ static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
169 return -ENOSYS; 180 return -ENOSYS;
170} 181}
171 182
172int crypto_ahash_import(struct ahash_request *req, const u8 *in) 183static inline unsigned int ahash_align_buffer_size(unsigned len,
184 unsigned long mask)
185{
186 return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
187}
188
189static void ahash_op_unaligned_finish(struct ahash_request *req, int err)
190{
191 struct ahash_request_priv *priv = req->priv;
192
193 if (err == -EINPROGRESS)
194 return;
195
196 if (!err)
197 memcpy(priv->result, req->result,
198 crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
199
200 kzfree(priv);
201}
202
203static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
204{
205 struct ahash_request *areq = req->data;
206 struct ahash_request_priv *priv = areq->priv;
207 crypto_completion_t complete = priv->complete;
208 void *data = priv->data;
209
210 ahash_op_unaligned_finish(areq, err);
211
212 complete(data, err);
213}
214
215static int ahash_op_unaligned(struct ahash_request *req,
216 int (*op)(struct ahash_request *))
173{ 217{
174 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 218 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
175 struct ahash_alg *alg = crypto_ahash_alg(tfm); 219 unsigned long alignmask = crypto_ahash_alignmask(tfm);
220 unsigned int ds = crypto_ahash_digestsize(tfm);
221 struct ahash_request_priv *priv;
222 int err;
223
224 priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
225 (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
226 GFP_KERNEL : GFP_ATOMIC);
227 if (!priv)
228 return -ENOMEM;
176 229
177 memcpy(ahash_request_ctx(req), in, crypto_ahash_reqsize(tfm)); 230 priv->result = req->result;
231 priv->complete = req->base.complete;
232 priv->data = req->base.data;
178 233
179 if (alg->reinit) 234 req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
180 alg->reinit(req); 235 req->base.complete = ahash_op_unaligned_done;
236 req->base.data = req;
237 req->priv = priv;
181 238
182 return 0; 239 err = op(req);
240 ahash_op_unaligned_finish(req, err);
241
242 return err;
183} 243}
184EXPORT_SYMBOL_GPL(crypto_ahash_import);
185 244
186static unsigned int crypto_ahash_ctxsize(struct crypto_alg *alg, u32 type, 245static int crypto_ahash_op(struct ahash_request *req,
187 u32 mask) 246 int (*op)(struct ahash_request *))
188{ 247{
189 return alg->cra_ctxsize; 248 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
249 unsigned long alignmask = crypto_ahash_alignmask(tfm);
250
251 if ((unsigned long)req->result & alignmask)
252 return ahash_op_unaligned(req, op);
253
254 return op(req);
190} 255}
191 256
192static int crypto_init_ahash_ops(struct crypto_tfm *tfm, u32 type, u32 mask) 257int crypto_ahash_final(struct ahash_request *req)
193{ 258{
194 struct ahash_alg *alg = &tfm->__crt_alg->cra_ahash; 259 return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
195 struct ahash_tfm *crt = &tfm->crt_ahash; 260}
261EXPORT_SYMBOL_GPL(crypto_ahash_final);
196 262
197 if (alg->digestsize > PAGE_SIZE / 8) 263int crypto_ahash_finup(struct ahash_request *req)
198 return -EINVAL; 264{
265 return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
266}
267EXPORT_SYMBOL_GPL(crypto_ahash_finup);
268
269int crypto_ahash_digest(struct ahash_request *req)
270{
271 return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest);
272}
273EXPORT_SYMBOL_GPL(crypto_ahash_digest);
274
275static void ahash_def_finup_finish2(struct ahash_request *req, int err)
276{
277 struct ahash_request_priv *priv = req->priv;
278
279 if (err == -EINPROGRESS)
280 return;
281
282 if (!err)
283 memcpy(priv->result, req->result,
284 crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
199 285
200 crt->init = alg->init; 286 kzfree(priv);
201 crt->update = alg->update; 287}
202 crt->final = alg->final; 288
203 crt->digest = alg->digest; 289static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
204 crt->setkey = alg->setkey ? ahash_setkey : ahash_nosetkey; 290{
205 crt->digestsize = alg->digestsize; 291 struct ahash_request *areq = req->data;
292 struct ahash_request_priv *priv = areq->priv;
293 crypto_completion_t complete = priv->complete;
294 void *data = priv->data;
295
296 ahash_def_finup_finish2(areq, err);
297
298 complete(data, err);
299}
300
301static int ahash_def_finup_finish1(struct ahash_request *req, int err)
302{
303 if (err)
304 goto out;
305
306 req->base.complete = ahash_def_finup_done2;
307 req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
308 err = crypto_ahash_reqtfm(req)->final(req);
309
310out:
311 ahash_def_finup_finish2(req, err);
312 return err;
313}
314
315static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
316{
317 struct ahash_request *areq = req->data;
318 struct ahash_request_priv *priv = areq->priv;
319 crypto_completion_t complete = priv->complete;
320 void *data = priv->data;
321
322 err = ahash_def_finup_finish1(areq, err);
323
324 complete(data, err);
325}
326
327static int ahash_def_finup(struct ahash_request *req)
328{
329 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
330 unsigned long alignmask = crypto_ahash_alignmask(tfm);
331 unsigned int ds = crypto_ahash_digestsize(tfm);
332 struct ahash_request_priv *priv;
333
334 priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
335 (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
336 GFP_KERNEL : GFP_ATOMIC);
337 if (!priv)
338 return -ENOMEM;
339
340 priv->result = req->result;
341 priv->complete = req->base.complete;
342 priv->data = req->base.data;
343
344 req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
345 req->base.complete = ahash_def_finup_done1;
346 req->base.data = req;
347 req->priv = priv;
348
349 return ahash_def_finup_finish1(req, tfm->update(req));
350}
351
352static int ahash_no_export(struct ahash_request *req, void *out)
353{
354 return -ENOSYS;
355}
356
357static int ahash_no_import(struct ahash_request *req, const void *in)
358{
359 return -ENOSYS;
360}
361
362static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
363{
364 struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
365 struct ahash_alg *alg = crypto_ahash_alg(hash);
366
367 hash->setkey = ahash_nosetkey;
368 hash->export = ahash_no_export;
369 hash->import = ahash_no_import;
370
371 if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
372 return crypto_init_shash_ops_async(tfm);
373
374 hash->init = alg->init;
375 hash->update = alg->update;
376 hash->final = alg->final;
377 hash->finup = alg->finup ?: ahash_def_finup;
378 hash->digest = alg->digest;
379
380 if (alg->setkey)
381 hash->setkey = alg->setkey;
382 if (alg->export)
383 hash->export = alg->export;
384 if (alg->import)
385 hash->import = alg->import;
206 386
207 return 0; 387 return 0;
208} 388}
209 389
390static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
391{
392 if (alg->cra_type == &crypto_ahash_type)
393 return alg->cra_ctxsize;
394
395 return sizeof(struct crypto_shash *);
396}
397
210static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg) 398static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
211 __attribute__ ((unused)); 399 __attribute__ ((unused));
212static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg) 400static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
@@ -215,17 +403,101 @@ static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
215 seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ? 403 seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
216 "yes" : "no"); 404 "yes" : "no");
217 seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); 405 seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
218 seq_printf(m, "digestsize : %u\n", alg->cra_ahash.digestsize); 406 seq_printf(m, "digestsize : %u\n",
407 __crypto_hash_alg_common(alg)->digestsize);
219} 408}
220 409
221const struct crypto_type crypto_ahash_type = { 410const struct crypto_type crypto_ahash_type = {
222 .ctxsize = crypto_ahash_ctxsize, 411 .extsize = crypto_ahash_extsize,
223 .init = crypto_init_ahash_ops, 412 .init_tfm = crypto_ahash_init_tfm,
224#ifdef CONFIG_PROC_FS 413#ifdef CONFIG_PROC_FS
225 .show = crypto_ahash_show, 414 .show = crypto_ahash_show,
226#endif 415#endif
416 .maskclear = ~CRYPTO_ALG_TYPE_MASK,
417 .maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
418 .type = CRYPTO_ALG_TYPE_AHASH,
419 .tfmsize = offsetof(struct crypto_ahash, base),
227}; 420};
228EXPORT_SYMBOL_GPL(crypto_ahash_type); 421EXPORT_SYMBOL_GPL(crypto_ahash_type);
229 422
423struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
424 u32 mask)
425{
426 return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
427}
428EXPORT_SYMBOL_GPL(crypto_alloc_ahash);
429
430static int ahash_prepare_alg(struct ahash_alg *alg)
431{
432 struct crypto_alg *base = &alg->halg.base;
433
434 if (alg->halg.digestsize > PAGE_SIZE / 8 ||
435 alg->halg.statesize > PAGE_SIZE / 8)
436 return -EINVAL;
437
438 base->cra_type = &crypto_ahash_type;
439 base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
440 base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;
441
442 return 0;
443}
444
445int crypto_register_ahash(struct ahash_alg *alg)
446{
447 struct crypto_alg *base = &alg->halg.base;
448 int err;
449
450 err = ahash_prepare_alg(alg);
451 if (err)
452 return err;
453
454 return crypto_register_alg(base);
455}
456EXPORT_SYMBOL_GPL(crypto_register_ahash);
457
458int crypto_unregister_ahash(struct ahash_alg *alg)
459{
460 return crypto_unregister_alg(&alg->halg.base);
461}
462EXPORT_SYMBOL_GPL(crypto_unregister_ahash);
463
464int ahash_register_instance(struct crypto_template *tmpl,
465 struct ahash_instance *inst)
466{
467 int err;
468
469 err = ahash_prepare_alg(&inst->alg);
470 if (err)
471 return err;
472
473 return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
474}
475EXPORT_SYMBOL_GPL(ahash_register_instance);
476
477void ahash_free_instance(struct crypto_instance *inst)
478{
479 crypto_drop_spawn(crypto_instance_ctx(inst));
480 kfree(ahash_instance(inst));
481}
482EXPORT_SYMBOL_GPL(ahash_free_instance);
483
484int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
485 struct hash_alg_common *alg,
486 struct crypto_instance *inst)
487{
488 return crypto_init_spawn2(&spawn->base, &alg->base, inst,
489 &crypto_ahash_type);
490}
491EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn);
492
493struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
494{
495 struct crypto_alg *alg;
496
497 alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask);
498 return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg);
499}
500EXPORT_SYMBOL_GPL(ahash_attr_alg);
501
230MODULE_LICENSE("GPL"); 502MODULE_LICENSE("GPL");
231MODULE_DESCRIPTION("Asynchronous cryptographic hash type"); 503MODULE_DESCRIPTION("Asynchronous cryptographic hash type");
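
With the ahash frontend reworked above, a caller hashes through the usual allocate/request/digest sequence sketched below. This is an illustrative kernel-style fragment assuming a synchronous completion path; "sha1" and example_ahash_digest are placeholder names, not part of the patch:

/* Illustrative use of the ahash frontend introduced above (kernel-style
 * sketch, synchronous completion only; "sha1" is just an example name). */
#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int example_ahash_digest(const u8 *data, unsigned int len, u8 *out)
{
        struct crypto_ahash *tfm;
        struct ahash_request *req;
        struct scatterlist sg;
        int err;

        tfm = crypto_alloc_ahash("sha1", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        req = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                err = -ENOMEM;
                goto out_free_tfm;
        }

        sg_init_one(&sg, data, len);
        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
        ahash_request_set_crypt(req, &sg, out, len);

        /* crypto_ahash_digest() now routes through crypto_ahash_op(), so an
         * unaligned result buffer is handled transparently. */
        err = crypto_ahash_digest(req);

        ahash_request_free(req);
out_free_tfm:
        crypto_free_ahash(tfm);
        return err;
}
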
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 56c62e2858d5..f149b1c8b76d 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -81,16 +81,35 @@ static void crypto_destroy_instance(struct crypto_alg *alg)
81 crypto_tmpl_put(tmpl); 81 crypto_tmpl_put(tmpl);
82} 82}
83 83
84static struct list_head *crypto_more_spawns(struct crypto_alg *alg,
85 struct list_head *stack,
86 struct list_head *top,
87 struct list_head *secondary_spawns)
88{
89 struct crypto_spawn *spawn, *n;
90
91 if (list_empty(stack))
92 return NULL;
93
94 spawn = list_first_entry(stack, struct crypto_spawn, list);
95 n = list_entry(spawn->list.next, struct crypto_spawn, list);
96
97 if (spawn->alg && &n->list != stack && !n->alg)
98 n->alg = (n->list.next == stack) ? alg :
99 &list_entry(n->list.next, struct crypto_spawn,
100 list)->inst->alg;
101
102 list_move(&spawn->list, secondary_spawns);
103
104 return &n->list == stack ? top : &n->inst->alg.cra_users;
105}
106
84static void crypto_remove_spawn(struct crypto_spawn *spawn, 107static void crypto_remove_spawn(struct crypto_spawn *spawn,
85 struct list_head *list, 108 struct list_head *list)
86 struct list_head *secondary_spawns)
87{ 109{
88 struct crypto_instance *inst = spawn->inst; 110 struct crypto_instance *inst = spawn->inst;
89 struct crypto_template *tmpl = inst->tmpl; 111 struct crypto_template *tmpl = inst->tmpl;
90 112
91 list_del_init(&spawn->list);
92 spawn->alg = NULL;
93
94 if (crypto_is_dead(&inst->alg)) 113 if (crypto_is_dead(&inst->alg))
95 return; 114 return;
96 115
@@ -106,25 +125,55 @@ static void crypto_remove_spawn(struct crypto_spawn *spawn,
106 hlist_del(&inst->list); 125 hlist_del(&inst->list);
107 inst->alg.cra_destroy = crypto_destroy_instance; 126 inst->alg.cra_destroy = crypto_destroy_instance;
108 127
109 list_splice(&inst->alg.cra_users, secondary_spawns); 128 BUG_ON(!list_empty(&inst->alg.cra_users));
110} 129}
111 130
112static void crypto_remove_spawns(struct list_head *spawns, 131static void crypto_remove_spawns(struct crypto_alg *alg,
113 struct list_head *list, u32 new_type) 132 struct list_head *list,
133 struct crypto_alg *nalg)
114{ 134{
135 u32 new_type = (nalg ?: alg)->cra_flags;
115 struct crypto_spawn *spawn, *n; 136 struct crypto_spawn *spawn, *n;
116 LIST_HEAD(secondary_spawns); 137 LIST_HEAD(secondary_spawns);
138 struct list_head *spawns;
139 LIST_HEAD(stack);
140 LIST_HEAD(top);
117 141
142 spawns = &alg->cra_users;
118 list_for_each_entry_safe(spawn, n, spawns, list) { 143 list_for_each_entry_safe(spawn, n, spawns, list) {
119 if ((spawn->alg->cra_flags ^ new_type) & spawn->mask) 144 if ((spawn->alg->cra_flags ^ new_type) & spawn->mask)
120 continue; 145 continue;
121 146
122 crypto_remove_spawn(spawn, list, &secondary_spawns); 147 list_move(&spawn->list, &top);
123 } 148 }
124 149
125 while (!list_empty(&secondary_spawns)) { 150 spawns = &top;
126 list_for_each_entry_safe(spawn, n, &secondary_spawns, list) 151 do {
127 crypto_remove_spawn(spawn, list, &secondary_spawns); 152 while (!list_empty(spawns)) {
153 struct crypto_instance *inst;
154
155 spawn = list_first_entry(spawns, struct crypto_spawn,
156 list);
157 inst = spawn->inst;
158
159 BUG_ON(&inst->alg == alg);
160
161 list_move(&spawn->list, &stack);
162
163 if (&inst->alg == nalg)
164 break;
165
166 spawn->alg = NULL;
167 spawns = &inst->alg.cra_users;
168 }
169 } while ((spawns = crypto_more_spawns(alg, &stack, &top,
170 &secondary_spawns)));
171
172 list_for_each_entry_safe(spawn, n, &secondary_spawns, list) {
173 if (spawn->alg)
174 list_move(&spawn->list, &spawn->alg->cra_users);
175 else
176 crypto_remove_spawn(spawn, list);
128 } 177 }
129} 178}
130 179
@@ -258,7 +307,7 @@ found:
258 q->cra_priority > alg->cra_priority) 307 q->cra_priority > alg->cra_priority)
259 continue; 308 continue;
260 309
261 crypto_remove_spawns(&q->cra_users, &list, alg->cra_flags); 310 crypto_remove_spawns(q, &list, alg);
262 } 311 }
263 312
264complete: 313complete:
@@ -330,7 +379,7 @@ static int crypto_remove_alg(struct crypto_alg *alg, struct list_head *list)
330 379
331 crypto_notify(CRYPTO_MSG_ALG_UNREGISTER, alg); 380 crypto_notify(CRYPTO_MSG_ALG_UNREGISTER, alg);
332 list_del_init(&alg->cra_list); 381 list_del_init(&alg->cra_list);
333 crypto_remove_spawns(&alg->cra_users, list, alg->cra_flags); 382 crypto_remove_spawns(alg, list, NULL);
334 383
335 return 0; 384 return 0;
336} 385}
@@ -488,20 +537,38 @@ int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg,
488} 537}
489EXPORT_SYMBOL_GPL(crypto_init_spawn); 538EXPORT_SYMBOL_GPL(crypto_init_spawn);
490 539
540int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg,
541 struct crypto_instance *inst,
542 const struct crypto_type *frontend)
543{
544 int err = -EINVAL;
545
546 if (frontend && (alg->cra_flags ^ frontend->type) & frontend->maskset)
547 goto out;
548
549 spawn->frontend = frontend;
550 err = crypto_init_spawn(spawn, alg, inst, frontend->maskset);
551
552out:
553 return err;
554}
555EXPORT_SYMBOL_GPL(crypto_init_spawn2);
556
491void crypto_drop_spawn(struct crypto_spawn *spawn) 557void crypto_drop_spawn(struct crypto_spawn *spawn)
492{ 558{
559 if (!spawn->alg)
560 return;
561
493 down_write(&crypto_alg_sem); 562 down_write(&crypto_alg_sem);
494 list_del(&spawn->list); 563 list_del(&spawn->list);
495 up_write(&crypto_alg_sem); 564 up_write(&crypto_alg_sem);
496} 565}
497EXPORT_SYMBOL_GPL(crypto_drop_spawn); 566EXPORT_SYMBOL_GPL(crypto_drop_spawn);
498 567
499struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type, 568static struct crypto_alg *crypto_spawn_alg(struct crypto_spawn *spawn)
500 u32 mask)
501{ 569{
502 struct crypto_alg *alg; 570 struct crypto_alg *alg;
503 struct crypto_alg *alg2; 571 struct crypto_alg *alg2;
504 struct crypto_tfm *tfm;
505 572
506 down_read(&crypto_alg_sem); 573 down_read(&crypto_alg_sem);
507 alg = spawn->alg; 574 alg = spawn->alg;
@@ -516,6 +583,19 @@ struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
516 return ERR_PTR(-EAGAIN); 583 return ERR_PTR(-EAGAIN);
517 } 584 }
518 585
586 return alg;
587}
588
589struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
590 u32 mask)
591{
592 struct crypto_alg *alg;
593 struct crypto_tfm *tfm;
594
595 alg = crypto_spawn_alg(spawn);
596 if (IS_ERR(alg))
597 return ERR_CAST(alg);
598
519 tfm = ERR_PTR(-EINVAL); 599 tfm = ERR_PTR(-EINVAL);
520 if (unlikely((alg->cra_flags ^ type) & mask)) 600 if (unlikely((alg->cra_flags ^ type) & mask))
521 goto out_put_alg; 601 goto out_put_alg;
@@ -532,6 +612,27 @@ out_put_alg:
532} 612}
533EXPORT_SYMBOL_GPL(crypto_spawn_tfm); 613EXPORT_SYMBOL_GPL(crypto_spawn_tfm);
534 614
615void *crypto_spawn_tfm2(struct crypto_spawn *spawn)
616{
617 struct crypto_alg *alg;
618 struct crypto_tfm *tfm;
619
620 alg = crypto_spawn_alg(spawn);
621 if (IS_ERR(alg))
622 return ERR_CAST(alg);
623
624 tfm = crypto_create_tfm(alg, spawn->frontend);
625 if (IS_ERR(tfm))
626 goto out_put_alg;
627
628 return tfm;
629
630out_put_alg:
631 crypto_mod_put(alg);
632 return tfm;
633}
634EXPORT_SYMBOL_GPL(crypto_spawn_tfm2);
635
535int crypto_register_notifier(struct notifier_block *nb) 636int crypto_register_notifier(struct notifier_block *nb)
536{ 637{
537 return blocking_notifier_chain_register(&crypto_chain, nb); 638 return blocking_notifier_chain_register(&crypto_chain, nb);
@@ -595,7 +696,9 @@ const char *crypto_attr_alg_name(struct rtattr *rta)
595} 696}
596EXPORT_SYMBOL_GPL(crypto_attr_alg_name); 697EXPORT_SYMBOL_GPL(crypto_attr_alg_name);
597 698
598struct crypto_alg *crypto_attr_alg(struct rtattr *rta, u32 type, u32 mask) 699struct crypto_alg *crypto_attr_alg2(struct rtattr *rta,
700 const struct crypto_type *frontend,
701 u32 type, u32 mask)
599{ 702{
600 const char *name; 703 const char *name;
601 int err; 704 int err;
@@ -605,9 +708,9 @@ struct crypto_alg *crypto_attr_alg(struct rtattr *rta, u32 type, u32 mask)
605 if (IS_ERR(name)) 708 if (IS_ERR(name))
606 return ERR_PTR(err); 709 return ERR_PTR(err);
607 710
608 return crypto_alg_mod_lookup(name, type, mask); 711 return crypto_find_alg(name, frontend, type, mask);
609} 712}
610EXPORT_SYMBOL_GPL(crypto_attr_alg); 713EXPORT_SYMBOL_GPL(crypto_attr_alg2);
611 714
612int crypto_attr_u32(struct rtattr *rta, u32 *num) 715int crypto_attr_u32(struct rtattr *rta, u32 *num)
613{ 716{
@@ -627,17 +730,20 @@ int crypto_attr_u32(struct rtattr *rta, u32 *num)
627} 730}
628EXPORT_SYMBOL_GPL(crypto_attr_u32); 731EXPORT_SYMBOL_GPL(crypto_attr_u32);
629 732
630struct crypto_instance *crypto_alloc_instance(const char *name, 733void *crypto_alloc_instance2(const char *name, struct crypto_alg *alg,
631 struct crypto_alg *alg) 734 unsigned int head)
632{ 735{
633 struct crypto_instance *inst; 736 struct crypto_instance *inst;
634 struct crypto_spawn *spawn; 737 char *p;
635 int err; 738 int err;
636 739
637 inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); 740 p = kzalloc(head + sizeof(*inst) + sizeof(struct crypto_spawn),
638 if (!inst) 741 GFP_KERNEL);
742 if (!p)
639 return ERR_PTR(-ENOMEM); 743 return ERR_PTR(-ENOMEM);
640 744
745 inst = (void *)(p + head);
746
641 err = -ENAMETOOLONG; 747 err = -ENAMETOOLONG;
642 if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", name, 748 if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", name,
643 alg->cra_name) >= CRYPTO_MAX_ALG_NAME) 749 alg->cra_name) >= CRYPTO_MAX_ALG_NAME)
@@ -647,6 +753,25 @@ struct crypto_instance *crypto_alloc_instance(const char *name,
647 name, alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) 753 name, alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
648 goto err_free_inst; 754 goto err_free_inst;
649 755
756 return p;
757
758err_free_inst:
759 kfree(p);
760 return ERR_PTR(err);
761}
762EXPORT_SYMBOL_GPL(crypto_alloc_instance2);
763
764struct crypto_instance *crypto_alloc_instance(const char *name,
765 struct crypto_alg *alg)
766{
767 struct crypto_instance *inst;
768 struct crypto_spawn *spawn;
769 int err;
770
771 inst = crypto_alloc_instance2(name, alg, 0);
772 if (IS_ERR(inst))
773 goto out;
774
650 spawn = crypto_instance_ctx(inst); 775 spawn = crypto_instance_ctx(inst);
651 err = crypto_init_spawn(spawn, alg, inst, 776 err = crypto_init_spawn(spawn, alg, inst,
652 CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); 777 CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
@@ -658,7 +783,10 @@ struct crypto_instance *crypto_alloc_instance(const char *name,
658 783
659err_free_inst: 784err_free_inst:
660 kfree(inst); 785 kfree(inst);
661 return ERR_PTR(err); 786 inst = ERR_PTR(err);
787
788out:
789 return inst;
662} 790}
663EXPORT_SYMBOL_GPL(crypto_alloc_instance); 791EXPORT_SYMBOL_GPL(crypto_alloc_instance);
664 792
@@ -692,7 +820,7 @@ out:
692} 820}
693EXPORT_SYMBOL_GPL(crypto_enqueue_request); 821EXPORT_SYMBOL_GPL(crypto_enqueue_request);
694 822
695struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue) 823void *__crypto_dequeue_request(struct crypto_queue *queue, unsigned int offset)
696{ 824{
697 struct list_head *request; 825 struct list_head *request;
698 826
@@ -707,7 +835,14 @@ struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue)
707 request = queue->list.next; 835 request = queue->list.next;
708 list_del(request); 836 list_del(request);
709 837
710 return list_entry(request, struct crypto_async_request, list); 838 return (char *)list_entry(request, struct crypto_async_request, list) -
839 offset;
840}
841EXPORT_SYMBOL_GPL(__crypto_dequeue_request);
842
843struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue)
844{
845 return __crypto_dequeue_request(queue, 0);
711} 846}
712EXPORT_SYMBOL_GPL(crypto_dequeue_request); 847EXPORT_SYMBOL_GPL(crypto_dequeue_request);
713 848
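
crypto_alloc_instance2() above reserves caller-specified headroom in front of the generic crypto_instance so that a larger, frontend-specific instance can wrap it in a single allocation. A user-space model of that layout (struct names are invented for illustration, and the trailing spawn area is omitted):

/* User-space model of the crypto_alloc_instance2() headroom idea: one
 * allocation where the caller's larger wrapper starts at the buffer and the
 * generic instance starts at offset 'head'. Struct names are illustrative,
 * not the kernel's. */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct generic_inst { char name[64]; };
struct frontend_inst { long frontend_private[4]; struct generic_inst base; };

static void *alloc_instance2(const char *name, size_t head)
{
        char *p = calloc(1, head + sizeof(struct generic_inst));
        struct generic_inst *inst;

        if (!p)
                return NULL;

        inst = (void *)(p + head);      /* generic part lives at offset 'head' */
        snprintf(inst->name, sizeof(inst->name), "tmpl(%s)", name);
        return p;                       /* caller gets the whole buffer back */
}

int main(void)
{
        size_t head = offsetof(struct frontend_inst, base);
        struct frontend_inst *wrap = alloc_instance2("sha1", head);

        if (!wrap)
                return 1;

        printf("generic name: %s\n", wrap->base.name);
        free(wrap);
        return 0;
}
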
diff --git a/crypto/algboss.c b/crypto/algboss.c
index 9908dd830c26..412241ce4cfa 100644
--- a/crypto/algboss.c
+++ b/crypto/algboss.c
@@ -68,6 +68,11 @@ static int cryptomgr_probe(void *data)
68 goto err; 68 goto err;
69 69
70 do { 70 do {
71 if (tmpl->create) {
72 err = tmpl->create(tmpl, param->tb);
73 continue;
74 }
75
71 inst = tmpl->alloc(param->tb); 76 inst = tmpl->alloc(param->tb);
72 if (IS_ERR(inst)) 77 if (IS_ERR(inst))
73 err = PTR_ERR(inst); 78 err = PTR_ERR(inst);
diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c
index d80ed4c1e009..3aa6e3834bfe 100644
--- a/crypto/ansi_cprng.c
+++ b/crypto/ansi_cprng.c
@@ -187,7 +187,6 @@ static int _get_more_prng_bytes(struct prng_context *ctx)
187/* Our exported functions */ 187/* Our exported functions */
188static int get_prng_bytes(char *buf, size_t nbytes, struct prng_context *ctx) 188static int get_prng_bytes(char *buf, size_t nbytes, struct prng_context *ctx)
189{ 189{
190 unsigned long flags;
191 unsigned char *ptr = buf; 190 unsigned char *ptr = buf;
192 unsigned int byte_count = (unsigned int)nbytes; 191 unsigned int byte_count = (unsigned int)nbytes;
193 int err; 192 int err;
@@ -196,7 +195,7 @@ static int get_prng_bytes(char *buf, size_t nbytes, struct prng_context *ctx)
196 if (nbytes < 0) 195 if (nbytes < 0)
197 return -EINVAL; 196 return -EINVAL;
198 197
199 spin_lock_irqsave(&ctx->prng_lock, flags); 198 spin_lock_bh(&ctx->prng_lock);
200 199
201 err = -EINVAL; 200 err = -EINVAL;
202 if (ctx->flags & PRNG_NEED_RESET) 201 if (ctx->flags & PRNG_NEED_RESET)
@@ -268,7 +267,7 @@ empty_rbuf:
268 goto remainder; 267 goto remainder;
269 268
270done: 269done:
271 spin_unlock_irqrestore(&ctx->prng_lock, flags); 270 spin_unlock_bh(&ctx->prng_lock);
272 dbgprint(KERN_CRIT "returning %d from get_prng_bytes in context %p\n", 271 dbgprint(KERN_CRIT "returning %d from get_prng_bytes in context %p\n",
273 err, ctx); 272 err, ctx);
274 return err; 273 return err;
@@ -284,10 +283,9 @@ static int reset_prng_context(struct prng_context *ctx,
284 unsigned char *V, unsigned char *DT) 283 unsigned char *V, unsigned char *DT)
285{ 284{
286 int ret; 285 int ret;
287 int rc = -EINVAL;
288 unsigned char *prng_key; 286 unsigned char *prng_key;
289 287
290 spin_lock(&ctx->prng_lock); 288 spin_lock_bh(&ctx->prng_lock);
291 ctx->flags |= PRNG_NEED_RESET; 289 ctx->flags |= PRNG_NEED_RESET;
292 290
293 prng_key = (key != NULL) ? key : (unsigned char *)DEFAULT_PRNG_KEY; 291 prng_key = (key != NULL) ? key : (unsigned char *)DEFAULT_PRNG_KEY;
@@ -308,34 +306,20 @@ static int reset_prng_context(struct prng_context *ctx,
308 memset(ctx->rand_data, 0, DEFAULT_BLK_SZ); 306 memset(ctx->rand_data, 0, DEFAULT_BLK_SZ);
309 memset(ctx->last_rand_data, 0, DEFAULT_BLK_SZ); 307 memset(ctx->last_rand_data, 0, DEFAULT_BLK_SZ);
310 308
311 if (ctx->tfm)
312 crypto_free_cipher(ctx->tfm);
313
314 ctx->tfm = crypto_alloc_cipher("aes", 0, 0);
315 if (IS_ERR(ctx->tfm)) {
316 dbgprint(KERN_CRIT "Failed to alloc tfm for context %p\n",
317 ctx);
318 ctx->tfm = NULL;
319 goto out;
320 }
321
322 ctx->rand_data_valid = DEFAULT_BLK_SZ; 309 ctx->rand_data_valid = DEFAULT_BLK_SZ;
323 310
324 ret = crypto_cipher_setkey(ctx->tfm, prng_key, klen); 311 ret = crypto_cipher_setkey(ctx->tfm, prng_key, klen);
325 if (ret) { 312 if (ret) {
326 dbgprint(KERN_CRIT "PRNG: setkey() failed flags=%x\n", 313 dbgprint(KERN_CRIT "PRNG: setkey() failed flags=%x\n",
327 crypto_cipher_get_flags(ctx->tfm)); 314 crypto_cipher_get_flags(ctx->tfm));
328 crypto_free_cipher(ctx->tfm);
329 goto out; 315 goto out;
330 } 316 }
331 317
332 rc = 0; 318 ret = 0;
333 ctx->flags &= ~PRNG_NEED_RESET; 319 ctx->flags &= ~PRNG_NEED_RESET;
334out: 320out:
335 spin_unlock(&ctx->prng_lock); 321 spin_unlock_bh(&ctx->prng_lock);
336 322 return ret;
337 return rc;
338
339} 323}
340 324
341static int cprng_init(struct crypto_tfm *tfm) 325static int cprng_init(struct crypto_tfm *tfm)
@@ -343,6 +327,12 @@ static int cprng_init(struct crypto_tfm *tfm)
343 struct prng_context *ctx = crypto_tfm_ctx(tfm); 327 struct prng_context *ctx = crypto_tfm_ctx(tfm);
344 328
345 spin_lock_init(&ctx->prng_lock); 329 spin_lock_init(&ctx->prng_lock);
330 ctx->tfm = crypto_alloc_cipher("aes", 0, 0);
331 if (IS_ERR(ctx->tfm)) {
332 dbgprint(KERN_CRIT "Failed to alloc tfm for context %p\n",
333 ctx);
334 return PTR_ERR(ctx->tfm);
335 }
346 336
347 if (reset_prng_context(ctx, NULL, DEFAULT_PRNG_KSZ, NULL, NULL) < 0) 337 if (reset_prng_context(ctx, NULL, DEFAULT_PRNG_KSZ, NULL, NULL) < 0)
348 return -EINVAL; 338 return -EINVAL;
@@ -418,17 +408,10 @@ static struct crypto_alg rng_alg = {
418/* Module initialization */ 408/* Module initialization */
419static int __init prng_mod_init(void) 409static int __init prng_mod_init(void)
420{ 410{
421 int ret = 0;
422
423 if (fips_enabled) 411 if (fips_enabled)
424 rng_alg.cra_priority += 200; 412 rng_alg.cra_priority += 200;
425 413
426 ret = crypto_register_alg(&rng_alg); 414 return crypto_register_alg(&rng_alg);
427
428 if (ret)
429 goto out;
430out:
431 return 0;
432} 415}
433 416
434static void __exit prng_mod_fini(void) 417static void __exit prng_mod_fini(void)
diff --git a/crypto/api.c b/crypto/api.c
index d5944f92b416..798526d90538 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -285,13 +285,6 @@ static int crypto_init_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
285 switch (crypto_tfm_alg_type(tfm)) { 285 switch (crypto_tfm_alg_type(tfm)) {
286 case CRYPTO_ALG_TYPE_CIPHER: 286 case CRYPTO_ALG_TYPE_CIPHER:
287 return crypto_init_cipher_ops(tfm); 287 return crypto_init_cipher_ops(tfm);
288
289 case CRYPTO_ALG_TYPE_DIGEST:
290 if ((mask & CRYPTO_ALG_TYPE_HASH_MASK) !=
291 CRYPTO_ALG_TYPE_HASH_MASK)
292 return crypto_init_digest_ops_async(tfm);
293 else
294 return crypto_init_digest_ops(tfm);
295 288
296 case CRYPTO_ALG_TYPE_COMPRESS: 289 case CRYPTO_ALG_TYPE_COMPRESS:
297 return crypto_init_compress_ops(tfm); 290 return crypto_init_compress_ops(tfm);
@@ -318,11 +311,7 @@ static void crypto_exit_ops(struct crypto_tfm *tfm)
318 case CRYPTO_ALG_TYPE_CIPHER: 311 case CRYPTO_ALG_TYPE_CIPHER:
319 crypto_exit_cipher_ops(tfm); 312 crypto_exit_cipher_ops(tfm);
320 break; 313 break;
321 314
322 case CRYPTO_ALG_TYPE_DIGEST:
323 crypto_exit_digest_ops(tfm);
324 break;
325
326 case CRYPTO_ALG_TYPE_COMPRESS: 315 case CRYPTO_ALG_TYPE_COMPRESS:
327 crypto_exit_compress_ops(tfm); 316 crypto_exit_compress_ops(tfm);
328 break; 317 break;
@@ -349,11 +338,7 @@ static unsigned int crypto_ctxsize(struct crypto_alg *alg, u32 type, u32 mask)
349 case CRYPTO_ALG_TYPE_CIPHER: 338 case CRYPTO_ALG_TYPE_CIPHER:
350 len += crypto_cipher_ctxsize(alg); 339 len += crypto_cipher_ctxsize(alg);
351 break; 340 break;
352 341
353 case CRYPTO_ALG_TYPE_DIGEST:
354 len += crypto_digest_ctxsize(alg);
355 break;
356
357 case CRYPTO_ALG_TYPE_COMPRESS: 342 case CRYPTO_ALG_TYPE_COMPRESS:
358 len += crypto_compress_ctxsize(alg); 343 len += crypto_compress_ctxsize(alg);
359 break; 344 break;
@@ -472,7 +457,7 @@ void *crypto_create_tfm(struct crypto_alg *alg,
472 int err = -ENOMEM; 457 int err = -ENOMEM;
473 458
474 tfmsize = frontend->tfmsize; 459 tfmsize = frontend->tfmsize;
475 total = tfmsize + sizeof(*tfm) + frontend->extsize(alg, frontend); 460 total = tfmsize + sizeof(*tfm) + frontend->extsize(alg);
476 461
477 mem = kzalloc(total, GFP_KERNEL); 462 mem = kzalloc(total, GFP_KERNEL);
478 if (mem == NULL) 463 if (mem == NULL)
@@ -481,7 +466,7 @@ void *crypto_create_tfm(struct crypto_alg *alg,
481 tfm = (struct crypto_tfm *)(mem + tfmsize); 466 tfm = (struct crypto_tfm *)(mem + tfmsize);
482 tfm->__crt_alg = alg; 467 tfm->__crt_alg = alg;
483 468
484 err = frontend->init_tfm(tfm, frontend); 469 err = frontend->init_tfm(tfm);
485 if (err) 470 if (err)
486 goto out_free_tfm; 471 goto out_free_tfm;
487 472
@@ -503,6 +488,27 @@ out:
503} 488}
504EXPORT_SYMBOL_GPL(crypto_create_tfm); 489EXPORT_SYMBOL_GPL(crypto_create_tfm);
505 490
491struct crypto_alg *crypto_find_alg(const char *alg_name,
492 const struct crypto_type *frontend,
493 u32 type, u32 mask)
494{
495 struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask) =
496 crypto_alg_mod_lookup;
497
498 if (frontend) {
499 type &= frontend->maskclear;
500 mask &= frontend->maskclear;
501 type |= frontend->type;
502 mask |= frontend->maskset;
503
504 if (frontend->lookup)
505 lookup = frontend->lookup;
506 }
507
508 return lookup(alg_name, type, mask);
509}
510EXPORT_SYMBOL_GPL(crypto_find_alg);
511
506/* 512/*
507 * crypto_alloc_tfm - Locate algorithm and allocate transform 513 * crypto_alloc_tfm - Locate algorithm and allocate transform
508 * @alg_name: Name of algorithm 514 * @alg_name: Name of algorithm
@@ -526,21 +532,13 @@ EXPORT_SYMBOL_GPL(crypto_create_tfm);
526void *crypto_alloc_tfm(const char *alg_name, 532void *crypto_alloc_tfm(const char *alg_name,
527 const struct crypto_type *frontend, u32 type, u32 mask) 533 const struct crypto_type *frontend, u32 type, u32 mask)
528{ 534{
529 struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask);
530 void *tfm; 535 void *tfm;
531 int err; 536 int err;
532 537
533 type &= frontend->maskclear;
534 mask &= frontend->maskclear;
535 type |= frontend->type;
536 mask |= frontend->maskset;
537
538 lookup = frontend->lookup ?: crypto_alg_mod_lookup;
539
540 for (;;) { 538 for (;;) {
541 struct crypto_alg *alg; 539 struct crypto_alg *alg;
542 540
543 alg = lookup(alg_name, type, mask); 541 alg = crypto_find_alg(alg_name, frontend, type, mask);
544 if (IS_ERR(alg)) { 542 if (IS_ERR(alg)) {
545 err = PTR_ERR(alg); 543 err = PTR_ERR(alg);
546 goto err; 544 goto err;
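
crypto_find_alg() centralises the type/mask adjustment that crypto_alloc_tfm() previously did inline: the frontend first clears the bits it owns, then forces its own type and lookup mask, so a caller-supplied type cannot select the wrong kind of algorithm. A user-space model with hypothetical bit values (the real constants live in linux/crypto.h):

/* User-space model of the frontend type/mask narrowing moved into
 * crypto_find_alg(). The bit values below are hypothetical, chosen only to
 * show the effect. */
#include <stdint.h>
#include <stdio.h>

struct frontend {
        uint32_t type;        /* bits this frontend requires */
        uint32_t maskclear;   /* caller bits the frontend discards */
        uint32_t maskset;     /* bits that must match during lookup */
};

static void narrow(const struct frontend *fe, uint32_t *type, uint32_t *mask)
{
        *type &= fe->maskclear;
        *mask &= fe->maskclear;
        *type |= fe->type;
        *mask |= fe->maskset;
}

int main(void)
{
        /* Hypothetical layout: low nibble = algorithm type, bit 7 = ASYNC. */
        const uint32_t TYPE_MASK = 0x0f, TYPE_AHASH = 0x0a, ASYNC = 0x80;
        struct frontend ahash = {
                .type = TYPE_AHASH, .maskclear = ~TYPE_MASK, .maskset = TYPE_MASK,
        };
        uint32_t type = 0x03 | ASYNC, mask = ASYNC;  /* caller asked for a cipher */

        narrow(&ahash, &type, &mask);
        printf("type=0x%02x mask=0x%02x\n", type, mask);  /* keeps ASYNC, forces the hash type */
        return 0;
}

The same helper is reused by crypto_attr_alg2() in algapi.c, so template parameters resolved from rtattrs go through exactly the same narrowing.
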
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 5793b64c81a8..4d6f49a5daeb 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -23,24 +23,36 @@
23#include <linux/slab.h> 23#include <linux/slab.h>
24#include <linux/spinlock.h> 24#include <linux/spinlock.h>
25 25
26typedef u8 *(*authenc_ahash_t)(struct aead_request *req, unsigned int flags);
27
26struct authenc_instance_ctx { 28struct authenc_instance_ctx {
27 struct crypto_spawn auth; 29 struct crypto_ahash_spawn auth;
28 struct crypto_skcipher_spawn enc; 30 struct crypto_skcipher_spawn enc;
29}; 31};
30 32
31struct crypto_authenc_ctx { 33struct crypto_authenc_ctx {
32 spinlock_t auth_lock; 34 unsigned int reqoff;
33 struct crypto_hash *auth; 35 struct crypto_ahash *auth;
34 struct crypto_ablkcipher *enc; 36 struct crypto_ablkcipher *enc;
35}; 37};
36 38
39struct authenc_request_ctx {
40 unsigned int cryptlen;
41 struct scatterlist *sg;
42 struct scatterlist asg[2];
43 struct scatterlist cipher[2];
44 crypto_completion_t complete;
45 crypto_completion_t update_complete;
46 char tail[];
47};
48
37static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key, 49static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
38 unsigned int keylen) 50 unsigned int keylen)
39{ 51{
40 unsigned int authkeylen; 52 unsigned int authkeylen;
41 unsigned int enckeylen; 53 unsigned int enckeylen;
42 struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); 54 struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
43 struct crypto_hash *auth = ctx->auth; 55 struct crypto_ahash *auth = ctx->auth;
44 struct crypto_ablkcipher *enc = ctx->enc; 56 struct crypto_ablkcipher *enc = ctx->enc;
45 struct rtattr *rta = (void *)key; 57 struct rtattr *rta = (void *)key;
46 struct crypto_authenc_key_param *param; 58 struct crypto_authenc_key_param *param;
@@ -64,11 +76,11 @@ static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
64 76
65 authkeylen = keylen - enckeylen; 77 authkeylen = keylen - enckeylen;
66 78
67 crypto_hash_clear_flags(auth, CRYPTO_TFM_REQ_MASK); 79 crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
68 crypto_hash_set_flags(auth, crypto_aead_get_flags(authenc) & 80 crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc) &
69 CRYPTO_TFM_REQ_MASK); 81 CRYPTO_TFM_REQ_MASK);
70 err = crypto_hash_setkey(auth, key, authkeylen); 82 err = crypto_ahash_setkey(auth, key, authkeylen);
71 crypto_aead_set_flags(authenc, crypto_hash_get_flags(auth) & 83 crypto_aead_set_flags(authenc, crypto_ahash_get_flags(auth) &
72 CRYPTO_TFM_RES_MASK); 84 CRYPTO_TFM_RES_MASK);
73 85
74 if (err) 86 if (err)
@@ -103,40 +115,198 @@ static void authenc_chain(struct scatterlist *head, struct scatterlist *sg,
103 sg_mark_end(head); 115 sg_mark_end(head);
104} 116}
105 117
106static u8 *crypto_authenc_hash(struct aead_request *req, unsigned int flags, 118static void authenc_geniv_ahash_update_done(struct crypto_async_request *areq,
107 struct scatterlist *cipher, 119 int err)
108 unsigned int cryptlen) 120{
121 struct aead_request *req = areq->data;
122 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
123 struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
124 struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
125 struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
126
127 if (err)
128 goto out;
129
130 ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result,
131 areq_ctx->cryptlen);
132 ahash_request_set_callback(ahreq, aead_request_flags(req) &
133 CRYPTO_TFM_REQ_MAY_SLEEP,
134 areq_ctx->complete, req);
135
136 err = crypto_ahash_finup(ahreq);
137 if (err)
138 goto out;
139
140 scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg,
141 areq_ctx->cryptlen,
142 crypto_aead_authsize(authenc), 1);
143
144out:
145 aead_request_complete(req, err);
146}
147
148static void authenc_geniv_ahash_done(struct crypto_async_request *areq, int err)
149{
150 struct aead_request *req = areq->data;
151 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
152 struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
153 struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
154 struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
155
156 if (err)
157 goto out;
158
159 scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg,
160 areq_ctx->cryptlen,
161 crypto_aead_authsize(authenc), 1);
162
163out:
164 aead_request_complete(req, err);
165}
166
167static void authenc_verify_ahash_update_done(struct crypto_async_request *areq,
168 int err)
109{ 169{
170 u8 *ihash;
171 unsigned int authsize;
172 struct ablkcipher_request *abreq;
173 struct aead_request *req = areq->data;
110 struct crypto_aead *authenc = crypto_aead_reqtfm(req); 174 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
111 struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc); 175 struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
112 struct crypto_hash *auth = ctx->auth; 176 struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
113 struct hash_desc desc = { 177 struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
114 .tfm = auth, 178
115 .flags = aead_request_flags(req) & flags, 179 if (err)
116 }; 180 goto out;
117 u8 *hash = aead_request_ctx(req); 181
182 ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result,
183 areq_ctx->cryptlen);
184 ahash_request_set_callback(ahreq, aead_request_flags(req) &
185 CRYPTO_TFM_REQ_MAY_SLEEP,
186 areq_ctx->complete, req);
187
188 err = crypto_ahash_finup(ahreq);
189 if (err)
190 goto out;
191
192 authsize = crypto_aead_authsize(authenc);
193 ihash = ahreq->result + authsize;
194 scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
195 authsize, 0);
196
197 err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG: 0;
198 if (err)
199 goto out;
200
201 abreq = aead_request_ctx(req);
202 ablkcipher_request_set_tfm(abreq, ctx->enc);
203 ablkcipher_request_set_callback(abreq, aead_request_flags(req),
204 req->base.complete, req->base.data);
205 ablkcipher_request_set_crypt(abreq, req->src, req->dst,
206 req->cryptlen, req->iv);
207
208 err = crypto_ablkcipher_decrypt(abreq);
209
210out:
211 aead_request_complete(req, err);
212}
213
214static void authenc_verify_ahash_done(struct crypto_async_request *areq,
215 int err)
216{
217 u8 *ihash;
218 unsigned int authsize;
219 struct ablkcipher_request *abreq;
220 struct aead_request *req = areq->data;
221 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
222 struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
223 struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
224 struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
225
226 if (err)
227 goto out;
228
229 authsize = crypto_aead_authsize(authenc);
230 ihash = ahreq->result + authsize;
231 scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
232 authsize, 0);
233
234 err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG: 0;
235 if (err)
236 goto out;
237
238 abreq = aead_request_ctx(req);
239 ablkcipher_request_set_tfm(abreq, ctx->enc);
240 ablkcipher_request_set_callback(abreq, aead_request_flags(req),
241 req->base.complete, req->base.data);
242 ablkcipher_request_set_crypt(abreq, req->src, req->dst,
243 req->cryptlen, req->iv);
244
245 err = crypto_ablkcipher_decrypt(abreq);
246
247out:
248 aead_request_complete(req, err);
249}
250
251static u8 *crypto_authenc_ahash_fb(struct aead_request *req, unsigned int flags)
252{
253 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
254 struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
255 struct crypto_ahash *auth = ctx->auth;
256 struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
257 struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
258 u8 *hash = areq_ctx->tail;
118 int err; 259 int err;
119 260
120 hash = (u8 *)ALIGN((unsigned long)hash + crypto_hash_alignmask(auth), 261 hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth),
121 crypto_hash_alignmask(auth) + 1); 262 crypto_ahash_alignmask(auth) + 1);
263
264 ahash_request_set_tfm(ahreq, auth);
122 265
123 spin_lock_bh(&ctx->auth_lock); 266 err = crypto_ahash_init(ahreq);
124 err = crypto_hash_init(&desc);
125 if (err) 267 if (err)
126 goto auth_unlock; 268 return ERR_PTR(err);
269
270 ahash_request_set_crypt(ahreq, req->assoc, hash, req->assoclen);
271 ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
272 areq_ctx->update_complete, req);
127 273
128 err = crypto_hash_update(&desc, req->assoc, req->assoclen); 274 err = crypto_ahash_update(ahreq);
129 if (err) 275 if (err)
130 goto auth_unlock; 276 return ERR_PTR(err);
277
278 ahash_request_set_crypt(ahreq, areq_ctx->sg, hash,
279 areq_ctx->cryptlen);
280 ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
281 areq_ctx->complete, req);
131 282
132 err = crypto_hash_update(&desc, cipher, cryptlen); 283 err = crypto_ahash_finup(ahreq);
133 if (err) 284 if (err)
134 goto auth_unlock; 285 return ERR_PTR(err);
135 286
136 err = crypto_hash_final(&desc, hash); 287 return hash;
137auth_unlock: 288}
138 spin_unlock_bh(&ctx->auth_lock); 289
290static u8 *crypto_authenc_ahash(struct aead_request *req, unsigned int flags)
291{
292 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
293 struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
294 struct crypto_ahash *auth = ctx->auth;
295 struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
296 struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
297 u8 *hash = areq_ctx->tail;
298 int err;
139 299
300 hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth),
301 crypto_ahash_alignmask(auth) + 1);
302
303 ahash_request_set_tfm(ahreq, auth);
304 ahash_request_set_crypt(ahreq, areq_ctx->sg, hash,
305 areq_ctx->cryptlen);
306 ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
307 areq_ctx->complete, req);
308
309 err = crypto_ahash_digest(ahreq);
140 if (err) 310 if (err)
141 return ERR_PTR(err); 311 return ERR_PTR(err);
142 312
@@ -147,11 +317,15 @@ static int crypto_authenc_genicv(struct aead_request *req, u8 *iv,
147 unsigned int flags) 317 unsigned int flags)
148{ 318{
149 struct crypto_aead *authenc = crypto_aead_reqtfm(req); 319 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
320 struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
150 struct scatterlist *dst = req->dst; 321 struct scatterlist *dst = req->dst;
151 struct scatterlist cipher[2]; 322 struct scatterlist *assoc = req->assoc;
152 struct page *dstp; 323 struct scatterlist *cipher = areq_ctx->cipher;
324 struct scatterlist *asg = areq_ctx->asg;
153 unsigned int ivsize = crypto_aead_ivsize(authenc); 325 unsigned int ivsize = crypto_aead_ivsize(authenc);
154 unsigned int cryptlen; 326 unsigned int cryptlen = req->cryptlen;
327 authenc_ahash_t authenc_ahash_fn = crypto_authenc_ahash_fb;
328 struct page *dstp;
155 u8 *vdst; 329 u8 *vdst;
156 u8 *hash; 330 u8 *hash;
157 331
@@ -163,10 +337,25 @@ static int crypto_authenc_genicv(struct aead_request *req, u8 *iv,
163 sg_set_buf(cipher, iv, ivsize); 337 sg_set_buf(cipher, iv, ivsize);
164 authenc_chain(cipher, dst, vdst == iv + ivsize); 338 authenc_chain(cipher, dst, vdst == iv + ivsize);
165 dst = cipher; 339 dst = cipher;
340 cryptlen += ivsize;
166 } 341 }
167 342
168 cryptlen = req->cryptlen + ivsize; 343 if (sg_is_last(assoc)) {
169 hash = crypto_authenc_hash(req, flags, dst, cryptlen); 344 authenc_ahash_fn = crypto_authenc_ahash;
345 sg_init_table(asg, 2);
346 sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset);
347 authenc_chain(asg, dst, 0);
348 dst = asg;
349 cryptlen += req->assoclen;
350 }
351
352 areq_ctx->cryptlen = cryptlen;
353 areq_ctx->sg = dst;
354
355 areq_ctx->complete = authenc_geniv_ahash_done;
356 areq_ctx->update_complete = authenc_geniv_ahash_update_done;
357
358 hash = authenc_ahash_fn(req, flags);
170 if (IS_ERR(hash)) 359 if (IS_ERR(hash))
171 return PTR_ERR(hash); 360 return PTR_ERR(hash);
172 361
@@ -256,22 +445,25 @@ static int crypto_authenc_givencrypt(struct aead_givcrypt_request *req)
256} 445}
257 446
258static int crypto_authenc_verify(struct aead_request *req, 447static int crypto_authenc_verify(struct aead_request *req,
259 struct scatterlist *cipher, 448 authenc_ahash_t authenc_ahash_fn)
260 unsigned int cryptlen)
261{ 449{
262 struct crypto_aead *authenc = crypto_aead_reqtfm(req); 450 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
451 struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
263 u8 *ohash; 452 u8 *ohash;
264 u8 *ihash; 453 u8 *ihash;
265 unsigned int authsize; 454 unsigned int authsize;
266 455
267 ohash = crypto_authenc_hash(req, CRYPTO_TFM_REQ_MAY_SLEEP, cipher, 456 areq_ctx->complete = authenc_verify_ahash_done;
268 cryptlen); 457 areq_ctx->update_complete = authenc_verify_ahash_update_done;
458
459 ohash = authenc_ahash_fn(req, CRYPTO_TFM_REQ_MAY_SLEEP);
269 if (IS_ERR(ohash)) 460 if (IS_ERR(ohash))
270 return PTR_ERR(ohash); 461 return PTR_ERR(ohash);
271 462
272 authsize = crypto_aead_authsize(authenc); 463 authsize = crypto_aead_authsize(authenc);
273 ihash = ohash + authsize; 464 ihash = ohash + authsize;
274 scatterwalk_map_and_copy(ihash, cipher, cryptlen, authsize, 0); 465 scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
466 authsize, 0);
275 return memcmp(ihash, ohash, authsize) ? -EBADMSG: 0; 467 return memcmp(ihash, ohash, authsize) ? -EBADMSG: 0;
276} 468}
277 469
@@ -279,10 +471,14 @@ static int crypto_authenc_iverify(struct aead_request *req, u8 *iv,
279 unsigned int cryptlen) 471 unsigned int cryptlen)
280{ 472{
281 struct crypto_aead *authenc = crypto_aead_reqtfm(req); 473 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
474 struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
282 struct scatterlist *src = req->src; 475 struct scatterlist *src = req->src;
283 struct scatterlist cipher[2]; 476 struct scatterlist *assoc = req->assoc;
284 struct page *srcp; 477 struct scatterlist *cipher = areq_ctx->cipher;
478 struct scatterlist *asg = areq_ctx->asg;
285 unsigned int ivsize = crypto_aead_ivsize(authenc); 479 unsigned int ivsize = crypto_aead_ivsize(authenc);
480 authenc_ahash_t authenc_ahash_fn = crypto_authenc_ahash_fb;
481 struct page *srcp;
286 u8 *vsrc; 482 u8 *vsrc;
287 483
288 srcp = sg_page(src); 484 srcp = sg_page(src);
@@ -293,9 +489,22 @@ static int crypto_authenc_iverify(struct aead_request *req, u8 *iv,
293 sg_set_buf(cipher, iv, ivsize); 489 sg_set_buf(cipher, iv, ivsize);
294 authenc_chain(cipher, src, vsrc == iv + ivsize); 490 authenc_chain(cipher, src, vsrc == iv + ivsize);
295 src = cipher; 491 src = cipher;
492 cryptlen += ivsize;
493 }
494
495 if (sg_is_last(assoc)) {
496 authenc_ahash_fn = crypto_authenc_ahash;
497 sg_init_table(asg, 2);
498 sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset);
499 authenc_chain(asg, src, 0);
500 src = asg;
501 cryptlen += req->assoclen;
296 } 502 }
297 503
298 return crypto_authenc_verify(req, src, cryptlen + ivsize); 504 areq_ctx->cryptlen = cryptlen;
505 areq_ctx->sg = src;
506
507 return crypto_authenc_verify(req, authenc_ahash_fn);
299} 508}
300 509
301static int crypto_authenc_decrypt(struct aead_request *req) 510static int crypto_authenc_decrypt(struct aead_request *req)
@@ -326,38 +535,41 @@ static int crypto_authenc_decrypt(struct aead_request *req)
326 535
327static int crypto_authenc_init_tfm(struct crypto_tfm *tfm) 536static int crypto_authenc_init_tfm(struct crypto_tfm *tfm)
328{ 537{
329 struct crypto_instance *inst = (void *)tfm->__crt_alg; 538 struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
330 struct authenc_instance_ctx *ictx = crypto_instance_ctx(inst); 539 struct authenc_instance_ctx *ictx = crypto_instance_ctx(inst);
331 struct crypto_authenc_ctx *ctx = crypto_tfm_ctx(tfm); 540 struct crypto_authenc_ctx *ctx = crypto_tfm_ctx(tfm);
332 struct crypto_hash *auth; 541 struct crypto_ahash *auth;
333 struct crypto_ablkcipher *enc; 542 struct crypto_ablkcipher *enc;
334 int err; 543 int err;
335 544
336 auth = crypto_spawn_hash(&ictx->auth); 545 auth = crypto_spawn_ahash(&ictx->auth);
337 if (IS_ERR(auth)) 546 if (IS_ERR(auth))
338 return PTR_ERR(auth); 547 return PTR_ERR(auth);
339 548
549 ctx->reqoff = ALIGN(2 * crypto_ahash_digestsize(auth) +
550 crypto_ahash_alignmask(auth),
551 crypto_ahash_alignmask(auth) + 1);
552
340 enc = crypto_spawn_skcipher(&ictx->enc); 553 enc = crypto_spawn_skcipher(&ictx->enc);
341 err = PTR_ERR(enc); 554 err = PTR_ERR(enc);
342 if (IS_ERR(enc)) 555 if (IS_ERR(enc))
343 goto err_free_hash; 556 goto err_free_ahash;
344 557
345 ctx->auth = auth; 558 ctx->auth = auth;
346 ctx->enc = enc; 559 ctx->enc = enc;
560
347 tfm->crt_aead.reqsize = max_t(unsigned int, 561 tfm->crt_aead.reqsize = max_t(unsigned int,
348 (crypto_hash_alignmask(auth) & 562 crypto_ahash_reqsize(auth) + ctx->reqoff +
349 ~(crypto_tfm_ctx_alignment() - 1)) + 563 sizeof(struct authenc_request_ctx) +
350 crypto_hash_digestsize(auth) * 2, 564 sizeof(struct ahash_request),
351 sizeof(struct skcipher_givcrypt_request) + 565 sizeof(struct skcipher_givcrypt_request) +
352 crypto_ablkcipher_reqsize(enc) + 566 crypto_ablkcipher_reqsize(enc) +
353 crypto_ablkcipher_ivsize(enc)); 567 crypto_ablkcipher_ivsize(enc));
354
355 spin_lock_init(&ctx->auth_lock);
356 568
357 return 0; 569 return 0;
358 570
359err_free_hash: 571err_free_ahash:
360 crypto_free_hash(auth); 572 crypto_free_ahash(auth);
361 return err; 573 return err;
362} 574}
363 575
@@ -365,7 +577,7 @@ static void crypto_authenc_exit_tfm(struct crypto_tfm *tfm)
365{ 577{
366 struct crypto_authenc_ctx *ctx = crypto_tfm_ctx(tfm); 578 struct crypto_authenc_ctx *ctx = crypto_tfm_ctx(tfm);
367 579
368 crypto_free_hash(ctx->auth); 580 crypto_free_ahash(ctx->auth);
369 crypto_free_ablkcipher(ctx->enc); 581 crypto_free_ablkcipher(ctx->enc);
370} 582}
371 583
@@ -373,7 +585,8 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb)
373{ 585{
374 struct crypto_attr_type *algt; 586 struct crypto_attr_type *algt;
375 struct crypto_instance *inst; 587 struct crypto_instance *inst;
376 struct crypto_alg *auth; 588 struct hash_alg_common *auth;
589 struct crypto_alg *auth_base;
377 struct crypto_alg *enc; 590 struct crypto_alg *enc;
378 struct authenc_instance_ctx *ctx; 591 struct authenc_instance_ctx *ctx;
379 const char *enc_name; 592 const char *enc_name;
@@ -387,11 +600,13 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb)
387 if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) 600 if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
388 return ERR_PTR(-EINVAL); 601 return ERR_PTR(-EINVAL);
389 602
390 auth = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH, 603 auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
391 CRYPTO_ALG_TYPE_HASH_MASK); 604 CRYPTO_ALG_TYPE_AHASH_MASK);
392 if (IS_ERR(auth)) 605 if (IS_ERR(auth))
393 return ERR_PTR(PTR_ERR(auth)); 606 return ERR_PTR(PTR_ERR(auth));
394 607
608 auth_base = &auth->base;
609
395 enc_name = crypto_attr_alg_name(tb[2]); 610 enc_name = crypto_attr_alg_name(tb[2]);
396 err = PTR_ERR(enc_name); 611 err = PTR_ERR(enc_name);
397 if (IS_ERR(enc_name)) 612 if (IS_ERR(enc_name))
@@ -404,7 +619,7 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb)
404 619
405 ctx = crypto_instance_ctx(inst); 620 ctx = crypto_instance_ctx(inst);
406 621
407 err = crypto_init_spawn(&ctx->auth, auth, inst, CRYPTO_ALG_TYPE_MASK); 622 err = crypto_init_ahash_spawn(&ctx->auth, auth, inst);
408 if (err) 623 if (err)
409 goto err_free_inst; 624 goto err_free_inst;
410 625
@@ -419,28 +634,25 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb)
419 634
420 err = -ENAMETOOLONG; 635 err = -ENAMETOOLONG;
421 if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, 636 if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
422 "authenc(%s,%s)", auth->cra_name, enc->cra_name) >= 637 "authenc(%s,%s)", auth_base->cra_name, enc->cra_name) >=
423 CRYPTO_MAX_ALG_NAME) 638 CRYPTO_MAX_ALG_NAME)
424 goto err_drop_enc; 639 goto err_drop_enc;
425 640
426 if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, 641 if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
427 "authenc(%s,%s)", auth->cra_driver_name, 642 "authenc(%s,%s)", auth_base->cra_driver_name,
428 enc->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) 643 enc->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
429 goto err_drop_enc; 644 goto err_drop_enc;
430 645
431 inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD; 646 inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
432 inst->alg.cra_flags |= enc->cra_flags & CRYPTO_ALG_ASYNC; 647 inst->alg.cra_flags |= enc->cra_flags & CRYPTO_ALG_ASYNC;
433 inst->alg.cra_priority = enc->cra_priority * 10 + auth->cra_priority; 648 inst->alg.cra_priority = enc->cra_priority *
649 10 + auth_base->cra_priority;
434 inst->alg.cra_blocksize = enc->cra_blocksize; 650 inst->alg.cra_blocksize = enc->cra_blocksize;
435 inst->alg.cra_alignmask = auth->cra_alignmask | enc->cra_alignmask; 651 inst->alg.cra_alignmask = auth_base->cra_alignmask | enc->cra_alignmask;
436 inst->alg.cra_type = &crypto_aead_type; 652 inst->alg.cra_type = &crypto_aead_type;
437 653
438 inst->alg.cra_aead.ivsize = enc->cra_ablkcipher.ivsize; 654 inst->alg.cra_aead.ivsize = enc->cra_ablkcipher.ivsize;
439 inst->alg.cra_aead.maxauthsize = auth->cra_type == &crypto_hash_type ? 655 inst->alg.cra_aead.maxauthsize = auth->digestsize;
440 auth->cra_hash.digestsize :
441 auth->cra_type ?
442 __crypto_shash_alg(auth)->digestsize :
443 auth->cra_digest.dia_digestsize;
444 656
445 inst->alg.cra_ctxsize = sizeof(struct crypto_authenc_ctx); 657 inst->alg.cra_ctxsize = sizeof(struct crypto_authenc_ctx);
446 658
@@ -453,13 +665,13 @@ static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb)
453 inst->alg.cra_aead.givencrypt = crypto_authenc_givencrypt; 665 inst->alg.cra_aead.givencrypt = crypto_authenc_givencrypt;
454 666
455out: 667out:
456 crypto_mod_put(auth); 668 crypto_mod_put(auth_base);
457 return inst; 669 return inst;
458 670
459err_drop_enc: 671err_drop_enc:
460 crypto_drop_skcipher(&ctx->enc); 672 crypto_drop_skcipher(&ctx->enc);
461err_drop_auth: 673err_drop_auth:
462 crypto_drop_spawn(&ctx->auth); 674 crypto_drop_ahash(&ctx->auth);
463err_free_inst: 675err_free_inst:
464 kfree(inst); 676 kfree(inst);
465out_put_auth: 677out_put_auth:
@@ -472,7 +684,7 @@ static void crypto_authenc_free(struct crypto_instance *inst)
472 struct authenc_instance_ctx *ctx = crypto_instance_ctx(inst); 684 struct authenc_instance_ctx *ctx = crypto_instance_ctx(inst);
473 685
474 crypto_drop_skcipher(&ctx->enc); 686 crypto_drop_skcipher(&ctx->enc);
475 crypto_drop_spawn(&ctx->auth); 687 crypto_drop_ahash(&ctx->auth);
476 kfree(inst); 688 kfree(inst);
477} 689}
478 690
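
The authenc conversion above is internal to the template: user-visible algorithm names do not change, only the way the hash spawn is grabbed (now through crypto_init_ahash_spawn() instead of a generic spawn). A minimal allocation sketch follows; the algorithm string and the example_* helper name are chosen purely for illustration and are not part of this patch.

    #include <linux/crypto.h>
    #include <linux/err.h>

    static struct crypto_aead *example_alloc_authenc(void)
    {
            /* Still requested as "authenc(<hash>,<cipher>)"; after this
             * patch the hash half is resolved via the ahash interface. */
            return crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
    }
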
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index ae5fa99d5d36..35335825a4ef 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -39,6 +39,11 @@ struct cryptd_instance_ctx {
39 struct cryptd_queue *queue; 39 struct cryptd_queue *queue;
40}; 40};
41 41
42struct hashd_instance_ctx {
43 struct crypto_shash_spawn spawn;
44 struct cryptd_queue *queue;
45};
46
42struct cryptd_blkcipher_ctx { 47struct cryptd_blkcipher_ctx {
43 struct crypto_blkcipher *child; 48 struct crypto_blkcipher *child;
44}; 49};
@@ -48,11 +53,12 @@ struct cryptd_blkcipher_request_ctx {
48}; 53};
49 54
50struct cryptd_hash_ctx { 55struct cryptd_hash_ctx {
51 struct crypto_hash *child; 56 struct crypto_shash *child;
52}; 57};
53 58
54struct cryptd_hash_request_ctx { 59struct cryptd_hash_request_ctx {
55 crypto_completion_t complete; 60 crypto_completion_t complete;
61 struct shash_desc desc;
56}; 62};
57 63
58static void cryptd_queue_worker(struct work_struct *work); 64static void cryptd_queue_worker(struct work_struct *work);
@@ -249,32 +255,24 @@ static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
249 crypto_free_blkcipher(ctx->child); 255 crypto_free_blkcipher(ctx->child);
250} 256}
251 257
252static struct crypto_instance *cryptd_alloc_instance(struct crypto_alg *alg, 258static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
253 struct cryptd_queue *queue) 259 unsigned int tail)
254{ 260{
261 char *p;
255 struct crypto_instance *inst; 262 struct crypto_instance *inst;
256 struct cryptd_instance_ctx *ctx;
257 int err; 263 int err;
258 264
259 inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); 265 p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
260 if (!inst) { 266 if (!p)
261 inst = ERR_PTR(-ENOMEM); 267 return ERR_PTR(-ENOMEM);
262 goto out; 268
263 } 269 inst = (void *)(p + head);
264 270
265 err = -ENAMETOOLONG; 271 err = -ENAMETOOLONG;
266 if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, 272 if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
267 "cryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) 273 "cryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
268 goto out_free_inst; 274 goto out_free_inst;
269 275
270 ctx = crypto_instance_ctx(inst);
271 err = crypto_init_spawn(&ctx->spawn, alg, inst,
272 CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
273 if (err)
274 goto out_free_inst;
275
276 ctx->queue = queue;
277
278 memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME); 276 memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
279 277
280 inst->alg.cra_priority = alg->cra_priority + 50; 278 inst->alg.cra_priority = alg->cra_priority + 50;
@@ -282,29 +280,41 @@ static struct crypto_instance *cryptd_alloc_instance(struct crypto_alg *alg,
282 inst->alg.cra_alignmask = alg->cra_alignmask; 280 inst->alg.cra_alignmask = alg->cra_alignmask;
283 281
284out: 282out:
285 return inst; 283 return p;
286 284
287out_free_inst: 285out_free_inst:
288 kfree(inst); 286 kfree(p);
289 inst = ERR_PTR(err); 287 p = ERR_PTR(err);
290 goto out; 288 goto out;
291} 289}
292 290
293static struct crypto_instance *cryptd_alloc_blkcipher( 291static int cryptd_create_blkcipher(struct crypto_template *tmpl,
294 struct rtattr **tb, struct cryptd_queue *queue) 292 struct rtattr **tb,
293 struct cryptd_queue *queue)
295{ 294{
295 struct cryptd_instance_ctx *ctx;
296 struct crypto_instance *inst; 296 struct crypto_instance *inst;
297 struct crypto_alg *alg; 297 struct crypto_alg *alg;
298 int err;
298 299
299 alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER, 300 alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER,
300 CRYPTO_ALG_TYPE_MASK); 301 CRYPTO_ALG_TYPE_MASK);
301 if (IS_ERR(alg)) 302 if (IS_ERR(alg))
302 return ERR_CAST(alg); 303 return PTR_ERR(alg);
303 304
304 inst = cryptd_alloc_instance(alg, queue); 305 inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
306 err = PTR_ERR(inst);
305 if (IS_ERR(inst)) 307 if (IS_ERR(inst))
306 goto out_put_alg; 308 goto out_put_alg;
307 309
310 ctx = crypto_instance_ctx(inst);
311 ctx->queue = queue;
312
313 err = crypto_init_spawn(&ctx->spawn, alg, inst,
314 CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
315 if (err)
316 goto out_free_inst;
317
308 inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC; 318 inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
309 inst->alg.cra_type = &crypto_ablkcipher_type; 319 inst->alg.cra_type = &crypto_ablkcipher_type;
310 320
@@ -323,26 +333,34 @@ static struct crypto_instance *cryptd_alloc_blkcipher(
323 inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue; 333 inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
324 inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue; 334 inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;
325 335
336 err = crypto_register_instance(tmpl, inst);
337 if (err) {
338 crypto_drop_spawn(&ctx->spawn);
339out_free_inst:
340 kfree(inst);
341 }
342
326out_put_alg: 343out_put_alg:
327 crypto_mod_put(alg); 344 crypto_mod_put(alg);
328 return inst; 345 return err;
329} 346}
330 347
331static int cryptd_hash_init_tfm(struct crypto_tfm *tfm) 348static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
332{ 349{
333 struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); 350 struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
334 struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst); 351 struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
335 struct crypto_spawn *spawn = &ictx->spawn; 352 struct crypto_shash_spawn *spawn = &ictx->spawn;
336 struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm); 353 struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
337 struct crypto_hash *cipher; 354 struct crypto_shash *hash;
338 355
339 cipher = crypto_spawn_hash(spawn); 356 hash = crypto_spawn_shash(spawn);
340 if (IS_ERR(cipher)) 357 if (IS_ERR(hash))
341 return PTR_ERR(cipher); 358 return PTR_ERR(hash);
342 359
343 ctx->child = cipher; 360 ctx->child = hash;
344 tfm->crt_ahash.reqsize = 361 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
345 sizeof(struct cryptd_hash_request_ctx); 362 sizeof(struct cryptd_hash_request_ctx) +
363 crypto_shash_descsize(hash));
346 return 0; 364 return 0;
347} 365}
348 366
@@ -350,22 +368,22 @@ static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
350{ 368{
351 struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm); 369 struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
352 370
353 crypto_free_hash(ctx->child); 371 crypto_free_shash(ctx->child);
354} 372}
355 373
356static int cryptd_hash_setkey(struct crypto_ahash *parent, 374static int cryptd_hash_setkey(struct crypto_ahash *parent,
357 const u8 *key, unsigned int keylen) 375 const u8 *key, unsigned int keylen)
358{ 376{
359 struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent); 377 struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
360 struct crypto_hash *child = ctx->child; 378 struct crypto_shash *child = ctx->child;
361 int err; 379 int err;
362 380
363 crypto_hash_clear_flags(child, CRYPTO_TFM_REQ_MASK); 381 crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
364 crypto_hash_set_flags(child, crypto_ahash_get_flags(parent) & 382 crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
365 CRYPTO_TFM_REQ_MASK); 383 CRYPTO_TFM_REQ_MASK);
366 err = crypto_hash_setkey(child, key, keylen); 384 err = crypto_shash_setkey(child, key, keylen);
367 crypto_ahash_set_flags(parent, crypto_hash_get_flags(child) & 385 crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
368 CRYPTO_TFM_RES_MASK); 386 CRYPTO_TFM_RES_MASK);
369 return err; 387 return err;
370} 388}
371 389
@@ -385,21 +403,19 @@ static int cryptd_hash_enqueue(struct ahash_request *req,
385 403
386static void cryptd_hash_init(struct crypto_async_request *req_async, int err) 404static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
387{ 405{
388 struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); 406 struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
389 struct crypto_hash *child = ctx->child; 407 struct crypto_shash *child = ctx->child;
390 struct ahash_request *req = ahash_request_cast(req_async); 408 struct ahash_request *req = ahash_request_cast(req_async);
391 struct cryptd_hash_request_ctx *rctx; 409 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
392 struct hash_desc desc; 410 struct shash_desc *desc = &rctx->desc;
393
394 rctx = ahash_request_ctx(req);
395 411
396 if (unlikely(err == -EINPROGRESS)) 412 if (unlikely(err == -EINPROGRESS))
397 goto out; 413 goto out;
398 414
399 desc.tfm = child; 415 desc->tfm = child;
400 desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; 416 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
401 417
402 err = crypto_hash_crt(child)->init(&desc); 418 err = crypto_shash_init(desc);
403 419
404 req->base.complete = rctx->complete; 420 req->base.complete = rctx->complete;
405 421
@@ -416,23 +432,15 @@ static int cryptd_hash_init_enqueue(struct ahash_request *req)
416 432
417static void cryptd_hash_update(struct crypto_async_request *req_async, int err) 433static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
418{ 434{
419 struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); 435 struct ahash_request *req = ahash_request_cast(req_async);
420 struct crypto_hash *child = ctx->child;
421 struct ahash_request *req = ahash_request_cast(req_async);
422 struct cryptd_hash_request_ctx *rctx; 436 struct cryptd_hash_request_ctx *rctx;
423 struct hash_desc desc;
424 437
425 rctx = ahash_request_ctx(req); 438 rctx = ahash_request_ctx(req);
426 439
427 if (unlikely(err == -EINPROGRESS)) 440 if (unlikely(err == -EINPROGRESS))
428 goto out; 441 goto out;
429 442
430 desc.tfm = child; 443 err = shash_ahash_update(req, &rctx->desc);
431 desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
432
433 err = crypto_hash_crt(child)->update(&desc,
434 req->src,
435 req->nbytes);
436 444
437 req->base.complete = rctx->complete; 445 req->base.complete = rctx->complete;
438 446
@@ -449,21 +457,13 @@ static int cryptd_hash_update_enqueue(struct ahash_request *req)
449 457
450static void cryptd_hash_final(struct crypto_async_request *req_async, int err) 458static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
451{ 459{
452 struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); 460 struct ahash_request *req = ahash_request_cast(req_async);
453 struct crypto_hash *child = ctx->child; 461 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
454 struct ahash_request *req = ahash_request_cast(req_async);
455 struct cryptd_hash_request_ctx *rctx;
456 struct hash_desc desc;
457
458 rctx = ahash_request_ctx(req);
459 462
460 if (unlikely(err == -EINPROGRESS)) 463 if (unlikely(err == -EINPROGRESS))
461 goto out; 464 goto out;
462 465
463 desc.tfm = child; 466 err = crypto_shash_final(&rctx->desc, req->result);
464 desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
465
466 err = crypto_hash_crt(child)->final(&desc, req->result);
467 467
468 req->base.complete = rctx->complete; 468 req->base.complete = rctx->complete;
469 469
@@ -478,26 +478,44 @@ static int cryptd_hash_final_enqueue(struct ahash_request *req)
478 return cryptd_hash_enqueue(req, cryptd_hash_final); 478 return cryptd_hash_enqueue(req, cryptd_hash_final);
479} 479}
480 480
481static void cryptd_hash_digest(struct crypto_async_request *req_async, int err) 481static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
482{ 482{
483 struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); 483 struct ahash_request *req = ahash_request_cast(req_async);
484 struct crypto_hash *child = ctx->child; 484 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
485 struct ahash_request *req = ahash_request_cast(req_async);
486 struct cryptd_hash_request_ctx *rctx;
487 struct hash_desc desc;
488 485
489 rctx = ahash_request_ctx(req); 486 if (unlikely(err == -EINPROGRESS))
487 goto out;
488
489 err = shash_ahash_finup(req, &rctx->desc);
490
491 req->base.complete = rctx->complete;
492
493out:
494 local_bh_disable();
495 rctx->complete(&req->base, err);
496 local_bh_enable();
497}
498
499static int cryptd_hash_finup_enqueue(struct ahash_request *req)
500{
501 return cryptd_hash_enqueue(req, cryptd_hash_finup);
502}
503
504static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
505{
506 struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
507 struct crypto_shash *child = ctx->child;
508 struct ahash_request *req = ahash_request_cast(req_async);
509 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
510 struct shash_desc *desc = &rctx->desc;
490 511
491 if (unlikely(err == -EINPROGRESS)) 512 if (unlikely(err == -EINPROGRESS))
492 goto out; 513 goto out;
493 514
494 desc.tfm = child; 515 desc->tfm = child;
495 desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; 516 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
496 517
497 err = crypto_hash_crt(child)->digest(&desc, 518 err = shash_ahash_digest(req, desc);
498 req->src,
499 req->nbytes,
500 req->result);
501 519
502 req->base.complete = rctx->complete; 520 req->base.complete = rctx->complete;
503 521
@@ -512,64 +530,108 @@ static int cryptd_hash_digest_enqueue(struct ahash_request *req)
512 return cryptd_hash_enqueue(req, cryptd_hash_digest); 530 return cryptd_hash_enqueue(req, cryptd_hash_digest);
513} 531}
514 532
515static struct crypto_instance *cryptd_alloc_hash( 533static int cryptd_hash_export(struct ahash_request *req, void *out)
516 struct rtattr **tb, struct cryptd_queue *queue)
517{ 534{
518 struct crypto_instance *inst; 535 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
536
537 return crypto_shash_export(&rctx->desc, out);
538}
539
540static int cryptd_hash_import(struct ahash_request *req, const void *in)
541{
542 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
543
544 return crypto_shash_import(&rctx->desc, in);
545}
546
547static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
548 struct cryptd_queue *queue)
549{
550 struct hashd_instance_ctx *ctx;
551 struct ahash_instance *inst;
552 struct shash_alg *salg;
519 struct crypto_alg *alg; 553 struct crypto_alg *alg;
554 int err;
520 555
521 alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_HASH, 556 salg = shash_attr_alg(tb[1], 0, 0);
522 CRYPTO_ALG_TYPE_HASH_MASK); 557 if (IS_ERR(salg))
523 if (IS_ERR(alg)) 558 return PTR_ERR(salg);
524 return ERR_PTR(PTR_ERR(alg));
525 559
526 inst = cryptd_alloc_instance(alg, queue); 560 alg = &salg->base;
561 inst = cryptd_alloc_instance(alg, ahash_instance_headroom(),
562 sizeof(*ctx));
563 err = PTR_ERR(inst);
527 if (IS_ERR(inst)) 564 if (IS_ERR(inst))
528 goto out_put_alg; 565 goto out_put_alg;
529 566
530 inst->alg.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC; 567 ctx = ahash_instance_ctx(inst);
531 inst->alg.cra_type = &crypto_ahash_type; 568 ctx->queue = queue;
532 569
533 inst->alg.cra_ahash.digestsize = alg->cra_hash.digestsize; 570 err = crypto_init_shash_spawn(&ctx->spawn, salg,
534 inst->alg.cra_ctxsize = sizeof(struct cryptd_hash_ctx); 571 ahash_crypto_instance(inst));
572 if (err)
573 goto out_free_inst;
535 574
536 inst->alg.cra_init = cryptd_hash_init_tfm; 575 inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC;
537 inst->alg.cra_exit = cryptd_hash_exit_tfm;
538 576
539 inst->alg.cra_ahash.init = cryptd_hash_init_enqueue; 577 inst->alg.halg.digestsize = salg->digestsize;
540 inst->alg.cra_ahash.update = cryptd_hash_update_enqueue; 578 inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
541 inst->alg.cra_ahash.final = cryptd_hash_final_enqueue; 579
542 inst->alg.cra_ahash.setkey = cryptd_hash_setkey; 580 inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
543 inst->alg.cra_ahash.digest = cryptd_hash_digest_enqueue; 581 inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;
582
583 inst->alg.init = cryptd_hash_init_enqueue;
584 inst->alg.update = cryptd_hash_update_enqueue;
585 inst->alg.final = cryptd_hash_final_enqueue;
586 inst->alg.finup = cryptd_hash_finup_enqueue;
587 inst->alg.export = cryptd_hash_export;
588 inst->alg.import = cryptd_hash_import;
589 inst->alg.setkey = cryptd_hash_setkey;
590 inst->alg.digest = cryptd_hash_digest_enqueue;
591
592 err = ahash_register_instance(tmpl, inst);
593 if (err) {
594 crypto_drop_shash(&ctx->spawn);
595out_free_inst:
596 kfree(inst);
597 }
544 598
545out_put_alg: 599out_put_alg:
546 crypto_mod_put(alg); 600 crypto_mod_put(alg);
547 return inst; 601 return err;
548} 602}
549 603
550static struct cryptd_queue queue; 604static struct cryptd_queue queue;
551 605
552static struct crypto_instance *cryptd_alloc(struct rtattr **tb) 606static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
553{ 607{
554 struct crypto_attr_type *algt; 608 struct crypto_attr_type *algt;
555 609
556 algt = crypto_get_attr_type(tb); 610 algt = crypto_get_attr_type(tb);
557 if (IS_ERR(algt)) 611 if (IS_ERR(algt))
558 return ERR_CAST(algt); 612 return PTR_ERR(algt);
559 613
560 switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { 614 switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
561 case CRYPTO_ALG_TYPE_BLKCIPHER: 615 case CRYPTO_ALG_TYPE_BLKCIPHER:
562 return cryptd_alloc_blkcipher(tb, &queue); 616 return cryptd_create_blkcipher(tmpl, tb, &queue);
563 case CRYPTO_ALG_TYPE_DIGEST: 617 case CRYPTO_ALG_TYPE_DIGEST:
564 return cryptd_alloc_hash(tb, &queue); 618 return cryptd_create_hash(tmpl, tb, &queue);
565 } 619 }
566 620
567 return ERR_PTR(-EINVAL); 621 return -EINVAL;
568} 622}
569 623
570static void cryptd_free(struct crypto_instance *inst) 624static void cryptd_free(struct crypto_instance *inst)
571{ 625{
572 struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst); 626 struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
627 struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
628
629 switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
630 case CRYPTO_ALG_TYPE_AHASH:
631 crypto_drop_shash(&hctx->spawn);
632 kfree(ahash_instance(inst));
633 return;
634 }
573 635
574 crypto_drop_spawn(&ctx->spawn); 636 crypto_drop_spawn(&ctx->spawn);
575 kfree(inst); 637 kfree(inst);
@@ -577,7 +639,7 @@ static void cryptd_free(struct crypto_instance *inst)
577 639
578static struct crypto_template cryptd_tmpl = { 640static struct crypto_template cryptd_tmpl = {
579 .name = "cryptd", 641 .name = "cryptd",
580 .alloc = cryptd_alloc, 642 .create = cryptd_create,
581 .free = cryptd_free, 643 .free = cryptd_free,
582 .module = THIS_MODULE, 644 .module = THIS_MODULE,
583}; 645};
@@ -620,6 +682,41 @@ void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
620} 682}
621EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher); 683EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);
622 684
685struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
686 u32 type, u32 mask)
687{
688 char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
689 struct crypto_ahash *tfm;
690
691 if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
692 "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
693 return ERR_PTR(-EINVAL);
694 tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
695 if (IS_ERR(tfm))
696 return ERR_CAST(tfm);
697 if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
698 crypto_free_ahash(tfm);
699 return ERR_PTR(-EINVAL);
700 }
701
702 return __cryptd_ahash_cast(tfm);
703}
704EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);
705
706struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
707{
708 struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
709
710 return ctx->child;
711}
712EXPORT_SYMBOL_GPL(cryptd_ahash_child);
713
714void cryptd_free_ahash(struct cryptd_ahash *tfm)
715{
716 crypto_free_ahash(&tfm->base);
717}
718EXPORT_SYMBOL_GPL(cryptd_free_ahash);
719
623static int __init cryptd_init(void) 720static int __init cryptd_init(void)
624{ 721{
625 int err; 722 int err;
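
The newly exported cryptd_alloc_ahash(), cryptd_ahash_child() and cryptd_free_ahash() give callers an asynchronous ahash handle whose work is deferred to the cryptd queue while remaining backed by a synchronous shash. A hedged caller sketch; the example_* helper names, the header path and the "ghash" algorithm string are assumptions for illustration only.

    #include <crypto/cryptd.h>        /* assumed location of the new helpers */
    #include <crypto/hash.h>
    #include <linux/err.h>

    static struct cryptd_ahash *example_get_async_ghash(void)
    {
            struct cryptd_ahash *tfm;

            /* cryptd_alloc_ahash() builds the "cryptd(...)" name itself. */
            tfm = cryptd_alloc_ahash("ghash", 0, 0);
            if (IS_ERR(tfm))
                    return tfm;

            /* &tfm->base is an ordinary struct crypto_ahash * suitable for
             * ahash_request processing; cryptd_ahash_child() exposes the
             * underlying shash for synchronous fallback paths. */
            return tfm;
    }

    static void example_put_async_ghash(struct cryptd_ahash *tfm)
    {
            cryptd_free_ahash(tfm);
    }
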
diff --git a/crypto/ctr.c b/crypto/ctr.c
index 2d7425f0e7b8..6c3bfabb9d1d 100644
--- a/crypto/ctr.c
+++ b/crypto/ctr.c
@@ -219,6 +219,8 @@ static struct crypto_instance *crypto_ctr_alloc(struct rtattr **tb)
219 inst->alg.cra_blkcipher.encrypt = crypto_ctr_crypt; 219 inst->alg.cra_blkcipher.encrypt = crypto_ctr_crypt;
220 inst->alg.cra_blkcipher.decrypt = crypto_ctr_crypt; 220 inst->alg.cra_blkcipher.decrypt = crypto_ctr_crypt;
221 221
222 inst->alg.cra_blkcipher.geniv = "chainiv";
223
222out: 224out:
223 crypto_mod_put(alg); 225 crypto_mod_put(alg);
224 return inst; 226 return inst;
diff --git a/crypto/gcm.c b/crypto/gcm.c
index e70afd0c73dd..5fc3292483ef 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -11,7 +11,10 @@
11#include <crypto/gf128mul.h> 11#include <crypto/gf128mul.h>
12#include <crypto/internal/aead.h> 12#include <crypto/internal/aead.h>
13#include <crypto/internal/skcipher.h> 13#include <crypto/internal/skcipher.h>
14#include <crypto/internal/hash.h>
14#include <crypto/scatterwalk.h> 15#include <crypto/scatterwalk.h>
16#include <crypto/hash.h>
17#include "internal.h"
15#include <linux/completion.h> 18#include <linux/completion.h>
16#include <linux/err.h> 19#include <linux/err.h>
17#include <linux/init.h> 20#include <linux/init.h>
@@ -21,11 +24,12 @@
21 24
22struct gcm_instance_ctx { 25struct gcm_instance_ctx {
23 struct crypto_skcipher_spawn ctr; 26 struct crypto_skcipher_spawn ctr;
27 struct crypto_ahash_spawn ghash;
24}; 28};
25 29
26struct crypto_gcm_ctx { 30struct crypto_gcm_ctx {
27 struct crypto_ablkcipher *ctr; 31 struct crypto_ablkcipher *ctr;
28 struct gf128mul_4k *gf128; 32 struct crypto_ahash *ghash;
29}; 33};
30 34
31struct crypto_rfc4106_ctx { 35struct crypto_rfc4106_ctx {
@@ -34,10 +38,9 @@ struct crypto_rfc4106_ctx {
34}; 38};
35 39
36struct crypto_gcm_ghash_ctx { 40struct crypto_gcm_ghash_ctx {
37 u32 bytes; 41 unsigned int cryptlen;
38 u32 flags; 42 struct scatterlist *src;
39 struct gf128mul_4k *gf128; 43 crypto_completion_t complete;
40 u8 buffer[16];
41}; 44};
42 45
43struct crypto_gcm_req_priv_ctx { 46struct crypto_gcm_req_priv_ctx {
@@ -45,8 +48,11 @@ struct crypto_gcm_req_priv_ctx {
45 u8 iauth_tag[16]; 48 u8 iauth_tag[16];
46 struct scatterlist src[2]; 49 struct scatterlist src[2];
47 struct scatterlist dst[2]; 50 struct scatterlist dst[2];
48 struct crypto_gcm_ghash_ctx ghash; 51 struct crypto_gcm_ghash_ctx ghash_ctx;
49 struct ablkcipher_request abreq; 52 union {
53 struct ahash_request ahreq;
54 struct ablkcipher_request abreq;
55 } u;
50}; 56};
51 57
52struct crypto_gcm_setkey_result { 58struct crypto_gcm_setkey_result {
@@ -54,6 +60,8 @@ struct crypto_gcm_setkey_result {
54 struct completion completion; 60 struct completion completion;
55}; 61};
56 62
63static void *gcm_zeroes;
64
57static inline struct crypto_gcm_req_priv_ctx *crypto_gcm_reqctx( 65static inline struct crypto_gcm_req_priv_ctx *crypto_gcm_reqctx(
58 struct aead_request *req) 66 struct aead_request *req)
59{ 67{
@@ -62,113 +70,6 @@ static inline struct crypto_gcm_req_priv_ctx *crypto_gcm_reqctx(
62 return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1); 70 return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1);
63} 71}
64 72
65static void crypto_gcm_ghash_init(struct crypto_gcm_ghash_ctx *ctx, u32 flags,
66 struct gf128mul_4k *gf128)
67{
68 ctx->bytes = 0;
69 ctx->flags = flags;
70 ctx->gf128 = gf128;
71 memset(ctx->buffer, 0, 16);
72}
73
74static void crypto_gcm_ghash_update(struct crypto_gcm_ghash_ctx *ctx,
75 const u8 *src, unsigned int srclen)
76{
77 u8 *dst = ctx->buffer;
78
79 if (ctx->bytes) {
80 int n = min(srclen, ctx->bytes);
81 u8 *pos = dst + (16 - ctx->bytes);
82
83 ctx->bytes -= n;
84 srclen -= n;
85
86 while (n--)
87 *pos++ ^= *src++;
88
89 if (!ctx->bytes)
90 gf128mul_4k_lle((be128 *)dst, ctx->gf128);
91 }
92
93 while (srclen >= 16) {
94 crypto_xor(dst, src, 16);
95 gf128mul_4k_lle((be128 *)dst, ctx->gf128);
96 src += 16;
97 srclen -= 16;
98 }
99
100 if (srclen) {
101 ctx->bytes = 16 - srclen;
102 while (srclen--)
103 *dst++ ^= *src++;
104 }
105}
106
107static void crypto_gcm_ghash_update_sg(struct crypto_gcm_ghash_ctx *ctx,
108 struct scatterlist *sg, int len)
109{
110 struct scatter_walk walk;
111 u8 *src;
112 int n;
113
114 if (!len)
115 return;
116
117 scatterwalk_start(&walk, sg);
118
119 while (len) {
120 n = scatterwalk_clamp(&walk, len);
121
122 if (!n) {
123 scatterwalk_start(&walk, scatterwalk_sg_next(walk.sg));
124 n = scatterwalk_clamp(&walk, len);
125 }
126
127 src = scatterwalk_map(&walk, 0);
128
129 crypto_gcm_ghash_update(ctx, src, n);
130 len -= n;
131
132 scatterwalk_unmap(src, 0);
133 scatterwalk_advance(&walk, n);
134 scatterwalk_done(&walk, 0, len);
135 if (len)
136 crypto_yield(ctx->flags);
137 }
138}
139
140static void crypto_gcm_ghash_flush(struct crypto_gcm_ghash_ctx *ctx)
141{
142 u8 *dst = ctx->buffer;
143
144 if (ctx->bytes) {
145 u8 *tmp = dst + (16 - ctx->bytes);
146
147 while (ctx->bytes--)
148 *tmp++ ^= 0;
149
150 gf128mul_4k_lle((be128 *)dst, ctx->gf128);
151 }
152
153 ctx->bytes = 0;
154}
155
156static void crypto_gcm_ghash_final_xor(struct crypto_gcm_ghash_ctx *ctx,
157 unsigned int authlen,
158 unsigned int cryptlen, u8 *dst)
159{
160 u8 *buf = ctx->buffer;
161 u128 lengths;
162
163 lengths.a = cpu_to_be64(authlen * 8);
164 lengths.b = cpu_to_be64(cryptlen * 8);
165
166 crypto_gcm_ghash_flush(ctx);
167 crypto_xor(buf, (u8 *)&lengths, 16);
168 gf128mul_4k_lle((be128 *)buf, ctx->gf128);
169 crypto_xor(dst, buf, 16);
170}
171
172static void crypto_gcm_setkey_done(struct crypto_async_request *req, int err) 73static void crypto_gcm_setkey_done(struct crypto_async_request *req, int err)
173{ 74{
174 struct crypto_gcm_setkey_result *result = req->data; 75 struct crypto_gcm_setkey_result *result = req->data;
@@ -184,6 +85,7 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
184 unsigned int keylen) 85 unsigned int keylen)
185{ 86{
186 struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead); 87 struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead);
88 struct crypto_ahash *ghash = ctx->ghash;
187 struct crypto_ablkcipher *ctr = ctx->ctr; 89 struct crypto_ablkcipher *ctr = ctx->ctr;
188 struct { 90 struct {
189 be128 hash; 91 be128 hash;
@@ -233,13 +135,12 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
233 if (err) 135 if (err)
234 goto out; 136 goto out;
235 137
236 if (ctx->gf128 != NULL) 138 crypto_ahash_clear_flags(ghash, CRYPTO_TFM_REQ_MASK);
237 gf128mul_free_4k(ctx->gf128); 139 crypto_ahash_set_flags(ghash, crypto_aead_get_flags(aead) &
238 140 CRYPTO_TFM_REQ_MASK);
239 ctx->gf128 = gf128mul_init_4k_lle(&data->hash); 141 err = crypto_ahash_setkey(ghash, (u8 *)&data->hash, sizeof(be128));
240 142 crypto_aead_set_flags(aead, crypto_ahash_get_flags(ghash) &
241 if (ctx->gf128 == NULL) 143 CRYPTO_TFM_RES_MASK);
242 err = -ENOMEM;
243 144
244out: 145out:
245 kfree(data); 146 kfree(data);
@@ -272,8 +173,6 @@ static void crypto_gcm_init_crypt(struct ablkcipher_request *ablk_req,
272 struct crypto_aead *aead = crypto_aead_reqtfm(req); 173 struct crypto_aead *aead = crypto_aead_reqtfm(req);
273 struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead); 174 struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead);
274 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); 175 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
275 u32 flags = req->base.tfm->crt_flags;
276 struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash;
277 struct scatterlist *dst; 176 struct scatterlist *dst;
278 __be32 counter = cpu_to_be32(1); 177 __be32 counter = cpu_to_be32(1);
279 178
@@ -296,108 +195,398 @@ static void crypto_gcm_init_crypt(struct ablkcipher_request *ablk_req,
296 ablkcipher_request_set_crypt(ablk_req, pctx->src, dst, 195 ablkcipher_request_set_crypt(ablk_req, pctx->src, dst,
297 cryptlen + sizeof(pctx->auth_tag), 196 cryptlen + sizeof(pctx->auth_tag),
298 req->iv); 197 req->iv);
198}
199
200static inline unsigned int gcm_remain(unsigned int len)
201{
202 len &= 0xfU;
203 return len ? 16 - len : 0;
204}
205
206static void gcm_hash_len_done(struct crypto_async_request *areq, int err);
207static void gcm_hash_final_done(struct crypto_async_request *areq, int err);
299 208
300 crypto_gcm_ghash_init(ghash, flags, ctx->gf128); 209static int gcm_hash_update(struct aead_request *req,
210 struct crypto_gcm_req_priv_ctx *pctx,
211 crypto_completion_t complete,
212 struct scatterlist *src,
213 unsigned int len)
214{
215 struct ahash_request *ahreq = &pctx->u.ahreq;
301 216
302 crypto_gcm_ghash_update_sg(ghash, req->assoc, req->assoclen); 217 ahash_request_set_callback(ahreq, aead_request_flags(req),
303 crypto_gcm_ghash_flush(ghash); 218 complete, req);
219 ahash_request_set_crypt(ahreq, src, NULL, len);
220
221 return crypto_ahash_update(ahreq);
304} 222}
305 223
306static int crypto_gcm_hash(struct aead_request *req) 224static int gcm_hash_remain(struct aead_request *req,
225 struct crypto_gcm_req_priv_ctx *pctx,
226 unsigned int remain,
227 crypto_completion_t complete)
307{ 228{
308 struct crypto_aead *aead = crypto_aead_reqtfm(req); 229 struct ahash_request *ahreq = &pctx->u.ahreq;
230
231 ahash_request_set_callback(ahreq, aead_request_flags(req),
232 complete, req);
233 sg_init_one(pctx->src, gcm_zeroes, remain);
234 ahash_request_set_crypt(ahreq, pctx->src, NULL, remain);
235
236 return crypto_ahash_update(ahreq);
237}
238
239static int gcm_hash_len(struct aead_request *req,
240 struct crypto_gcm_req_priv_ctx *pctx)
241{
242 struct ahash_request *ahreq = &pctx->u.ahreq;
243 struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
244 u128 lengths;
245
246 lengths.a = cpu_to_be64(req->assoclen * 8);
247 lengths.b = cpu_to_be64(gctx->cryptlen * 8);
248 memcpy(pctx->iauth_tag, &lengths, 16);
249 sg_init_one(pctx->src, pctx->iauth_tag, 16);
250 ahash_request_set_callback(ahreq, aead_request_flags(req),
251 gcm_hash_len_done, req);
252 ahash_request_set_crypt(ahreq, pctx->src,
253 NULL, sizeof(lengths));
254
255 return crypto_ahash_update(ahreq);
256}
257
258static int gcm_hash_final(struct aead_request *req,
259 struct crypto_gcm_req_priv_ctx *pctx)
260{
261 struct ahash_request *ahreq = &pctx->u.ahreq;
262
263 ahash_request_set_callback(ahreq, aead_request_flags(req),
264 gcm_hash_final_done, req);
265 ahash_request_set_crypt(ahreq, NULL, pctx->iauth_tag, 0);
266
267 return crypto_ahash_final(ahreq);
268}
269
270static void gcm_hash_final_done(struct crypto_async_request *areq,
271 int err)
272{
273 struct aead_request *req = areq->data;
309 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); 274 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
310 u8 *auth_tag = pctx->auth_tag; 275 struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
311 struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash; 276
277 if (!err)
278 crypto_xor(pctx->auth_tag, pctx->iauth_tag, 16);
312 279
313 crypto_gcm_ghash_update_sg(ghash, req->dst, req->cryptlen); 280 gctx->complete(areq, err);
314 crypto_gcm_ghash_final_xor(ghash, req->assoclen, req->cryptlen, 281}
315 auth_tag); 282
283static void gcm_hash_len_done(struct crypto_async_request *areq,
284 int err)
285{
286 struct aead_request *req = areq->data;
287 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
288
289 if (!err) {
290 err = gcm_hash_final(req, pctx);
291 if (err == -EINPROGRESS || err == -EBUSY)
292 return;
293 }
294
295 gcm_hash_final_done(areq, err);
296}
297
298static void gcm_hash_crypt_remain_done(struct crypto_async_request *areq,
299 int err)
300{
301 struct aead_request *req = areq->data;
302 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
303
304 if (!err) {
305 err = gcm_hash_len(req, pctx);
306 if (err == -EINPROGRESS || err == -EBUSY)
307 return;
308 }
309
310 gcm_hash_len_done(areq, err);
311}
312
313static void gcm_hash_crypt_done(struct crypto_async_request *areq,
314 int err)
315{
316 struct aead_request *req = areq->data;
317 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
318 struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
319 unsigned int remain;
320
321 if (!err) {
322 remain = gcm_remain(gctx->cryptlen);
323 BUG_ON(!remain);
324 err = gcm_hash_remain(req, pctx, remain,
325 gcm_hash_crypt_remain_done);
326 if (err == -EINPROGRESS || err == -EBUSY)
327 return;
328 }
329
330 gcm_hash_crypt_remain_done(areq, err);
331}
332
333static void gcm_hash_assoc_remain_done(struct crypto_async_request *areq,
334 int err)
335{
336 struct aead_request *req = areq->data;
337 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
338 struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
339 crypto_completion_t complete;
340 unsigned int remain = 0;
341
342 if (!err && gctx->cryptlen) {
343 remain = gcm_remain(gctx->cryptlen);
344 complete = remain ? gcm_hash_crypt_done :
345 gcm_hash_crypt_remain_done;
346 err = gcm_hash_update(req, pctx, complete,
347 gctx->src, gctx->cryptlen);
348 if (err == -EINPROGRESS || err == -EBUSY)
349 return;
350 }
351
352 if (remain)
353 gcm_hash_crypt_done(areq, err);
354 else
355 gcm_hash_crypt_remain_done(areq, err);
356}
357
358static void gcm_hash_assoc_done(struct crypto_async_request *areq,
359 int err)
360{
361 struct aead_request *req = areq->data;
362 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
363 unsigned int remain;
364
365 if (!err) {
366 remain = gcm_remain(req->assoclen);
367 BUG_ON(!remain);
368 err = gcm_hash_remain(req, pctx, remain,
369 gcm_hash_assoc_remain_done);
370 if (err == -EINPROGRESS || err == -EBUSY)
371 return;
372 }
373
374 gcm_hash_assoc_remain_done(areq, err);
375}
376
377static void gcm_hash_init_done(struct crypto_async_request *areq,
378 int err)
379{
380 struct aead_request *req = areq->data;
381 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
382 crypto_completion_t complete;
383 unsigned int remain = 0;
384
385 if (!err && req->assoclen) {
386 remain = gcm_remain(req->assoclen);
387 complete = remain ? gcm_hash_assoc_done :
388 gcm_hash_assoc_remain_done;
389 err = gcm_hash_update(req, pctx, complete,
390 req->assoc, req->assoclen);
391 if (err == -EINPROGRESS || err == -EBUSY)
392 return;
393 }
394
395 if (remain)
396 gcm_hash_assoc_done(areq, err);
397 else
398 gcm_hash_assoc_remain_done(areq, err);
399}
400
401static int gcm_hash(struct aead_request *req,
402 struct crypto_gcm_req_priv_ctx *pctx)
403{
404 struct ahash_request *ahreq = &pctx->u.ahreq;
405 struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
406 struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
407 unsigned int remain;
408 crypto_completion_t complete;
409 int err;
410
411 ahash_request_set_tfm(ahreq, ctx->ghash);
412
413 ahash_request_set_callback(ahreq, aead_request_flags(req),
414 gcm_hash_init_done, req);
415 err = crypto_ahash_init(ahreq);
416 if (err)
417 return err;
418 remain = gcm_remain(req->assoclen);
419 complete = remain ? gcm_hash_assoc_done : gcm_hash_assoc_remain_done;
420 err = gcm_hash_update(req, pctx, complete, req->assoc, req->assoclen);
421 if (err)
422 return err;
423 if (remain) {
424 err = gcm_hash_remain(req, pctx, remain,
425 gcm_hash_assoc_remain_done);
426 if (err)
427 return err;
428 }
429 remain = gcm_remain(gctx->cryptlen);
430 complete = remain ? gcm_hash_crypt_done : gcm_hash_crypt_remain_done;
431 err = gcm_hash_update(req, pctx, complete, gctx->src, gctx->cryptlen);
432 if (err)
433 return err;
434 if (remain) {
435 err = gcm_hash_remain(req, pctx, remain,
436 gcm_hash_crypt_remain_done);
437 if (err)
438 return err;
439 }
440 err = gcm_hash_len(req, pctx);
441 if (err)
442 return err;
443 err = gcm_hash_final(req, pctx);
444 if (err)
445 return err;
446
447 return 0;
448}
449
450static void gcm_enc_copy_hash(struct aead_request *req,
451 struct crypto_gcm_req_priv_ctx *pctx)
452{
453 struct crypto_aead *aead = crypto_aead_reqtfm(req);
454 u8 *auth_tag = pctx->auth_tag;
316 455
317 scatterwalk_map_and_copy(auth_tag, req->dst, req->cryptlen, 456 scatterwalk_map_and_copy(auth_tag, req->dst, req->cryptlen,
318 crypto_aead_authsize(aead), 1); 457 crypto_aead_authsize(aead), 1);
319 return 0;
320} 458}
321 459
322static void crypto_gcm_encrypt_done(struct crypto_async_request *areq, int err) 460static void gcm_enc_hash_done(struct crypto_async_request *areq,
461 int err)
323{ 462{
324 struct aead_request *req = areq->data; 463 struct aead_request *req = areq->data;
464 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
325 465
326 if (!err) 466 if (!err)
327 err = crypto_gcm_hash(req); 467 gcm_enc_copy_hash(req, pctx);
328 468
329 aead_request_complete(req, err); 469 aead_request_complete(req, err);
330} 470}
331 471
472static void gcm_encrypt_done(struct crypto_async_request *areq,
473 int err)
474{
475 struct aead_request *req = areq->data;
476 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
477
478 if (!err) {
479 err = gcm_hash(req, pctx);
480 if (err == -EINPROGRESS || err == -EBUSY)
481 return;
482 }
483
484 gcm_enc_hash_done(areq, err);
485}
486
332static int crypto_gcm_encrypt(struct aead_request *req) 487static int crypto_gcm_encrypt(struct aead_request *req)
333{ 488{
334 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); 489 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
335 struct ablkcipher_request *abreq = &pctx->abreq; 490 struct ablkcipher_request *abreq = &pctx->u.abreq;
491 struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
336 int err; 492 int err;
337 493
338 crypto_gcm_init_crypt(abreq, req, req->cryptlen); 494 crypto_gcm_init_crypt(abreq, req, req->cryptlen);
339 ablkcipher_request_set_callback(abreq, aead_request_flags(req), 495 ablkcipher_request_set_callback(abreq, aead_request_flags(req),
340 crypto_gcm_encrypt_done, req); 496 gcm_encrypt_done, req);
497
498 gctx->src = req->dst;
499 gctx->cryptlen = req->cryptlen;
500 gctx->complete = gcm_enc_hash_done;
341 501
342 err = crypto_ablkcipher_encrypt(abreq); 502 err = crypto_ablkcipher_encrypt(abreq);
343 if (err) 503 if (err)
344 return err; 504 return err;
345 505
346 return crypto_gcm_hash(req); 506 err = gcm_hash(req, pctx);
507 if (err)
508 return err;
509
510 crypto_xor(pctx->auth_tag, pctx->iauth_tag, 16);
511 gcm_enc_copy_hash(req, pctx);
512
513 return 0;
347} 514}
348 515
349static int crypto_gcm_verify(struct aead_request *req) 516static int crypto_gcm_verify(struct aead_request *req,
517 struct crypto_gcm_req_priv_ctx *pctx)
350{ 518{
351 struct crypto_aead *aead = crypto_aead_reqtfm(req); 519 struct crypto_aead *aead = crypto_aead_reqtfm(req);
352 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
353 struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash;
354 u8 *auth_tag = pctx->auth_tag; 520 u8 *auth_tag = pctx->auth_tag;
355 u8 *iauth_tag = pctx->iauth_tag; 521 u8 *iauth_tag = pctx->iauth_tag;
356 unsigned int authsize = crypto_aead_authsize(aead); 522 unsigned int authsize = crypto_aead_authsize(aead);
357 unsigned int cryptlen = req->cryptlen - authsize; 523 unsigned int cryptlen = req->cryptlen - authsize;
358 524
359 crypto_gcm_ghash_final_xor(ghash, req->assoclen, cryptlen, auth_tag); 525 crypto_xor(auth_tag, iauth_tag, 16);
360
361 authsize = crypto_aead_authsize(aead);
362 scatterwalk_map_and_copy(iauth_tag, req->src, cryptlen, authsize, 0); 526 scatterwalk_map_and_copy(iauth_tag, req->src, cryptlen, authsize, 0);
363 return memcmp(iauth_tag, auth_tag, authsize) ? -EBADMSG : 0; 527 return memcmp(iauth_tag, auth_tag, authsize) ? -EBADMSG : 0;
364} 528}
365 529
366static void crypto_gcm_decrypt_done(struct crypto_async_request *areq, int err) 530static void gcm_decrypt_done(struct crypto_async_request *areq, int err)
367{ 531{
368 struct aead_request *req = areq->data; 532 struct aead_request *req = areq->data;
533 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
369 534
370 if (!err) 535 if (!err)
371 err = crypto_gcm_verify(req); 536 err = crypto_gcm_verify(req, pctx);
372 537
373 aead_request_complete(req, err); 538 aead_request_complete(req, err);
374} 539}
375 540
541static void gcm_dec_hash_done(struct crypto_async_request *areq, int err)
542{
543 struct aead_request *req = areq->data;
544 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
545 struct ablkcipher_request *abreq = &pctx->u.abreq;
546 struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
547
548 if (!err) {
549 ablkcipher_request_set_callback(abreq, aead_request_flags(req),
550 gcm_decrypt_done, req);
551 crypto_gcm_init_crypt(abreq, req, gctx->cryptlen);
552 err = crypto_ablkcipher_decrypt(abreq);
553 if (err == -EINPROGRESS || err == -EBUSY)
554 return;
555 }
556
557 gcm_decrypt_done(areq, err);
558}
559
376static int crypto_gcm_decrypt(struct aead_request *req) 560static int crypto_gcm_decrypt(struct aead_request *req)
377{ 561{
378 struct crypto_aead *aead = crypto_aead_reqtfm(req); 562 struct crypto_aead *aead = crypto_aead_reqtfm(req);
379 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); 563 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
380 struct ablkcipher_request *abreq = &pctx->abreq; 564 struct ablkcipher_request *abreq = &pctx->u.abreq;
381 struct crypto_gcm_ghash_ctx *ghash = &pctx->ghash; 565 struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
382 unsigned int cryptlen = req->cryptlen;
383 unsigned int authsize = crypto_aead_authsize(aead); 566 unsigned int authsize = crypto_aead_authsize(aead);
567 unsigned int cryptlen = req->cryptlen;
384 int err; 568 int err;
385 569
386 if (cryptlen < authsize) 570 if (cryptlen < authsize)
387 return -EINVAL; 571 return -EINVAL;
388 cryptlen -= authsize; 572 cryptlen -= authsize;
389 573
390 crypto_gcm_init_crypt(abreq, req, cryptlen); 574 gctx->src = req->src;
391 ablkcipher_request_set_callback(abreq, aead_request_flags(req), 575 gctx->cryptlen = cryptlen;
392 crypto_gcm_decrypt_done, req); 576 gctx->complete = gcm_dec_hash_done;
393 577
394 crypto_gcm_ghash_update_sg(ghash, req->src, cryptlen); 578 err = gcm_hash(req, pctx);
579 if (err)
580 return err;
395 581
582 ablkcipher_request_set_callback(abreq, aead_request_flags(req),
583 gcm_decrypt_done, req);
584 crypto_gcm_init_crypt(abreq, req, cryptlen);
396 err = crypto_ablkcipher_decrypt(abreq); 585 err = crypto_ablkcipher_decrypt(abreq);
397 if (err) 586 if (err)
398 return err; 587 return err;
399 588
400 return crypto_gcm_verify(req); 589 return crypto_gcm_verify(req, pctx);
401} 590}
402 591
403static int crypto_gcm_init_tfm(struct crypto_tfm *tfm) 592static int crypto_gcm_init_tfm(struct crypto_tfm *tfm)
@@ -406,43 +595,56 @@ static int crypto_gcm_init_tfm(struct crypto_tfm *tfm)
406 struct gcm_instance_ctx *ictx = crypto_instance_ctx(inst); 595 struct gcm_instance_ctx *ictx = crypto_instance_ctx(inst);
407 struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(tfm); 596 struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(tfm);
408 struct crypto_ablkcipher *ctr; 597 struct crypto_ablkcipher *ctr;
598 struct crypto_ahash *ghash;
409 unsigned long align; 599 unsigned long align;
410 int err; 600 int err;
411 601
602 ghash = crypto_spawn_ahash(&ictx->ghash);
603 if (IS_ERR(ghash))
604 return PTR_ERR(ghash);
605
412 ctr = crypto_spawn_skcipher(&ictx->ctr); 606 ctr = crypto_spawn_skcipher(&ictx->ctr);
413 err = PTR_ERR(ctr); 607 err = PTR_ERR(ctr);
414 if (IS_ERR(ctr)) 608 if (IS_ERR(ctr))
415 return err; 609 goto err_free_hash;
416 610
417 ctx->ctr = ctr; 611 ctx->ctr = ctr;
418 ctx->gf128 = NULL; 612 ctx->ghash = ghash;
419 613
420 align = crypto_tfm_alg_alignmask(tfm); 614 align = crypto_tfm_alg_alignmask(tfm);
421 align &= ~(crypto_tfm_ctx_alignment() - 1); 615 align &= ~(crypto_tfm_ctx_alignment() - 1);
422 tfm->crt_aead.reqsize = align + 616 tfm->crt_aead.reqsize = align +
423 sizeof(struct crypto_gcm_req_priv_ctx) + 617 offsetof(struct crypto_gcm_req_priv_ctx, u) +
424 crypto_ablkcipher_reqsize(ctr); 618 max(sizeof(struct ablkcipher_request) +
619 crypto_ablkcipher_reqsize(ctr),
620 sizeof(struct ahash_request) +
621 crypto_ahash_reqsize(ghash));
425 622
426 return 0; 623 return 0;
624
625err_free_hash:
626 crypto_free_ahash(ghash);
627 return err;
427} 628}
428 629
429static void crypto_gcm_exit_tfm(struct crypto_tfm *tfm) 630static void crypto_gcm_exit_tfm(struct crypto_tfm *tfm)
430{ 631{
431 struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(tfm); 632 struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(tfm);
432 633
433 if (ctx->gf128 != NULL) 634 crypto_free_ahash(ctx->ghash);
434 gf128mul_free_4k(ctx->gf128);
435
436 crypto_free_ablkcipher(ctx->ctr); 635 crypto_free_ablkcipher(ctx->ctr);
437} 636}
438 637
439static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb, 638static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb,
440 const char *full_name, 639 const char *full_name,
441 const char *ctr_name) 640 const char *ctr_name,
641 const char *ghash_name)
442{ 642{
443 struct crypto_attr_type *algt; 643 struct crypto_attr_type *algt;
444 struct crypto_instance *inst; 644 struct crypto_instance *inst;
445 struct crypto_alg *ctr; 645 struct crypto_alg *ctr;
646 struct crypto_alg *ghash_alg;
647 struct ahash_alg *ghash_ahash_alg;
446 struct gcm_instance_ctx *ctx; 648 struct gcm_instance_ctx *ctx;
447 int err; 649 int err;
448 650
@@ -454,17 +656,31 @@ static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb,
454 if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) 656 if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
455 return ERR_PTR(-EINVAL); 657 return ERR_PTR(-EINVAL);
456 658
659 ghash_alg = crypto_find_alg(ghash_name, &crypto_ahash_type,
660 CRYPTO_ALG_TYPE_HASH,
661 CRYPTO_ALG_TYPE_AHASH_MASK);
662 err = PTR_ERR(ghash_alg);
663 if (IS_ERR(ghash_alg))
664 return ERR_PTR(err);
665
666 err = -ENOMEM;
457 inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); 667 inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
458 if (!inst) 668 if (!inst)
459 return ERR_PTR(-ENOMEM); 669 goto out_put_ghash;
460 670
461 ctx = crypto_instance_ctx(inst); 671 ctx = crypto_instance_ctx(inst);
672 ghash_ahash_alg = container_of(ghash_alg, struct ahash_alg, halg.base);
673 err = crypto_init_ahash_spawn(&ctx->ghash, &ghash_ahash_alg->halg,
674 inst);
675 if (err)
676 goto err_free_inst;
677
462 crypto_set_skcipher_spawn(&ctx->ctr, inst); 678 crypto_set_skcipher_spawn(&ctx->ctr, inst);
463 err = crypto_grab_skcipher(&ctx->ctr, ctr_name, 0, 679 err = crypto_grab_skcipher(&ctx->ctr, ctr_name, 0,
464 crypto_requires_sync(algt->type, 680 crypto_requires_sync(algt->type,
465 algt->mask)); 681 algt->mask));
466 if (err) 682 if (err)
467 goto err_free_inst; 683 goto err_drop_ghash;
468 684
469 ctr = crypto_skcipher_spawn_alg(&ctx->ctr); 685 ctr = crypto_skcipher_spawn_alg(&ctx->ctr);
470 686
@@ -479,7 +695,8 @@ static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb,
479 695
480 err = -ENAMETOOLONG; 696 err = -ENAMETOOLONG;
481 if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, 697 if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
482 "gcm_base(%s)", ctr->cra_driver_name) >= 698 "gcm_base(%s,%s)", ctr->cra_driver_name,
699 ghash_alg->cra_driver_name) >=
483 CRYPTO_MAX_ALG_NAME) 700 CRYPTO_MAX_ALG_NAME)
484 goto out_put_ctr; 701 goto out_put_ctr;
485 702
@@ -502,12 +719,16 @@ static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb,
502 inst->alg.cra_aead.decrypt = crypto_gcm_decrypt; 719 inst->alg.cra_aead.decrypt = crypto_gcm_decrypt;
503 720
504out: 721out:
722 crypto_mod_put(ghash_alg);
505 return inst; 723 return inst;
506 724
507out_put_ctr: 725out_put_ctr:
508 crypto_drop_skcipher(&ctx->ctr); 726 crypto_drop_skcipher(&ctx->ctr);
727err_drop_ghash:
728 crypto_drop_ahash(&ctx->ghash);
509err_free_inst: 729err_free_inst:
510 kfree(inst); 730 kfree(inst);
731out_put_ghash:
511 inst = ERR_PTR(err); 732 inst = ERR_PTR(err);
512 goto out; 733 goto out;
513} 734}
@@ -532,7 +753,7 @@ static struct crypto_instance *crypto_gcm_alloc(struct rtattr **tb)
532 CRYPTO_MAX_ALG_NAME) 753 CRYPTO_MAX_ALG_NAME)
533 return ERR_PTR(-ENAMETOOLONG); 754 return ERR_PTR(-ENAMETOOLONG);
534 755
535 return crypto_gcm_alloc_common(tb, full_name, ctr_name); 756 return crypto_gcm_alloc_common(tb, full_name, ctr_name, "ghash");
536} 757}
537 758
538static void crypto_gcm_free(struct crypto_instance *inst) 759static void crypto_gcm_free(struct crypto_instance *inst)
@@ -540,6 +761,7 @@ static void crypto_gcm_free(struct crypto_instance *inst)
540 struct gcm_instance_ctx *ctx = crypto_instance_ctx(inst); 761 struct gcm_instance_ctx *ctx = crypto_instance_ctx(inst);
541 762
542 crypto_drop_skcipher(&ctx->ctr); 763 crypto_drop_skcipher(&ctx->ctr);
764 crypto_drop_ahash(&ctx->ghash);
543 kfree(inst); 765 kfree(inst);
544} 766}
545 767
@@ -554,6 +776,7 @@ static struct crypto_instance *crypto_gcm_base_alloc(struct rtattr **tb)
554{ 776{
555 int err; 777 int err;
556 const char *ctr_name; 778 const char *ctr_name;
779 const char *ghash_name;
557 char full_name[CRYPTO_MAX_ALG_NAME]; 780 char full_name[CRYPTO_MAX_ALG_NAME];
558 781
559 ctr_name = crypto_attr_alg_name(tb[1]); 782 ctr_name = crypto_attr_alg_name(tb[1]);
@@ -561,11 +784,16 @@ static struct crypto_instance *crypto_gcm_base_alloc(struct rtattr **tb)
561 if (IS_ERR(ctr_name)) 784 if (IS_ERR(ctr_name))
562 return ERR_PTR(err); 785 return ERR_PTR(err);
563 786
564 if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm_base(%s)", 787 ghash_name = crypto_attr_alg_name(tb[2]);
565 ctr_name) >= CRYPTO_MAX_ALG_NAME) 788 err = PTR_ERR(ghash_name);
789 if (IS_ERR(ghash_name))
790 return ERR_PTR(err);
791
792 if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm_base(%s,%s)",
793 ctr_name, ghash_name) >= CRYPTO_MAX_ALG_NAME)
566 return ERR_PTR(-ENAMETOOLONG); 794 return ERR_PTR(-ENAMETOOLONG);
567 795
568 return crypto_gcm_alloc_common(tb, full_name, ctr_name); 796 return crypto_gcm_alloc_common(tb, full_name, ctr_name, ghash_name);
569} 797}
570 798
571static struct crypto_template crypto_gcm_base_tmpl = { 799static struct crypto_template crypto_gcm_base_tmpl = {
@@ -784,6 +1012,10 @@ static int __init crypto_gcm_module_init(void)
784{ 1012{
785 int err; 1013 int err;
786 1014
1015 gcm_zeroes = kzalloc(16, GFP_KERNEL);
1016 if (!gcm_zeroes)
1017 return -ENOMEM;
1018
787 err = crypto_register_template(&crypto_gcm_base_tmpl); 1019 err = crypto_register_template(&crypto_gcm_base_tmpl);
788 if (err) 1020 if (err)
789 goto out; 1021 goto out;
@@ -796,18 +1028,20 @@ static int __init crypto_gcm_module_init(void)
796 if (err) 1028 if (err)
797 goto out_undo_gcm; 1029 goto out_undo_gcm;
798 1030
799out: 1031 return 0;
800 return err;
801 1032
802out_undo_gcm: 1033out_undo_gcm:
803 crypto_unregister_template(&crypto_gcm_tmpl); 1034 crypto_unregister_template(&crypto_gcm_tmpl);
804out_undo_base: 1035out_undo_base:
805 crypto_unregister_template(&crypto_gcm_base_tmpl); 1036 crypto_unregister_template(&crypto_gcm_base_tmpl);
806 goto out; 1037out:
1038 kfree(gcm_zeroes);
1039 return err;
807} 1040}
808 1041
809static void __exit crypto_gcm_module_exit(void) 1042static void __exit crypto_gcm_module_exit(void)
810{ 1043{
1044 kfree(gcm_zeroes);
811 crypto_unregister_template(&crypto_rfc4106_tmpl); 1045 crypto_unregister_template(&crypto_rfc4106_tmpl);
812 crypto_unregister_template(&crypto_gcm_tmpl); 1046 crypto_unregister_template(&crypto_gcm_tmpl);
813 crypto_unregister_template(&crypto_gcm_base_tmpl); 1047 crypto_unregister_template(&crypto_gcm_base_tmpl);
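
With the gcm.c changes above, gcm_base carries an explicit GHASH parameter: "gcm(aes)" is now assembled from "ctr(aes)" plus the "ghash" ahash, and the two-argument form can also be requested directly. A hedged allocation sketch, assuming these algorithm strings and a hypothetical helper name.

    #include <linux/crypto.h>
    #include <linux/err.h>

    static struct crypto_aead *example_alloc_gcm(void)
    {
            /* Equivalent results: "gcm(aes)" resolves internally to the
             * ctr(aes)/ghash pair, or the base template can be spelled out. */
            return crypto_alloc_aead("gcm_base(ctr(aes),ghash)", 0, 0);
    }
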
diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c
new file mode 100644
index 000000000000..be4425616931
--- /dev/null
+++ b/crypto/ghash-generic.c
@@ -0,0 +1,170 @@
1/*
2 * GHASH: digest algorithm for GCM (Galois/Counter Mode).
3 *
4 * Copyright (c) 2007 Nokia Siemens Networks - Mikko Herranen <mh1@iki.fi>
5 * Copyright (c) 2009 Intel Corp.
6 * Author: Huang Ying <ying.huang@intel.com>
7 *
8 * The algorithm implementation is copied from gcm.c.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License version 2 as published
12 * by the Free Software Foundation.
13 */
14
15#include <crypto/algapi.h>
16#include <crypto/gf128mul.h>
17#include <crypto/internal/hash.h>
18#include <linux/crypto.h>
19#include <linux/init.h>
20#include <linux/kernel.h>
21#include <linux/module.h>
22
23#define GHASH_BLOCK_SIZE 16
24#define GHASH_DIGEST_SIZE 16
25
26struct ghash_ctx {
27 struct gf128mul_4k *gf128;
28};
29
30struct ghash_desc_ctx {
31 u8 buffer[GHASH_BLOCK_SIZE];
32 u32 bytes;
33};
34
35static int ghash_init(struct shash_desc *desc)
36{
37 struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
38
39 memset(dctx, 0, sizeof(*dctx));
40
41 return 0;
42}
43
44static int ghash_setkey(struct crypto_shash *tfm,
45 const u8 *key, unsigned int keylen)
46{
47 struct ghash_ctx *ctx = crypto_shash_ctx(tfm);
48
49 if (keylen != GHASH_BLOCK_SIZE) {
50 crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
51 return -EINVAL;
52 }
53
54 if (ctx->gf128)
55 gf128mul_free_4k(ctx->gf128);
56 ctx->gf128 = gf128mul_init_4k_lle((be128 *)key);
57 if (!ctx->gf128)
58 return -ENOMEM;
59
60 return 0;
61}
62
63static int ghash_update(struct shash_desc *desc,
64 const u8 *src, unsigned int srclen)
65{
66 struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
67 struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
68 u8 *dst = dctx->buffer;
69
70 if (dctx->bytes) {
71 int n = min(srclen, dctx->bytes);
72 u8 *pos = dst + (GHASH_BLOCK_SIZE - dctx->bytes);
73
74 dctx->bytes -= n;
75 srclen -= n;
76
77 while (n--)
78 *pos++ ^= *src++;
79
80 if (!dctx->bytes)
81 gf128mul_4k_lle((be128 *)dst, ctx->gf128);
82 }
83
84 while (srclen >= GHASH_BLOCK_SIZE) {
85 crypto_xor(dst, src, GHASH_BLOCK_SIZE);
86 gf128mul_4k_lle((be128 *)dst, ctx->gf128);
87 src += GHASH_BLOCK_SIZE;
88 srclen -= GHASH_BLOCK_SIZE;
89 }
90
91 if (srclen) {
92 dctx->bytes = GHASH_BLOCK_SIZE - srclen;
93 while (srclen--)
94 *dst++ ^= *src++;
95 }
96
97 return 0;
98}
99
100static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
101{
102 u8 *dst = dctx->buffer;
103
104 if (dctx->bytes) {
105 u8 *tmp = dst + (GHASH_BLOCK_SIZE - dctx->bytes);
106
107 while (dctx->bytes--)
108 *tmp++ ^= 0;
109
110 gf128mul_4k_lle((be128 *)dst, ctx->gf128);
111 }
112
113 dctx->bytes = 0;
114}
115
116static int ghash_final(struct shash_desc *desc, u8 *dst)
117{
118 struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
119 struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
120 u8 *buf = dctx->buffer;
121
122 ghash_flush(ctx, dctx);
123 memcpy(dst, buf, GHASH_BLOCK_SIZE);
124
125 return 0;
126}
127
128static void ghash_exit_tfm(struct crypto_tfm *tfm)
129{
130 struct ghash_ctx *ctx = crypto_tfm_ctx(tfm);
131 if (ctx->gf128)
132 gf128mul_free_4k(ctx->gf128);
133}
134
135static struct shash_alg ghash_alg = {
136 .digestsize = GHASH_DIGEST_SIZE,
137 .init = ghash_init,
138 .update = ghash_update,
139 .final = ghash_final,
140 .setkey = ghash_setkey,
141 .descsize = sizeof(struct ghash_desc_ctx),
142 .base = {
143 .cra_name = "ghash",
144 .cra_driver_name = "ghash-generic",
145 .cra_priority = 100,
146 .cra_flags = CRYPTO_ALG_TYPE_SHASH,
147 .cra_blocksize = GHASH_BLOCK_SIZE,
148 .cra_ctxsize = sizeof(struct ghash_ctx),
149 .cra_module = THIS_MODULE,
150 .cra_list = LIST_HEAD_INIT(ghash_alg.base.cra_list),
151 .cra_exit = ghash_exit_tfm,
152 },
153};
154
155static int __init ghash_mod_init(void)
156{
157 return crypto_register_shash(&ghash_alg);
158}
159
160static void __exit ghash_mod_exit(void)
161{
162 crypto_unregister_shash(&ghash_alg);
163}
164
165module_init(ghash_mod_init);
166module_exit(ghash_mod_exit);
167
168MODULE_LICENSE("GPL");
169MODULE_DESCRIPTION("GHASH Message Digest Algorithm");
170MODULE_ALIAS("ghash");
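
The hunk above splits GHASH out of gcm.c into a standalone synchronous hash. A minimal, hypothetical caller sketch follows (the helper name and error handling are ours, not part of the patch); it assumes only the shash API as converted elsewhere in this series.

/* Hypothetical caller sketch (not from the patch): exercising "ghash". */
#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/slab.h>

static int ghash_demo(const u8 key[16], const u8 *data, unsigned int len,
		      u8 digest[16])
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int err;

	tfm = crypto_alloc_shash("ghash", 0, 0);	/* resolves to ghash-generic */
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_shash_setkey(tfm, key, 16);	/* H, the hash subkey */
	if (err)
		goto out_free_tfm;

	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		err = -ENOMEM;
		goto out_free_tfm;
	}
	desc->tfm = tfm;
	desc->flags = 0;

	err = crypto_shash_digest(desc, data, len, digest);

	kfree(desc);
out_free_tfm:
	crypto_free_shash(tfm);
	return err;
}
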
diff --git a/crypto/hmac.c b/crypto/hmac.c
index 0ad39c374963..15c2eb534541 100644
--- a/crypto/hmac.c
+++ b/crypto/hmac.c
@@ -27,7 +27,7 @@
27#include <linux/string.h> 27#include <linux/string.h>
28 28
29struct hmac_ctx { 29struct hmac_ctx {
30 struct crypto_hash *child; 30 struct crypto_shash *hash;
31}; 31};
32 32
33static inline void *align_ptr(void *p, unsigned int align) 33static inline void *align_ptr(void *p, unsigned int align)
@@ -35,65 +35,45 @@ static inline void *align_ptr(void *p, unsigned int align)
35 return (void *)ALIGN((unsigned long)p, align); 35 return (void *)ALIGN((unsigned long)p, align);
36} 36}
37 37
38static inline struct hmac_ctx *hmac_ctx(struct crypto_hash *tfm) 38static inline struct hmac_ctx *hmac_ctx(struct crypto_shash *tfm)
39{ 39{
40 return align_ptr(crypto_hash_ctx_aligned(tfm) + 40 return align_ptr(crypto_shash_ctx_aligned(tfm) +
41 crypto_hash_blocksize(tfm) * 2 + 41 crypto_shash_statesize(tfm) * 2,
42 crypto_hash_digestsize(tfm), sizeof(void *)); 42 crypto_tfm_ctx_alignment());
43} 43}
44 44
45static int hmac_setkey(struct crypto_hash *parent, 45static int hmac_setkey(struct crypto_shash *parent,
46 const u8 *inkey, unsigned int keylen) 46 const u8 *inkey, unsigned int keylen)
47{ 47{
48 int bs = crypto_hash_blocksize(parent); 48 int bs = crypto_shash_blocksize(parent);
49 int ds = crypto_hash_digestsize(parent); 49 int ds = crypto_shash_digestsize(parent);
50 char *ipad = crypto_hash_ctx_aligned(parent); 50 int ss = crypto_shash_statesize(parent);
51 char *opad = ipad + bs; 51 char *ipad = crypto_shash_ctx_aligned(parent);
52 char *digest = opad + bs; 52 char *opad = ipad + ss;
53 struct hmac_ctx *ctx = align_ptr(digest + ds, sizeof(void *)); 53 struct hmac_ctx *ctx = align_ptr(opad + ss,
54 struct crypto_hash *tfm = ctx->child; 54 crypto_tfm_ctx_alignment());
55 struct crypto_shash *hash = ctx->hash;
56 struct {
57 struct shash_desc shash;
58 char ctx[crypto_shash_descsize(hash)];
59 } desc;
55 unsigned int i; 60 unsigned int i;
56 61
62 desc.shash.tfm = hash;
63 desc.shash.flags = crypto_shash_get_flags(parent) &
64 CRYPTO_TFM_REQ_MAY_SLEEP;
65
57 if (keylen > bs) { 66 if (keylen > bs) {
58 struct hash_desc desc;
59 struct scatterlist tmp;
60 int tmplen;
61 int err; 67 int err;
62 68
63 desc.tfm = tfm; 69 err = crypto_shash_digest(&desc.shash, inkey, keylen, ipad);
64 desc.flags = crypto_hash_get_flags(parent);
65 desc.flags &= CRYPTO_TFM_REQ_MAY_SLEEP;
66
67 err = crypto_hash_init(&desc);
68 if (err) 70 if (err)
69 return err; 71 return err;
70 72
71 tmplen = bs * 2 + ds;
72 sg_init_one(&tmp, ipad, tmplen);
73
74 for (; keylen > tmplen; inkey += tmplen, keylen -= tmplen) {
75 memcpy(ipad, inkey, tmplen);
76 err = crypto_hash_update(&desc, &tmp, tmplen);
77 if (err)
78 return err;
79 }
80
81 if (keylen) {
82 memcpy(ipad, inkey, keylen);
83 err = crypto_hash_update(&desc, &tmp, keylen);
84 if (err)
85 return err;
86 }
87
88 err = crypto_hash_final(&desc, digest);
89 if (err)
90 return err;
91
92 inkey = digest;
93 keylen = ds; 73 keylen = ds;
94 } 74 } else
75 memcpy(ipad, inkey, keylen);
95 76
96 memcpy(ipad, inkey, keylen);
97 memset(ipad + keylen, 0, bs - keylen); 77 memset(ipad + keylen, 0, bs - keylen);
98 memcpy(opad, ipad, bs); 78 memcpy(opad, ipad, bs);
99 79
@@ -102,184 +82,178 @@ static int hmac_setkey(struct crypto_hash *parent,
102 opad[i] ^= 0x5c; 82 opad[i] ^= 0x5c;
103 } 83 }
104 84
105 return 0; 85 return crypto_shash_init(&desc.shash) ?:
86 crypto_shash_update(&desc.shash, ipad, bs) ?:
87 crypto_shash_export(&desc.shash, ipad) ?:
88 crypto_shash_init(&desc.shash) ?:
89 crypto_shash_update(&desc.shash, opad, bs) ?:
90 crypto_shash_export(&desc.shash, opad);
106} 91}
107 92
108static int hmac_init(struct hash_desc *pdesc) 93static int hmac_export(struct shash_desc *pdesc, void *out)
109{ 94{
110 struct crypto_hash *parent = pdesc->tfm; 95 struct shash_desc *desc = shash_desc_ctx(pdesc);
111 int bs = crypto_hash_blocksize(parent);
112 int ds = crypto_hash_digestsize(parent);
113 char *ipad = crypto_hash_ctx_aligned(parent);
114 struct hmac_ctx *ctx = align_ptr(ipad + bs * 2 + ds, sizeof(void *));
115 struct hash_desc desc;
116 struct scatterlist tmp;
117 int err;
118 96
119 desc.tfm = ctx->child; 97 desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
120 desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
121 sg_init_one(&tmp, ipad, bs);
122 98
123 err = crypto_hash_init(&desc); 99 return crypto_shash_export(desc, out);
124 if (unlikely(err))
125 return err;
126
127 return crypto_hash_update(&desc, &tmp, bs);
128} 100}
129 101
130static int hmac_update(struct hash_desc *pdesc, 102static int hmac_import(struct shash_desc *pdesc, const void *in)
131 struct scatterlist *sg, unsigned int nbytes)
132{ 103{
104 struct shash_desc *desc = shash_desc_ctx(pdesc);
133 struct hmac_ctx *ctx = hmac_ctx(pdesc->tfm); 105 struct hmac_ctx *ctx = hmac_ctx(pdesc->tfm);
134 struct hash_desc desc;
135 106
136 desc.tfm = ctx->child; 107 desc->tfm = ctx->hash;
137 desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; 108 desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
138 109
139 return crypto_hash_update(&desc, sg, nbytes); 110 return crypto_shash_import(desc, in);
140} 111}
141 112
142static int hmac_final(struct hash_desc *pdesc, u8 *out) 113static int hmac_init(struct shash_desc *pdesc)
143{ 114{
144 struct crypto_hash *parent = pdesc->tfm; 115 return hmac_import(pdesc, crypto_shash_ctx_aligned(pdesc->tfm));
145 int bs = crypto_hash_blocksize(parent); 116}
146 int ds = crypto_hash_digestsize(parent);
147 char *opad = crypto_hash_ctx_aligned(parent) + bs;
148 char *digest = opad + bs;
149 struct hmac_ctx *ctx = align_ptr(digest + ds, sizeof(void *));
150 struct hash_desc desc;
151 struct scatterlist tmp;
152 int err;
153 117
154 desc.tfm = ctx->child; 118static int hmac_update(struct shash_desc *pdesc,
155 desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; 119 const u8 *data, unsigned int nbytes)
156 sg_init_one(&tmp, opad, bs + ds); 120{
121 struct shash_desc *desc = shash_desc_ctx(pdesc);
157 122
158 err = crypto_hash_final(&desc, digest); 123 desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
159 if (unlikely(err))
160 return err;
161 124
162 return crypto_hash_digest(&desc, &tmp, bs + ds, out); 125 return crypto_shash_update(desc, data, nbytes);
163} 126}
164 127
165static int hmac_digest(struct hash_desc *pdesc, struct scatterlist *sg, 128static int hmac_final(struct shash_desc *pdesc, u8 *out)
166 unsigned int nbytes, u8 *out)
167{ 129{
168 struct crypto_hash *parent = pdesc->tfm; 130 struct crypto_shash *parent = pdesc->tfm;
169 int bs = crypto_hash_blocksize(parent); 131 int ds = crypto_shash_digestsize(parent);
170 int ds = crypto_hash_digestsize(parent); 132 int ss = crypto_shash_statesize(parent);
171 char *ipad = crypto_hash_ctx_aligned(parent); 133 char *opad = crypto_shash_ctx_aligned(parent) + ss;
172 char *opad = ipad + bs; 134 struct shash_desc *desc = shash_desc_ctx(pdesc);
173 char *digest = opad + bs;
174 struct hmac_ctx *ctx = align_ptr(digest + ds, sizeof(void *));
175 struct hash_desc desc;
176 struct scatterlist sg1[2];
177 struct scatterlist sg2[1];
178 int err;
179 135
180 desc.tfm = ctx->child; 136 desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
181 desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
182 137
183 sg_init_table(sg1, 2); 138 return crypto_shash_final(desc, out) ?:
184 sg_set_buf(sg1, ipad, bs); 139 crypto_shash_import(desc, opad) ?:
185 scatterwalk_sg_chain(sg1, 2, sg); 140 crypto_shash_finup(desc, out, ds, out);
141}
186 142
187 sg_init_table(sg2, 1); 143static int hmac_finup(struct shash_desc *pdesc, const u8 *data,
188 sg_set_buf(sg2, opad, bs + ds); 144 unsigned int nbytes, u8 *out)
145{
189 146
190 err = crypto_hash_digest(&desc, sg1, nbytes + bs, digest); 147 struct crypto_shash *parent = pdesc->tfm;
191 if (unlikely(err)) 148 int ds = crypto_shash_digestsize(parent);
192 return err; 149 int ss = crypto_shash_statesize(parent);
150 char *opad = crypto_shash_ctx_aligned(parent) + ss;
151 struct shash_desc *desc = shash_desc_ctx(pdesc);
193 152
194 return crypto_hash_digest(&desc, sg2, bs + ds, out); 153 desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
154
155 return crypto_shash_finup(desc, data, nbytes, out) ?:
156 crypto_shash_import(desc, opad) ?:
157 crypto_shash_finup(desc, out, ds, out);
195} 158}
196 159
197static int hmac_init_tfm(struct crypto_tfm *tfm) 160static int hmac_init_tfm(struct crypto_tfm *tfm)
198{ 161{
199 struct crypto_hash *hash; 162 struct crypto_shash *parent = __crypto_shash_cast(tfm);
163 struct crypto_shash *hash;
200 struct crypto_instance *inst = (void *)tfm->__crt_alg; 164 struct crypto_instance *inst = (void *)tfm->__crt_alg;
201 struct crypto_spawn *spawn = crypto_instance_ctx(inst); 165 struct crypto_shash_spawn *spawn = crypto_instance_ctx(inst);
202 struct hmac_ctx *ctx = hmac_ctx(__crypto_hash_cast(tfm)); 166 struct hmac_ctx *ctx = hmac_ctx(parent);
203 167
204 hash = crypto_spawn_hash(spawn); 168 hash = crypto_spawn_shash(spawn);
205 if (IS_ERR(hash)) 169 if (IS_ERR(hash))
206 return PTR_ERR(hash); 170 return PTR_ERR(hash);
207 171
208 ctx->child = hash; 172 parent->descsize = sizeof(struct shash_desc) +
173 crypto_shash_descsize(hash);
174
175 ctx->hash = hash;
209 return 0; 176 return 0;
210} 177}
211 178
212static void hmac_exit_tfm(struct crypto_tfm *tfm) 179static void hmac_exit_tfm(struct crypto_tfm *tfm)
213{ 180{
214 struct hmac_ctx *ctx = hmac_ctx(__crypto_hash_cast(tfm)); 181 struct hmac_ctx *ctx = hmac_ctx(__crypto_shash_cast(tfm));
215 crypto_free_hash(ctx->child); 182 crypto_free_shash(ctx->hash);
216} 183}
217 184
218static void hmac_free(struct crypto_instance *inst) 185static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb)
219{ 186{
220 crypto_drop_spawn(crypto_instance_ctx(inst)); 187 struct shash_instance *inst;
221 kfree(inst);
222}
223
224static struct crypto_instance *hmac_alloc(struct rtattr **tb)
225{
226 struct crypto_instance *inst;
227 struct crypto_alg *alg; 188 struct crypto_alg *alg;
189 struct shash_alg *salg;
228 int err; 190 int err;
229 int ds; 191 int ds;
192 int ss;
230 193
231 err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_HASH); 194 err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH);
232 if (err) 195 if (err)
233 return ERR_PTR(err); 196 return err;
234 197
235 alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_HASH, 198 salg = shash_attr_alg(tb[1], 0, 0);
236 CRYPTO_ALG_TYPE_HASH_MASK); 199 if (IS_ERR(salg))
237 if (IS_ERR(alg)) 200 return PTR_ERR(salg);
238 return ERR_CAST(alg); 201
239 202 err = -EINVAL;
240 inst = ERR_PTR(-EINVAL); 203 ds = salg->digestsize;
241 ds = alg->cra_type == &crypto_hash_type ? 204 ss = salg->statesize;
242 alg->cra_hash.digestsize : 205 alg = &salg->base;
243 alg->cra_type ? 206 if (ds > alg->cra_blocksize ||
244 __crypto_shash_alg(alg)->digestsize : 207 ss < alg->cra_blocksize)
245 alg->cra_digest.dia_digestsize;
246 if (ds > alg->cra_blocksize)
247 goto out_put_alg; 208 goto out_put_alg;
248 209
249 inst = crypto_alloc_instance("hmac", alg); 210 inst = shash_alloc_instance("hmac", alg);
211 err = PTR_ERR(inst);
250 if (IS_ERR(inst)) 212 if (IS_ERR(inst))
251 goto out_put_alg; 213 goto out_put_alg;
252 214
253 inst->alg.cra_flags = CRYPTO_ALG_TYPE_HASH; 215 err = crypto_init_shash_spawn(shash_instance_ctx(inst), salg,
254 inst->alg.cra_priority = alg->cra_priority; 216 shash_crypto_instance(inst));
255 inst->alg.cra_blocksize = alg->cra_blocksize; 217 if (err)
256 inst->alg.cra_alignmask = alg->cra_alignmask; 218 goto out_free_inst;
257 inst->alg.cra_type = &crypto_hash_type; 219
258 220 inst->alg.base.cra_priority = alg->cra_priority;
259 inst->alg.cra_hash.digestsize = ds; 221 inst->alg.base.cra_blocksize = alg->cra_blocksize;
260 222 inst->alg.base.cra_alignmask = alg->cra_alignmask;
261 inst->alg.cra_ctxsize = sizeof(struct hmac_ctx) + 223
262 ALIGN(inst->alg.cra_blocksize * 2 + ds, 224 ss = ALIGN(ss, alg->cra_alignmask + 1);
263 sizeof(void *)); 225 inst->alg.digestsize = ds;
264 226 inst->alg.statesize = ss;
265 inst->alg.cra_init = hmac_init_tfm; 227
266 inst->alg.cra_exit = hmac_exit_tfm; 228 inst->alg.base.cra_ctxsize = sizeof(struct hmac_ctx) +
267 229 ALIGN(ss * 2, crypto_tfm_ctx_alignment());
268 inst->alg.cra_hash.init = hmac_init; 230
269 inst->alg.cra_hash.update = hmac_update; 231 inst->alg.base.cra_init = hmac_init_tfm;
270 inst->alg.cra_hash.final = hmac_final; 232 inst->alg.base.cra_exit = hmac_exit_tfm;
271 inst->alg.cra_hash.digest = hmac_digest; 233
272 inst->alg.cra_hash.setkey = hmac_setkey; 234 inst->alg.init = hmac_init;
235 inst->alg.update = hmac_update;
236 inst->alg.final = hmac_final;
237 inst->alg.finup = hmac_finup;
238 inst->alg.export = hmac_export;
239 inst->alg.import = hmac_import;
240 inst->alg.setkey = hmac_setkey;
241
242 err = shash_register_instance(tmpl, inst);
243 if (err) {
244out_free_inst:
245 shash_free_instance(shash_crypto_instance(inst));
246 }
273 247
274out_put_alg: 248out_put_alg:
275 crypto_mod_put(alg); 249 crypto_mod_put(alg);
276 return inst; 250 return err;
277} 251}
278 252
279static struct crypto_template hmac_tmpl = { 253static struct crypto_template hmac_tmpl = {
280 .name = "hmac", 254 .name = "hmac",
281 .alloc = hmac_alloc, 255 .create = hmac_create,
282 .free = hmac_free, 256 .free = shash_free_instance,
283 .module = THIS_MODULE, 257 .module = THIS_MODULE,
284}; 258};
285 259
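
With the conversion above, the "hmac" template now instantiates an shash and precomputes the ipad/opad states at setkey time (export in hmac_setkey, import in hmac_init/hmac_final). A hypothetical caller sketch, not part of the patch, assuming a sha1 provider is available:

/* Hypothetical sketch: requesting the composite by name, as before. */
#include <crypto/hash.h>
#include <linux/err.h>

static struct crypto_shash *demo_alloc_hmac(const u8 *key, unsigned int keylen)
{
	struct crypto_shash *mac;
	int err;

	mac = crypto_alloc_shash("hmac(sha1)", 0, 0);
	if (IS_ERR(mac))
		return mac;

	/* keys longer than the block size are digested down in hmac_setkey() */
	err = crypto_shash_setkey(mac, key, keylen);
	if (err) {
		crypto_free_shash(mac);
		return ERR_PTR(err);
	}

	return mac;	/* use with crypto_shash_digest() as usual */
}
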
diff --git a/crypto/internal.h b/crypto/internal.h
index 113579a82dff..2d226362e594 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -25,12 +25,7 @@
25#include <linux/notifier.h> 25#include <linux/notifier.h>
26#include <linux/rwsem.h> 26#include <linux/rwsem.h>
27#include <linux/slab.h> 27#include <linux/slab.h>
28 28#include <linux/fips.h>
29#ifdef CONFIG_CRYPTO_FIPS
30extern int fips_enabled;
31#else
32#define fips_enabled 0
33#endif
34 29
35/* Crypto notification events. */ 30/* Crypto notification events. */
36enum { 31enum {
@@ -65,18 +60,6 @@ static inline void crypto_exit_proc(void)
65{ } 60{ }
66#endif 61#endif
67 62
68static inline unsigned int crypto_digest_ctxsize(struct crypto_alg *alg)
69{
70 unsigned int len = alg->cra_ctxsize;
71
72 if (alg->cra_alignmask) {
73 len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
74 len += alg->cra_digest.dia_digestsize;
75 }
76
77 return len;
78}
79
80static inline unsigned int crypto_cipher_ctxsize(struct crypto_alg *alg) 63static inline unsigned int crypto_cipher_ctxsize(struct crypto_alg *alg)
81{ 64{
82 return alg->cra_ctxsize; 65 return alg->cra_ctxsize;
@@ -91,12 +74,9 @@ struct crypto_alg *crypto_mod_get(struct crypto_alg *alg);
91struct crypto_alg *crypto_alg_lookup(const char *name, u32 type, u32 mask); 74struct crypto_alg *crypto_alg_lookup(const char *name, u32 type, u32 mask);
92struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask); 75struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask);
93 76
94int crypto_init_digest_ops(struct crypto_tfm *tfm);
95int crypto_init_digest_ops_async(struct crypto_tfm *tfm);
96int crypto_init_cipher_ops(struct crypto_tfm *tfm); 77int crypto_init_cipher_ops(struct crypto_tfm *tfm);
97int crypto_init_compress_ops(struct crypto_tfm *tfm); 78int crypto_init_compress_ops(struct crypto_tfm *tfm);
98 79
99void crypto_exit_digest_ops(struct crypto_tfm *tfm);
100void crypto_exit_cipher_ops(struct crypto_tfm *tfm); 80void crypto_exit_cipher_ops(struct crypto_tfm *tfm);
101void crypto_exit_compress_ops(struct crypto_tfm *tfm); 81void crypto_exit_compress_ops(struct crypto_tfm *tfm);
102 82
@@ -111,12 +91,12 @@ struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
111 u32 mask); 91 u32 mask);
112void *crypto_create_tfm(struct crypto_alg *alg, 92void *crypto_create_tfm(struct crypto_alg *alg,
113 const struct crypto_type *frontend); 93 const struct crypto_type *frontend);
94struct crypto_alg *crypto_find_alg(const char *alg_name,
95 const struct crypto_type *frontend,
96 u32 type, u32 mask);
114void *crypto_alloc_tfm(const char *alg_name, 97void *crypto_alloc_tfm(const char *alg_name,
115 const struct crypto_type *frontend, u32 type, u32 mask); 98 const struct crypto_type *frontend, u32 type, u32 mask);
116 99
117int crypto_register_instance(struct crypto_template *tmpl,
118 struct crypto_instance *inst);
119
120int crypto_register_notifier(struct notifier_block *nb); 100int crypto_register_notifier(struct notifier_block *nb);
121int crypto_unregister_notifier(struct notifier_block *nb); 101int crypto_unregister_notifier(struct notifier_block *nb);
122int crypto_probing_notify(unsigned long val, void *v); 102int crypto_probing_notify(unsigned long val, void *v);
diff --git a/crypto/pcompress.c b/crypto/pcompress.c
index bcadc03726b7..f7c4a7d7412e 100644
--- a/crypto/pcompress.c
+++ b/crypto/pcompress.c
@@ -36,14 +36,12 @@ static int crypto_pcomp_init(struct crypto_tfm *tfm, u32 type, u32 mask)
36 return 0; 36 return 0;
37} 37}
38 38
39static unsigned int crypto_pcomp_extsize(struct crypto_alg *alg, 39static unsigned int crypto_pcomp_extsize(struct crypto_alg *alg)
40 const struct crypto_type *frontend)
41{ 40{
42 return alg->cra_ctxsize; 41 return alg->cra_ctxsize;
43} 42}
44 43
45static int crypto_pcomp_init_tfm(struct crypto_tfm *tfm, 44static int crypto_pcomp_init_tfm(struct crypto_tfm *tfm)
46 const struct crypto_type *frontend)
47{ 45{
48 return 0; 46 return 0;
49} 47}
diff --git a/crypto/rng.c b/crypto/rng.c
index 6e94bc735578..ba05e7380e76 100644
--- a/crypto/rng.c
+++ b/crypto/rng.c
@@ -123,4 +123,4 @@ void crypto_put_default_rng(void)
123EXPORT_SYMBOL_GPL(crypto_put_default_rng); 123EXPORT_SYMBOL_GPL(crypto_put_default_rng);
124 124
125MODULE_LICENSE("GPL"); 125MODULE_LICENSE("GPL");
126MODULE_DESCRIPTION("Random Number Genertor"); 126MODULE_DESCRIPTION("Random Number Generator");
diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c
index 9efef20454cb..0416091bf45a 100644
--- a/crypto/sha1_generic.c
+++ b/crypto/sha1_generic.c
@@ -25,31 +25,21 @@
25#include <crypto/sha.h> 25#include <crypto/sha.h>
26#include <asm/byteorder.h> 26#include <asm/byteorder.h>
27 27
28struct sha1_ctx {
29 u64 count;
30 u32 state[5];
31 u8 buffer[64];
32};
33
34static int sha1_init(struct shash_desc *desc) 28static int sha1_init(struct shash_desc *desc)
35{ 29{
36 struct sha1_ctx *sctx = shash_desc_ctx(desc); 30 struct sha1_state *sctx = shash_desc_ctx(desc);
37 31
38 static const struct sha1_ctx initstate = { 32 *sctx = (struct sha1_state){
39 0, 33 .state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
40 { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
41 { 0, }
42 }; 34 };
43 35
44 *sctx = initstate;
45
46 return 0; 36 return 0;
47} 37}
48 38
49static int sha1_update(struct shash_desc *desc, const u8 *data, 39static int sha1_update(struct shash_desc *desc, const u8 *data,
50 unsigned int len) 40 unsigned int len)
51{ 41{
52 struct sha1_ctx *sctx = shash_desc_ctx(desc); 42 struct sha1_state *sctx = shash_desc_ctx(desc);
53 unsigned int partial, done; 43 unsigned int partial, done;
54 const u8 *src; 44 const u8 *src;
55 45
@@ -85,7 +75,7 @@ static int sha1_update(struct shash_desc *desc, const u8 *data,
85/* Add padding and return the message digest. */ 75/* Add padding and return the message digest. */
86static int sha1_final(struct shash_desc *desc, u8 *out) 76static int sha1_final(struct shash_desc *desc, u8 *out)
87{ 77{
88 struct sha1_ctx *sctx = shash_desc_ctx(desc); 78 struct sha1_state *sctx = shash_desc_ctx(desc);
89 __be32 *dst = (__be32 *)out; 79 __be32 *dst = (__be32 *)out;
90 u32 i, index, padlen; 80 u32 i, index, padlen;
91 __be64 bits; 81 __be64 bits;
@@ -111,12 +101,31 @@ static int sha1_final(struct shash_desc *desc, u8 *out)
111 return 0; 101 return 0;
112} 102}
113 103
104static int sha1_export(struct shash_desc *desc, void *out)
105{
106 struct sha1_state *sctx = shash_desc_ctx(desc);
107
108 memcpy(out, sctx, sizeof(*sctx));
109 return 0;
110}
111
112static int sha1_import(struct shash_desc *desc, const void *in)
113{
114 struct sha1_state *sctx = shash_desc_ctx(desc);
115
116 memcpy(sctx, in, sizeof(*sctx));
117 return 0;
118}
119
114static struct shash_alg alg = { 120static struct shash_alg alg = {
115 .digestsize = SHA1_DIGEST_SIZE, 121 .digestsize = SHA1_DIGEST_SIZE,
116 .init = sha1_init, 122 .init = sha1_init,
117 .update = sha1_update, 123 .update = sha1_update,
118 .final = sha1_final, 124 .final = sha1_final,
119 .descsize = sizeof(struct sha1_ctx), 125 .export = sha1_export,
126 .import = sha1_import,
127 .descsize = sizeof(struct sha1_state),
128 .statesize = sizeof(struct sha1_state),
120 .base = { 129 .base = {
121 .cra_name = "sha1", 130 .cra_name = "sha1",
122 .cra_driver_name= "sha1-generic", 131 .cra_driver_name= "sha1-generic",
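
The sha1-generic hunk above adds export/import of the full struct sha1_state, so a partially hashed message can be checkpointed and resumed. A hypothetical round-trip sketch (helper name and split point are ours, not from the patch):

/* Hypothetical sketch: export a partial sha1 state and resume it. */
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <linux/slab.h>

static int sha1_split_digest(struct crypto_shash *tfm,
			     const u8 *msg, unsigned int len, u8 *out)
{
	struct sha1_state partial;		/* statesize == sizeof(partial) */
	struct shash_desc *desc;
	unsigned int half = len / 2;
	int err;

	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;
	desc->tfm = tfm;
	desc->flags = 0;

	err = crypto_shash_init(desc) ?:
	      crypto_shash_update(desc, msg, half) ?:
	      crypto_shash_export(desc, &partial) ?:	/* save state  */
	      crypto_shash_import(desc, &partial) ?:	/* resume it   */
	      crypto_shash_finup(desc, msg + half, len - half, out);

	kfree(desc);
	return err;
}
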
diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c
index 6349d8339d37..c48459ebf05b 100644
--- a/crypto/sha256_generic.c
+++ b/crypto/sha256_generic.c
@@ -25,12 +25,6 @@
25#include <crypto/sha.h> 25#include <crypto/sha.h>
26#include <asm/byteorder.h> 26#include <asm/byteorder.h>
27 27
28struct sha256_ctx {
29 u32 count[2];
30 u32 state[8];
31 u8 buf[128];
32};
33
34static inline u32 Ch(u32 x, u32 y, u32 z) 28static inline u32 Ch(u32 x, u32 y, u32 z)
35{ 29{
36 return z ^ (x & (y ^ z)); 30 return z ^ (x & (y ^ z));
@@ -222,7 +216,7 @@ static void sha256_transform(u32 *state, const u8 *input)
222 216
223static int sha224_init(struct shash_desc *desc) 217static int sha224_init(struct shash_desc *desc)
224{ 218{
225 struct sha256_ctx *sctx = shash_desc_ctx(desc); 219 struct sha256_state *sctx = shash_desc_ctx(desc);
226 sctx->state[0] = SHA224_H0; 220 sctx->state[0] = SHA224_H0;
227 sctx->state[1] = SHA224_H1; 221 sctx->state[1] = SHA224_H1;
228 sctx->state[2] = SHA224_H2; 222 sctx->state[2] = SHA224_H2;
@@ -231,15 +225,14 @@ static int sha224_init(struct shash_desc *desc)
231 sctx->state[5] = SHA224_H5; 225 sctx->state[5] = SHA224_H5;
232 sctx->state[6] = SHA224_H6; 226 sctx->state[6] = SHA224_H6;
233 sctx->state[7] = SHA224_H7; 227 sctx->state[7] = SHA224_H7;
234 sctx->count[0] = 0; 228 sctx->count = 0;
235 sctx->count[1] = 0;
236 229
237 return 0; 230 return 0;
238} 231}
239 232
240static int sha256_init(struct shash_desc *desc) 233static int sha256_init(struct shash_desc *desc)
241{ 234{
242 struct sha256_ctx *sctx = shash_desc_ctx(desc); 235 struct sha256_state *sctx = shash_desc_ctx(desc);
243 sctx->state[0] = SHA256_H0; 236 sctx->state[0] = SHA256_H0;
244 sctx->state[1] = SHA256_H1; 237 sctx->state[1] = SHA256_H1;
245 sctx->state[2] = SHA256_H2; 238 sctx->state[2] = SHA256_H2;
@@ -248,7 +241,7 @@ static int sha256_init(struct shash_desc *desc)
248 sctx->state[5] = SHA256_H5; 241 sctx->state[5] = SHA256_H5;
249 sctx->state[6] = SHA256_H6; 242 sctx->state[6] = SHA256_H6;
250 sctx->state[7] = SHA256_H7; 243 sctx->state[7] = SHA256_H7;
251 sctx->count[0] = sctx->count[1] = 0; 244 sctx->count = 0;
252 245
253 return 0; 246 return 0;
254} 247}
@@ -256,58 +249,54 @@ static int sha256_init(struct shash_desc *desc)
256static int sha256_update(struct shash_desc *desc, const u8 *data, 249static int sha256_update(struct shash_desc *desc, const u8 *data,
257 unsigned int len) 250 unsigned int len)
258{ 251{
259 struct sha256_ctx *sctx = shash_desc_ctx(desc); 252 struct sha256_state *sctx = shash_desc_ctx(desc);
260 unsigned int i, index, part_len; 253 unsigned int partial, done;
261 254 const u8 *src;
262 /* Compute number of bytes mod 128 */ 255
263 index = (unsigned int)((sctx->count[0] >> 3) & 0x3f); 256 partial = sctx->count & 0x3f;
264 257 sctx->count += len;
265 /* Update number of bits */ 258 done = 0;
266 if ((sctx->count[0] += (len << 3)) < (len << 3)) { 259 src = data;
267 sctx->count[1]++; 260
268 sctx->count[1] += (len >> 29); 261 if ((partial + len) > 63) {
269 } 262 if (partial) {
270 263 done = -partial;
271 part_len = 64 - index; 264 memcpy(sctx->buf + partial, data, done + 64);
272 265 src = sctx->buf;
273 /* Transform as many times as possible. */ 266 }
274 if (len >= part_len) { 267
275 memcpy(&sctx->buf[index], data, part_len); 268 do {
276 sha256_transform(sctx->state, sctx->buf); 269 sha256_transform(sctx->state, src);
277 270 done += 64;
278 for (i = part_len; i + 63 < len; i += 64) 271 src = data + done;
279 sha256_transform(sctx->state, &data[i]); 272 } while (done + 63 < len);
280 index = 0; 273
281 } else { 274 partial = 0;
282 i = 0;
283 } 275 }
284 276 memcpy(sctx->buf + partial, src, len - done);
285 /* Buffer remaining input */
286 memcpy(&sctx->buf[index], &data[i], len-i);
287 277
288 return 0; 278 return 0;
289} 279}
290 280
291static int sha256_final(struct shash_desc *desc, u8 *out) 281static int sha256_final(struct shash_desc *desc, u8 *out)
292{ 282{
293 struct sha256_ctx *sctx = shash_desc_ctx(desc); 283 struct sha256_state *sctx = shash_desc_ctx(desc);
294 __be32 *dst = (__be32 *)out; 284 __be32 *dst = (__be32 *)out;
295 __be32 bits[2]; 285 __be64 bits;
296 unsigned int index, pad_len; 286 unsigned int index, pad_len;
297 int i; 287 int i;
298 static const u8 padding[64] = { 0x80, }; 288 static const u8 padding[64] = { 0x80, };
299 289
300 /* Save number of bits */ 290 /* Save number of bits */
301 bits[1] = cpu_to_be32(sctx->count[0]); 291 bits = cpu_to_be64(sctx->count << 3);
302 bits[0] = cpu_to_be32(sctx->count[1]);
303 292
304 /* Pad out to 56 mod 64. */ 293 /* Pad out to 56 mod 64. */
305 index = (sctx->count[0] >> 3) & 0x3f; 294 index = sctx->count & 0x3f;
306 pad_len = (index < 56) ? (56 - index) : ((64+56) - index); 295 pad_len = (index < 56) ? (56 - index) : ((64+56) - index);
307 sha256_update(desc, padding, pad_len); 296 sha256_update(desc, padding, pad_len);
308 297
309 /* Append length (before padding) */ 298 /* Append length (before padding) */
310 sha256_update(desc, (const u8 *)bits, sizeof(bits)); 299 sha256_update(desc, (const u8 *)&bits, sizeof(bits));
311 300
312 /* Store state in digest */ 301 /* Store state in digest */
313 for (i = 0; i < 8; i++) 302 for (i = 0; i < 8; i++)
@@ -331,12 +320,31 @@ static int sha224_final(struct shash_desc *desc, u8 *hash)
331 return 0; 320 return 0;
332} 321}
333 322
323static int sha256_export(struct shash_desc *desc, void *out)
324{
325 struct sha256_state *sctx = shash_desc_ctx(desc);
326
327 memcpy(out, sctx, sizeof(*sctx));
328 return 0;
329}
330
331static int sha256_import(struct shash_desc *desc, const void *in)
332{
333 struct sha256_state *sctx = shash_desc_ctx(desc);
334
335 memcpy(sctx, in, sizeof(*sctx));
336 return 0;
337}
338
334static struct shash_alg sha256 = { 339static struct shash_alg sha256 = {
335 .digestsize = SHA256_DIGEST_SIZE, 340 .digestsize = SHA256_DIGEST_SIZE,
336 .init = sha256_init, 341 .init = sha256_init,
337 .update = sha256_update, 342 .update = sha256_update,
338 .final = sha256_final, 343 .final = sha256_final,
339 .descsize = sizeof(struct sha256_ctx), 344 .export = sha256_export,
345 .import = sha256_import,
346 .descsize = sizeof(struct sha256_state),
347 .statesize = sizeof(struct sha256_state),
340 .base = { 348 .base = {
341 .cra_name = "sha256", 349 .cra_name = "sha256",
342 .cra_driver_name= "sha256-generic", 350 .cra_driver_name= "sha256-generic",
@@ -351,7 +359,7 @@ static struct shash_alg sha224 = {
351 .init = sha224_init, 359 .init = sha224_init,
352 .update = sha256_update, 360 .update = sha256_update,
353 .final = sha224_final, 361 .final = sha224_final,
354 .descsize = sizeof(struct sha256_ctx), 362 .descsize = sizeof(struct sha256_state),
355 .base = { 363 .base = {
356 .cra_name = "sha224", 364 .cra_name = "sha224",
357 .cra_driver_name= "sha224-generic", 365 .cra_driver_name= "sha224-generic",
diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c
index 3bea38d12242..9ed9f60316e5 100644
--- a/crypto/sha512_generic.c
+++ b/crypto/sha512_generic.c
@@ -21,12 +21,6 @@
21#include <linux/percpu.h> 21#include <linux/percpu.h>
22#include <asm/byteorder.h> 22#include <asm/byteorder.h>
23 23
24struct sha512_ctx {
25 u64 state[8];
26 u32 count[4];
27 u8 buf[128];
28};
29
30static DEFINE_PER_CPU(u64[80], msg_schedule); 24static DEFINE_PER_CPU(u64[80], msg_schedule);
31 25
32static inline u64 Ch(u64 x, u64 y, u64 z) 26static inline u64 Ch(u64 x, u64 y, u64 z)
@@ -141,7 +135,7 @@ sha512_transform(u64 *state, const u8 *input)
141static int 135static int
142sha512_init(struct shash_desc *desc) 136sha512_init(struct shash_desc *desc)
143{ 137{
144 struct sha512_ctx *sctx = shash_desc_ctx(desc); 138 struct sha512_state *sctx = shash_desc_ctx(desc);
145 sctx->state[0] = SHA512_H0; 139 sctx->state[0] = SHA512_H0;
146 sctx->state[1] = SHA512_H1; 140 sctx->state[1] = SHA512_H1;
147 sctx->state[2] = SHA512_H2; 141 sctx->state[2] = SHA512_H2;
@@ -150,7 +144,7 @@ sha512_init(struct shash_desc *desc)
150 sctx->state[5] = SHA512_H5; 144 sctx->state[5] = SHA512_H5;
151 sctx->state[6] = SHA512_H6; 145 sctx->state[6] = SHA512_H6;
152 sctx->state[7] = SHA512_H7; 146 sctx->state[7] = SHA512_H7;
153 sctx->count[0] = sctx->count[1] = sctx->count[2] = sctx->count[3] = 0; 147 sctx->count[0] = sctx->count[1] = 0;
154 148
155 return 0; 149 return 0;
156} 150}
@@ -158,7 +152,7 @@ sha512_init(struct shash_desc *desc)
158static int 152static int
159sha384_init(struct shash_desc *desc) 153sha384_init(struct shash_desc *desc)
160{ 154{
161 struct sha512_ctx *sctx = shash_desc_ctx(desc); 155 struct sha512_state *sctx = shash_desc_ctx(desc);
162 sctx->state[0] = SHA384_H0; 156 sctx->state[0] = SHA384_H0;
163 sctx->state[1] = SHA384_H1; 157 sctx->state[1] = SHA384_H1;
164 sctx->state[2] = SHA384_H2; 158 sctx->state[2] = SHA384_H2;
@@ -167,7 +161,7 @@ sha384_init(struct shash_desc *desc)
167 sctx->state[5] = SHA384_H5; 161 sctx->state[5] = SHA384_H5;
168 sctx->state[6] = SHA384_H6; 162 sctx->state[6] = SHA384_H6;
169 sctx->state[7] = SHA384_H7; 163 sctx->state[7] = SHA384_H7;
170 sctx->count[0] = sctx->count[1] = sctx->count[2] = sctx->count[3] = 0; 164 sctx->count[0] = sctx->count[1] = 0;
171 165
172 return 0; 166 return 0;
173} 167}
@@ -175,20 +169,16 @@ sha384_init(struct shash_desc *desc)
175static int 169static int
176sha512_update(struct shash_desc *desc, const u8 *data, unsigned int len) 170sha512_update(struct shash_desc *desc, const u8 *data, unsigned int len)
177{ 171{
178 struct sha512_ctx *sctx = shash_desc_ctx(desc); 172 struct sha512_state *sctx = shash_desc_ctx(desc);
179 173
180 unsigned int i, index, part_len; 174 unsigned int i, index, part_len;
181 175
182 /* Compute number of bytes mod 128 */ 176 /* Compute number of bytes mod 128 */
183 index = (unsigned int)((sctx->count[0] >> 3) & 0x7F); 177 index = sctx->count[0] & 0x7f;
184 178
185 /* Update number of bits */ 179 /* Update number of bytes */
186 if ((sctx->count[0] += (len << 3)) < (len << 3)) { 180 if (!(sctx->count[0] += len))
187 if ((sctx->count[1] += 1) < 1) 181 sctx->count[1]++;
188 if ((sctx->count[2] += 1) < 1)
189 sctx->count[3]++;
190 sctx->count[1] += (len >> 29);
191 }
192 182
193 part_len = 128 - index; 183 part_len = 128 - index;
194 184
@@ -214,21 +204,19 @@ sha512_update(struct shash_desc *desc, const u8 *data, unsigned int len)
214static int 204static int
215sha512_final(struct shash_desc *desc, u8 *hash) 205sha512_final(struct shash_desc *desc, u8 *hash)
216{ 206{
217 struct sha512_ctx *sctx = shash_desc_ctx(desc); 207 struct sha512_state *sctx = shash_desc_ctx(desc);
218 static u8 padding[128] = { 0x80, }; 208 static u8 padding[128] = { 0x80, };
219 __be64 *dst = (__be64 *)hash; 209 __be64 *dst = (__be64 *)hash;
220 __be32 bits[4]; 210 __be64 bits[2];
221 unsigned int index, pad_len; 211 unsigned int index, pad_len;
222 int i; 212 int i;
223 213
224 /* Save number of bits */ 214 /* Save number of bits */
225 bits[3] = cpu_to_be32(sctx->count[0]); 215 bits[1] = cpu_to_be64(sctx->count[0] << 3);
226 bits[2] = cpu_to_be32(sctx->count[1]); 216 bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61);
227 bits[1] = cpu_to_be32(sctx->count[2]);
228 bits[0] = cpu_to_be32(sctx->count[3]);
229 217
230 /* Pad out to 112 mod 128. */ 218 /* Pad out to 112 mod 128. */
231 index = (sctx->count[0] >> 3) & 0x7f; 219 index = sctx->count[0] & 0x7f;
232 pad_len = (index < 112) ? (112 - index) : ((128+112) - index); 220 pad_len = (index < 112) ? (112 - index) : ((128+112) - index);
233 sha512_update(desc, padding, pad_len); 221 sha512_update(desc, padding, pad_len);
234 222
@@ -240,7 +228,7 @@ sha512_final(struct shash_desc *desc, u8 *hash)
240 dst[i] = cpu_to_be64(sctx->state[i]); 228 dst[i] = cpu_to_be64(sctx->state[i]);
241 229
242 /* Zeroize sensitive information. */ 230 /* Zeroize sensitive information. */
243 memset(sctx, 0, sizeof(struct sha512_ctx)); 231 memset(sctx, 0, sizeof(struct sha512_state));
244 232
245 return 0; 233 return 0;
246} 234}
@@ -262,7 +250,7 @@ static struct shash_alg sha512 = {
262 .init = sha512_init, 250 .init = sha512_init,
263 .update = sha512_update, 251 .update = sha512_update,
264 .final = sha512_final, 252 .final = sha512_final,
265 .descsize = sizeof(struct sha512_ctx), 253 .descsize = sizeof(struct sha512_state),
266 .base = { 254 .base = {
267 .cra_name = "sha512", 255 .cra_name = "sha512",
268 .cra_flags = CRYPTO_ALG_TYPE_SHASH, 256 .cra_flags = CRYPTO_ALG_TYPE_SHASH,
@@ -276,7 +264,7 @@ static struct shash_alg sha384 = {
276 .init = sha384_init, 264 .init = sha384_init,
277 .update = sha512_update, 265 .update = sha512_update,
278 .final = sha384_final, 266 .final = sha384_final,
279 .descsize = sizeof(struct sha512_ctx), 267 .descsize = sizeof(struct sha512_state),
280 .base = { 268 .base = {
281 .cra_name = "sha384", 269 .cra_name = "sha384",
282 .cra_flags = CRYPTO_ALG_TYPE_SHASH, 270 .cra_flags = CRYPTO_ALG_TYPE_SHASH,
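
The sha512 conversion above replaces the four 32-bit bit counters with two 64-bit byte counters; sha512_final() then rebuilds the 128-bit bit length on the fly. An editorial helper (not in the patch) spelling out that arithmetic: count[] is the byte count as a 128-bit pair, and shifting left by 3 converts it to bits, carrying the top three bits of count[0] into the high word.

#include <linux/types.h>
#include <asm/byteorder.h>

static void sha512_len_to_be_bits(const u64 count[2], __be64 bits[2])
{
	bits[1] = cpu_to_be64(count[0] << 3);			/* low 64 bits  */
	bits[0] = cpu_to_be64(count[1] << 3 | count[0] >> 61);	/* high 64 bits */
}
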
diff --git a/crypto/shash.c b/crypto/shash.c
index 2ccc8b0076ce..91f7b9d83881 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -22,6 +22,12 @@
22 22
23static const struct crypto_type crypto_shash_type; 23static const struct crypto_type crypto_shash_type;
24 24
25static int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
26 unsigned int keylen)
27{
28 return -ENOSYS;
29}
30
25static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key, 31static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
26 unsigned int keylen) 32 unsigned int keylen)
27{ 33{
@@ -39,8 +45,7 @@ static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
39 alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); 45 alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
40 memcpy(alignbuffer, key, keylen); 46 memcpy(alignbuffer, key, keylen);
41 err = shash->setkey(tfm, alignbuffer, keylen); 47 err = shash->setkey(tfm, alignbuffer, keylen);
42 memset(alignbuffer, 0, keylen); 48 kzfree(buffer);
43 kfree(buffer);
44 return err; 49 return err;
45} 50}
46 51
@@ -50,9 +55,6 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
50 struct shash_alg *shash = crypto_shash_alg(tfm); 55 struct shash_alg *shash = crypto_shash_alg(tfm);
51 unsigned long alignmask = crypto_shash_alignmask(tfm); 56 unsigned long alignmask = crypto_shash_alignmask(tfm);
52 57
53 if (!shash->setkey)
54 return -ENOSYS;
55
56 if ((unsigned long)key & alignmask) 58 if ((unsigned long)key & alignmask)
57 return shash_setkey_unaligned(tfm, key, keylen); 59 return shash_setkey_unaligned(tfm, key, keylen);
58 60
@@ -74,15 +76,19 @@ static int shash_update_unaligned(struct shash_desc *desc, const u8 *data,
74 unsigned long alignmask = crypto_shash_alignmask(tfm); 76 unsigned long alignmask = crypto_shash_alignmask(tfm);
75 unsigned int unaligned_len = alignmask + 1 - 77 unsigned int unaligned_len = alignmask + 1 -
76 ((unsigned long)data & alignmask); 78 ((unsigned long)data & alignmask);
77 u8 buf[shash_align_buffer_size(unaligned_len, alignmask)] 79 u8 ubuf[shash_align_buffer_size(unaligned_len, alignmask)]
78 __attribute__ ((aligned)); 80 __attribute__ ((aligned));
81 u8 *buf = PTR_ALIGN(&ubuf[0], alignmask + 1);
82 int err;
79 83
80 if (unaligned_len > len) 84 if (unaligned_len > len)
81 unaligned_len = len; 85 unaligned_len = len;
82 86
83 memcpy(buf, data, unaligned_len); 87 memcpy(buf, data, unaligned_len);
88 err = shash->update(desc, buf, unaligned_len);
89 memset(buf, 0, unaligned_len);
84 90
85 return shash->update(desc, buf, unaligned_len) ?: 91 return err ?:
86 shash->update(desc, data + unaligned_len, len - unaligned_len); 92 shash->update(desc, data + unaligned_len, len - unaligned_len);
87} 93}
88 94
@@ -106,12 +112,19 @@ static int shash_final_unaligned(struct shash_desc *desc, u8 *out)
106 unsigned long alignmask = crypto_shash_alignmask(tfm); 112 unsigned long alignmask = crypto_shash_alignmask(tfm);
107 struct shash_alg *shash = crypto_shash_alg(tfm); 113 struct shash_alg *shash = crypto_shash_alg(tfm);
108 unsigned int ds = crypto_shash_digestsize(tfm); 114 unsigned int ds = crypto_shash_digestsize(tfm);
109 u8 buf[shash_align_buffer_size(ds, alignmask)] 115 u8 ubuf[shash_align_buffer_size(ds, alignmask)]
110 __attribute__ ((aligned)); 116 __attribute__ ((aligned));
117 u8 *buf = PTR_ALIGN(&ubuf[0], alignmask + 1);
111 int err; 118 int err;
112 119
113 err = shash->final(desc, buf); 120 err = shash->final(desc, buf);
121 if (err)
122 goto out;
123
114 memcpy(out, buf, ds); 124 memcpy(out, buf, ds);
125
126out:
127 memset(buf, 0, ds);
115 return err; 128 return err;
116} 129}
117 130
@@ -142,8 +155,7 @@ int crypto_shash_finup(struct shash_desc *desc, const u8 *data,
142 struct shash_alg *shash = crypto_shash_alg(tfm); 155 struct shash_alg *shash = crypto_shash_alg(tfm);
143 unsigned long alignmask = crypto_shash_alignmask(tfm); 156 unsigned long alignmask = crypto_shash_alignmask(tfm);
144 157
145 if (((unsigned long)data | (unsigned long)out) & alignmask || 158 if (((unsigned long)data | (unsigned long)out) & alignmask)
146 !shash->finup)
147 return shash_finup_unaligned(desc, data, len, out); 159 return shash_finup_unaligned(desc, data, len, out);
148 160
149 return shash->finup(desc, data, len, out); 161 return shash->finup(desc, data, len, out);
@@ -154,8 +166,7 @@ static int shash_digest_unaligned(struct shash_desc *desc, const u8 *data,
154 unsigned int len, u8 *out) 166 unsigned int len, u8 *out)
155{ 167{
156 return crypto_shash_init(desc) ?: 168 return crypto_shash_init(desc) ?:
157 crypto_shash_update(desc, data, len) ?: 169 crypto_shash_finup(desc, data, len, out);
158 crypto_shash_final(desc, out);
159} 170}
160 171
161int crypto_shash_digest(struct shash_desc *desc, const u8 *data, 172int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
@@ -165,27 +176,24 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
165 struct shash_alg *shash = crypto_shash_alg(tfm); 176 struct shash_alg *shash = crypto_shash_alg(tfm);
166 unsigned long alignmask = crypto_shash_alignmask(tfm); 177 unsigned long alignmask = crypto_shash_alignmask(tfm);
167 178
168 if (((unsigned long)data | (unsigned long)out) & alignmask || 179 if (((unsigned long)data | (unsigned long)out) & alignmask)
169 !shash->digest)
170 return shash_digest_unaligned(desc, data, len, out); 180 return shash_digest_unaligned(desc, data, len, out);
171 181
172 return shash->digest(desc, data, len, out); 182 return shash->digest(desc, data, len, out);
173} 183}
174EXPORT_SYMBOL_GPL(crypto_shash_digest); 184EXPORT_SYMBOL_GPL(crypto_shash_digest);
175 185
176int crypto_shash_import(struct shash_desc *desc, const u8 *in) 186static int shash_default_export(struct shash_desc *desc, void *out)
177{ 187{
178 struct crypto_shash *tfm = desc->tfm; 188 memcpy(out, shash_desc_ctx(desc), crypto_shash_descsize(desc->tfm));
179 struct shash_alg *alg = crypto_shash_alg(tfm); 189 return 0;
180 190}
181 memcpy(shash_desc_ctx(desc), in, crypto_shash_descsize(tfm));
182
183 if (alg->reinit)
184 alg->reinit(desc);
185 191
192static int shash_default_import(struct shash_desc *desc, const void *in)
193{
194 memcpy(shash_desc_ctx(desc), in, crypto_shash_descsize(desc->tfm));
186 return 0; 195 return 0;
187} 196}
188EXPORT_SYMBOL_GPL(crypto_shash_import);
189 197
190static int shash_async_setkey(struct crypto_ahash *tfm, const u8 *key, 198static int shash_async_setkey(struct crypto_ahash *tfm, const u8 *key,
191 unsigned int keylen) 199 unsigned int keylen)
@@ -206,9 +214,8 @@ static int shash_async_init(struct ahash_request *req)
206 return crypto_shash_init(desc); 214 return crypto_shash_init(desc);
207} 215}
208 216
209static int shash_async_update(struct ahash_request *req) 217int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc)
210{ 218{
211 struct shash_desc *desc = ahash_request_ctx(req);
212 struct crypto_hash_walk walk; 219 struct crypto_hash_walk walk;
213 int nbytes; 220 int nbytes;
214 221
@@ -218,13 +225,51 @@ static int shash_async_update(struct ahash_request *req)
218 225
219 return nbytes; 226 return nbytes;
220} 227}
228EXPORT_SYMBOL_GPL(shash_ahash_update);
229
230static int shash_async_update(struct ahash_request *req)
231{
232 return shash_ahash_update(req, ahash_request_ctx(req));
233}
221 234
222static int shash_async_final(struct ahash_request *req) 235static int shash_async_final(struct ahash_request *req)
223{ 236{
224 return crypto_shash_final(ahash_request_ctx(req), req->result); 237 return crypto_shash_final(ahash_request_ctx(req), req->result);
225} 238}
226 239
227static int shash_async_digest(struct ahash_request *req) 240int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc)
241{
242 struct crypto_hash_walk walk;
243 int nbytes;
244
245 nbytes = crypto_hash_walk_first(req, &walk);
246 if (!nbytes)
247 return crypto_shash_final(desc, req->result);
248
249 do {
250 nbytes = crypto_hash_walk_last(&walk) ?
251 crypto_shash_finup(desc, walk.data, nbytes,
252 req->result) :
253 crypto_shash_update(desc, walk.data, nbytes);
254 nbytes = crypto_hash_walk_done(&walk, nbytes);
255 } while (nbytes > 0);
256
257 return nbytes;
258}
259EXPORT_SYMBOL_GPL(shash_ahash_finup);
260
261static int shash_async_finup(struct ahash_request *req)
262{
263 struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
264 struct shash_desc *desc = ahash_request_ctx(req);
265
266 desc->tfm = *ctx;
267 desc->flags = req->base.flags;
268
269 return shash_ahash_finup(req, desc);
270}
271
272int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc)
228{ 273{
229 struct scatterlist *sg = req->src; 274 struct scatterlist *sg = req->src;
230 unsigned int offset = sg->offset; 275 unsigned int offset = sg->offset;
@@ -232,34 +277,40 @@ static int shash_async_digest(struct ahash_request *req)
232 int err; 277 int err;
233 278
234 if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) { 279 if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) {
235 struct crypto_shash **ctx =
236 crypto_ahash_ctx(crypto_ahash_reqtfm(req));
237 struct shash_desc *desc = ahash_request_ctx(req);
238 void *data; 280 void *data;
239 281
240 desc->tfm = *ctx;
241 desc->flags = req->base.flags;
242
243 data = crypto_kmap(sg_page(sg), 0); 282 data = crypto_kmap(sg_page(sg), 0);
244 err = crypto_shash_digest(desc, data + offset, nbytes, 283 err = crypto_shash_digest(desc, data + offset, nbytes,
245 req->result); 284 req->result);
246 crypto_kunmap(data, 0); 285 crypto_kunmap(data, 0);
247 crypto_yield(desc->flags); 286 crypto_yield(desc->flags);
248 goto out; 287 } else
249 } 288 err = crypto_shash_init(desc) ?:
289 shash_ahash_finup(req, desc);
250 290
251 err = shash_async_init(req); 291 return err;
252 if (err) 292}
253 goto out; 293EXPORT_SYMBOL_GPL(shash_ahash_digest);
254 294
255 err = shash_async_update(req); 295static int shash_async_digest(struct ahash_request *req)
256 if (err) 296{
257 goto out; 297 struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
298 struct shash_desc *desc = ahash_request_ctx(req);
258 299
259 err = shash_async_final(req); 300 desc->tfm = *ctx;
301 desc->flags = req->base.flags;
260 302
261out: 303 return shash_ahash_digest(req, desc);
262 return err; 304}
305
306static int shash_async_export(struct ahash_request *req, void *out)
307{
308 return crypto_shash_export(ahash_request_ctx(req), out);
309}
310
311static int shash_async_import(struct ahash_request *req, const void *in)
312{
313 return crypto_shash_import(ahash_request_ctx(req), in);
263} 314}
264 315
265static void crypto_exit_shash_ops_async(struct crypto_tfm *tfm) 316static void crypto_exit_shash_ops_async(struct crypto_tfm *tfm)
@@ -269,11 +320,11 @@ static void crypto_exit_shash_ops_async(struct crypto_tfm *tfm)
269 crypto_free_shash(*ctx); 320 crypto_free_shash(*ctx);
270} 321}
271 322
272static int crypto_init_shash_ops_async(struct crypto_tfm *tfm) 323int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
273{ 324{
274 struct crypto_alg *calg = tfm->__crt_alg; 325 struct crypto_alg *calg = tfm->__crt_alg;
275 struct shash_alg *alg = __crypto_shash_alg(calg); 326 struct shash_alg *alg = __crypto_shash_alg(calg);
276 struct ahash_tfm *crt = &tfm->crt_ahash; 327 struct crypto_ahash *crt = __crypto_ahash_cast(tfm);
277 struct crypto_shash **ctx = crypto_tfm_ctx(tfm); 328 struct crypto_shash **ctx = crypto_tfm_ctx(tfm);
278 struct crypto_shash *shash; 329 struct crypto_shash *shash;
279 330
@@ -291,11 +342,17 @@ static int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
291 342
292 crt->init = shash_async_init; 343 crt->init = shash_async_init;
293 crt->update = shash_async_update; 344 crt->update = shash_async_update;
294 crt->final = shash_async_final; 345 crt->final = shash_async_final;
346 crt->finup = shash_async_finup;
295 crt->digest = shash_async_digest; 347 crt->digest = shash_async_digest;
296 crt->setkey = shash_async_setkey;
297 348
298 crt->digestsize = alg->digestsize; 349 if (alg->setkey)
350 crt->setkey = shash_async_setkey;
351 if (alg->export)
352 crt->export = shash_async_export;
353 if (alg->import)
354 crt->import = shash_async_import;
355
299 crt->reqsize = sizeof(struct shash_desc) + crypto_shash_descsize(shash); 356 crt->reqsize = sizeof(struct shash_desc) + crypto_shash_descsize(shash);
300 357
301 return 0; 358 return 0;
@@ -304,14 +361,16 @@ static int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
304static int shash_compat_setkey(struct crypto_hash *tfm, const u8 *key, 361static int shash_compat_setkey(struct crypto_hash *tfm, const u8 *key,
305 unsigned int keylen) 362 unsigned int keylen)
306{ 363{
307 struct shash_desc *desc = crypto_hash_ctx(tfm); 364 struct shash_desc **descp = crypto_hash_ctx(tfm);
365 struct shash_desc *desc = *descp;
308 366
309 return crypto_shash_setkey(desc->tfm, key, keylen); 367 return crypto_shash_setkey(desc->tfm, key, keylen);
310} 368}
311 369
312static int shash_compat_init(struct hash_desc *hdesc) 370static int shash_compat_init(struct hash_desc *hdesc)
313{ 371{
314 struct shash_desc *desc = crypto_hash_ctx(hdesc->tfm); 372 struct shash_desc **descp = crypto_hash_ctx(hdesc->tfm);
373 struct shash_desc *desc = *descp;
315 374
316 desc->flags = hdesc->flags; 375 desc->flags = hdesc->flags;
317 376
@@ -321,7 +380,8 @@ static int shash_compat_init(struct hash_desc *hdesc)
321static int shash_compat_update(struct hash_desc *hdesc, struct scatterlist *sg, 380static int shash_compat_update(struct hash_desc *hdesc, struct scatterlist *sg,
322 unsigned int len) 381 unsigned int len)
323{ 382{
324 struct shash_desc *desc = crypto_hash_ctx(hdesc->tfm); 383 struct shash_desc **descp = crypto_hash_ctx(hdesc->tfm);
384 struct shash_desc *desc = *descp;
325 struct crypto_hash_walk walk; 385 struct crypto_hash_walk walk;
326 int nbytes; 386 int nbytes;
327 387
@@ -334,7 +394,9 @@ static int shash_compat_update(struct hash_desc *hdesc, struct scatterlist *sg,
334 394
335static int shash_compat_final(struct hash_desc *hdesc, u8 *out) 395static int shash_compat_final(struct hash_desc *hdesc, u8 *out)
336{ 396{
337 return crypto_shash_final(crypto_hash_ctx(hdesc->tfm), out); 397 struct shash_desc **descp = crypto_hash_ctx(hdesc->tfm);
398
399 return crypto_shash_final(*descp, out);
338} 400}
339 401
340static int shash_compat_digest(struct hash_desc *hdesc, struct scatterlist *sg, 402static int shash_compat_digest(struct hash_desc *hdesc, struct scatterlist *sg,
@@ -344,7 +406,8 @@ static int shash_compat_digest(struct hash_desc *hdesc, struct scatterlist *sg,
344 int err; 406 int err;
345 407
346 if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) { 408 if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) {
347 struct shash_desc *desc = crypto_hash_ctx(hdesc->tfm); 409 struct shash_desc **descp = crypto_hash_ctx(hdesc->tfm);
410 struct shash_desc *desc = *descp;
348 void *data; 411 void *data;
349 412
350 desc->flags = hdesc->flags; 413 desc->flags = hdesc->flags;
@@ -372,9 +435,11 @@ out:
372 435
373static void crypto_exit_shash_ops_compat(struct crypto_tfm *tfm) 436static void crypto_exit_shash_ops_compat(struct crypto_tfm *tfm)
374{ 437{
375 struct shash_desc *desc= crypto_tfm_ctx(tfm); 438 struct shash_desc **descp = crypto_tfm_ctx(tfm);
439 struct shash_desc *desc = *descp;
376 440
377 crypto_free_shash(desc->tfm); 441 crypto_free_shash(desc->tfm);
442 kzfree(desc);
378} 443}
379 444
380static int crypto_init_shash_ops_compat(struct crypto_tfm *tfm) 445static int crypto_init_shash_ops_compat(struct crypto_tfm *tfm)
@@ -382,8 +447,9 @@ static int crypto_init_shash_ops_compat(struct crypto_tfm *tfm)
382 struct hash_tfm *crt = &tfm->crt_hash; 447 struct hash_tfm *crt = &tfm->crt_hash;
383 struct crypto_alg *calg = tfm->__crt_alg; 448 struct crypto_alg *calg = tfm->__crt_alg;
384 struct shash_alg *alg = __crypto_shash_alg(calg); 449 struct shash_alg *alg = __crypto_shash_alg(calg);
385 struct shash_desc *desc = crypto_tfm_ctx(tfm); 450 struct shash_desc **descp = crypto_tfm_ctx(tfm);
386 struct crypto_shash *shash; 451 struct crypto_shash *shash;
452 struct shash_desc *desc;
387 453
388 if (!crypto_mod_get(calg)) 454 if (!crypto_mod_get(calg))
389 return -EAGAIN; 455 return -EAGAIN;
@@ -394,6 +460,14 @@ static int crypto_init_shash_ops_compat(struct crypto_tfm *tfm)
394 return PTR_ERR(shash); 460 return PTR_ERR(shash);
395 } 461 }
396 462
463 desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(shash),
464 GFP_KERNEL);
465 if (!desc) {
466 crypto_free_shash(shash);
467 return -ENOMEM;
468 }
469
470 *descp = desc;
397 desc->tfm = shash; 471 desc->tfm = shash;
398 tfm->exit = crypto_exit_shash_ops_compat; 472 tfm->exit = crypto_exit_shash_ops_compat;
399 473
@@ -413,8 +487,6 @@ static int crypto_init_shash_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
413 switch (mask & CRYPTO_ALG_TYPE_MASK) { 487 switch (mask & CRYPTO_ALG_TYPE_MASK) {
414 case CRYPTO_ALG_TYPE_HASH_MASK: 488 case CRYPTO_ALG_TYPE_HASH_MASK:
415 return crypto_init_shash_ops_compat(tfm); 489 return crypto_init_shash_ops_compat(tfm);
416 case CRYPTO_ALG_TYPE_AHASH_MASK:
417 return crypto_init_shash_ops_async(tfm);
418 } 490 }
419 491
420 return -EINVAL; 492 return -EINVAL;
@@ -423,26 +495,23 @@ static int crypto_init_shash_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
423static unsigned int crypto_shash_ctxsize(struct crypto_alg *alg, u32 type, 495static unsigned int crypto_shash_ctxsize(struct crypto_alg *alg, u32 type,
424 u32 mask) 496 u32 mask)
425{ 497{
426 struct shash_alg *salg = __crypto_shash_alg(alg);
427
428 switch (mask & CRYPTO_ALG_TYPE_MASK) { 498 switch (mask & CRYPTO_ALG_TYPE_MASK) {
429 case CRYPTO_ALG_TYPE_HASH_MASK: 499 case CRYPTO_ALG_TYPE_HASH_MASK:
430 return sizeof(struct shash_desc) + salg->descsize; 500 return sizeof(struct shash_desc *);
431 case CRYPTO_ALG_TYPE_AHASH_MASK:
432 return sizeof(struct crypto_shash *);
433 } 501 }
434 502
435 return 0; 503 return 0;
436} 504}
437 505
438static int crypto_shash_init_tfm(struct crypto_tfm *tfm, 506static int crypto_shash_init_tfm(struct crypto_tfm *tfm)
439 const struct crypto_type *frontend)
440{ 507{
508 struct crypto_shash *hash = __crypto_shash_cast(tfm);
509
510 hash->descsize = crypto_shash_alg(hash)->descsize;
441 return 0; 511 return 0;
442} 512}
443 513
444static unsigned int crypto_shash_extsize(struct crypto_alg *alg, 514static unsigned int crypto_shash_extsize(struct crypto_alg *alg)
445 const struct crypto_type *frontend)
446{ 515{
447 return alg->cra_ctxsize; 516 return alg->cra_ctxsize;
448} 517}
@@ -456,7 +525,6 @@ static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg)
456 seq_printf(m, "type : shash\n"); 525 seq_printf(m, "type : shash\n");
457 seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); 526 seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
458 seq_printf(m, "digestsize : %u\n", salg->digestsize); 527 seq_printf(m, "digestsize : %u\n", salg->digestsize);
459 seq_printf(m, "descsize : %u\n", salg->descsize);
460} 528}
461 529
462static const struct crypto_type crypto_shash_type = { 530static const struct crypto_type crypto_shash_type = {
@@ -480,18 +548,43 @@ struct crypto_shash *crypto_alloc_shash(const char *alg_name, u32 type,
480} 548}
481EXPORT_SYMBOL_GPL(crypto_alloc_shash); 549EXPORT_SYMBOL_GPL(crypto_alloc_shash);
482 550
483int crypto_register_shash(struct shash_alg *alg) 551static int shash_prepare_alg(struct shash_alg *alg)
484{ 552{
485 struct crypto_alg *base = &alg->base; 553 struct crypto_alg *base = &alg->base;
486 554
487 if (alg->digestsize > PAGE_SIZE / 8 || 555 if (alg->digestsize > PAGE_SIZE / 8 ||
488 alg->descsize > PAGE_SIZE / 8) 556 alg->descsize > PAGE_SIZE / 8 ||
557 alg->statesize > PAGE_SIZE / 8)
489 return -EINVAL; 558 return -EINVAL;
490 559
491 base->cra_type = &crypto_shash_type; 560 base->cra_type = &crypto_shash_type;
492 base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; 561 base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
493 base->cra_flags |= CRYPTO_ALG_TYPE_SHASH; 562 base->cra_flags |= CRYPTO_ALG_TYPE_SHASH;
494 563
564 if (!alg->finup)
565 alg->finup = shash_finup_unaligned;
566 if (!alg->digest)
567 alg->digest = shash_digest_unaligned;
568 if (!alg->export) {
569 alg->export = shash_default_export;
570 alg->import = shash_default_import;
571 alg->statesize = alg->descsize;
572 }
573 if (!alg->setkey)
574 alg->setkey = shash_no_setkey;
575
576 return 0;
577}
578
579int crypto_register_shash(struct shash_alg *alg)
580{
581 struct crypto_alg *base = &alg->base;
582 int err;
583
584 err = shash_prepare_alg(alg);
585 if (err)
586 return err;
587
495 return crypto_register_alg(base); 588 return crypto_register_alg(base);
496} 589}
497EXPORT_SYMBOL_GPL(crypto_register_shash); 590EXPORT_SYMBOL_GPL(crypto_register_shash);
@@ -502,5 +595,44 @@ int crypto_unregister_shash(struct shash_alg *alg)
502} 595}
503EXPORT_SYMBOL_GPL(crypto_unregister_shash); 596EXPORT_SYMBOL_GPL(crypto_unregister_shash);
504 597
598int shash_register_instance(struct crypto_template *tmpl,
599 struct shash_instance *inst)
600{
601 int err;
602
603 err = shash_prepare_alg(&inst->alg);
604 if (err)
605 return err;
606
607 return crypto_register_instance(tmpl, shash_crypto_instance(inst));
608}
609EXPORT_SYMBOL_GPL(shash_register_instance);
610
611void shash_free_instance(struct crypto_instance *inst)
612{
613 crypto_drop_spawn(crypto_instance_ctx(inst));
614 kfree(shash_instance(inst));
615}
616EXPORT_SYMBOL_GPL(shash_free_instance);
617
618int crypto_init_shash_spawn(struct crypto_shash_spawn *spawn,
619 struct shash_alg *alg,
620 struct crypto_instance *inst)
621{
622 return crypto_init_spawn2(&spawn->base, &alg->base, inst,
623 &crypto_shash_type);
624}
625EXPORT_SYMBOL_GPL(crypto_init_shash_spawn);
626
627struct shash_alg *shash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
628{
629 struct crypto_alg *alg;
630
631 alg = crypto_attr_alg2(rta, &crypto_shash_type, type, mask);
632 return IS_ERR(alg) ? ERR_CAST(alg) :
633 container_of(alg, struct shash_alg, base);
634}
635EXPORT_SYMBOL_GPL(shash_attr_alg);
636
505MODULE_LICENSE("GPL"); 637MODULE_LICENSE("GPL");
506MODULE_DESCRIPTION("Synchronous cryptographic hash type"); 638MODULE_DESCRIPTION("Synchronous cryptographic hash type");
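A note on the shash_prepare_alg() defaults above: an algorithm or template instance now only has to provide init/update/final plus its sizes; registration fills in the unaligned finup/digest helpers, a default export/import with statesize set to descsize, and the shash_no_setkey stub. A minimal sketch of a registration that relies on those defaults (the "foo" algorithm and all of its callbacks are illustrative only, not part of this patch):

#include <crypto/internal/hash.h>
#include <linux/module.h>

static int foo_init(struct shash_desc *desc)
{
	/* reset the per-request state kept in shash_desc_ctx(desc) */
	return 0;
}

static int foo_update(struct shash_desc *desc, const u8 *data,
		      unsigned int len)
{
	/* fold 'data' into the per-request state */
	return 0;
}

static int foo_final(struct shash_desc *desc, u8 *out)
{
	/* write .digestsize bytes of result to 'out' */
	return 0;
}

static struct shash_alg foo_alg = {
	.digestsize	= 16,
	.descsize	= 4 * sizeof(u32),	/* also becomes statesize */
	.init		= foo_init,
	.update		= foo_update,
	.final		= foo_final,
	.base		= {
		.cra_name	= "foo",
		.cra_blocksize	= 16,
		.cra_module	= THIS_MODULE,
	},
};

/* crypto_register_shash(&foo_alg) then inherits the finup/digest,
 * export/import and setkey defaults filled in by shash_prepare_alg(). */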
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index d59ba5079d14..aa3f84ccc786 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -45,6 +45,9 @@
45 */ 45 */
46static unsigned int sec; 46static unsigned int sec;
47 47
48static char *alg = NULL;
49static u32 type;
50static u32 mask;
48static int mode; 51static int mode;
49static char *tvmem[TVMEMSIZE]; 52static char *tvmem[TVMEMSIZE];
50 53
@@ -716,6 +719,10 @@ static int do_test(int m)
716 ret += tcrypt_test("hmac(rmd160)"); 719 ret += tcrypt_test("hmac(rmd160)");
717 break; 720 break;
718 721
722 case 109:
723 ret += tcrypt_test("vmac(aes)");
724 break;
725
719 case 150: 726 case 150:
720 ret += tcrypt_test("ansi_cprng"); 727 ret += tcrypt_test("ansi_cprng");
721 break; 728 break;
@@ -885,6 +892,12 @@ static int do_test(int m)
885 return ret; 892 return ret;
886} 893}
887 894
895static int do_alg_test(const char *alg, u32 type, u32 mask)
896{
897 return crypto_has_alg(alg, type, mask ?: CRYPTO_ALG_TYPE_MASK) ?
898 0 : -ENOENT;
899}
900
888static int __init tcrypt_mod_init(void) 901static int __init tcrypt_mod_init(void)
889{ 902{
890 int err = -ENOMEM; 903 int err = -ENOMEM;
@@ -896,7 +909,11 @@ static int __init tcrypt_mod_init(void)
896 goto err_free_tv; 909 goto err_free_tv;
897 } 910 }
898 911
899 err = do_test(mode); 912 if (alg)
913 err = do_alg_test(alg, type, mask);
914 else
915 err = do_test(mode);
916
900 if (err) { 917 if (err) {
901 printk(KERN_ERR "tcrypt: one or more tests failed!\n"); 918 printk(KERN_ERR "tcrypt: one or more tests failed!\n");
902 goto err_free_tv; 919 goto err_free_tv;
@@ -928,6 +945,9 @@ static void __exit tcrypt_mod_fini(void) { }
928module_init(tcrypt_mod_init); 945module_init(tcrypt_mod_init);
929module_exit(tcrypt_mod_fini); 946module_exit(tcrypt_mod_fini);
930 947
948module_param(alg, charp, 0);
949module_param(type, uint, 0);
950module_param(mask, uint, 0);
931module_param(mode, int, 0); 951module_param(mode, int, 0);
932module_param(sec, uint, 0); 952module_param(sec, uint, 0);
933MODULE_PARM_DESC(sec, "Length in seconds of speed tests " 953MODULE_PARM_DESC(sec, "Length in seconds of speed tests "
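With the new alg/type/mask parameters, tcrypt can exercise a single algorithm by name instead of by mode number: loading the module with, for example, alg="vmac(aes)" makes tcrypt_mod_init() call do_alg_test(), whose crypto_has_alg() lookup is enough to instantiate, and therefore run the testmgr self-test for, a template instance such as the VMAC one added below. Leaving mask at 0 falls back to the generic type mask via the "mask ?: CRYPTO_ALG_TYPE_MASK" expression.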
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index e9e9d84293b9..6d5b746637be 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -190,10 +190,6 @@ static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
190 190
191 hash_buff = xbuf[0]; 191 hash_buff = xbuf[0];
192 192
193 ret = -EINVAL;
194 if (WARN_ON(template[i].psize > PAGE_SIZE))
195 goto out;
196
197 memcpy(hash_buff, template[i].plaintext, template[i].psize); 193 memcpy(hash_buff, template[i].plaintext, template[i].psize);
198 sg_init_one(&sg[0], hash_buff, template[i].psize); 194 sg_init_one(&sg[0], hash_buff, template[i].psize);
199 195
@@ -2252,6 +2248,15 @@ static const struct alg_test_desc alg_test_descs[] = {
2252 } 2248 }
2253 } 2249 }
2254 }, { 2250 }, {
2251 .alg = "vmac(aes)",
2252 .test = alg_test_hash,
2253 .suite = {
2254 .hash = {
2255 .vecs = aes_vmac128_tv_template,
2256 .count = VMAC_AES_TEST_VECTORS
2257 }
2258 }
2259 }, {
2255 .alg = "wp256", 2260 .alg = "wp256",
2256 .test = alg_test_hash, 2261 .test = alg_test_hash,
2257 .suite = { 2262 .suite = {
@@ -2348,6 +2353,7 @@ static int alg_find_test(const char *alg)
2348int alg_test(const char *driver, const char *alg, u32 type, u32 mask) 2353int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
2349{ 2354{
2350 int i; 2355 int i;
2356 int j;
2351 int rc; 2357 int rc;
2352 2358
2353 if ((type & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_CIPHER) { 2359 if ((type & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_CIPHER) {
@@ -2369,14 +2375,22 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
2369 } 2375 }
2370 2376
2371 i = alg_find_test(alg); 2377 i = alg_find_test(alg);
2372 if (i < 0) 2378 j = alg_find_test(driver);
2379 if (i < 0 && j < 0)
2373 goto notest; 2380 goto notest;
2374 2381
2375 if (fips_enabled && !alg_test_descs[i].fips_allowed) 2382 if (fips_enabled && ((i >= 0 && !alg_test_descs[i].fips_allowed) ||
2383 (j >= 0 && !alg_test_descs[j].fips_allowed)))
2376 goto non_fips_alg; 2384 goto non_fips_alg;
2377 2385
2378 rc = alg_test_descs[i].test(alg_test_descs + i, driver, 2386 rc = 0;
2379 type, mask); 2387 if (i >= 0)
2388 rc |= alg_test_descs[i].test(alg_test_descs + i, driver,
2389 type, mask);
2390 if (j >= 0)
2391 rc |= alg_test_descs[j].test(alg_test_descs + j, driver,
2392 type, mask);
2393
2380test_done: 2394test_done:
2381 if (fips_enabled && rc) 2395 if (fips_enabled && rc)
2382 panic("%s: %s alg self test failed in fips mode!\n", driver, alg); 2396 panic("%s: %s alg self test failed in fips mode!\n", driver, alg);
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 69316228fc19..9963b18983ab 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -1654,6 +1654,22 @@ static struct hash_testvec aes_xcbc128_tv_template[] = {
1654 } 1654 }
1655}; 1655};
1656 1656
1657#define VMAC_AES_TEST_VECTORS 1
1658static char vmac_string[128] = {'\x01', '\x01', '\x01', '\x01',
1659 '\x02', '\x03', '\x02', '\x02',
1660 '\x02', '\x04', '\x01', '\x07',
1661 '\x04', '\x01', '\x04', '\x03',};
1662static struct hash_testvec aes_vmac128_tv_template[] = {
1663 {
1664 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
1665 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
1666 .plaintext = vmac_string,
1667 .digest = "\xcb\xd7\x8a\xfd\xb7\x33\x79\xe7",
1668 .psize = 128,
1669 .ksize = 16,
1670 },
1671};
1672
1657/* 1673/*
1658 * SHA384 HMAC test vectors from RFC4231 1674 * SHA384 HMAC test vectors from RFC4231
1659 */ 1675 */
diff --git a/crypto/vmac.c b/crypto/vmac.c
new file mode 100644
index 000000000000..0a9468e575de
--- /dev/null
+++ b/crypto/vmac.c
@@ -0,0 +1,678 @@
1/*
2 * Modified to interface to the Linux kernel
3 * Copyright (c) 2009, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
16 * Place - Suite 330, Boston, MA 02111-1307 USA.
17 */
18
19/* --------------------------------------------------------------------------
20 * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
21 * This implementation is hereby placed in the public domain.
22 * The authors offer no warranty. Use at your own risk.
23 * Please send bug reports to the authors.
24 * Last modified: 17 APR 08, 1700 PDT
25 * ----------------------------------------------------------------------- */
26
27#include <linux/init.h>
28#include <linux/types.h>
29#include <linux/crypto.h>
30#include <linux/scatterlist.h>
31#include <asm/byteorder.h>
32#include <crypto/scatterwalk.h>
33#include <crypto/vmac.h>
34#include <crypto/internal/hash.h>
35
36/*
37 * Constants and masks
38 */
39#define UINT64_C(x) x##ULL
40const u64 p64 = UINT64_C(0xfffffffffffffeff); /* 2^64 - 257 prime */
41const u64 m62 = UINT64_C(0x3fffffffffffffff); /* 62-bit mask */
42const u64 m63 = UINT64_C(0x7fffffffffffffff); /* 63-bit mask */
43const u64 m64 = UINT64_C(0xffffffffffffffff); /* 64-bit mask */
44const u64 mpoly = UINT64_C(0x1fffffff1fffffff); /* Poly key mask */
45
46#ifdef __LITTLE_ENDIAN
47#define INDEX_HIGH 1
48#define INDEX_LOW 0
49#else
50#define INDEX_HIGH 0
51#define INDEX_LOW 1
52#endif
53
54/*
55 * The following routines are used in this implementation. They are
56 * written via macros to simulate zero-overhead call-by-reference.
57 *
58 * MUL64: 64x64->128-bit multiplication
59 * PMUL64: assumes top bits cleared on inputs
60 * ADD128: 128x128->128-bit addition
61 */
62
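/*
 * In other words: MUL64 splits each 64-bit operand into 32-bit halves,
 * i1 = 2^32*a + b and i2 = 2^32*c + d, and assembles a*c, a*d + b*c and
 * b*d into the 128-bit result.  PMUL64 folds the two cross products into
 * a single 64-bit addition first, which is only safe when the operands'
 * top bits are clear -- hence the mpoly mask on the polynomial keys.
 */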
63#define ADD128(rh, rl, ih, il) \
64 do { \
65 u64 _il = (il); \
66 (rl) += (_il); \
67 if ((rl) < (_il)) \
68 (rh)++; \
69 (rh) += (ih); \
70 } while (0)
71
72#define MUL32(i1, i2) ((u64)(u32)(i1)*(u32)(i2))
73
74#define PMUL64(rh, rl, i1, i2) /* Assumes m doesn't overflow */ \
75 do { \
76 u64 _i1 = (i1), _i2 = (i2); \
77 u64 m = MUL32(_i1, _i2>>32) + MUL32(_i1>>32, _i2); \
78 rh = MUL32(_i1>>32, _i2>>32); \
79 rl = MUL32(_i1, _i2); \
80 ADD128(rh, rl, (m >> 32), (m << 32)); \
81 } while (0)
82
83#define MUL64(rh, rl, i1, i2) \
84 do { \
85 u64 _i1 = (i1), _i2 = (i2); \
86 u64 m1 = MUL32(_i1, _i2>>32); \
87 u64 m2 = MUL32(_i1>>32, _i2); \
88 rh = MUL32(_i1>>32, _i2>>32); \
89 rl = MUL32(_i1, _i2); \
90 ADD128(rh, rl, (m1 >> 32), (m1 << 32)); \
91 ADD128(rh, rl, (m2 >> 32), (m2 << 32)); \
92 } while (0)
93
94/*
95 * For highest performance, the L1 NH and L2 polynomial hashes should be
96 * carefully implemented to take advantage of the target architecture.
97 * Here these two hash functions are defined multiple times: once for
98 * 64-bit architectures, once for 32-bit SSE2 architectures, and once
99 * for all remaining (32-bit) architectures.
100 * For each, nh_16 *must* be defined (works on multiples of 16 bytes).
101 * Optionally, nh_vmac_nhbytes can be defined (for multiples of
102 * VMAC_NHBYTES), and nh_16_2 and nh_vmac_nhbytes_2 (versions that do two
103 * NH computations at once).
104 */
105
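/*
 * For reference, nh_16 computes the NH compression
 *	sum_i ((m[2i] + k[2i]) mod 2^64) * ((m[2i+1] + k[2i+1]) mod 2^64)
 * accumulated as a 128-bit value; the wider variants only change how many
 * message words each loop iteration consumes, and the *_2 versions run
 * two independent NH sums (with the key shifted by two words) at once.
 */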
106#ifdef CONFIG_64BIT
107
108#define nh_16(mp, kp, nw, rh, rl) \
109 do { \
110 int i; u64 th, tl; \
111 rh = rl = 0; \
112 for (i = 0; i < nw; i += 2) { \
113 MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \
114 le64_to_cpup((mp)+i+1)+(kp)[i+1]); \
115 ADD128(rh, rl, th, tl); \
116 } \
117 } while (0)
118
119#define nh_16_2(mp, kp, nw, rh, rl, rh1, rl1) \
120 do { \
121 int i; u64 th, tl; \
122 rh1 = rl1 = rh = rl = 0; \
123 for (i = 0; i < nw; i += 2) { \
124 MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \
125 le64_to_cpup((mp)+i+1)+(kp)[i+1]); \
126 ADD128(rh, rl, th, tl); \
127 MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i+2], \
128 le64_to_cpup((mp)+i+1)+(kp)[i+3]); \
129 ADD128(rh1, rl1, th, tl); \
130 } \
131 } while (0)
132
133#if (VMAC_NHBYTES >= 64) /* These versions do 64-bytes of message at a time */
134#define nh_vmac_nhbytes(mp, kp, nw, rh, rl) \
135 do { \
136 int i; u64 th, tl; \
137 rh = rl = 0; \
138 for (i = 0; i < nw; i += 8) { \
139 MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \
140 le64_to_cpup((mp)+i+1)+(kp)[i+1]); \
141 ADD128(rh, rl, th, tl); \
142 MUL64(th, tl, le64_to_cpup((mp)+i+2)+(kp)[i+2], \
143 le64_to_cpup((mp)+i+3)+(kp)[i+3]); \
144 ADD128(rh, rl, th, tl); \
145 MUL64(th, tl, le64_to_cpup((mp)+i+4)+(kp)[i+4], \
146 le64_to_cpup((mp)+i+5)+(kp)[i+5]); \
147 ADD128(rh, rl, th, tl); \
148 MUL64(th, tl, le64_to_cpup((mp)+i+6)+(kp)[i+6], \
149 le64_to_cpup((mp)+i+7)+(kp)[i+7]); \
150 ADD128(rh, rl, th, tl); \
151 } \
152 } while (0)
153
154#define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh1, rl1) \
155 do { \
156 int i; u64 th, tl; \
157 rh1 = rl1 = rh = rl = 0; \
158 for (i = 0; i < nw; i += 8) { \
159 MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i], \
160 le64_to_cpup((mp)+i+1)+(kp)[i+1]); \
161 ADD128(rh, rl, th, tl); \
162 MUL64(th, tl, le64_to_cpup((mp)+i)+(kp)[i+2], \
163 le64_to_cpup((mp)+i+1)+(kp)[i+3]); \
164 ADD128(rh1, rl1, th, tl); \
165 MUL64(th, tl, le64_to_cpup((mp)+i+2)+(kp)[i+2], \
166 le64_to_cpup((mp)+i+3)+(kp)[i+3]); \
167 ADD128(rh, rl, th, tl); \
168 MUL64(th, tl, le64_to_cpup((mp)+i+2)+(kp)[i+4], \
169 le64_to_cpup((mp)+i+3)+(kp)[i+5]); \
170 ADD128(rh1, rl1, th, tl); \
171 MUL64(th, tl, le64_to_cpup((mp)+i+4)+(kp)[i+4], \
172 le64_to_cpup((mp)+i+5)+(kp)[i+5]); \
173 ADD128(rh, rl, th, tl); \
174 MUL64(th, tl, le64_to_cpup((mp)+i+4)+(kp)[i+6], \
175 le64_to_cpup((mp)+i+5)+(kp)[i+7]); \
176 ADD128(rh1, rl1, th, tl); \
177 MUL64(th, tl, le64_to_cpup((mp)+i+6)+(kp)[i+6], \
178 le64_to_cpup((mp)+i+7)+(kp)[i+7]); \
179 ADD128(rh, rl, th, tl); \
180 MUL64(th, tl, le64_to_cpup((mp)+i+6)+(kp)[i+8], \
181 le64_to_cpup((mp)+i+7)+(kp)[i+9]); \
182 ADD128(rh1, rl1, th, tl); \
183 } \
184 } while (0)
185#endif
186
187#define poly_step(ah, al, kh, kl, mh, ml) \
188 do { \
189 u64 t1h, t1l, t2h, t2l, t3h, t3l, z = 0; \
190 /* compute ab*cd, put bd into result registers */ \
191 PMUL64(t3h, t3l, al, kh); \
192 PMUL64(t2h, t2l, ah, kl); \
193 PMUL64(t1h, t1l, ah, 2*kh); \
194 PMUL64(ah, al, al, kl); \
195 /* add 2 * ac to result */ \
196 ADD128(ah, al, t1h, t1l); \
197 /* add together ad + bc */ \
198 ADD128(t2h, t2l, t3h, t3l); \
199 /* now (ah,al), (t2l,2*t2h) need summing */ \
200 /* first add the high registers, carrying into t2h */ \
201 ADD128(t2h, ah, z, t2l); \
202 /* double t2h and add top bit of ah */ \
203 t2h = 2 * t2h + (ah >> 63); \
204 ah &= m63; \
205 /* now add the low registers */ \
206 ADD128(ah, al, mh, ml); \
207 ADD128(ah, al, z, t2h); \
208 } while (0)
209
210#else /* ! CONFIG_64BIT */
211
212#ifndef nh_16
213#define nh_16(mp, kp, nw, rh, rl) \
214 do { \
215 u64 t1, t2, m1, m2, t; \
216 int i; \
217 rh = rl = t = 0; \
218 for (i = 0; i < nw; i += 2) { \
219 t1 = le64_to_cpup(mp+i) + kp[i]; \
220 t2 = le64_to_cpup(mp+i+1) + kp[i+1]; \
221 m2 = MUL32(t1 >> 32, t2); \
222 m1 = MUL32(t1, t2 >> 32); \
223 ADD128(rh, rl, MUL32(t1 >> 32, t2 >> 32), \
224 MUL32(t1, t2)); \
225 rh += (u64)(u32)(m1 >> 32) \
226 + (u32)(m2 >> 32); \
227 t += (u64)(u32)m1 + (u32)m2; \
228 } \
229 ADD128(rh, rl, (t >> 32), (t << 32)); \
230 } while (0)
231#endif
232
233static void poly_step_func(u64 *ahi, u64 *alo,
234 const u64 *kh, const u64 *kl,
235 const u64 *mh, const u64 *ml)
236{
237#define a0 (*(((u32 *)alo)+INDEX_LOW))
238#define a1 (*(((u32 *)alo)+INDEX_HIGH))
239#define a2 (*(((u32 *)ahi)+INDEX_LOW))
240#define a3 (*(((u32 *)ahi)+INDEX_HIGH))
241#define k0 (*(((u32 *)kl)+INDEX_LOW))
242#define k1 (*(((u32 *)kl)+INDEX_HIGH))
243#define k2 (*(((u32 *)kh)+INDEX_LOW))
244#define k3 (*(((u32 *)kh)+INDEX_HIGH))
245
246 u64 p, q, t;
247 u32 t2;
248
249 p = MUL32(a3, k3);
250 p += p;
251 p += *(u64 *)mh;
252 p += MUL32(a0, k2);
253 p += MUL32(a1, k1);
254 p += MUL32(a2, k0);
255 t = (u32)(p);
256 p >>= 32;
257 p += MUL32(a0, k3);
258 p += MUL32(a1, k2);
259 p += MUL32(a2, k1);
260 p += MUL32(a3, k0);
261 t |= ((u64)((u32)p & 0x7fffffff)) << 32;
262 p >>= 31;
263 p += (u64)(((u32 *)ml)[INDEX_LOW]);
264 p += MUL32(a0, k0);
265 q = MUL32(a1, k3);
266 q += MUL32(a2, k2);
267 q += MUL32(a3, k1);
268 q += q;
269 p += q;
270 t2 = (u32)(p);
271 p >>= 32;
272 p += (u64)(((u32 *)ml)[INDEX_HIGH]);
273 p += MUL32(a0, k1);
274 p += MUL32(a1, k0);
275 q = MUL32(a2, k3);
276 q += MUL32(a3, k2);
277 q += q;
278 p += q;
279 *(u64 *)(alo) = (p << 32) | t2;
280 p >>= 32;
281 *(u64 *)(ahi) = p + t;
282
283#undef a0
284#undef a1
285#undef a2
286#undef a3
287#undef k0
288#undef k1
289#undef k2
290#undef k3
291}
292
293#define poly_step(ah, al, kh, kl, mh, ml) \
294 poly_step_func(&(ah), &(al), &(kh), &(kl), &(mh), &(ml))
295
296#endif /* end of specialized NH and poly definitions */
297
298/* At least nh_16 is defined. Define others as needed here. */
299#ifndef nh_16_2
300#define nh_16_2(mp, kp, nw, rh, rl, rh2, rl2) \
301 do { \
302 nh_16(mp, kp, nw, rh, rl); \
303 nh_16(mp, ((kp)+2), nw, rh2, rl2); \
304 } while (0)
305#endif
306#ifndef nh_vmac_nhbytes
307#define nh_vmac_nhbytes(mp, kp, nw, rh, rl) \
308 nh_16(mp, kp, nw, rh, rl)
309#endif
310#ifndef nh_vmac_nhbytes_2
311#define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh2, rl2) \
312 do { \
313 nh_vmac_nhbytes(mp, kp, nw, rh, rl); \
314 nh_vmac_nhbytes(mp, ((kp)+2), nw, rh2, rl2); \
315 } while (0)
316#endif
317
318static void vhash_abort(struct vmac_ctx *ctx)
319{
320 ctx->polytmp[0] = ctx->polykey[0];
321 ctx->polytmp[1] = ctx->polykey[1];
322 ctx->first_block_processed = 0;
323}
324
325static u64 l3hash(u64 p1, u64 p2,
326 u64 k1, u64 k2, u64 len)
327{
328 u64 rh, rl, t, z = 0;
329
330 /* fully reduce (p1,p2)+(len,0) mod p127 */
331 t = p1 >> 63;
332 p1 &= m63;
333 ADD128(p1, p2, len, t);
334 /* At this point, (p1,p2) is at most 2^127+(len<<64) */
335 t = (p1 > m63) + ((p1 == m63) && (p2 == m64));
336 ADD128(p1, p2, z, t);
337 p1 &= m63;
338
339 /* compute (p1,p2)/(2^64-2^32) and (p1,p2)%(2^64-2^32) */
340 t = p1 + (p2 >> 32);
341 t += (t >> 32);
342 t += (u32)t > 0xfffffffeu;
343 p1 += (t >> 32);
344 p2 += (p1 << 32);
345
346 /* compute (p1+k1)%p64 and (p2+k2)%p64 */
347 p1 += k1;
348 p1 += (0 - (p1 < k1)) & 257;
349 p2 += k2;
350 p2 += (0 - (p2 < k2)) & 257;
351
352 /* compute (p1+k1)*(p2+k2)%p64 */
353 MUL64(rh, rl, p1, p2);
354 t = rh >> 56;
355 ADD128(t, rl, z, rh);
356 rh <<= 8;
357 ADD128(t, rl, z, rh);
358 t += t << 8;
359 rl += t;
360 rl += (0 - (rl < t)) & 257;
361 rl += (0 - (rl > p64-1)) & 257;
362 return rl;
363}
364
365static void vhash_update(const unsigned char *m,
366 unsigned int mbytes, /* Positive multiple of VMAC_NHBYTES */
367 struct vmac_ctx *ctx)
368{
369 u64 rh, rl, *mptr;
370 const u64 *kptr = (u64 *)ctx->nhkey;
371 int i;
372 u64 ch, cl;
373 u64 pkh = ctx->polykey[0];
374 u64 pkl = ctx->polykey[1];
375
376 mptr = (u64 *)m;
377 i = mbytes / VMAC_NHBYTES; /* Must be non-zero */
378
379 ch = ctx->polytmp[0];
380 cl = ctx->polytmp[1];
381
382 if (!ctx->first_block_processed) {
383 ctx->first_block_processed = 1;
384 nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
385 rh &= m62;
386 ADD128(ch, cl, rh, rl);
387 mptr += (VMAC_NHBYTES/sizeof(u64));
388 i--;
389 }
390
391 while (i--) {
392 nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
393 rh &= m62;
394 poly_step(ch, cl, pkh, pkl, rh, rl);
395 mptr += (VMAC_NHBYTES/sizeof(u64));
396 }
397
398 ctx->polytmp[0] = ch;
399 ctx->polytmp[1] = cl;
400}
401
402static u64 vhash(unsigned char m[], unsigned int mbytes,
403 u64 *tagl, struct vmac_ctx *ctx)
404{
405 u64 rh, rl, *mptr;
406 const u64 *kptr = (u64 *)ctx->nhkey;
407 int i, remaining;
408 u64 ch, cl;
409 u64 pkh = ctx->polykey[0];
410 u64 pkl = ctx->polykey[1];
411
412 mptr = (u64 *)m;
413 i = mbytes / VMAC_NHBYTES;
414 remaining = mbytes % VMAC_NHBYTES;
415
416 if (ctx->first_block_processed) {
417 ch = ctx->polytmp[0];
418 cl = ctx->polytmp[1];
419 } else if (i) {
420 nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, ch, cl);
421 ch &= m62;
422 ADD128(ch, cl, pkh, pkl);
423 mptr += (VMAC_NHBYTES/sizeof(u64));
424 i--;
425 } else if (remaining) {
426 nh_16(mptr, kptr, 2*((remaining+15)/16), ch, cl);
427 ch &= m62;
428 ADD128(ch, cl, pkh, pkl);
429 mptr += (VMAC_NHBYTES/sizeof(u64));
430 goto do_l3;
431 } else { /* Empty String */
432 ch = pkh; cl = pkl;
433 goto do_l3;
434 }
435
436 while (i--) {
437 nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
438 rh &= m62;
439 poly_step(ch, cl, pkh, pkl, rh, rl);
440 mptr += (VMAC_NHBYTES/sizeof(u64));
441 }
442 if (remaining) {
443 nh_16(mptr, kptr, 2*((remaining+15)/16), rh, rl);
444 rh &= m62;
445 poly_step(ch, cl, pkh, pkl, rh, rl);
446 }
447
448do_l3:
449 vhash_abort(ctx);
450 remaining *= 8;
451 return l3hash(ch, cl, ctx->l3key[0], ctx->l3key[1], remaining);
452}
453
454static u64 vmac(unsigned char m[], unsigned int mbytes,
455 unsigned char n[16], u64 *tagl,
456 struct vmac_ctx_t *ctx)
457{
458 u64 *in_n, *out_p;
459 u64 p, h;
460 int i;
461
462 in_n = ctx->__vmac_ctx.cached_nonce;
463 out_p = ctx->__vmac_ctx.cached_aes;
464
465 i = n[15] & 1;
466 if ((*(u64 *)(n+8) != in_n[1]) || (*(u64 *)(n) != in_n[0])) {
467 in_n[0] = *(u64 *)(n);
468 in_n[1] = *(u64 *)(n+8);
469 ((unsigned char *)in_n)[15] &= 0xFE;
470 crypto_cipher_encrypt_one(ctx->child,
471 (unsigned char *)out_p, (unsigned char *)in_n);
472
473 ((unsigned char *)in_n)[15] |= (unsigned char)(1-i);
474 }
475 p = be64_to_cpup(out_p + i);
476 h = vhash(m, mbytes, (u64 *)0, &ctx->__vmac_ctx);
477 return p + h;
478}
479
480static int vmac_set_key(unsigned char user_key[], struct vmac_ctx_t *ctx)
481{
482 u64 in[2] = {0}, out[2];
483 unsigned i;
484 int err = 0;
485
486 err = crypto_cipher_setkey(ctx->child, user_key, VMAC_KEY_LEN);
487 if (err)
488 return err;
489
490 /* Fill nh key */
491 ((unsigned char *)in)[0] = 0x80;
492 for (i = 0; i < sizeof(ctx->__vmac_ctx.nhkey)/8; i += 2) {
493 crypto_cipher_encrypt_one(ctx->child,
494 (unsigned char *)out, (unsigned char *)in);
495 ctx->__vmac_ctx.nhkey[i] = be64_to_cpup(out);
496 ctx->__vmac_ctx.nhkey[i+1] = be64_to_cpup(out+1);
497 ((unsigned char *)in)[15] += 1;
498 }
499
500 /* Fill poly key */
501 ((unsigned char *)in)[0] = 0xC0;
502 in[1] = 0;
503 for (i = 0; i < sizeof(ctx->__vmac_ctx.polykey)/8; i += 2) {
504 crypto_cipher_encrypt_one(ctx->child,
505 (unsigned char *)out, (unsigned char *)in);
506 ctx->__vmac_ctx.polytmp[i] =
507 ctx->__vmac_ctx.polykey[i] =
508 be64_to_cpup(out) & mpoly;
509 ctx->__vmac_ctx.polytmp[i+1] =
510 ctx->__vmac_ctx.polykey[i+1] =
511 be64_to_cpup(out+1) & mpoly;
512 ((unsigned char *)in)[15] += 1;
513 }
514
515 /* Fill ip key */
516 ((unsigned char *)in)[0] = 0xE0;
517 in[1] = 0;
518 for (i = 0; i < sizeof(ctx->__vmac_ctx.l3key)/8; i += 2) {
519 do {
520 crypto_cipher_encrypt_one(ctx->child,
521 (unsigned char *)out, (unsigned char *)in);
522 ctx->__vmac_ctx.l3key[i] = be64_to_cpup(out);
523 ctx->__vmac_ctx.l3key[i+1] = be64_to_cpup(out+1);
524 ((unsigned char *)in)[15] += 1;
525 } while (ctx->__vmac_ctx.l3key[i] >= p64
526 || ctx->__vmac_ctx.l3key[i+1] >= p64);
527 }
528
529 /* Invalidate nonce/aes cache and reset other elements */
530 ctx->__vmac_ctx.cached_nonce[0] = (u64)-1; /* Ensure illegal nonce */
531 ctx->__vmac_ctx.cached_nonce[1] = (u64)0; /* Ensure illegal nonce */
532 ctx->__vmac_ctx.first_block_processed = 0;
533
534 return err;
535}
536
537static int vmac_setkey(struct crypto_shash *parent,
538 const u8 *key, unsigned int keylen)
539{
540 struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
541
542 if (keylen != VMAC_KEY_LEN) {
543 crypto_shash_set_flags(parent, CRYPTO_TFM_RES_BAD_KEY_LEN);
544 return -EINVAL;
545 }
546
547 return vmac_set_key((u8 *)key, ctx);
548}
549
550static int vmac_init(struct shash_desc *pdesc)
551{
552 struct crypto_shash *parent = pdesc->tfm;
553 struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
554
555 memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx));
556 return 0;
557}
558
559static int vmac_update(struct shash_desc *pdesc, const u8 *p,
560 unsigned int len)
561{
562 struct crypto_shash *parent = pdesc->tfm;
563 struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
564
565 vhash_update(p, len, &ctx->__vmac_ctx);
566
567 return 0;
568}
569
570static int vmac_final(struct shash_desc *pdesc, u8 *out)
571{
572 struct crypto_shash *parent = pdesc->tfm;
573 struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
574 vmac_t mac;
575 u8 nonce[16] = {};
576
577 mac = vmac(NULL, 0, nonce, NULL, ctx);
578 memcpy(out, &mac, sizeof(vmac_t));
579 memset(&mac, 0, sizeof(vmac_t));
580 memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx));
581 return 0;
582}
583
584static int vmac_init_tfm(struct crypto_tfm *tfm)
585{
586 struct crypto_cipher *cipher;
587 struct crypto_instance *inst = (void *)tfm->__crt_alg;
588 struct crypto_spawn *spawn = crypto_instance_ctx(inst);
589 struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm);
590
591 cipher = crypto_spawn_cipher(spawn);
592 if (IS_ERR(cipher))
593 return PTR_ERR(cipher);
594
595 ctx->child = cipher;
596 return 0;
597}
598
599static void vmac_exit_tfm(struct crypto_tfm *tfm)
600{
601 struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm);
602 crypto_free_cipher(ctx->child);
603}
604
605static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
606{
607 struct shash_instance *inst;
608 struct crypto_alg *alg;
609 int err;
610
611 err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH);
612 if (err)
613 return err;
614
615 alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
616 CRYPTO_ALG_TYPE_MASK);
617 if (IS_ERR(alg))
618 return PTR_ERR(alg);
619
620 inst = shash_alloc_instance("vmac", alg);
621 err = PTR_ERR(inst);
622 if (IS_ERR(inst))
623 goto out_put_alg;
624
625 err = crypto_init_spawn(shash_instance_ctx(inst), alg,
626 shash_crypto_instance(inst),
627 CRYPTO_ALG_TYPE_MASK);
628 if (err)
629 goto out_free_inst;
630
631 inst->alg.base.cra_priority = alg->cra_priority;
632 inst->alg.base.cra_blocksize = alg->cra_blocksize;
633 inst->alg.base.cra_alignmask = alg->cra_alignmask;
634
635 inst->alg.digestsize = sizeof(vmac_t);
636 inst->alg.base.cra_ctxsize = sizeof(struct vmac_ctx_t);
637 inst->alg.base.cra_init = vmac_init_tfm;
638 inst->alg.base.cra_exit = vmac_exit_tfm;
639
640 inst->alg.init = vmac_init;
641 inst->alg.update = vmac_update;
642 inst->alg.final = vmac_final;
643 inst->alg.setkey = vmac_setkey;
644
645 err = shash_register_instance(tmpl, inst);
646 if (err) {
647out_free_inst:
648 shash_free_instance(shash_crypto_instance(inst));
649 }
650
651out_put_alg:
652 crypto_mod_put(alg);
653 return err;
654}
655
656static struct crypto_template vmac_tmpl = {
657 .name = "vmac",
658 .create = vmac_create,
659 .free = shash_free_instance,
660 .module = THIS_MODULE,
661};
662
663static int __init vmac_module_init(void)
664{
665 return crypto_register_template(&vmac_tmpl);
666}
667
668static void __exit vmac_module_exit(void)
669{
670 crypto_unregister_template(&vmac_tmpl);
671}
672
673module_init(vmac_module_init);
674module_exit(vmac_module_exit);
675
676MODULE_LICENSE("GPL");
677MODULE_DESCRIPTION("VMAC hash algorithm");
678
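Since the template registers an ordinary shash, callers reach it through the synchronous hash API. A rough, untested sketch of computing a tag follows (all names are local to the example; per the test vector above the key is 16 bytes and the tag 8 bytes, vmac_final() uses an all-zero nonce, and the comment on vhash_update() expects each update chunk to be a positive multiple of VMAC_NHBYTES):

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/slab.h>

static int vmac_tag_sketch(const u8 *key, const u8 *msg, unsigned int len,
			   u8 *tag /* 8 bytes */)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int err;

	tfm = crypto_alloc_shash("vmac(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_shash_setkey(tfm, key, 16);
	if (err)
		goto out_free_tfm;

	desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		err = -ENOMEM;
		goto out_free_tfm;
	}
	desc->tfm = tfm;

	err = crypto_shash_init(desc);
	if (!err)
		/* len should be a positive multiple of VMAC_NHBYTES,
		 * e.g. the 128-byte message used by the test vector */
		err = crypto_shash_update(desc, msg, len);
	if (!err)
		err = crypto_shash_final(desc, tag);

	kfree(desc);
out_free_tfm:
	crypto_free_shash(tfm);
	return err;
}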
diff --git a/crypto/xcbc.c b/crypto/xcbc.c
index b63b633e549c..bb7b67fba349 100644
--- a/crypto/xcbc.c
+++ b/crypto/xcbc.c
@@ -19,211 +19,142 @@
19 * Kazunori Miyazawa <miyazawa@linux-ipv6.org> 19 * Kazunori Miyazawa <miyazawa@linux-ipv6.org>
20 */ 20 */
21 21
22#include <crypto/scatterwalk.h> 22#include <crypto/internal/hash.h>
23#include <linux/crypto.h>
24#include <linux/err.h> 23#include <linux/err.h>
25#include <linux/hardirq.h>
26#include <linux/kernel.h> 24#include <linux/kernel.h>
27#include <linux/mm.h>
28#include <linux/rtnetlink.h>
29#include <linux/slab.h>
30#include <linux/scatterlist.h>
31 25
32static u_int32_t ks[12] = {0x01010101, 0x01010101, 0x01010101, 0x01010101, 26static u_int32_t ks[12] = {0x01010101, 0x01010101, 0x01010101, 0x01010101,
33 0x02020202, 0x02020202, 0x02020202, 0x02020202, 27 0x02020202, 0x02020202, 0x02020202, 0x02020202,
34 0x03030303, 0x03030303, 0x03030303, 0x03030303}; 28 0x03030303, 0x03030303, 0x03030303, 0x03030303};
29
35/* 30/*
36 * +------------------------ 31 * +------------------------
37 * | <parent tfm> 32 * | <parent tfm>
38 * +------------------------ 33 * +------------------------
39 * | crypto_xcbc_ctx 34 * | xcbc_tfm_ctx
40 * +------------------------ 35 * +------------------------
41 * | odds (block size) 36 * | consts (block size * 2)
42 * +------------------------ 37 * +------------------------
43 * | prev (block size) 38 */
39struct xcbc_tfm_ctx {
40 struct crypto_cipher *child;
41 u8 ctx[];
42};
43
44/*
44 * +------------------------ 45 * +------------------------
45 * | key (block size) 46 * | <shash desc>
46 * +------------------------ 47 * +------------------------
47 * | consts (block size * 3) 48 * | xcbc_desc_ctx
49 * +------------------------
50 * | odds (block size)
51 * +------------------------
52 * | prev (block size)
48 * +------------------------ 53 * +------------------------
49 */ 54 */
50struct crypto_xcbc_ctx { 55struct xcbc_desc_ctx {
51 struct crypto_cipher *child;
52 u8 *odds;
53 u8 *prev;
54 u8 *key;
55 u8 *consts;
56 void (*xor)(u8 *a, const u8 *b, unsigned int bs);
57 unsigned int keylen;
58 unsigned int len; 56 unsigned int len;
57 u8 ctx[];
59}; 58};
60 59
61static void xor_128(u8 *a, const u8 *b, unsigned int bs) 60static int crypto_xcbc_digest_setkey(struct crypto_shash *parent,
62{ 61 const u8 *inkey, unsigned int keylen)
63 ((u32 *)a)[0] ^= ((u32 *)b)[0];
64 ((u32 *)a)[1] ^= ((u32 *)b)[1];
65 ((u32 *)a)[2] ^= ((u32 *)b)[2];
66 ((u32 *)a)[3] ^= ((u32 *)b)[3];
67}
68
69static int _crypto_xcbc_digest_setkey(struct crypto_hash *parent,
70 struct crypto_xcbc_ctx *ctx)
71{ 62{
72 int bs = crypto_hash_blocksize(parent); 63 unsigned long alignmask = crypto_shash_alignmask(parent);
64 struct xcbc_tfm_ctx *ctx = crypto_shash_ctx(parent);
65 int bs = crypto_shash_blocksize(parent);
66 u8 *consts = PTR_ALIGN(&ctx->ctx[0], alignmask + 1);
73 int err = 0; 67 int err = 0;
74 u8 key1[bs]; 68 u8 key1[bs];
75 69
76 if ((err = crypto_cipher_setkey(ctx->child, ctx->key, ctx->keylen))) 70 if ((err = crypto_cipher_setkey(ctx->child, inkey, keylen)))
77 return err; 71 return err;
78 72
79 crypto_cipher_encrypt_one(ctx->child, key1, ctx->consts); 73 crypto_cipher_encrypt_one(ctx->child, consts, (u8 *)ks + bs);
74 crypto_cipher_encrypt_one(ctx->child, consts + bs, (u8 *)ks + bs * 2);
75 crypto_cipher_encrypt_one(ctx->child, key1, (u8 *)ks);
80 76
81 return crypto_cipher_setkey(ctx->child, key1, bs); 77 return crypto_cipher_setkey(ctx->child, key1, bs);
82}
83
84static int crypto_xcbc_digest_setkey(struct crypto_hash *parent,
85 const u8 *inkey, unsigned int keylen)
86{
87 struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(parent);
88
89 if (keylen != crypto_cipher_blocksize(ctx->child))
90 return -EINVAL;
91 78
92 ctx->keylen = keylen;
93 memcpy(ctx->key, inkey, keylen);
94 ctx->consts = (u8*)ks;
95
96 return _crypto_xcbc_digest_setkey(parent, ctx);
97} 79}
98 80
99static int crypto_xcbc_digest_init(struct hash_desc *pdesc) 81static int crypto_xcbc_digest_init(struct shash_desc *pdesc)
100{ 82{
101 struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(pdesc->tfm); 83 unsigned long alignmask = crypto_shash_alignmask(pdesc->tfm);
102 int bs = crypto_hash_blocksize(pdesc->tfm); 84 struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc);
85 int bs = crypto_shash_blocksize(pdesc->tfm);
86 u8 *prev = PTR_ALIGN(&ctx->ctx[0], alignmask + 1) + bs;
103 87
104 ctx->len = 0; 88 ctx->len = 0;
105 memset(ctx->odds, 0, bs); 89 memset(prev, 0, bs);
106 memset(ctx->prev, 0, bs);
107 90
108 return 0; 91 return 0;
109} 92}
110 93
111static int crypto_xcbc_digest_update2(struct hash_desc *pdesc, 94static int crypto_xcbc_digest_update(struct shash_desc *pdesc, const u8 *p,
112 struct scatterlist *sg, 95 unsigned int len)
113 unsigned int nbytes)
114{ 96{
115 struct crypto_hash *parent = pdesc->tfm; 97 struct crypto_shash *parent = pdesc->tfm;
116 struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(parent); 98 unsigned long alignmask = crypto_shash_alignmask(parent);
117 struct crypto_cipher *tfm = ctx->child; 99 struct xcbc_tfm_ctx *tctx = crypto_shash_ctx(parent);
118 int bs = crypto_hash_blocksize(parent); 100 struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc);
119 101 struct crypto_cipher *tfm = tctx->child;
120 for (;;) { 102 int bs = crypto_shash_blocksize(parent);
121 struct page *pg = sg_page(sg); 103 u8 *odds = PTR_ALIGN(&ctx->ctx[0], alignmask + 1);
122 unsigned int offset = sg->offset; 104 u8 *prev = odds + bs;
123 unsigned int slen = sg->length; 105
124 106 /* checking the data can fill the block */
125 if (unlikely(slen > nbytes)) 107 if ((ctx->len + len) <= bs) {
126 slen = nbytes; 108 memcpy(odds + ctx->len, p, len);
127 109 ctx->len += len;
128 nbytes -= slen; 110 return 0;
129
130 while (slen > 0) {
131 unsigned int len = min(slen, ((unsigned int)(PAGE_SIZE)) - offset);
132 char *p = crypto_kmap(pg, 0) + offset;
133
134 /* checking the data can fill the block */
135 if ((ctx->len + len) <= bs) {
136 memcpy(ctx->odds + ctx->len, p, len);
137 ctx->len += len;
138 slen -= len;
139
140 /* checking the rest of the page */
141 if (len + offset >= PAGE_SIZE) {
142 offset = 0;
143 pg++;
144 } else
145 offset += len;
146
147 crypto_kunmap(p, 0);
148 crypto_yield(pdesc->flags);
149 continue;
150 }
151
152 /* filling odds with new data and encrypting it */
153 memcpy(ctx->odds + ctx->len, p, bs - ctx->len);
154 len -= bs - ctx->len;
155 p += bs - ctx->len;
156
157 ctx->xor(ctx->prev, ctx->odds, bs);
158 crypto_cipher_encrypt_one(tfm, ctx->prev, ctx->prev);
159
160 /* clearing the length */
161 ctx->len = 0;
162
163 /* encrypting the rest of data */
164 while (len > bs) {
165 ctx->xor(ctx->prev, p, bs);
166 crypto_cipher_encrypt_one(tfm, ctx->prev,
167 ctx->prev);
168 p += bs;
169 len -= bs;
170 }
171
172 /* keeping the surplus of blocksize */
173 if (len) {
174 memcpy(ctx->odds, p, len);
175 ctx->len = len;
176 }
177 crypto_kunmap(p, 0);
178 crypto_yield(pdesc->flags);
179 slen -= min(slen, ((unsigned int)(PAGE_SIZE)) - offset);
180 offset = 0;
181 pg++;
182 }
183
184 if (!nbytes)
185 break;
186 sg = scatterwalk_sg_next(sg);
187 } 111 }
188 112
189 return 0; 113 /* filling odds with new data and encrypting it */
190} 114 memcpy(odds + ctx->len, p, bs - ctx->len);
115 len -= bs - ctx->len;
116 p += bs - ctx->len;
191 117
192static int crypto_xcbc_digest_update(struct hash_desc *pdesc, 118 crypto_xor(prev, odds, bs);
193 struct scatterlist *sg, 119 crypto_cipher_encrypt_one(tfm, prev, prev);
194 unsigned int nbytes)
195{
196 if (WARN_ON_ONCE(in_irq()))
197 return -EDEADLK;
198 return crypto_xcbc_digest_update2(pdesc, sg, nbytes);
199}
200 120
201static int crypto_xcbc_digest_final(struct hash_desc *pdesc, u8 *out) 121 /* clearing the length */
202{ 122 ctx->len = 0;
203 struct crypto_hash *parent = pdesc->tfm;
204 struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(parent);
205 struct crypto_cipher *tfm = ctx->child;
206 int bs = crypto_hash_blocksize(parent);
207 int err = 0;
208
209 if (ctx->len == bs) {
210 u8 key2[bs];
211 123
212 if ((err = crypto_cipher_setkey(tfm, ctx->key, ctx->keylen)) != 0) 124 /* encrypting the rest of data */
213 return err; 125 while (len > bs) {
126 crypto_xor(prev, p, bs);
127 crypto_cipher_encrypt_one(tfm, prev, prev);
128 p += bs;
129 len -= bs;
130 }
214 131
215 crypto_cipher_encrypt_one(tfm, key2, 132 /* keeping the surplus of blocksize */
216 (u8 *)(ctx->consts + bs)); 133 if (len) {
134 memcpy(odds, p, len);
135 ctx->len = len;
136 }
217 137
218 ctx->xor(ctx->prev, ctx->odds, bs); 138 return 0;
219 ctx->xor(ctx->prev, key2, bs); 139}
220 _crypto_xcbc_digest_setkey(parent, ctx);
221 140
222 crypto_cipher_encrypt_one(tfm, out, ctx->prev); 141static int crypto_xcbc_digest_final(struct shash_desc *pdesc, u8 *out)
223 } else { 142{
224 u8 key3[bs]; 143 struct crypto_shash *parent = pdesc->tfm;
144 unsigned long alignmask = crypto_shash_alignmask(parent);
145 struct xcbc_tfm_ctx *tctx = crypto_shash_ctx(parent);
146 struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc);
147 struct crypto_cipher *tfm = tctx->child;
148 int bs = crypto_shash_blocksize(parent);
149 u8 *consts = PTR_ALIGN(&tctx->ctx[0], alignmask + 1);
150 u8 *odds = PTR_ALIGN(&ctx->ctx[0], alignmask + 1);
151 u8 *prev = odds + bs;
152 unsigned int offset = 0;
153
154 if (ctx->len != bs) {
225 unsigned int rlen; 155 unsigned int rlen;
226 u8 *p = ctx->odds + ctx->len; 156 u8 *p = odds + ctx->len;
157
227 *p = 0x80; 158 *p = 0x80;
228 p++; 159 p++;
229 160
@@ -231,32 +162,15 @@ static int crypto_xcbc_digest_final(struct hash_desc *pdesc, u8 *out)
231 if (rlen) 162 if (rlen)
232 memset(p, 0, rlen); 163 memset(p, 0, rlen);
233 164
234 if ((err = crypto_cipher_setkey(tfm, ctx->key, ctx->keylen)) != 0) 165 offset += bs;
235 return err;
236
237 crypto_cipher_encrypt_one(tfm, key3,
238 (u8 *)(ctx->consts + bs * 2));
239
240 ctx->xor(ctx->prev, ctx->odds, bs);
241 ctx->xor(ctx->prev, key3, bs);
242
243 _crypto_xcbc_digest_setkey(parent, ctx);
244
245 crypto_cipher_encrypt_one(tfm, out, ctx->prev);
246 } 166 }
247 167
248 return 0; 168 crypto_xor(prev, odds, bs);
249} 169 crypto_xor(prev, consts + offset, bs);
250 170
251static int crypto_xcbc_digest(struct hash_desc *pdesc, 171 crypto_cipher_encrypt_one(tfm, out, prev);
252 struct scatterlist *sg, unsigned int nbytes, u8 *out)
253{
254 if (WARN_ON_ONCE(in_irq()))
255 return -EDEADLK;
256 172
257 crypto_xcbc_digest_init(pdesc); 173 return 0;
258 crypto_xcbc_digest_update2(pdesc, sg, nbytes);
259 return crypto_xcbc_digest_final(pdesc, out);
260} 174}
261 175
262static int xcbc_init_tfm(struct crypto_tfm *tfm) 176static int xcbc_init_tfm(struct crypto_tfm *tfm)
@@ -264,95 +178,95 @@ static int xcbc_init_tfm(struct crypto_tfm *tfm)
264 struct crypto_cipher *cipher; 178 struct crypto_cipher *cipher;
265 struct crypto_instance *inst = (void *)tfm->__crt_alg; 179 struct crypto_instance *inst = (void *)tfm->__crt_alg;
266 struct crypto_spawn *spawn = crypto_instance_ctx(inst); 180 struct crypto_spawn *spawn = crypto_instance_ctx(inst);
267 struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(__crypto_hash_cast(tfm)); 181 struct xcbc_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
268 int bs = crypto_hash_blocksize(__crypto_hash_cast(tfm));
269 182
270 cipher = crypto_spawn_cipher(spawn); 183 cipher = crypto_spawn_cipher(spawn);
271 if (IS_ERR(cipher)) 184 if (IS_ERR(cipher))
272 return PTR_ERR(cipher); 185 return PTR_ERR(cipher);
273 186
274 switch(bs) {
275 case 16:
276 ctx->xor = xor_128;
277 break;
278 default:
279 return -EINVAL;
280 }
281
282 ctx->child = cipher; 187 ctx->child = cipher;
283 ctx->odds = (u8*)(ctx+1);
284 ctx->prev = ctx->odds + bs;
285 ctx->key = ctx->prev + bs;
286 188
287 return 0; 189 return 0;
288}; 190};
289 191
290static void xcbc_exit_tfm(struct crypto_tfm *tfm) 192static void xcbc_exit_tfm(struct crypto_tfm *tfm)
291{ 193{
292 struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(__crypto_hash_cast(tfm)); 194 struct xcbc_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
293 crypto_free_cipher(ctx->child); 195 crypto_free_cipher(ctx->child);
294} 196}
295 197
296static struct crypto_instance *xcbc_alloc(struct rtattr **tb) 198static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb)
297{ 199{
298 struct crypto_instance *inst; 200 struct shash_instance *inst;
299 struct crypto_alg *alg; 201 struct crypto_alg *alg;
202 unsigned long alignmask;
300 int err; 203 int err;
301 204
302 err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_HASH); 205 err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH);
303 if (err) 206 if (err)
304 return ERR_PTR(err); 207 return err;
305 208
306 alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, 209 alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
307 CRYPTO_ALG_TYPE_MASK); 210 CRYPTO_ALG_TYPE_MASK);
308 if (IS_ERR(alg)) 211 if (IS_ERR(alg))
309 return ERR_CAST(alg); 212 return PTR_ERR(alg);
310 213
311 switch(alg->cra_blocksize) { 214 switch(alg->cra_blocksize) {
312 case 16: 215 case 16:
313 break; 216 break;
314 default: 217 default:
315 inst = ERR_PTR(-EINVAL);
316 goto out_put_alg; 218 goto out_put_alg;
317 } 219 }
318 220
319 inst = crypto_alloc_instance("xcbc", alg); 221 inst = shash_alloc_instance("xcbc", alg);
222 err = PTR_ERR(inst);
320 if (IS_ERR(inst)) 223 if (IS_ERR(inst))
321 goto out_put_alg; 224 goto out_put_alg;
322 225
323 inst->alg.cra_flags = CRYPTO_ALG_TYPE_HASH; 226 err = crypto_init_spawn(shash_instance_ctx(inst), alg,
324 inst->alg.cra_priority = alg->cra_priority; 227 shash_crypto_instance(inst),
325 inst->alg.cra_blocksize = alg->cra_blocksize; 228 CRYPTO_ALG_TYPE_MASK);
326 inst->alg.cra_alignmask = alg->cra_alignmask; 229 if (err)
327 inst->alg.cra_type = &crypto_hash_type; 230 goto out_free_inst;
328 231
329 inst->alg.cra_hash.digestsize = alg->cra_blocksize; 232 alignmask = alg->cra_alignmask | 3;
330 inst->alg.cra_ctxsize = sizeof(struct crypto_xcbc_ctx) + 233 inst->alg.base.cra_alignmask = alignmask;
331 ALIGN(inst->alg.cra_blocksize * 3, sizeof(void *)); 234 inst->alg.base.cra_priority = alg->cra_priority;
332 inst->alg.cra_init = xcbc_init_tfm; 235 inst->alg.base.cra_blocksize = alg->cra_blocksize;
333 inst->alg.cra_exit = xcbc_exit_tfm; 236
334 237 inst->alg.digestsize = alg->cra_blocksize;
335 inst->alg.cra_hash.init = crypto_xcbc_digest_init; 238 inst->alg.descsize = ALIGN(sizeof(struct xcbc_desc_ctx),
336 inst->alg.cra_hash.update = crypto_xcbc_digest_update; 239 crypto_tfm_ctx_alignment()) +
337 inst->alg.cra_hash.final = crypto_xcbc_digest_final; 240 (alignmask &
338 inst->alg.cra_hash.digest = crypto_xcbc_digest; 241 ~(crypto_tfm_ctx_alignment() - 1)) +
339 inst->alg.cra_hash.setkey = crypto_xcbc_digest_setkey; 242 alg->cra_blocksize * 2;
243
244 inst->alg.base.cra_ctxsize = ALIGN(sizeof(struct xcbc_tfm_ctx),
245 alignmask + 1) +
246 alg->cra_blocksize * 2;
247 inst->alg.base.cra_init = xcbc_init_tfm;
248 inst->alg.base.cra_exit = xcbc_exit_tfm;
249
250 inst->alg.init = crypto_xcbc_digest_init;
251 inst->alg.update = crypto_xcbc_digest_update;
252 inst->alg.final = crypto_xcbc_digest_final;
253 inst->alg.setkey = crypto_xcbc_digest_setkey;
254
255 err = shash_register_instance(tmpl, inst);
256 if (err) {
257out_free_inst:
258 shash_free_instance(shash_crypto_instance(inst));
259 }
340 260
341out_put_alg: 261out_put_alg:
342 crypto_mod_put(alg); 262 crypto_mod_put(alg);
343 return inst; 263 return err;
344}
345
346static void xcbc_free(struct crypto_instance *inst)
347{
348 crypto_drop_spawn(crypto_instance_ctx(inst));
349 kfree(inst);
350} 264}
351 265
352static struct crypto_template crypto_xcbc_tmpl = { 266static struct crypto_template crypto_xcbc_tmpl = {
353 .name = "xcbc", 267 .name = "xcbc",
354 .alloc = xcbc_alloc, 268 .create = xcbc_create,
355 .free = xcbc_free, 269 .free = shash_free_instance,
356 .module = THIS_MODULE, 270 .module = THIS_MODULE,
357}; 271};
358 272
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index f6baa77deefb..0c4ca4d318b3 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -78,9 +78,10 @@ static struct acpi_blacklist_item acpi_blacklist[] __initdata = {
78 78
79static int __init blacklist_by_year(void) 79static int __init blacklist_by_year(void)
80{ 80{
81 int year = dmi_get_year(DMI_BIOS_DATE); 81 int year;
82
82 /* Doesn't exist? Likely an old system */ 83 /* Doesn't exist? Likely an old system */
83 if (year == -1) { 84 if (!dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL)) {
84 printk(KERN_ERR PREFIX "no DMI BIOS year, " 85 printk(KERN_ERR PREFIX "no DMI BIOS year, "
85 "acpi=force is required to enable ACPI\n" ); 86 "acpi=force is required to enable ACPI\n" );
86 return 1; 87 return 1;
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index b17c57f85032..ab2fa4eeb364 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -26,6 +26,17 @@ config ATA_NONSTANDARD
26 bool 26 bool
27 default n 27 default n
28 28
29config ATA_VERBOSE_ERROR
30 bool "Verbose ATA error reporting"
31 default y
32 help
33 This option adds parsing of ATA command descriptions and error bits
34 in libata kernel output, making it easier to interpret.
35 This option will enlarge the kernel by approx. 6KB. Disable it only
36 if kernel size is more important than ease of debugging.
37
38 If unsure, say Y.
39
29config ATA_ACPI 40config ATA_ACPI
30 bool "ATA ACPI Support" 41 bool "ATA ACPI Support"
31 depends on ACPI && PCI 42 depends on ACPI && PCI
@@ -586,6 +597,16 @@ config PATA_RB532
586 597
587 If unsure, say N. 598 If unsure, say N.
588 599
600config PATA_RDC
601 tristate "RDC PATA support"
602 depends on PCI
603 help
604 This option enables basic support for the later RDC PATA
605 controllers via the new ATA layer. For the RDC 1010, you need to
606 enable the IT821X driver instead.
607
608 If unsure, say N.
609
589config PATA_RZ1000 610config PATA_RZ1000
590 tristate "PC Tech RZ1000 PATA support" 611 tristate "PC Tech RZ1000 PATA support"
591 depends on PCI 612 depends on PCI
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 38906f9bbb4e..463eb52236aa 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -57,6 +57,7 @@ obj-$(CONFIG_PATA_PDC_OLD) += pata_pdc202xx_old.o
57obj-$(CONFIG_PATA_QDI) += pata_qdi.o 57obj-$(CONFIG_PATA_QDI) += pata_qdi.o
58obj-$(CONFIG_PATA_RADISYS) += pata_radisys.o 58obj-$(CONFIG_PATA_RADISYS) += pata_radisys.o
59obj-$(CONFIG_PATA_RB532) += pata_rb532_cf.o 59obj-$(CONFIG_PATA_RB532) += pata_rb532_cf.o
60obj-$(CONFIG_PATA_RDC) += pata_rdc.o
60obj-$(CONFIG_PATA_RZ1000) += pata_rz1000.o 61obj-$(CONFIG_PATA_RZ1000) += pata_rz1000.o
61obj-$(CONFIG_PATA_SC1200) += pata_sc1200.o 62obj-$(CONFIG_PATA_SC1200) += pata_sc1200.o
62obj-$(CONFIG_PATA_SERVERWORKS) += pata_serverworks.o 63obj-$(CONFIG_PATA_SERVERWORKS) += pata_serverworks.o
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index fe3eba5d6b3e..d4cd9c203314 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -329,10 +329,24 @@ static ssize_t ahci_activity_store(struct ata_device *dev,
329 enum sw_activity val); 329 enum sw_activity val);
330static void ahci_init_sw_activity(struct ata_link *link); 330static void ahci_init_sw_activity(struct ata_link *link);
331 331
332static ssize_t ahci_show_host_caps(struct device *dev,
333 struct device_attribute *attr, char *buf);
334static ssize_t ahci_show_host_version(struct device *dev,
335 struct device_attribute *attr, char *buf);
336static ssize_t ahci_show_port_cmd(struct device *dev,
337 struct device_attribute *attr, char *buf);
338
339DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL);
340DEVICE_ATTR(ahci_host_version, S_IRUGO, ahci_show_host_version, NULL);
341DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL);
342
332static struct device_attribute *ahci_shost_attrs[] = { 343static struct device_attribute *ahci_shost_attrs[] = {
333 &dev_attr_link_power_management_policy, 344 &dev_attr_link_power_management_policy,
334 &dev_attr_em_message_type, 345 &dev_attr_em_message_type,
335 &dev_attr_em_message, 346 &dev_attr_em_message,
347 &dev_attr_ahci_host_caps,
348 &dev_attr_ahci_host_version,
349 &dev_attr_ahci_port_cmd,
336 NULL 350 NULL
337}; 351};
338 352
@@ -539,6 +553,12 @@ static const struct pci_device_id ahci_pci_tbl[] = {
539 { PCI_VDEVICE(ATI, 0x4394), board_ahci_sb700 }, /* ATI SB700/800 */ 553 { PCI_VDEVICE(ATI, 0x4394), board_ahci_sb700 }, /* ATI SB700/800 */
540 { PCI_VDEVICE(ATI, 0x4395), board_ahci_sb700 }, /* ATI SB700/800 */ 554 { PCI_VDEVICE(ATI, 0x4395), board_ahci_sb700 }, /* ATI SB700/800 */
541 555
556 /* AMD */
557 { PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD SB900 */
558 /* AMD is using RAID class only for ahci controllers */
559 { PCI_VENDOR_ID_AMD, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
560 PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci },
561
542 /* VIA */ 562 /* VIA */
543 { PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */ 563 { PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */
544 { PCI_VDEVICE(VIA, 0x6287), board_ahci_vt8251 }, /* VIA VT8251 */ 564 { PCI_VDEVICE(VIA, 0x6287), board_ahci_vt8251 }, /* VIA VT8251 */
@@ -702,6 +722,36 @@ static void ahci_enable_ahci(void __iomem *mmio)
702 WARN_ON(1); 722 WARN_ON(1);
703} 723}
704 724
725static ssize_t ahci_show_host_caps(struct device *dev,
726 struct device_attribute *attr, char *buf)
727{
728 struct Scsi_Host *shost = class_to_shost(dev);
729 struct ata_port *ap = ata_shost_to_port(shost);
730 struct ahci_host_priv *hpriv = ap->host->private_data;
731
732 return sprintf(buf, "%x\n", hpriv->cap);
733}
734
735static ssize_t ahci_show_host_version(struct device *dev,
736 struct device_attribute *attr, char *buf)
737{
738 struct Scsi_Host *shost = class_to_shost(dev);
739 struct ata_port *ap = ata_shost_to_port(shost);
740 void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
741
742 return sprintf(buf, "%x\n", readl(mmio + HOST_VERSION));
743}
744
745static ssize_t ahci_show_port_cmd(struct device *dev,
746 struct device_attribute *attr, char *buf)
747{
748 struct Scsi_Host *shost = class_to_shost(dev);
749 struct ata_port *ap = ata_shost_to_port(shost);
750 void __iomem *port_mmio = ahci_port_base(ap);
751
752 return sprintf(buf, "%x\n", readl(port_mmio + PORT_CMD));
753}
754
705/** 755/**
706 * ahci_save_initial_config - Save and fixup initial config values 756 * ahci_save_initial_config - Save and fixup initial config values
707 * @pdev: target PCI device 757 * @pdev: target PCI device
@@ -1584,7 +1634,7 @@ static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
1584 pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16); 1634 pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
1585} 1635}
1586 1636
1587static int ahci_kick_engine(struct ata_port *ap, int force_restart) 1637static int ahci_kick_engine(struct ata_port *ap)
1588{ 1638{
1589 void __iomem *port_mmio = ahci_port_base(ap); 1639 void __iomem *port_mmio = ahci_port_base(ap);
1590 struct ahci_host_priv *hpriv = ap->host->private_data; 1640 struct ahci_host_priv *hpriv = ap->host->private_data;
@@ -1592,18 +1642,16 @@ static int ahci_kick_engine(struct ata_port *ap, int force_restart)
1592 u32 tmp; 1642 u32 tmp;
1593 int busy, rc; 1643 int busy, rc;
1594 1644
1595 /* do we need to kick the port? */
1596 busy = status & (ATA_BUSY | ATA_DRQ);
1597 if (!busy && !force_restart)
1598 return 0;
1599
1600 /* stop engine */ 1645 /* stop engine */
1601 rc = ahci_stop_engine(ap); 1646 rc = ahci_stop_engine(ap);
1602 if (rc) 1647 if (rc)
1603 goto out_restart; 1648 goto out_restart;
1604 1649
1605 /* need to do CLO? */ 1650 /* need to do CLO?
1606 if (!busy) { 1651 * always do CLO if PMP is attached (AHCI-1.3 9.2)
1652 */
1653 busy = status & (ATA_BUSY | ATA_DRQ);
1654 if (!busy && !sata_pmp_attached(ap)) {
1607 rc = 0; 1655 rc = 0;
1608 goto out_restart; 1656 goto out_restart;
1609 } 1657 }
@@ -1651,7 +1699,7 @@ static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
1651 tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1, 1699 tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1,
1652 1, timeout_msec); 1700 1, timeout_msec);
1653 if (tmp & 0x1) { 1701 if (tmp & 0x1) {
1654 ahci_kick_engine(ap, 1); 1702 ahci_kick_engine(ap);
1655 return -EBUSY; 1703 return -EBUSY;
1656 } 1704 }
1657 } else 1705 } else
@@ -1674,7 +1722,7 @@ static int ahci_do_softreset(struct ata_link *link, unsigned int *class,
1674 DPRINTK("ENTER\n"); 1722 DPRINTK("ENTER\n");
1675 1723
1676 /* prepare for SRST (AHCI-1.1 10.4.1) */ 1724 /* prepare for SRST (AHCI-1.1 10.4.1) */
1677 rc = ahci_kick_engine(ap, 1); 1725 rc = ahci_kick_engine(ap);
1678 if (rc && rc != -EOPNOTSUPP) 1726 if (rc && rc != -EOPNOTSUPP)
1679 ata_link_printk(link, KERN_WARNING, 1727 ata_link_printk(link, KERN_WARNING,
1680 "failed to reset engine (errno=%d)\n", rc); 1728 "failed to reset engine (errno=%d)\n", rc);
@@ -1890,7 +1938,7 @@ static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
1890 rc = ata_wait_after_reset(link, jiffies + 2 * HZ, 1938 rc = ata_wait_after_reset(link, jiffies + 2 * HZ,
1891 ahci_check_ready); 1939 ahci_check_ready);
1892 if (rc) 1940 if (rc)
1893 ahci_kick_engine(ap, 0); 1941 ahci_kick_engine(ap);
1894 } 1942 }
1895 return rc; 1943 return rc;
1896} 1944}
@@ -2271,7 +2319,7 @@ static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
2271 2319
2272 /* make DMA engine forget about the failed command */ 2320 /* make DMA engine forget about the failed command */
2273 if (qc->flags & ATA_QCFLAG_FAILED) 2321 if (qc->flags & ATA_QCFLAG_FAILED)
2274 ahci_kick_engine(ap, 1); 2322 ahci_kick_engine(ap);
2275} 2323}
2276 2324
2277static void ahci_pmp_attach(struct ata_port *ap) 2325static void ahci_pmp_attach(struct ata_port *ap)
@@ -2603,14 +2651,18 @@ static void ahci_p5wdh_workaround(struct ata_host *host)
2603} 2651}
2604 2652
2605/* 2653/*
2606 * SB600 ahci controller on ASUS M2A-VM can't do 64bit DMA with older 2654 * SB600 ahci controller on certain boards can't do 64bit DMA with
2607 * BIOS. The oldest version known to be broken is 0901 and working is 2655 * older BIOS.
2608 * 1501 which was released on 2007-10-26. Force 32bit DMA on anything
2609 * older than 1501. Please read bko#9412 for more info.
2610 */ 2656 */
2611static bool ahci_asus_m2a_vm_32bit_only(struct pci_dev *pdev) 2657static bool ahci_sb600_32bit_only(struct pci_dev *pdev)
2612{ 2658{
2613 static const struct dmi_system_id sysids[] = { 2659 static const struct dmi_system_id sysids[] = {
2660 /*
2661 * The oldest version known to be broken is 0901 and
2662 * working is 1501 which was released on 2007-10-26.
2663 * Force 32bit DMA on anything older than 1501.
2664 * Please read bko#9412 for more info.
2665 */
2614 { 2666 {
2615 .ident = "ASUS M2A-VM", 2667 .ident = "ASUS M2A-VM",
2616 .matches = { 2668 .matches = {
@@ -2618,31 +2670,48 @@ static bool ahci_asus_m2a_vm_32bit_only(struct pci_dev *pdev)
2618 "ASUSTeK Computer INC."), 2670 "ASUSTeK Computer INC."),
2619 DMI_MATCH(DMI_BOARD_NAME, "M2A-VM"), 2671 DMI_MATCH(DMI_BOARD_NAME, "M2A-VM"),
2620 }, 2672 },
2673 .driver_data = "20071026", /* yyyymmdd */
2674 },
2675 /*
2676 * It's yet unknown whether more recent BIOS fixes the
2677 * problem. Blacklist the whole board for the time
2678 * being. Please read the following thread for more
2679 * info.
2680 *
2681 * http://thread.gmane.org/gmane.linux.ide/42326
2682 */
2683 {
2684 .ident = "Gigabyte GA-MA69VM-S2",
2685 .matches = {
2686 DMI_MATCH(DMI_BOARD_VENDOR,
2687 "Gigabyte Technology Co., Ltd."),
2688 DMI_MATCH(DMI_BOARD_NAME, "GA-MA69VM-S2"),
2689 },
2621 }, 2690 },
2622 { } 2691 { }
2623 }; 2692 };
2624 const char *cutoff_mmdd = "10/26"; 2693 const struct dmi_system_id *match;
2625 const char *date;
2626 int year;
2627 2694
2695 match = dmi_first_match(sysids);
2628 if (pdev->bus->number != 0 || pdev->devfn != PCI_DEVFN(0x12, 0) || 2696 if (pdev->bus->number != 0 || pdev->devfn != PCI_DEVFN(0x12, 0) ||
2629 !dmi_check_system(sysids)) 2697 !match)
2630 return false; 2698 return false;
2631 2699
2632 /* 2700 if (match->driver_data) {
2633 * Argh.... both version and date are free form strings. 2701 int year, month, date;
2634 * Let's hope they're using the same date format across 2702 char buf[9];
2635 * different versions. 2703
2636 */ 2704 dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
2637 date = dmi_get_system_info(DMI_BIOS_DATE); 2705 snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
2638 year = dmi_get_year(DMI_BIOS_DATE);
2639 if (date && strlen(date) >= 10 && date[2] == '/' && date[5] == '/' &&
2640 (year > 2007 ||
2641 (year == 2007 && strncmp(date, cutoff_mmdd, 5) >= 0)))
2642 return false;
2643 2706
2644 dev_printk(KERN_WARNING, &pdev->dev, "ASUS M2A-VM: BIOS too old, " 2707 if (strcmp(buf, match->driver_data) >= 0)
2645 "forcing 32bit DMA, update BIOS\n"); 2708 return false;
2709
2710 dev_printk(KERN_WARNING, &pdev->dev, "%s: BIOS too old, "
2711 "forcing 32bit DMA, update BIOS\n", match->ident);
2712 } else
2713 dev_printk(KERN_WARNING, &pdev->dev, "%s: this board can't "
2714 "do 64bit DMA, forcing 32bit\n", match->ident);
2646 2715
2647 return true; 2716 return true;
2648} 2717}
@@ -2857,8 +2926,8 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2857 if (board_id == board_ahci_sb700 && pdev->revision >= 0x40) 2926 if (board_id == board_ahci_sb700 && pdev->revision >= 0x40)
2858 hpriv->flags &= ~AHCI_HFLAG_IGN_SERR_INTERNAL; 2927 hpriv->flags &= ~AHCI_HFLAG_IGN_SERR_INTERNAL;
2859 2928
2860 /* apply ASUS M2A_VM quirk */ 2929 /* apply sb600 32bit only quirk */
2861 if (ahci_asus_m2a_vm_32bit_only(pdev)) 2930 if (ahci_sb600_32bit_only(pdev))
2862 hpriv->flags |= AHCI_HFLAG_32BIT_ONLY; 2931 hpriv->flags |= AHCI_HFLAG_32BIT_ONLY;
2863 2932
2864 if (!(hpriv->flags & AHCI_HFLAG_NO_MSI)) 2933 if (!(hpriv->flags & AHCI_HFLAG_NO_MSI))
@@ -2869,7 +2938,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2869 2938
2870 /* prepare host */ 2939 /* prepare host */
2871 if (hpriv->cap & HOST_CAP_NCQ) 2940 if (hpriv->cap & HOST_CAP_NCQ)
2872 pi.flags |= ATA_FLAG_NCQ; 2941 pi.flags |= ATA_FLAG_NCQ | ATA_FLAG_FPDMA_AA;
2873 2942
2874 if (hpriv->cap & HOST_CAP_PMP) 2943 if (hpriv->cap & HOST_CAP_PMP)
2875 pi.flags |= ATA_FLAG_PMP; 2944 pi.flags |= ATA_FLAG_PMP;
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index ac176da1f94e..01964b6e6f6b 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -689,6 +689,7 @@ static int ata_acpi_run_tf(struct ata_device *dev,
689 struct ata_taskfile tf, ptf, rtf; 689 struct ata_taskfile tf, ptf, rtf;
690 unsigned int err_mask; 690 unsigned int err_mask;
691 const char *level; 691 const char *level;
692 const char *descr;
692 char msg[60]; 693 char msg[60];
693 int rc; 694 int rc;
694 695
@@ -736,11 +737,13 @@ static int ata_acpi_run_tf(struct ata_device *dev,
736 snprintf(msg, sizeof(msg), "filtered out"); 737 snprintf(msg, sizeof(msg), "filtered out");
737 rc = 0; 738 rc = 0;
738 } 739 }
740 descr = ata_get_cmd_descript(tf.command);
739 741
740 ata_dev_printk(dev, level, 742 ata_dev_printk(dev, level,
741 "ACPI cmd %02x/%02x:%02x:%02x:%02x:%02x:%02x %s\n", 743 "ACPI cmd %02x/%02x:%02x:%02x:%02x:%02x:%02x (%s) %s\n",
742 tf.command, tf.feature, tf.nsect, tf.lbal, 744 tf.command, tf.feature, tf.nsect, tf.lbal,
743 tf.lbam, tf.lbah, tf.device, msg); 745 tf.lbam, tf.lbah, tf.device,
746 (descr ? descr : "unknown"), msg);
744 747
745 return rc; 748 return rc;
746} 749}
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 072ba5ea138f..df31deac5c82 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -709,7 +709,13 @@ u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
709 head = tf->device & 0xf; 709 head = tf->device & 0xf;
710 sect = tf->lbal; 710 sect = tf->lbal;
711 711
712 block = (cyl * dev->heads + head) * dev->sectors + sect; 712 if (!sect) {
713 ata_dev_printk(dev, KERN_WARNING, "device reported "
714 "invalid CHS sector 0\n");
715 sect = 1; /* oh well */
716 }
717
718 block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
713 } 719 }
714 720
715 return block; 721 return block;
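The hunk above fixes an off-by-one in the CHS fallback path: CHS sector numbers are 1-based, so C/H/S (0, 0, 1) is the very first sector and must map to LBA 0, hence the new "+ sect - 1" (plus a warning and clamp for devices that report an invalid sector 0). A self-contained sketch of the corrected conversion; the 16-head, 63-sector geometry is only an example, not from the patch:

#include <stdio.h>

/* Illustrative only: corrected CHS -> LBA conversion with 1-based sectors. */
static unsigned long long chs_to_lba(unsigned int cyl, unsigned int head,
				     unsigned int sect, unsigned int heads,
				     unsigned int sectors)
{
	/* sect counts from 1, so subtract 1 to make C/H/S (0, 0, 1) -> LBA 0 */
	return ((unsigned long long)cyl * heads + head) * sectors + sect - 1;
}

int main(void)
{
	/* hypothetical 16-head, 63-sector geometry */
	printf("%llu\n", chs_to_lba(0, 0, 1, 16, 63));	/* 0    */
	printf("%llu\n", chs_to_lba(0, 1, 1, 16, 63));	/* 63   */
	printf("%llu\n", chs_to_lba(1, 0, 1, 16, 63));	/* 1008 */
	return 0;
}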
@@ -2299,29 +2305,49 @@ static inline u8 ata_dev_knobble(struct ata_device *dev)
2299 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id))); 2305 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2300} 2306}
2301 2307
2302static void ata_dev_config_ncq(struct ata_device *dev, 2308static int ata_dev_config_ncq(struct ata_device *dev,
2303 char *desc, size_t desc_sz) 2309 char *desc, size_t desc_sz)
2304{ 2310{
2305 struct ata_port *ap = dev->link->ap; 2311 struct ata_port *ap = dev->link->ap;
2306 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id); 2312 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2313 unsigned int err_mask;
2314 char *aa_desc = "";
2307 2315
2308 if (!ata_id_has_ncq(dev->id)) { 2316 if (!ata_id_has_ncq(dev->id)) {
2309 desc[0] = '\0'; 2317 desc[0] = '\0';
2310 return; 2318 return 0;
2311 } 2319 }
2312 if (dev->horkage & ATA_HORKAGE_NONCQ) { 2320 if (dev->horkage & ATA_HORKAGE_NONCQ) {
2313 snprintf(desc, desc_sz, "NCQ (not used)"); 2321 snprintf(desc, desc_sz, "NCQ (not used)");
2314 return; 2322 return 0;
2315 } 2323 }
2316 if (ap->flags & ATA_FLAG_NCQ) { 2324 if (ap->flags & ATA_FLAG_NCQ) {
2317 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1); 2325 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
2318 dev->flags |= ATA_DFLAG_NCQ; 2326 dev->flags |= ATA_DFLAG_NCQ;
2319 } 2327 }
2320 2328
2329 if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
2330 (ap->flags & ATA_FLAG_FPDMA_AA) &&
2331 ata_id_has_fpdma_aa(dev->id)) {
2332 err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
2333 SATA_FPDMA_AA);
2334 if (err_mask) {
2335 ata_dev_printk(dev, KERN_ERR, "failed to enable AA"
2336 "(error_mask=0x%x)\n", err_mask);
2337 if (err_mask != AC_ERR_DEV) {
2338 dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
2339 return -EIO;
2340 }
2341 } else
2342 aa_desc = ", AA";
2343 }
2344
2321 if (hdepth >= ddepth) 2345 if (hdepth >= ddepth)
2322 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth); 2346 snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
2323 else 2347 else
2324 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth); 2348 snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
2349 ddepth, aa_desc);
2350 return 0;
2325} 2351}
2326 2352
2327/** 2353/**
@@ -2461,7 +2487,7 @@ int ata_dev_configure(struct ata_device *dev)
2461 2487
2462 if (ata_id_has_lba(id)) { 2488 if (ata_id_has_lba(id)) {
2463 const char *lba_desc; 2489 const char *lba_desc;
2464 char ncq_desc[20]; 2490 char ncq_desc[24];
2465 2491
2466 lba_desc = "LBA"; 2492 lba_desc = "LBA";
2467 dev->flags |= ATA_DFLAG_LBA; 2493 dev->flags |= ATA_DFLAG_LBA;
@@ -2475,7 +2501,9 @@ int ata_dev_configure(struct ata_device *dev)
2475 } 2501 }
2476 2502
2477 /* config NCQ */ 2503 /* config NCQ */
2478 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc)); 2504 rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2505 if (rc)
2506 return rc;
2479 2507
2480 /* print device info to dmesg */ 2508 /* print device info to dmesg */
2481 if (ata_msg_drv(ap) && print_info) { 2509 if (ata_msg_drv(ap) && print_info) {
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 79711b64054b..a04488f0de88 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -40,6 +40,7 @@
40#include <scsi/scsi_eh.h> 40#include <scsi/scsi_eh.h>
41#include <scsi/scsi_device.h> 41#include <scsi/scsi_device.h>
42#include <scsi/scsi_cmnd.h> 42#include <scsi/scsi_cmnd.h>
43#include <scsi/scsi_dbg.h>
43#include "../scsi/scsi_transport_api.h" 44#include "../scsi/scsi_transport_api.h"
44 45
45#include <linux/libata.h> 46#include <linux/libata.h>
@@ -999,7 +1000,9 @@ static void __ata_port_freeze(struct ata_port *ap)
999 * ata_port_freeze - abort & freeze port 1000 * ata_port_freeze - abort & freeze port
1000 * @ap: ATA port to freeze 1001 * @ap: ATA port to freeze
1001 * 1002 *
1002 * Abort and freeze @ap. 1003 * Abort and freeze @ap. The freeze operation must be called
1004 * first, because some hardware requires special operations
1005 * before the taskfile registers are accessible.
1003 * 1006 *
1004 * LOCKING: 1007 * LOCKING:
1005 * spin_lock_irqsave(host lock) 1008 * spin_lock_irqsave(host lock)
@@ -1013,8 +1016,8 @@ int ata_port_freeze(struct ata_port *ap)
1013 1016
1014 WARN_ON(!ap->ops->error_handler); 1017 WARN_ON(!ap->ops->error_handler);
1015 1018
1016 nr_aborted = ata_port_abort(ap);
1017 __ata_port_freeze(ap); 1019 __ata_port_freeze(ap);
1020 nr_aborted = ata_port_abort(ap);
1018 1021
1019 return nr_aborted; 1022 return nr_aborted;
1020} 1023}
@@ -2110,6 +2113,116 @@ void ata_eh_autopsy(struct ata_port *ap)
2110} 2113}
2111 2114
2112/** 2115/**
2116 * ata_get_cmd_descript - get description for ATA command
2117 * @command: ATA command code to get description for
2118 *
2119 * Return a textual description of the given command, or NULL if the
2120 * command is not known.
2121 *
2122 * LOCKING:
2123 * None
2124 */
2125const char *ata_get_cmd_descript(u8 command)
2126{
2127#ifdef CONFIG_ATA_VERBOSE_ERROR
2128 static const struct
2129 {
2130 u8 command;
2131 const char *text;
2132 } cmd_descr[] = {
2133 { ATA_CMD_DEV_RESET, "DEVICE RESET" },
2134 { ATA_CMD_CHK_POWER, "CHECK POWER MODE" },
2135 { ATA_CMD_STANDBY, "STANDBY" },
2136 { ATA_CMD_IDLE, "IDLE" },
2137 { ATA_CMD_EDD, "EXECUTE DEVICE DIAGNOSTIC" },
2138 { ATA_CMD_DOWNLOAD_MICRO, "DOWNLOAD MICROCODE" },
2139 { ATA_CMD_NOP, "NOP" },
2140 { ATA_CMD_FLUSH, "FLUSH CACHE" },
2141 { ATA_CMD_FLUSH_EXT, "FLUSH CACHE EXT" },
2142 { ATA_CMD_ID_ATA, "IDENTIFY DEVICE" },
2143 { ATA_CMD_ID_ATAPI, "IDENTIFY PACKET DEVICE" },
2144 { ATA_CMD_SERVICE, "SERVICE" },
2145 { ATA_CMD_READ, "READ DMA" },
2146 { ATA_CMD_READ_EXT, "READ DMA EXT" },
2147 { ATA_CMD_READ_QUEUED, "READ DMA QUEUED" },
2148 { ATA_CMD_READ_STREAM_EXT, "READ STREAM EXT" },
2149 { ATA_CMD_READ_STREAM_DMA_EXT, "READ STREAM DMA EXT" },
2150 { ATA_CMD_WRITE, "WRITE DMA" },
2151 { ATA_CMD_WRITE_EXT, "WRITE DMA EXT" },
2152 { ATA_CMD_WRITE_QUEUED, "WRITE DMA QUEUED EXT" },
2153 { ATA_CMD_WRITE_STREAM_EXT, "WRITE STREAM EXT" },
2154 { ATA_CMD_WRITE_STREAM_DMA_EXT, "WRITE STREAM DMA EXT" },
2155 { ATA_CMD_WRITE_FUA_EXT, "WRITE DMA FUA EXT" },
2156 { ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" },
2157 { ATA_CMD_FPDMA_READ, "READ FPDMA QUEUED" },
2158 { ATA_CMD_FPDMA_WRITE, "WRITE FPDMA QUEUED" },
2159 { ATA_CMD_PIO_READ, "READ SECTOR(S)" },
2160 { ATA_CMD_PIO_READ_EXT, "READ SECTOR(S) EXT" },
2161 { ATA_CMD_PIO_WRITE, "WRITE SECTOR(S)" },
2162 { ATA_CMD_PIO_WRITE_EXT, "WRITE SECTOR(S) EXT" },
2163 { ATA_CMD_READ_MULTI, "READ MULTIPLE" },
2164 { ATA_CMD_READ_MULTI_EXT, "READ MULTIPLE EXT" },
2165 { ATA_CMD_WRITE_MULTI, "WRITE MULTIPLE" },
2166 { ATA_CMD_WRITE_MULTI_EXT, "WRITE MULTIPLE EXT" },
2167 { ATA_CMD_WRITE_MULTI_FUA_EXT, "WRITE MULTIPLE FUA EXT" },
2168 { ATA_CMD_SET_FEATURES, "SET FEATURES" },
2169 { ATA_CMD_SET_MULTI, "SET MULTIPLE MODE" },
2170 { ATA_CMD_VERIFY, "READ VERIFY SECTOR(S)" },
2171 { ATA_CMD_VERIFY_EXT, "READ VERIFY SECTOR(S) EXT" },
2172 { ATA_CMD_WRITE_UNCORR_EXT, "WRITE UNCORRECTABLE EXT" },
2173 { ATA_CMD_STANDBYNOW1, "STANDBY IMMEDIATE" },
2174 { ATA_CMD_IDLEIMMEDIATE, "IDLE IMMEDIATE" },
2175 { ATA_CMD_SLEEP, "SLEEP" },
2176 { ATA_CMD_INIT_DEV_PARAMS, "INITIALIZE DEVICE PARAMETERS" },
2177 { ATA_CMD_READ_NATIVE_MAX, "READ NATIVE MAX ADDRESS" },
2178 { ATA_CMD_READ_NATIVE_MAX_EXT, "READ NATIVE MAX ADDRESS EXT" },
2179 { ATA_CMD_SET_MAX, "SET MAX ADDRESS" },
2180 { ATA_CMD_SET_MAX_EXT, "SET MAX ADDRESS EXT" },
2181 { ATA_CMD_READ_LOG_EXT, "READ LOG EXT" },
2182 { ATA_CMD_WRITE_LOG_EXT, "WRITE LOG EXT" },
2183 { ATA_CMD_READ_LOG_DMA_EXT, "READ LOG DMA EXT" },
2184 { ATA_CMD_WRITE_LOG_DMA_EXT, "WRITE LOG DMA EXT" },
2185 { ATA_CMD_TRUSTED_RCV, "TRUSTED RECEIVE" },
2186 { ATA_CMD_TRUSTED_RCV_DMA, "TRUSTED RECEIVE DMA" },
2187 { ATA_CMD_TRUSTED_SND, "TRUSTED SEND" },
2188 { ATA_CMD_TRUSTED_SND_DMA, "TRUSTED SEND DMA" },
2189 { ATA_CMD_PMP_READ, "READ BUFFER" },
2190 { ATA_CMD_PMP_WRITE, "WRITE BUFFER" },
2191 { ATA_CMD_CONF_OVERLAY, "DEVICE CONFIGURATION OVERLAY" },
2192 { ATA_CMD_SEC_SET_PASS, "SECURITY SET PASSWORD" },
2193 { ATA_CMD_SEC_UNLOCK, "SECURITY UNLOCK" },
2194 { ATA_CMD_SEC_ERASE_PREP, "SECURITY ERASE PREPARE" },
2195 { ATA_CMD_SEC_ERASE_UNIT, "SECURITY ERASE UNIT" },
2196 { ATA_CMD_SEC_FREEZE_LOCK, "SECURITY FREEZE LOCK" },
2197 { ATA_CMD_SEC_DISABLE_PASS, "SECURITY DISABLE PASSWORD" },
2198 { ATA_CMD_CONFIG_STREAM, "CONFIGURE STREAM" },
2199 { ATA_CMD_SMART, "SMART" },
2200 { ATA_CMD_MEDIA_LOCK, "DOOR LOCK" },
2201 { ATA_CMD_MEDIA_UNLOCK, "DOOR UNLOCK" },
2202 { ATA_CMD_CHK_MED_CRD_TYP, "CHECK MEDIA CARD TYPE" },
2203 { ATA_CMD_CFA_REQ_EXT_ERR, "CFA REQUEST EXTENDED ERROR" },
2204 { ATA_CMD_CFA_WRITE_NE, "CFA WRITE SECTORS WITHOUT ERASE" },
2205 { ATA_CMD_CFA_TRANS_SECT, "CFA TRANSLATE SECTOR" },
2206 { ATA_CMD_CFA_ERASE, "CFA ERASE SECTORS" },
2207 { ATA_CMD_CFA_WRITE_MULT_NE, "CFA WRITE MULTIPLE WITHOUT ERASE" },
2208 { ATA_CMD_READ_LONG, "READ LONG (with retries)" },
2209 { ATA_CMD_READ_LONG_ONCE, "READ LONG (without retries)" },
2210 { ATA_CMD_WRITE_LONG, "WRITE LONG (with retries)" },
2211 { ATA_CMD_WRITE_LONG_ONCE, "WRITE LONG (without retries)" },
2212 { ATA_CMD_RESTORE, "RECALIBRATE" },
2213 { 0, NULL } /* terminate list */
2214 };
2215
2216 unsigned int i;
2217 for (i = 0; cmd_descr[i].text; i++)
2218 if (cmd_descr[i].command == command)
2219 return cmd_descr[i].text;
2220#endif
2221
2222 return NULL;
2223}
2224
2225/**
2113 * ata_eh_link_report - report error handling to user 2226 * ata_eh_link_report - report error handling to user
2114 * @link: ATA link EH is going on 2227 * @link: ATA link EH is going on
2115 * 2228 *
@@ -2175,6 +2288,7 @@ static void ata_eh_link_report(struct ata_link *link)
2175 ata_link_printk(link, KERN_ERR, "%s\n", desc); 2288 ata_link_printk(link, KERN_ERR, "%s\n", desc);
2176 } 2289 }
2177 2290
2291#ifdef CONFIG_ATA_VERBOSE_ERROR
2178 if (ehc->i.serror) 2292 if (ehc->i.serror)
2179 ata_link_printk(link, KERN_ERR, 2293 ata_link_printk(link, KERN_ERR,
2180 "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n", 2294 "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n",
@@ -2195,6 +2309,7 @@ static void ata_eh_link_report(struct ata_link *link)
2195 ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "", 2309 ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "",
2196 ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "", 2310 ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "",
2197 ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : ""); 2311 ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "");
2312#endif
2198 2313
2199 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 2314 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2200 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag); 2315 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
@@ -2226,14 +2341,23 @@ static void ata_eh_link_report(struct ata_link *link)
2226 dma_str[qc->dma_dir]); 2341 dma_str[qc->dma_dir]);
2227 } 2342 }
2228 2343
2229 if (ata_is_atapi(qc->tf.protocol)) 2344 if (ata_is_atapi(qc->tf.protocol)) {
2230 snprintf(cdb_buf, sizeof(cdb_buf), 2345 if (qc->scsicmd)
2346 scsi_print_command(qc->scsicmd);
2347 else
2348 snprintf(cdb_buf, sizeof(cdb_buf),
2231 "cdb %02x %02x %02x %02x %02x %02x %02x %02x " 2349 "cdb %02x %02x %02x %02x %02x %02x %02x %02x "
2232 "%02x %02x %02x %02x %02x %02x %02x %02x\n ", 2350 "%02x %02x %02x %02x %02x %02x %02x %02x\n ",
2233 cdb[0], cdb[1], cdb[2], cdb[3], 2351 cdb[0], cdb[1], cdb[2], cdb[3],
2234 cdb[4], cdb[5], cdb[6], cdb[7], 2352 cdb[4], cdb[5], cdb[6], cdb[7],
2235 cdb[8], cdb[9], cdb[10], cdb[11], 2353 cdb[8], cdb[9], cdb[10], cdb[11],
2236 cdb[12], cdb[13], cdb[14], cdb[15]); 2354 cdb[12], cdb[13], cdb[14], cdb[15]);
2355 } else {
2356 const char *descr = ata_get_cmd_descript(cmd->command);
2357 if (descr)
2358 ata_dev_printk(qc->dev, KERN_ERR,
2359 "failed command: %s\n", descr);
2360 }
2237 2361
2238 ata_dev_printk(qc->dev, KERN_ERR, 2362 ata_dev_printk(qc->dev, KERN_ERR,
2239 "cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x " 2363 "cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
@@ -2252,6 +2376,7 @@ static void ata_eh_link_report(struct ata_link *link)
2252 res->device, qc->err_mask, ata_err_string(qc->err_mask), 2376 res->device, qc->err_mask, ata_err_string(qc->err_mask),
2253 qc->err_mask & AC_ERR_NCQ ? " <F>" : ""); 2377 qc->err_mask & AC_ERR_NCQ ? " <F>" : "");
2254 2378
2379#ifdef CONFIG_ATA_VERBOSE_ERROR
2255 if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | 2380 if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
2256 ATA_ERR)) { 2381 ATA_ERR)) {
2257 if (res->command & ATA_BUSY) 2382 if (res->command & ATA_BUSY)
@@ -2275,6 +2400,7 @@ static void ata_eh_link_report(struct ata_link *link)
2275 res->feature & ATA_UNC ? "UNC " : "", 2400 res->feature & ATA_UNC ? "UNC " : "",
2276 res->feature & ATA_IDNF ? "IDNF " : "", 2401 res->feature & ATA_IDNF ? "IDNF " : "",
2277 res->feature & ATA_ABORTED ? "ABRT " : ""); 2402 res->feature & ATA_ABORTED ? "ABRT " : "");
2403#endif
2278 } 2404 }
2279} 2405}
2280 2406
@@ -2574,11 +2700,17 @@ int ata_eh_reset(struct ata_link *link, int classify,
2574 postreset(slave, classes); 2700 postreset(slave, classes);
2575 } 2701 }
2576 2702
2577 /* clear cached SError */ 2703 /*
2704 * Some controllers can't be frozen very well and may set
 2705 * spurious error conditions during reset. Clear accumulated
 2706 * error information. As reset is the final recovery action,
 2707 * nothing is lost by doing this.
2708 */
2578 spin_lock_irqsave(link->ap->lock, flags); 2709 spin_lock_irqsave(link->ap->lock, flags);
2579 link->eh_info.serror = 0; 2710 memset(&link->eh_info, 0, sizeof(link->eh_info));
2580 if (slave) 2711 if (slave)
 2581 slave->eh_info.serror = 0; 2712 memset(&slave->eh_info, 0, sizeof(slave->eh_info));
2713 ap->pflags &= ~ATA_PFLAG_EH_PENDING;
2582 spin_unlock_irqrestore(link->ap->lock, flags); 2714 spin_unlock_irqrestore(link->ap->lock, flags);
2583 2715
2584 /* Make sure onlineness and classification result correspond. 2716 /* Make sure onlineness and classification result correspond.
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index 619f2c33950e..51f0ffb78cbd 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -221,6 +221,8 @@ static const char *sata_pmp_spec_rev_str(const u32 *gscr)
221{ 221{
222 u32 rev = gscr[SATA_PMP_GSCR_REV]; 222 u32 rev = gscr[SATA_PMP_GSCR_REV];
223 223
224 if (rev & (1 << 3))
225 return "1.2";
224 if (rev & (1 << 2)) 226 if (rev & (1 << 2))
225 return "1.1"; 227 return "1.1";
226 if (rev & (1 << 1)) 228 if (rev & (1 << 1))
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index d0dfeef55db5..b4ee28dec521 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1119,10 +1119,6 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
1119 1119
1120 blk_queue_dma_drain(q, atapi_drain_needed, buf, ATAPI_MAX_DRAIN); 1120 blk_queue_dma_drain(q, atapi_drain_needed, buf, ATAPI_MAX_DRAIN);
1121 } else { 1121 } else {
1122 if (ata_id_is_ssd(dev->id))
1123 queue_flag_set_unlocked(QUEUE_FLAG_NONROT,
1124 sdev->request_queue);
1125
1126 /* ATA devices must be sector aligned */ 1122 /* ATA devices must be sector aligned */
1127 blk_queue_update_dma_alignment(sdev->request_queue, 1123 blk_queue_update_dma_alignment(sdev->request_queue,
1128 ATA_SECT_SIZE - 1); 1124 ATA_SECT_SIZE - 1);
@@ -1257,23 +1253,6 @@ int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth)
1257 return queue_depth; 1253 return queue_depth;
1258} 1254}
1259 1255
1260/* XXX: for spindown warning */
1261static void ata_delayed_done_timerfn(unsigned long arg)
1262{
1263 struct scsi_cmnd *scmd = (void *)arg;
1264
1265 scmd->scsi_done(scmd);
1266}
1267
1268/* XXX: for spindown warning */
1269static void ata_delayed_done(struct scsi_cmnd *scmd)
1270{
1271 static struct timer_list timer;
1272
1273 setup_timer(&timer, ata_delayed_done_timerfn, (unsigned long)scmd);
1274 mod_timer(&timer, jiffies + 5 * HZ);
1275}
1276
1277/** 1256/**
1278 * ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command 1257 * ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command
1279 * @qc: Storage for translated ATA taskfile 1258 * @qc: Storage for translated ATA taskfile
@@ -1338,32 +1317,6 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc)
1338 system_entering_hibernation()) 1317 system_entering_hibernation())
1339 goto skip; 1318 goto skip;
1340 1319
1341 /* XXX: This is for backward compatibility, will be
1342 * removed. Read Documentation/feature-removal-schedule.txt
1343 * for more info.
1344 */
1345 if ((qc->dev->flags & ATA_DFLAG_SPUNDOWN) &&
1346 (system_state == SYSTEM_HALT ||
1347 system_state == SYSTEM_POWER_OFF)) {
1348 static unsigned long warned;
1349
1350 if (!test_and_set_bit(0, &warned)) {
1351 ata_dev_printk(qc->dev, KERN_WARNING,
1352 "DISK MIGHT NOT BE SPUN DOWN PROPERLY. "
1353 "UPDATE SHUTDOWN UTILITY\n");
1354 ata_dev_printk(qc->dev, KERN_WARNING,
1355 "For more info, visit "
1356 "http://linux-ata.org/shutdown.html\n");
1357
1358 /* ->scsi_done is not used, use it for
1359 * delayed completion.
1360 */
1361 scmd->scsi_done = qc->scsidone;
1362 qc->scsidone = ata_delayed_done;
1363 }
1364 goto skip;
1365 }
1366
1367 /* Issue ATA STANDBY IMMEDIATE command */ 1320 /* Issue ATA STANDBY IMMEDIATE command */
1368 tf->command = ATA_CMD_STANDBYNOW1; 1321 tf->command = ATA_CMD_STANDBYNOW1;
1369 } 1322 }
@@ -1764,14 +1717,6 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
1764 } 1717 }
1765 } 1718 }
1766 1719
1767 /* XXX: track spindown state for spindown skipping and warning */
1768 if (unlikely(qc->tf.command == ATA_CMD_STANDBY ||
1769 qc->tf.command == ATA_CMD_STANDBYNOW1))
1770 qc->dev->flags |= ATA_DFLAG_SPUNDOWN;
1771 else if (likely(system_state != SYSTEM_HALT &&
1772 system_state != SYSTEM_POWER_OFF))
1773 qc->dev->flags &= ~ATA_DFLAG_SPUNDOWN;
1774
1775 if (need_sense && !ap->ops->error_handler) 1720 if (need_sense && !ap->ops->error_handler)
1776 ata_dump_status(ap->print_id, &qc->result_tf); 1721 ata_dump_status(ap->print_id, &qc->result_tf);
1777 1722
@@ -2815,28 +2760,6 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
2815 goto invalid_fld; 2760 goto invalid_fld;
2816 2761
2817 /* 2762 /*
2818 * Filter TPM commands by default. These provide an
2819 * essentially uncontrolled encrypted "back door" between
2820 * applications and the disk. Set libata.allow_tpm=1 if you
2821 * have a real reason for wanting to use them. This ensures
2822 * that installed software cannot easily mess stuff up without
2823 * user intent. DVR type users will probably ship with this enabled
2824 * for movie content management.
2825 *
2826 * Note that for ATA8 we can issue a DCS change and DCS freeze lock
2827 * for this and should do in future but that it is not sufficient as
2828 * DCS is an optional feature set. Thus we also do the software filter
2829 * so that we comply with the TC consortium stated goal that the user
2830 * can turn off TC features of their system.
2831 */
2832 if (tf->command >= 0x5C && tf->command <= 0x5F && !libata_allow_tpm)
2833 goto invalid_fld;
2834
2835 /* We may not issue DMA commands if no DMA mode is set */
2836 if (tf->protocol == ATA_PROT_DMA && dev->dma_mode == 0)
2837 goto invalid_fld;
2838
2839 /*
2840 * 12 and 16 byte CDBs use different offsets to 2763 * 12 and 16 byte CDBs use different offsets to
2841 * provide the various register values. 2764 * provide the various register values.
2842 */ 2765 */
@@ -2885,6 +2808,41 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
2885 tf->device = dev->devno ? 2808 tf->device = dev->devno ?
2886 tf->device | ATA_DEV1 : tf->device & ~ATA_DEV1; 2809 tf->device | ATA_DEV1 : tf->device & ~ATA_DEV1;
2887 2810
2811 /* READ/WRITE LONG use a non-standard sect_size */
2812 qc->sect_size = ATA_SECT_SIZE;
2813 switch (tf->command) {
2814 case ATA_CMD_READ_LONG:
2815 case ATA_CMD_READ_LONG_ONCE:
2816 case ATA_CMD_WRITE_LONG:
2817 case ATA_CMD_WRITE_LONG_ONCE:
2818 if (tf->protocol != ATA_PROT_PIO || tf->nsect != 1)
2819 goto invalid_fld;
2820 qc->sect_size = scsi_bufflen(scmd);
2821 }
2822
2823 /*
2824 * Set flags so that all registers will be written, pass on
2825 * write indication (used for PIO/DMA setup), result TF is
2826 * copied back and we don't whine too much about its failure.
2827 */
2828 tf->flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2829 if (scmd->sc_data_direction == DMA_TO_DEVICE)
2830 tf->flags |= ATA_TFLAG_WRITE;
2831
2832 qc->flags |= ATA_QCFLAG_RESULT_TF | ATA_QCFLAG_QUIET;
2833
2834 /*
2835 * Set transfer length.
2836 *
2837 * TODO: find out if we need to do more here to
2838 * cover scatter/gather case.
2839 */
2840 ata_qc_set_pc_nbytes(qc);
2841
2842 /* We may not issue DMA commands if no DMA mode is set */
2843 if (tf->protocol == ATA_PROT_DMA && dev->dma_mode == 0)
2844 goto invalid_fld;
2845
2888 /* sanity check for pio multi commands */ 2846 /* sanity check for pio multi commands */
2889 if ((cdb[1] & 0xe0) && !is_multi_taskfile(tf)) 2847 if ((cdb[1] & 0xe0) && !is_multi_taskfile(tf))
2890 goto invalid_fld; 2848 goto invalid_fld;
@@ -2901,18 +2859,6 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
2901 multi_count); 2859 multi_count);
2902 } 2860 }
2903 2861
2904 /* READ/WRITE LONG use a non-standard sect_size */
2905 qc->sect_size = ATA_SECT_SIZE;
2906 switch (tf->command) {
2907 case ATA_CMD_READ_LONG:
2908 case ATA_CMD_READ_LONG_ONCE:
2909 case ATA_CMD_WRITE_LONG:
2910 case ATA_CMD_WRITE_LONG_ONCE:
2911 if (tf->protocol != ATA_PROT_PIO || tf->nsect != 1)
2912 goto invalid_fld;
2913 qc->sect_size = scsi_bufflen(scmd);
2914 }
2915
2916 /* 2862 /*
2917 * Filter SET_FEATURES - XFER MODE command -- otherwise, 2863 * Filter SET_FEATURES - XFER MODE command -- otherwise,
2918 * SET_FEATURES - XFER MODE must be preceded/succeeded 2864 * SET_FEATURES - XFER MODE must be preceded/succeeded
@@ -2920,30 +2866,27 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
2920 * controller (i.e. the reason for ->set_piomode(), 2866 * controller (i.e. the reason for ->set_piomode(),
2921 * ->set_dmamode(), and ->post_set_mode() hooks). 2867 * ->set_dmamode(), and ->post_set_mode() hooks).
2922 */ 2868 */
2923 if ((tf->command == ATA_CMD_SET_FEATURES) 2869 if (tf->command == ATA_CMD_SET_FEATURES &&
2924 && (tf->feature == SETFEATURES_XFER)) 2870 tf->feature == SETFEATURES_XFER)
2925 goto invalid_fld; 2871 goto invalid_fld;
2926 2872
2927 /* 2873 /*
2928 * Set flags so that all registers will be written, 2874 * Filter TPM commands by default. These provide an
2929 * and pass on write indication (used for PIO/DMA 2875 * essentially uncontrolled encrypted "back door" between
2930 * setup.) 2876 * applications and the disk. Set libata.allow_tpm=1 if you
2931 */ 2877 * have a real reason for wanting to use them. This ensures
2932 tf->flags |= (ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE); 2878 * that installed software cannot easily mess stuff up without
2933 2879 * user intent. DVR type users will probably ship with this enabled
2934 if (scmd->sc_data_direction == DMA_TO_DEVICE) 2880 * for movie content management.
2935 tf->flags |= ATA_TFLAG_WRITE;
2936
2937 /*
2938 * Set transfer length.
2939 * 2881 *
2940 * TODO: find out if we need to do more here to 2882 * Note that for ATA8 we can issue a DCS change and DCS freeze lock
2941 * cover scatter/gather case. 2883 * for this and should do in future but that it is not sufficient as
2884 * DCS is an optional feature set. Thus we also do the software filter
2885 * so that we comply with the TC consortium stated goal that the user
2886 * can turn off TC features of their system.
2942 */ 2887 */
2943 ata_qc_set_pc_nbytes(qc); 2888 if (tf->command >= 0x5C && tf->command <= 0x5F && !libata_allow_tpm)
2944 2889 goto invalid_fld;
2945 /* request result TF and be quiet about device error */
2946 qc->flags |= ATA_QCFLAG_RESULT_TF | ATA_QCFLAG_QUIET;
2947 2890
2948 return 0; 2891 return 0;
2949 2892
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 89a1e0018e71..be8e2628f82c 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -164,6 +164,7 @@ extern void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
164extern void ata_eh_done(struct ata_link *link, struct ata_device *dev, 164extern void ata_eh_done(struct ata_link *link, struct ata_device *dev,
165 unsigned int action); 165 unsigned int action);
166extern void ata_eh_autopsy(struct ata_port *ap); 166extern void ata_eh_autopsy(struct ata_port *ap);
167const char *ata_get_cmd_descript(u8 command);
167extern void ata_eh_report(struct ata_port *ap); 168extern void ata_eh_report(struct ata_port *ap);
168extern int ata_eh_reset(struct ata_link *link, int classify, 169extern int ata_eh_reset(struct ata_link *link, int classify,
169 ata_prereset_fn_t prereset, ata_reset_fn_t softreset, 170 ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index 45915566e4e9..aa4b3f6ae771 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -246,6 +246,7 @@ static const struct pci_device_id atiixp[] = {
246 { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP400_IDE), }, 246 { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP400_IDE), },
247 { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP600_IDE), }, 247 { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP600_IDE), },
248 { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP700_IDE), }, 248 { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP700_IDE), },
249 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_SB900_IDE), },
249 250
250 { }, 251 { },
251}; 252};
diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c
index d33aa28239a9..403f56165cec 100644
--- a/drivers/ata/pata_cs5535.c
+++ b/drivers/ata/pata_cs5535.c
@@ -202,7 +202,8 @@ static int cs5535_init_one(struct pci_dev *dev, const struct pci_device_id *id)
202} 202}
203 203
204static const struct pci_device_id cs5535[] = { 204static const struct pci_device_id cs5535[] = {
205 { PCI_VDEVICE(NS, 0x002D), }, 205 { PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_CS5535_IDE), },
206 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5535_IDE), },
206 207
207 { }, 208 { },
208}; 209};
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index abdd19fe990a..d6f69561dc86 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -213,7 +213,7 @@ static void octeon_cf_set_dmamode(struct ata_port *ap, struct ata_device *dev)
213 * This is tI, C.F. spec. says 0, but Sony CF card requires 213 * This is tI, C.F. spec. says 0, but Sony CF card requires
214 * more, we use 20 nS. 214 * more, we use 20 nS.
215 */ 215 */
216 dma_tim.s.dmack_s = ns_to_tim_reg(tim_mult, 20);; 216 dma_tim.s.dmack_s = ns_to_tim_reg(tim_mult, 20);
217 dma_tim.s.dmack_h = ns_to_tim_reg(tim_mult, dma_ackh); 217 dma_tim.s.dmack_h = ns_to_tim_reg(tim_mult, dma_ackh);
218 218
219 dma_tim.s.dmarq = dma_arq; 219 dma_tim.s.dmarq = dma_arq;
@@ -841,7 +841,7 @@ static int __devinit octeon_cf_probe(struct platform_device *pdev)
841 ocd = pdev->dev.platform_data; 841 ocd = pdev->dev.platform_data;
842 842
843 cs0 = devm_ioremap_nocache(&pdev->dev, res_cs0->start, 843 cs0 = devm_ioremap_nocache(&pdev->dev, res_cs0->start,
844 res_cs0->end - res_cs0->start + 1); 844 resource_size(res_cs0));
845 845
846 if (!cs0) 846 if (!cs0)
847 return -ENOMEM; 847 return -ENOMEM;
diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
index d8d743af3225..3f6ebc6c665a 100644
--- a/drivers/ata/pata_platform.c
+++ b/drivers/ata/pata_platform.c
@@ -151,14 +151,14 @@ int __devinit __pata_platform_probe(struct device *dev,
151 */ 151 */
152 if (mmio) { 152 if (mmio) {
153 ap->ioaddr.cmd_addr = devm_ioremap(dev, io_res->start, 153 ap->ioaddr.cmd_addr = devm_ioremap(dev, io_res->start,
154 io_res->end - io_res->start + 1); 154 resource_size(io_res));
155 ap->ioaddr.ctl_addr = devm_ioremap(dev, ctl_res->start, 155 ap->ioaddr.ctl_addr = devm_ioremap(dev, ctl_res->start,
156 ctl_res->end - ctl_res->start + 1); 156 resource_size(ctl_res));
157 } else { 157 } else {
158 ap->ioaddr.cmd_addr = devm_ioport_map(dev, io_res->start, 158 ap->ioaddr.cmd_addr = devm_ioport_map(dev, io_res->start,
159 io_res->end - io_res->start + 1); 159 resource_size(io_res));
160 ap->ioaddr.ctl_addr = devm_ioport_map(dev, ctl_res->start, 160 ap->ioaddr.ctl_addr = devm_ioport_map(dev, ctl_res->start,
161 ctl_res->end - ctl_res->start + 1); 161 resource_size(ctl_res));
162 } 162 }
163 if (!ap->ioaddr.cmd_addr || !ap->ioaddr.ctl_addr) { 163 if (!ap->ioaddr.cmd_addr || !ap->ioaddr.ctl_addr) {
164 dev_err(dev, "failed to map IO/CTL base\n"); 164 dev_err(dev, "failed to map IO/CTL base\n");
diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
index 8e3cdef8a25f..45f1e10f917b 100644
--- a/drivers/ata/pata_rb532_cf.c
+++ b/drivers/ata/pata_rb532_cf.c
@@ -151,7 +151,7 @@ static __devinit int rb532_pata_driver_probe(struct platform_device *pdev)
151 info->irq = irq; 151 info->irq = irq;
152 152
153 info->iobase = devm_ioremap_nocache(&pdev->dev, res->start, 153 info->iobase = devm_ioremap_nocache(&pdev->dev, res->start,
154 res->end - res->start + 1); 154 resource_size(res));
155 if (!info->iobase) 155 if (!info->iobase)
156 return -ENOMEM; 156 return -ENOMEM;
157 157
diff --git a/drivers/ata/pata_rdc.c b/drivers/ata/pata_rdc.c
new file mode 100644
index 000000000000..c843a1e07c4f
--- /dev/null
+++ b/drivers/ata/pata_rdc.c
@@ -0,0 +1,400 @@
1/*
2 * pata_rdc - Driver for later RDC PATA controllers
3 *
4 * This is actually a driver for hardware meeting
5 * INCITS 370-2004 (1510D): ATA Host Adapter Standards
6 *
7 * Based on ata_piix.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2, or (at your option)
12 * any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; see the file COPYING. If not, write to
21 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
22 */
23
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/pci.h>
27#include <linux/init.h>
28#include <linux/blkdev.h>
29#include <linux/delay.h>
30#include <linux/device.h>
31#include <scsi/scsi_host.h>
32#include <linux/libata.h>
33#include <linux/dmi.h>
34
35#define DRV_NAME "pata_rdc"
36#define DRV_VERSION "0.01"
37
38struct rdc_host_priv {
39 u32 saved_iocfg;
40};
41
42/**
43 * rdc_pata_cable_detect - Probe host controller cable detect info
44 * @ap: Port for which cable detect info is desired
45 *
46 * Read 80c cable indicator from ATA PCI device's PCI config
47 * register. This register is normally set by firmware (BIOS).
48 *
49 * LOCKING:
50 * None (inherited from caller).
51 */
52
53static int rdc_pata_cable_detect(struct ata_port *ap)
54{
55 struct rdc_host_priv *hpriv = ap->host->private_data;
56 u8 mask;
57
58 /* check BIOS cable detect results */
59 mask = 0x30 << (2 * ap->port_no);
60 if ((hpriv->saved_iocfg & mask) == 0)
61 return ATA_CBL_PATA40;
62 return ATA_CBL_PATA80;
63}
64
65/**
66 * rdc_pata_prereset - prereset for PATA host controller
67 * @link: Target link
68 * @deadline: deadline jiffies for the operation
69 *
70 * LOCKING:
71 * None (inherited from caller).
72 */
73static int rdc_pata_prereset(struct ata_link *link, unsigned long deadline)
74{
75 struct ata_port *ap = link->ap;
76 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
77
78 static const struct pci_bits rdc_enable_bits[] = {
79 { 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */
80 { 0x43U, 1U, 0x80UL, 0x80UL }, /* port 1 */
81 };
82
83 if (!pci_test_config_bits(pdev, &rdc_enable_bits[ap->port_no]))
84 return -ENOENT;
85 return ata_sff_prereset(link, deadline);
86}
87
88/**
89 * rdc_set_piomode - Initialize host controller PATA PIO timings
90 * @ap: Port whose timings we are configuring
 91 * @adev: Drive in question
92 *
93 * Set PIO mode for device, in host controller PCI config space.
94 *
95 * LOCKING:
96 * None (inherited from caller).
97 */
98
99static void rdc_set_piomode(struct ata_port *ap, struct ata_device *adev)
100{
101 unsigned int pio = adev->pio_mode - XFER_PIO_0;
102 struct pci_dev *dev = to_pci_dev(ap->host->dev);
103 unsigned int is_slave = (adev->devno != 0);
104 unsigned int master_port= ap->port_no ? 0x42 : 0x40;
105 unsigned int slave_port = 0x44;
106 u16 master_data;
107 u8 slave_data;
108 u8 udma_enable;
109 int control = 0;
110
111 static const /* ISP RTC */
112 u8 timings[][2] = { { 0, 0 },
113 { 0, 0 },
114 { 1, 0 },
115 { 2, 1 },
116 { 2, 3 }, };
117
118 if (pio >= 2)
119 control |= 1; /* TIME1 enable */
120 if (ata_pio_need_iordy(adev))
121 control |= 2; /* IE enable */
122
123 if (adev->class == ATA_DEV_ATA)
124 control |= 4; /* PPE enable */
125
126 /* PIO configuration clears DTE unconditionally. It will be
127 * programmed in set_dmamode which is guaranteed to be called
128 * after set_piomode if any DMA mode is available.
129 */
130 pci_read_config_word(dev, master_port, &master_data);
131 if (is_slave) {
132 /* clear TIME1|IE1|PPE1|DTE1 */
133 master_data &= 0xff0f;
134 /* Enable SITRE (separate slave timing register) */
135 master_data |= 0x4000;
136 /* enable PPE1, IE1 and TIME1 as needed */
137 master_data |= (control << 4);
138 pci_read_config_byte(dev, slave_port, &slave_data);
139 slave_data &= (ap->port_no ? 0x0f : 0xf0);
140 /* Load the timing nibble for this slave */
141 slave_data |= ((timings[pio][0] << 2) | timings[pio][1])
142 << (ap->port_no ? 4 : 0);
143 } else {
144 /* clear ISP|RCT|TIME0|IE0|PPE0|DTE0 */
145 master_data &= 0xccf0;
146 /* Enable PPE, IE and TIME as appropriate */
147 master_data |= control;
148 /* load ISP and RCT */
149 master_data |=
150 (timings[pio][0] << 12) |
151 (timings[pio][1] << 8);
152 }
153 pci_write_config_word(dev, master_port, master_data);
154 if (is_slave)
155 pci_write_config_byte(dev, slave_port, slave_data);
156
157 /* Ensure the UDMA bit is off - it will be turned back on if
158 UDMA is selected */
159
160 pci_read_config_byte(dev, 0x48, &udma_enable);
161 udma_enable &= ~(1 << (2 * ap->port_no + adev->devno));
162 pci_write_config_byte(dev, 0x48, udma_enable);
163}
164
165/**
 166 * rdc_set_dmamode - Initialize host controller PATA DMA timings
167 * @ap: Port whose timings we are configuring
168 * @adev: Drive in question
169 *
170 * Set UDMA mode for device, in host controller PCI config space.
171 *
172 * LOCKING:
173 * None (inherited from caller).
174 */
175
176static void rdc_set_dmamode(struct ata_port *ap, struct ata_device *adev)
177{
178 struct pci_dev *dev = to_pci_dev(ap->host->dev);
179 u8 master_port = ap->port_no ? 0x42 : 0x40;
180 u16 master_data;
181 u8 speed = adev->dma_mode;
182 int devid = adev->devno + 2 * ap->port_no;
183 u8 udma_enable = 0;
184
185 static const /* ISP RTC */
186 u8 timings[][2] = { { 0, 0 },
187 { 0, 0 },
188 { 1, 0 },
189 { 2, 1 },
190 { 2, 3 }, };
191
192 pci_read_config_word(dev, master_port, &master_data);
193 pci_read_config_byte(dev, 0x48, &udma_enable);
194
195 if (speed >= XFER_UDMA_0) {
196 unsigned int udma = adev->dma_mode - XFER_UDMA_0;
197 u16 udma_timing;
198 u16 ideconf;
199 int u_clock, u_speed;
200
201 /*
202 * UDMA is handled by a combination of clock switching and
203 * selection of dividers
204 *
205 * Handy rule: Odd modes are UDMATIMx 01, even are 02
206 * except UDMA0 which is 00
207 */
208 u_speed = min(2 - (udma & 1), udma);
209 if (udma == 5)
210 u_clock = 0x1000; /* 100Mhz */
211 else if (udma > 2)
212 u_clock = 1; /* 66Mhz */
213 else
214 u_clock = 0; /* 33Mhz */
215
216 udma_enable |= (1 << devid);
217
218 /* Load the CT/RP selection */
219 pci_read_config_word(dev, 0x4A, &udma_timing);
220 udma_timing &= ~(3 << (4 * devid));
221 udma_timing |= u_speed << (4 * devid);
222 pci_write_config_word(dev, 0x4A, udma_timing);
223
224 /* Select a 33/66/100Mhz clock */
225 pci_read_config_word(dev, 0x54, &ideconf);
226 ideconf &= ~(0x1001 << devid);
227 ideconf |= u_clock << devid;
228 pci_write_config_word(dev, 0x54, ideconf);
229 } else {
230 /*
231 * MWDMA is driven by the PIO timings. We must also enable
232 * IORDY unconditionally along with TIME1. PPE has already
233 * been set when the PIO timing was set.
234 */
235 unsigned int mwdma = adev->dma_mode - XFER_MW_DMA_0;
236 unsigned int control;
237 u8 slave_data;
238 const unsigned int needed_pio[3] = {
239 XFER_PIO_0, XFER_PIO_3, XFER_PIO_4
240 };
241 int pio = needed_pio[mwdma] - XFER_PIO_0;
242
243 control = 3; /* IORDY|TIME1 */
244
245 /* If the drive MWDMA is faster than it can do PIO then
246 we must force PIO into PIO0 */
247
248 if (adev->pio_mode < needed_pio[mwdma])
249 /* Enable DMA timing only */
250 control |= 8; /* PIO cycles in PIO0 */
251
252 if (adev->devno) { /* Slave */
253 master_data &= 0xFF4F; /* Mask out IORDY|TIME1|DMAONLY */
254 master_data |= control << 4;
255 pci_read_config_byte(dev, 0x44, &slave_data);
256 slave_data &= (ap->port_no ? 0x0f : 0xf0);
257 /* Load the matching timing */
258 slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << (ap->port_no ? 4 : 0);
259 pci_write_config_byte(dev, 0x44, slave_data);
260 } else { /* Master */
261 master_data &= 0xCCF4; /* Mask out IORDY|TIME1|DMAONLY
262 and master timing bits */
263 master_data |= control;
264 master_data |=
265 (timings[pio][0] << 12) |
266 (timings[pio][1] << 8);
267 }
268
269 udma_enable &= ~(1 << devid);
270 pci_write_config_word(dev, master_port, master_data);
271 }
272 pci_write_config_byte(dev, 0x48, udma_enable);
273}
274
275static struct ata_port_operations rdc_pata_ops = {
276 .inherits = &ata_bmdma32_port_ops,
277 .cable_detect = rdc_pata_cable_detect,
278 .set_piomode = rdc_set_piomode,
279 .set_dmamode = rdc_set_dmamode,
280 .prereset = rdc_pata_prereset,
281};
282
283static struct ata_port_info rdc_port_info = {
284
285 .flags = ATA_FLAG_SLAVE_POSS,
286 .pio_mask = ATA_PIO4,
287 .mwdma_mask = ATA_MWDMA2,
288 .udma_mask = ATA_UDMA5,
289 .port_ops = &rdc_pata_ops,
290};
291
292static struct scsi_host_template rdc_sht = {
293 ATA_BMDMA_SHT(DRV_NAME),
294};
295
296/**
 297 * rdc_init_one - Register RDC ATA PCI device with kernel services
298 * @pdev: PCI device to register
299 * @ent: Entry in rdc_pci_tbl matching with @pdev
300 *
 301 * Called from kernel PCI layer. We set up the controller and then
 302 * hand over control to libata, for it to do the rest.
303 *
304 * LOCKING:
305 * Inherited from PCI layer (may sleep).
306 *
307 * RETURNS:
308 * Zero on success, or -ERRNO value.
309 */
310
311static int __devinit rdc_init_one(struct pci_dev *pdev,
312 const struct pci_device_id *ent)
313{
314 static int printed_version;
315 struct device *dev = &pdev->dev;
316 struct ata_port_info port_info[2];
317 const struct ata_port_info *ppi[] = { &port_info[0], &port_info[1] };
318 unsigned long port_flags;
319 struct ata_host *host;
320 struct rdc_host_priv *hpriv;
321 int rc;
322
323 if (!printed_version++)
324 dev_printk(KERN_DEBUG, &pdev->dev,
325 "version " DRV_VERSION "\n");
326
327 port_info[0] = rdc_port_info;
328 port_info[1] = rdc_port_info;
329
330 port_flags = port_info[0].flags;
331
332 /* enable device and prepare host */
333 rc = pcim_enable_device(pdev);
334 if (rc)
335 return rc;
336
337 hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
338 if (!hpriv)
339 return -ENOMEM;
340
341 /* Save IOCFG, this will be used for cable detection, quirk
342 * detection and restoration on detach.
343 */
344 pci_read_config_dword(pdev, 0x54, &hpriv->saved_iocfg);
345
346 rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
347 if (rc)
348 return rc;
349 host->private_data = hpriv;
350
351 pci_intx(pdev, 1);
352
353 host->flags |= ATA_HOST_PARALLEL_SCAN;
354
355 pci_set_master(pdev);
356 return ata_pci_sff_activate_host(host, ata_sff_interrupt, &rdc_sht);
357}
358
359static void rdc_remove_one(struct pci_dev *pdev)
360{
361 struct ata_host *host = dev_get_drvdata(&pdev->dev);
362 struct rdc_host_priv *hpriv = host->private_data;
363
364 pci_write_config_dword(pdev, 0x54, hpriv->saved_iocfg);
365
366 ata_pci_remove_one(pdev);
367}
368
369static const struct pci_device_id rdc_pci_tbl[] = {
370 { PCI_DEVICE(0x17F3, 0x1011), },
371 { PCI_DEVICE(0x17F3, 0x1012), },
372 { } /* terminate list */
373};
374
375static struct pci_driver rdc_pci_driver = {
376 .name = DRV_NAME,
377 .id_table = rdc_pci_tbl,
378 .probe = rdc_init_one,
379 .remove = rdc_remove_one,
380};
381
382
383static int __init rdc_init(void)
384{
385 return pci_register_driver(&rdc_pci_driver);
386}
387
388static void __exit rdc_exit(void)
389{
390 pci_unregister_driver(&rdc_pci_driver);
391}
392
393module_init(rdc_init);
394module_exit(rdc_exit);
395
396MODULE_AUTHOR("Alan Cox (based on ata_piix)");
397MODULE_DESCRIPTION("SCSI low-level driver for RDC PATA controllers");
398MODULE_LICENSE("GPL");
399MODULE_DEVICE_TABLE(pci, rdc_pci_tbl);
400MODULE_VERSION(DRV_VERSION);
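One detail of the new driver worth spelling out: the UDMA branch of rdc_set_dmamode() encodes the "handy rule" from its comment (UDMATIMx is 00 for UDMA0, 01 for odd modes, 02 for the higher even modes) in the single expression u_speed = min(2 - (udma & 1), udma). A small standalone check of that mapping, illustrative only:

#include <stdio.h>

int main(void)
{
	unsigned int udma;

	for (udma = 0; udma <= 5; udma++) {
		unsigned int u_speed = 2 - (udma & 1);

		if (udma < u_speed)	/* min(2 - (udma & 1), udma) */
			u_speed = udma;
		printf("UDMA%u -> UDMATIMx %02u\n", udma, u_speed);
	}
	return 0;
}

This prints 00 for UDMA0, 01 for UDMA1/3/5 and 02 for UDMA2/4, matching the rule stated in the driver comment.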
diff --git a/drivers/ata/pata_rz1000.c b/drivers/ata/pata_rz1000.c
index 0c574c065c62..a5e4dfe60b41 100644
--- a/drivers/ata/pata_rz1000.c
+++ b/drivers/ata/pata_rz1000.c
@@ -85,7 +85,6 @@ static int rz1000_fifo_disable(struct pci_dev *pdev)
85 85
86static int rz1000_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) 86static int rz1000_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
87{ 87{
88 static int printed_version;
89 static const struct ata_port_info info = { 88 static const struct ata_port_info info = {
90 .flags = ATA_FLAG_SLAVE_POSS, 89 .flags = ATA_FLAG_SLAVE_POSS,
91 .pio_mask = ATA_PIO4, 90 .pio_mask = ATA_PIO4,
@@ -93,8 +92,7 @@ static int rz1000_init_one (struct pci_dev *pdev, const struct pci_device_id *en
93 }; 92 };
94 const struct ata_port_info *ppi[] = { &info, NULL }; 93 const struct ata_port_info *ppi[] = { &info, NULL };
95 94
96 if (!printed_version++) 95 printk_once(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");
97 printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");
98 96
99 if (rz1000_fifo_disable(pdev) == 0) 97 if (rz1000_fifo_disable(pdev) == 0)
100 return ata_pci_sff_init_one(pdev, ppi, &rz1000_sht, NULL); 98 return ata_pci_sff_init_one(pdev, ppi, &rz1000_sht, NULL);
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 94eaa432c40a..d344db42a002 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -1257,6 +1257,7 @@ static struct scsi_host_template sata_fsl_sht = {
1257static struct ata_port_operations sata_fsl_ops = { 1257static struct ata_port_operations sata_fsl_ops = {
1258 .inherits = &sata_pmp_port_ops, 1258 .inherits = &sata_pmp_port_ops,
1259 1259
1260 .qc_defer = ata_std_qc_defer,
1260 .qc_prep = sata_fsl_qc_prep, 1261 .qc_prep = sata_fsl_qc_prep,
1261 .qc_issue = sata_fsl_qc_issue, 1262 .qc_issue = sata_fsl_qc_issue,
1262 .qc_fill_rtf = sata_fsl_qc_fill_rtf, 1263 .qc_fill_rtf = sata_fsl_qc_fill_rtf,
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index 8d890cc5a7ee..4406902b4293 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -405,7 +405,7 @@ static irqreturn_t inic_interrupt(int irq, void *dev_instance)
405 struct ata_host *host = dev_instance; 405 struct ata_host *host = dev_instance;
406 struct inic_host_priv *hpriv = host->private_data; 406 struct inic_host_priv *hpriv = host->private_data;
407 u16 host_irq_stat; 407 u16 host_irq_stat;
408 int i, handled = 0;; 408 int i, handled = 0;
409 409
410 host_irq_stat = readw(hpriv->mmio_base + HOST_IRQ_STAT); 410 host_irq_stat = readw(hpriv->mmio_base + HOST_IRQ_STAT);
411 411
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index c19417e02208..17f9ff9067a2 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -4013,7 +4013,7 @@ static int mv_platform_probe(struct platform_device *pdev)
4013 4013
4014 host->iomap = NULL; 4014 host->iomap = NULL;
4015 hpriv->base = devm_ioremap(&pdev->dev, res->start, 4015 hpriv->base = devm_ioremap(&pdev->dev, res->start,
4016 res->end - res->start + 1); 4016 resource_size(res));
4017 hpriv->base -= SATAHC0_REG_BASE; 4017 hpriv->base -= SATAHC0_REG_BASE;
4018 4018
4019 /* 4019 /*
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index 35bd5cc7f285..3cb69d5fb817 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -565,6 +565,19 @@ static void sil_freeze(struct ata_port *ap)
565 tmp |= SIL_MASK_IDE0_INT << ap->port_no; 565 tmp |= SIL_MASK_IDE0_INT << ap->port_no;
566 writel(tmp, mmio_base + SIL_SYSCFG); 566 writel(tmp, mmio_base + SIL_SYSCFG);
567 readl(mmio_base + SIL_SYSCFG); /* flush */ 567 readl(mmio_base + SIL_SYSCFG); /* flush */
568
569 /* Ensure DMA_ENABLE is off.
570 *
571 * This is because the controller will not give us access to the
572 * taskfile registers while a DMA is in progress
573 */
574 iowrite8(ioread8(ap->ioaddr.bmdma_addr) & ~SIL_DMA_ENABLE,
575 ap->ioaddr.bmdma_addr);
576
577 /* According to ata_bmdma_stop, an HDMA transition requires
 578 * one PIO cycle. But we can't read a taskfile register.
579 */
580 ioread8(ap->ioaddr.bmdma_addr);
568} 581}
569 582
570static void sil_thaw(struct ata_port *ap) 583static void sil_thaw(struct ata_port *ap)
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index 77aa8d7ecec4..e6946fc527d0 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -846,6 +846,17 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
846 if (!ata_is_atapi(qc->tf.protocol)) { 846 if (!ata_is_atapi(qc->tf.protocol)) {
847 prb = &cb->ata.prb; 847 prb = &cb->ata.prb;
848 sge = cb->ata.sge; 848 sge = cb->ata.sge;
849 if (ata_is_data(qc->tf.protocol)) {
850 u16 prot = 0;
851 ctrl = PRB_CTRL_PROTOCOL;
852 if (ata_is_ncq(qc->tf.protocol))
853 prot |= PRB_PROT_NCQ;
854 if (qc->tf.flags & ATA_TFLAG_WRITE)
855 prot |= PRB_PROT_WRITE;
856 else
857 prot |= PRB_PROT_READ;
858 prb->prot = cpu_to_le16(prot);
859 }
849 } else { 860 } else {
850 prb = &cb->atapi.prb; 861 prb = &cb->atapi.prb;
851 sge = cb->atapi.sge; 862 sge = cb->atapi.sge;
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
index 8f9833228619..f8a91bfd66a8 100644
--- a/drivers/ata/sata_sis.c
+++ b/drivers/ata/sata_sis.c
@@ -109,8 +109,9 @@ MODULE_LICENSE("GPL");
109MODULE_DEVICE_TABLE(pci, sis_pci_tbl); 109MODULE_DEVICE_TABLE(pci, sis_pci_tbl);
110MODULE_VERSION(DRV_VERSION); 110MODULE_VERSION(DRV_VERSION);
111 111
112static unsigned int get_scr_cfg_addr(struct ata_port *ap, unsigned int sc_reg) 112static unsigned int get_scr_cfg_addr(struct ata_link *link, unsigned int sc_reg)
113{ 113{
114 struct ata_port *ap = link->ap;
114 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 115 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
115 unsigned int addr = SIS_SCR_BASE + (4 * sc_reg); 116 unsigned int addr = SIS_SCR_BASE + (4 * sc_reg);
116 u8 pmr; 117 u8 pmr;
@@ -131,6 +132,9 @@ static unsigned int get_scr_cfg_addr(struct ata_port *ap, unsigned int sc_reg)
131 break; 132 break;
132 } 133 }
133 } 134 }
135 if (link->pmp)
136 addr += 0x10;
137
134 return addr; 138 return addr;
135} 139}
136 140
@@ -138,24 +142,12 @@ static u32 sis_scr_cfg_read(struct ata_link *link,
138 unsigned int sc_reg, u32 *val) 142 unsigned int sc_reg, u32 *val)
139{ 143{
140 struct pci_dev *pdev = to_pci_dev(link->ap->host->dev); 144 struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
141 unsigned int cfg_addr = get_scr_cfg_addr(link->ap, sc_reg); 145 unsigned int cfg_addr = get_scr_cfg_addr(link, sc_reg);
142 u32 val2 = 0;
143 u8 pmr;
144 146
145 if (sc_reg == SCR_ERROR) /* doesn't exist in PCI cfg space */ 147 if (sc_reg == SCR_ERROR) /* doesn't exist in PCI cfg space */
146 return -EINVAL; 148 return -EINVAL;
147 149
148 pci_read_config_byte(pdev, SIS_PMR, &pmr);
149
150 pci_read_config_dword(pdev, cfg_addr, val); 150 pci_read_config_dword(pdev, cfg_addr, val);
151
152 if ((pdev->device == 0x0182) || (pdev->device == 0x0183) ||
153 (pdev->device == 0x1182) || (pmr & SIS_PMR_COMBINED))
154 pci_read_config_dword(pdev, cfg_addr+0x10, &val2);
155
156 *val |= val2;
157 *val &= 0xfffffffb; /* avoid problems with powerdowned ports */
158
159 return 0; 151 return 0;
160} 152}
161 153
@@ -163,28 +155,16 @@ static int sis_scr_cfg_write(struct ata_link *link,
163 unsigned int sc_reg, u32 val) 155 unsigned int sc_reg, u32 val)
164{ 156{
165 struct pci_dev *pdev = to_pci_dev(link->ap->host->dev); 157 struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
166 unsigned int cfg_addr = get_scr_cfg_addr(link->ap, sc_reg); 158 unsigned int cfg_addr = get_scr_cfg_addr(link, sc_reg);
167 u8 pmr;
168
169 if (sc_reg == SCR_ERROR) /* doesn't exist in PCI cfg space */
170 return -EINVAL;
171
172 pci_read_config_byte(pdev, SIS_PMR, &pmr);
173 159
174 pci_write_config_dword(pdev, cfg_addr, val); 160 pci_write_config_dword(pdev, cfg_addr, val);
175
176 if ((pdev->device == 0x0182) || (pdev->device == 0x0183) ||
177 (pdev->device == 0x1182) || (pmr & SIS_PMR_COMBINED))
178 pci_write_config_dword(pdev, cfg_addr+0x10, val);
179
180 return 0; 161 return 0;
181} 162}
182 163
183static int sis_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val) 164static int sis_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
184{ 165{
185 struct ata_port *ap = link->ap; 166 struct ata_port *ap = link->ap;
186 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 167 void __iomem *base = ap->ioaddr.scr_addr + link->pmp * 0x10;
187 u8 pmr;
188 168
189 if (sc_reg > SCR_CONTROL) 169 if (sc_reg > SCR_CONTROL)
190 return -EINVAL; 170 return -EINVAL;
@@ -192,39 +172,23 @@ static int sis_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
192 if (ap->flags & SIS_FLAG_CFGSCR) 172 if (ap->flags & SIS_FLAG_CFGSCR)
193 return sis_scr_cfg_read(link, sc_reg, val); 173 return sis_scr_cfg_read(link, sc_reg, val);
194 174
195 pci_read_config_byte(pdev, SIS_PMR, &pmr); 175 *val = ioread32(base + sc_reg * 4);
196
197 *val = ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
198
199 if ((pdev->device == 0x0182) || (pdev->device == 0x0183) ||
200 (pdev->device == 0x1182) || (pmr & SIS_PMR_COMBINED))
201 *val |= ioread32(ap->ioaddr.scr_addr + (sc_reg * 4) + 0x10);
202
203 *val &= 0xfffffffb;
204
205 return 0; 176 return 0;
206} 177}
207 178
208static int sis_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val) 179static int sis_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
209{ 180{
210 struct ata_port *ap = link->ap; 181 struct ata_port *ap = link->ap;
211 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 182 void __iomem *base = ap->ioaddr.scr_addr + link->pmp * 0x10;
212 u8 pmr;
213 183
214 if (sc_reg > SCR_CONTROL) 184 if (sc_reg > SCR_CONTROL)
215 return -EINVAL; 185 return -EINVAL;
216 186
217 pci_read_config_byte(pdev, SIS_PMR, &pmr);
218
219 if (ap->flags & SIS_FLAG_CFGSCR) 187 if (ap->flags & SIS_FLAG_CFGSCR)
220 return sis_scr_cfg_write(link, sc_reg, val); 188 return sis_scr_cfg_write(link, sc_reg, val);
221 else { 189
222 iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4)); 190 iowrite32(val, base + (sc_reg * 4));
223 if ((pdev->device == 0x0182) || (pdev->device == 0x0183) || 191 return 0;
224 (pdev->device == 0x1182) || (pmr & SIS_PMR_COMBINED))
225 iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4)+0x10);
226 return 0;
227 }
228} 192}
229 193
230static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 194static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -236,7 +200,7 @@ static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
236 u32 genctl, val; 200 u32 genctl, val;
237 u8 pmr; 201 u8 pmr;
238 u8 port2_start = 0x20; 202 u8 port2_start = 0x20;
239 int rc; 203 int i, rc;
240 204
241 if (!printed_version++) 205 if (!printed_version++)
242 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n"); 206 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
@@ -319,6 +283,17 @@ static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
319 if (rc) 283 if (rc)
320 return rc; 284 return rc;
321 285
286 for (i = 0; i < 2; i++) {
287 struct ata_port *ap = host->ports[i];
288
289 if (ap->flags & ATA_FLAG_SATA &&
290 ap->flags & ATA_FLAG_SLAVE_POSS) {
291 rc = ata_slave_link_init(ap);
292 if (rc)
293 return rc;
294 }
295 }
296
322 if (!(pi.flags & SIS_FLAG_CFGSCR)) { 297 if (!(pi.flags & SIS_FLAG_CFGSCR)) {
323 void __iomem *mmio; 298 void __iomem *mmio;
324 299
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
index 5e41e6dd657b..db195abad698 100644
--- a/drivers/block/aoe/aoe.h
+++ b/drivers/block/aoe/aoe.h
@@ -155,7 +155,7 @@ struct aoedev {
155 u16 fw_ver; /* version of blade's firmware */ 155 u16 fw_ver; /* version of blade's firmware */
156 struct work_struct work;/* disk create work struct */ 156 struct work_struct work;/* disk create work struct */
157 struct gendisk *gd; 157 struct gendisk *gd;
158 struct request_queue blkq; 158 struct request_queue *blkq;
159 struct hd_geometry geo; 159 struct hd_geometry geo;
160 sector_t ssize; 160 sector_t ssize;
161 struct timer_list timer; 161 struct timer_list timer;
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 2307a271bdc9..95d344971eda 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -264,9 +264,13 @@ aoeblk_gdalloc(void *vp)
264 goto err_disk; 264 goto err_disk;
265 } 265 }
266 266
267 blk_queue_make_request(&d->blkq, aoeblk_make_request); 267 d->blkq = blk_alloc_queue(GFP_KERNEL);
268 if (bdi_init(&d->blkq.backing_dev_info)) 268 if (!d->blkq)
269 goto err_mempool; 269 goto err_mempool;
270 blk_queue_make_request(d->blkq, aoeblk_make_request);
271 d->blkq->backing_dev_info.name = "aoe";
272 if (bdi_init(&d->blkq->backing_dev_info))
273 goto err_blkq;
270 spin_lock_irqsave(&d->lock, flags); 274 spin_lock_irqsave(&d->lock, flags);
271 gd->major = AOE_MAJOR; 275 gd->major = AOE_MAJOR;
272 gd->first_minor = d->sysminor * AOE_PARTITIONS; 276 gd->first_minor = d->sysminor * AOE_PARTITIONS;
@@ -276,7 +280,7 @@ aoeblk_gdalloc(void *vp)
276 snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%d", 280 snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%d",
277 d->aoemajor, d->aoeminor); 281 d->aoemajor, d->aoeminor);
278 282
279 gd->queue = &d->blkq; 283 gd->queue = d->blkq;
280 d->gd = gd; 284 d->gd = gd;
281 d->flags &= ~DEVFL_GDALLOC; 285 d->flags &= ~DEVFL_GDALLOC;
282 d->flags |= DEVFL_UP; 286 d->flags |= DEVFL_UP;
@@ -287,6 +291,9 @@ aoeblk_gdalloc(void *vp)
287 aoedisk_add_sysfs(d); 291 aoedisk_add_sysfs(d);
288 return; 292 return;
289 293
294err_blkq:
295 blk_cleanup_queue(d->blkq);
296 d->blkq = NULL;
290err_mempool: 297err_mempool:
291 mempool_destroy(d->bufpool); 298 mempool_destroy(d->bufpool);
292err_disk: 299err_disk:
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index eeea477d9601..fa67027789aa 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -113,6 +113,7 @@ aoedev_freedev(struct aoedev *d)
113 if (d->bufpool) 113 if (d->bufpool)
114 mempool_destroy(d->bufpool); 114 mempool_destroy(d->bufpool);
115 skbpoolfree(d); 115 skbpoolfree(d);
116 blk_cleanup_queue(d->blkq);
116 kfree(d); 117 kfree(d);
117} 118}
118 119
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 8c9d50db5c3a..c58557790585 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -49,6 +49,7 @@
49#define PCI_DEVICE_ID_INTEL_IGDNG_D_HB 0x0040 49#define PCI_DEVICE_ID_INTEL_IGDNG_D_HB 0x0040
50#define PCI_DEVICE_ID_INTEL_IGDNG_D_IG 0x0042 50#define PCI_DEVICE_ID_INTEL_IGDNG_D_IG 0x0042
51#define PCI_DEVICE_ID_INTEL_IGDNG_M_HB 0x0044 51#define PCI_DEVICE_ID_INTEL_IGDNG_M_HB 0x0044
52#define PCI_DEVICE_ID_INTEL_IGDNG_MA_HB 0x0062
52#define PCI_DEVICE_ID_INTEL_IGDNG_M_IG 0x0046 53#define PCI_DEVICE_ID_INTEL_IGDNG_M_IG 0x0046
53 54
54/* cover 915 and 945 variants */ 55/* cover 915 and 945 variants */
@@ -81,7 +82,8 @@
81 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \ 82 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \
82 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \ 83 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \
83 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_D_HB || \ 84 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_D_HB || \
84 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_M_HB) 85 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_M_HB || \
86 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_MA_HB)
85 87
86extern int agp_memory_reserved; 88extern int agp_memory_reserved;
87 89
@@ -1216,6 +1218,7 @@ static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
1216 case PCI_DEVICE_ID_INTEL_G41_HB: 1218 case PCI_DEVICE_ID_INTEL_G41_HB:
1217 case PCI_DEVICE_ID_INTEL_IGDNG_D_HB: 1219 case PCI_DEVICE_ID_INTEL_IGDNG_D_HB:
1218 case PCI_DEVICE_ID_INTEL_IGDNG_M_HB: 1220 case PCI_DEVICE_ID_INTEL_IGDNG_M_HB:
1221 case PCI_DEVICE_ID_INTEL_IGDNG_MA_HB:
1219 *gtt_offset = *gtt_size = MB(2); 1222 *gtt_offset = *gtt_size = MB(2);
1220 break; 1223 break;
1221 default: 1224 default:
@@ -2195,6 +2198,8 @@ static const struct intel_driver_description {
2195 "IGDNG/D", NULL, &intel_i965_driver }, 2198 "IGDNG/D", NULL, &intel_i965_driver },
2196 { PCI_DEVICE_ID_INTEL_IGDNG_M_HB, PCI_DEVICE_ID_INTEL_IGDNG_M_IG, 0, 2199 { PCI_DEVICE_ID_INTEL_IGDNG_M_HB, PCI_DEVICE_ID_INTEL_IGDNG_M_IG, 0,
2197 "IGDNG/M", NULL, &intel_i965_driver }, 2200 "IGDNG/M", NULL, &intel_i965_driver },
2201 { PCI_DEVICE_ID_INTEL_IGDNG_MA_HB, PCI_DEVICE_ID_INTEL_IGDNG_M_IG, 0,
2202 "IGDNG/MA", NULL, &intel_i965_driver },
2198 { 0, 0, 0, NULL, NULL, NULL } 2203 { 0, 0, 0, NULL, NULL, NULL }
2199}; 2204};
2200 2205
@@ -2398,6 +2403,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
2398 ID(PCI_DEVICE_ID_INTEL_G41_HB), 2403 ID(PCI_DEVICE_ID_INTEL_G41_HB),
2399 ID(PCI_DEVICE_ID_INTEL_IGDNG_D_HB), 2404 ID(PCI_DEVICE_ID_INTEL_IGDNG_D_HB),
2400 ID(PCI_DEVICE_ID_INTEL_IGDNG_M_HB), 2405 ID(PCI_DEVICE_ID_INTEL_IGDNG_M_HB),
2406 ID(PCI_DEVICE_ID_INTEL_IGDNG_MA_HB),
2401 { } 2407 { }
2402}; 2408};
2403 2409
diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c
index 86105efb4eb6..0ecac7e532f6 100644
--- a/drivers/char/hvc_iucv.c
+++ b/drivers/char/hvc_iucv.c
@@ -1006,7 +1006,7 @@ static int __init hvc_iucv_alloc(int id, unsigned int is_console)
1006 priv->dev->release = (void (*)(struct device *)) kfree; 1006 priv->dev->release = (void (*)(struct device *)) kfree;
1007 rc = device_register(priv->dev); 1007 rc = device_register(priv->dev);
1008 if (rc) { 1008 if (rc) {
1009 kfree(priv->dev); 1009 put_device(priv->dev);
1010 goto out_error_dev; 1010 goto out_error_dev;
1011 } 1011 }
1012 1012
diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c
index cd0ba51f7c80..0d8c5788b8e4 100644
--- a/drivers/char/hw_random/amd-rng.c
+++ b/drivers/char/hw_random/amd-rng.c
@@ -44,8 +44,8 @@
44 * want to register another driver on the same PCI id. 44 * want to register another driver on the same PCI id.
45 */ 45 */
46static const struct pci_device_id pci_tbl[] = { 46static const struct pci_device_id pci_tbl[] = {
47 { 0x1022, 0x7443, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, 47 { PCI_VDEVICE(AMD, 0x7443), 0, },
48 { 0x1022, 0x746b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, }, 48 { PCI_VDEVICE(AMD, 0x746b), 0, },
49 { 0, }, /* terminate list */ 49 { 0, }, /* terminate list */
50}; 50};
51MODULE_DEVICE_TABLE(pci, pci_tbl); 51MODULE_DEVICE_TABLE(pci, pci_tbl);
diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c
index 64d513f68368..4c4d4e140f98 100644
--- a/drivers/char/hw_random/geode-rng.c
+++ b/drivers/char/hw_random/geode-rng.c
@@ -46,8 +46,7 @@
46 * want to register another driver on the same PCI id. 46 * want to register another driver on the same PCI id.
47 */ 47 */
48static const struct pci_device_id pci_tbl[] = { 48static const struct pci_device_id pci_tbl[] = {
49 { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LX_AES, 49 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_LX_AES), 0, },
50 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, },
51 { 0, }, /* terminate list */ 50 { 0, }, /* terminate list */
52}; 51};
53MODULE_DEVICE_TABLE(pci, pci_tbl); 52MODULE_DEVICE_TABLE(pci, pci_tbl);
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index afa8813e737a..645237bda682 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -822,6 +822,7 @@ static const struct file_operations zero_fops = {
822 * - permits private mappings, "copies" are taken of the source of zeros 822 * - permits private mappings, "copies" are taken of the source of zeros
823 */ 823 */
824static struct backing_dev_info zero_bdi = { 824static struct backing_dev_info zero_bdi = {
825 .name = "char/mem",
825 .capabilities = BDI_CAP_MAP_COPY, 826 .capabilities = BDI_CAP_MAP_COPY,
826}; 827};
827 828
diff --git a/drivers/char/n_tty.c b/drivers/char/n_tty.c
index 973be2f44195..4e28b35024ec 100644
--- a/drivers/char/n_tty.c
+++ b/drivers/char/n_tty.c
@@ -300,8 +300,7 @@ static int do_output_char(unsigned char c, struct tty_struct *tty, int space)
300 if (space < 2) 300 if (space < 2)
301 return -1; 301 return -1;
302 tty->canon_column = tty->column = 0; 302 tty->canon_column = tty->column = 0;
303 tty_put_char(tty, '\r'); 303 tty->ops->write(tty, "\r\n", 2);
304 tty_put_char(tty, c);
305 return 2; 304 return 2;
306 } 305 }
307 tty->canon_column = tty->column; 306 tty->canon_column = tty->column;
diff --git a/drivers/char/pty.c b/drivers/char/pty.c
index d083c73d784a..b33d6688e910 100644
--- a/drivers/char/pty.c
+++ b/drivers/char/pty.c
@@ -109,21 +109,13 @@ static int pty_space(struct tty_struct *to)
109 * the other side of the pty/tty pair. 109 * the other side of the pty/tty pair.
110 */ 110 */
111 111
112static int pty_write(struct tty_struct *tty, const unsigned char *buf, 112static int pty_write(struct tty_struct *tty, const unsigned char *buf, int c)
113 int count)
114{ 113{
115 struct tty_struct *to = tty->link; 114 struct tty_struct *to = tty->link;
116 int c;
117 115
118 if (tty->stopped) 116 if (tty->stopped)
119 return 0; 117 return 0;
120 118
121 /* This isn't locked but our 8K is quite sloppy so no
122 big deal */
123
124 c = pty_space(to);
125 if (c > count)
126 c = count;
127 if (c > 0) { 119 if (c > 0) {
128 /* Stuff the data into the input queue of the other end */ 120 /* Stuff the data into the input queue of the other end */
129 c = tty_insert_flip_string(to, buf, c); 121 c = tty_insert_flip_string(to, buf, c);
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 8c7444857a4b..d8a9255e1a3f 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -240,6 +240,7 @@
240#include <linux/spinlock.h> 240#include <linux/spinlock.h>
241#include <linux/percpu.h> 241#include <linux/percpu.h>
242#include <linux/cryptohash.h> 242#include <linux/cryptohash.h>
243#include <linux/fips.h>
243 244
244#ifdef CONFIG_GENERIC_HARDIRQS 245#ifdef CONFIG_GENERIC_HARDIRQS
245# include <linux/irq.h> 246# include <linux/irq.h>
@@ -413,6 +414,7 @@ struct entropy_store {
413 unsigned add_ptr; 414 unsigned add_ptr;
414 int entropy_count; 415 int entropy_count;
415 int input_rotate; 416 int input_rotate;
417 __u8 *last_data;
416}; 418};
417 419
418static __u32 input_pool_data[INPUT_POOL_WORDS]; 420static __u32 input_pool_data[INPUT_POOL_WORDS];
@@ -852,12 +854,21 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
852{ 854{
853 ssize_t ret = 0, i; 855 ssize_t ret = 0, i;
854 __u8 tmp[EXTRACT_SIZE]; 856 __u8 tmp[EXTRACT_SIZE];
857 unsigned long flags;
855 858
856 xfer_secondary_pool(r, nbytes); 859 xfer_secondary_pool(r, nbytes);
857 nbytes = account(r, nbytes, min, reserved); 860 nbytes = account(r, nbytes, min, reserved);
858 861
859 while (nbytes) { 862 while (nbytes) {
860 extract_buf(r, tmp); 863 extract_buf(r, tmp);
864
865 if (r->last_data) {
866 spin_lock_irqsave(&r->lock, flags);
867 if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
868 panic("Hardware RNG duplicated output!\n");
869 memcpy(r->last_data, tmp, EXTRACT_SIZE);
870 spin_unlock_irqrestore(&r->lock, flags);
871 }
861 i = min_t(int, nbytes, EXTRACT_SIZE); 872 i = min_t(int, nbytes, EXTRACT_SIZE);
862 memcpy(buf, tmp, i); 873 memcpy(buf, tmp, i);
863 nbytes -= i; 874 nbytes -= i;
@@ -940,6 +951,9 @@ static void init_std_data(struct entropy_store *r)
940 now = ktime_get_real(); 951 now = ktime_get_real();
941 mix_pool_bytes(r, &now, sizeof(now)); 952 mix_pool_bytes(r, &now, sizeof(now));
942 mix_pool_bytes(r, utsname(), sizeof(*(utsname()))); 953 mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
954 /* Enable continuous test in fips mode */
955 if (fips_enabled)
956 r->last_data = kmalloc(EXTRACT_SIZE, GFP_KERNEL);
943} 957}
944 958
945static int rand_initialize(void) 959static int rand_initialize(void)
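Editor's note: the hunk above implements a FIPS-style continuous self-test. When fips_enabled is set, init_std_data() allocates last_data, and extract_entropy() then compares every freshly extracted block with the previous one, panicking on a match, since a stuck generator keeps returning identical blocks. The stand-alone sketch below shows the same idea outside the kernel; the block size and all names are illustrative, not the real random.c symbols.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BLOCK_SIZE 10                     /* illustrative block size */

static unsigned char last_block[BLOCK_SIZE];
static int have_last;

/* Compare a fresh block with the previous one; non-zero means "stuck". */
static int continuous_test(const unsigned char *block)
{
	if (have_last && memcmp(block, last_block, BLOCK_SIZE) == 0)
		return -1;
	memcpy(last_block, block, BLOCK_SIZE);
	have_last = 1;
	return 0;
}

int main(void)
{
	unsigned char buf[BLOCK_SIZE];
	int i, j;

	for (i = 0; i < 4; i++) {
		for (j = 0; j < BLOCK_SIZE; j++)
			buf[j] = rand() & 0xff;   /* stand-in for extract_buf() */
		if (continuous_test(buf)) {
			fprintf(stderr, "duplicated RNG output\n");
			return 1;
		}
	}
	return 0;
}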
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index 5d7a02f63e1c..50eecfe1d724 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -24,6 +24,7 @@
24#include <linux/sysrq.h> 24#include <linux/sysrq.h>
25#include <linux/kbd_kern.h> 25#include <linux/kbd_kern.h>
26#include <linux/proc_fs.h> 26#include <linux/proc_fs.h>
27#include <linux/nmi.h>
27#include <linux/quotaops.h> 28#include <linux/quotaops.h>
28#include <linux/perf_counter.h> 29#include <linux/perf_counter.h>
29#include <linux/kernel.h> 30#include <linux/kernel.h>
@@ -222,12 +223,20 @@ static DECLARE_WORK(sysrq_showallcpus, sysrq_showregs_othercpus);
222 223
223static void sysrq_handle_showallcpus(int key, struct tty_struct *tty) 224static void sysrq_handle_showallcpus(int key, struct tty_struct *tty)
224{ 225{
225 struct pt_regs *regs = get_irq_regs(); 226 /*
226 if (regs) { 227 * Fall back to the workqueue based printing if the
227 printk(KERN_INFO "CPU%d:\n", smp_processor_id()); 228 * backtrace printing did not succeed or the
228 show_regs(regs); 229 * architecture has no support for it:
230 */
231 if (!trigger_all_cpu_backtrace()) {
232 struct pt_regs *regs = get_irq_regs();
233
234 if (regs) {
235 printk(KERN_INFO "CPU%d:\n", smp_processor_id());
236 show_regs(regs);
237 }
238 schedule_work(&sysrq_showallcpus);
229 } 239 }
230 schedule_work(&sysrq_showallcpus);
231} 240}
232 241
233static struct sysrq_key_op sysrq_showallcpus_op = { 242static struct sysrq_key_op sysrq_showallcpus_op = {
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index aec1931608aa..0b73e4ec1add 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -450,6 +450,12 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
450 goto out_err; 450 goto out_err;
451 } 451 }
452 452
453 /* Default timeouts */
454 chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
455 chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
456 chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
457 chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
458
453 if (request_locality(chip, 0) != 0) { 459 if (request_locality(chip, 0) != 0) {
454 rc = -ENODEV; 460 rc = -ENODEV;
455 goto out_err; 461 goto out_err;
@@ -457,12 +463,6 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
457 463
458 vendor = ioread32(chip->vendor.iobase + TPM_DID_VID(0)); 464 vendor = ioread32(chip->vendor.iobase + TPM_DID_VID(0));
459 465
460 /* Default timeouts */
461 chip->vendor.timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
462 chip->vendor.timeout_b = msecs_to_jiffies(TIS_LONG_TIMEOUT);
463 chip->vendor.timeout_c = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
464 chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
465
466 dev_info(dev, 466 dev_info(dev,
467 "1.2 TPM (device-id 0x%X, rev-id %d)\n", 467 "1.2 TPM (device-id 0x%X, rev-id %d)\n",
468 vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0))); 468 vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index fd69086d08d5..2968ed6a9c49 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1250,20 +1250,11 @@ static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
1250{ 1250{
1251 int ret = 0; 1251 int ret = 0;
1252 1252
1253#ifdef __powerpc__
1254 int cpu = sysdev->id; 1253 int cpu = sysdev->id;
1255 unsigned int cur_freq = 0;
1256 struct cpufreq_policy *cpu_policy; 1254 struct cpufreq_policy *cpu_policy;
1257 1255
1258 dprintk("suspending cpu %u\n", cpu); 1256 dprintk("suspending cpu %u\n", cpu);
1259 1257
1260 /*
1261 * This whole bogosity is here because Powerbooks are made of fail.
1262 * No sane platform should need any of the code below to be run.
1263 * (it's entirely the wrong thing to do, as driver->get may
1264 * reenable interrupts on some architectures).
1265 */
1266
1267 if (!cpu_online(cpu)) 1258 if (!cpu_online(cpu))
1268 return 0; 1259 return 0;
1269 1260
@@ -1282,47 +1273,13 @@ static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
1282 1273
1283 if (cpufreq_driver->suspend) { 1274 if (cpufreq_driver->suspend) {
1284 ret = cpufreq_driver->suspend(cpu_policy, pmsg); 1275 ret = cpufreq_driver->suspend(cpu_policy, pmsg);
1285 if (ret) { 1276 if (ret)
1286 printk(KERN_ERR "cpufreq: suspend failed in ->suspend " 1277 printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
1287 "step on CPU %u\n", cpu_policy->cpu); 1278 "step on CPU %u\n", cpu_policy->cpu);
1288 goto out;
1289 }
1290 }
1291
1292 if (cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)
1293 goto out;
1294
1295 if (cpufreq_driver->get)
1296 cur_freq = cpufreq_driver->get(cpu_policy->cpu);
1297
1298 if (!cur_freq || !cpu_policy->cur) {
1299 printk(KERN_ERR "cpufreq: suspend failed to assert current "
1300 "frequency is what timing core thinks it is.\n");
1301 goto out;
1302 }
1303
1304 if (unlikely(cur_freq != cpu_policy->cur)) {
1305 struct cpufreq_freqs freqs;
1306
1307 if (!(cpufreq_driver->flags & CPUFREQ_PM_NO_WARN))
1308 dprintk("Warning: CPU frequency is %u, "
1309 "cpufreq assumed %u kHz.\n",
1310 cur_freq, cpu_policy->cur);
1311
1312 freqs.cpu = cpu;
1313 freqs.old = cpu_policy->cur;
1314 freqs.new = cur_freq;
1315
1316 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
1317 CPUFREQ_SUSPENDCHANGE, &freqs);
1318 adjust_jiffies(CPUFREQ_SUSPENDCHANGE, &freqs);
1319
1320 cpu_policy->cur = cur_freq;
1321 } 1279 }
1322 1280
1323out: 1281out:
1324 cpufreq_cpu_put(cpu_policy); 1282 cpufreq_cpu_put(cpu_policy);
1325#endif /* __powerpc__ */
1326 return ret; 1283 return ret;
1327} 1284}
1328 1285
@@ -1330,24 +1287,21 @@ out:
1330 * cpufreq_resume - restore proper CPU frequency handling after resume 1287 * cpufreq_resume - restore proper CPU frequency handling after resume
1331 * 1288 *
1332 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume()) 1289 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
1333 * 2.) if ->target and !CPUFREQ_CONST_LOOPS: verify we're in sync 1290 * 2.) schedule call cpufreq_update_policy() ASAP as interrupts are
1334 * 3.) schedule call cpufreq_update_policy() ASAP as interrupts are 1291 * restored. It will verify that the current freq is in sync with
1335 * restored. 1292 * what we believe it to be. This is a bit later than when it
1293 * should be, but nonethteless it's better than calling
1294 * cpufreq_driver->get() here which might re-enable interrupts...
1336 */ 1295 */
1337static int cpufreq_resume(struct sys_device *sysdev) 1296static int cpufreq_resume(struct sys_device *sysdev)
1338{ 1297{
1339 int ret = 0; 1298 int ret = 0;
1340 1299
1341#ifdef __powerpc__
1342 int cpu = sysdev->id; 1300 int cpu = sysdev->id;
1343 struct cpufreq_policy *cpu_policy; 1301 struct cpufreq_policy *cpu_policy;
1344 1302
1345 dprintk("resuming cpu %u\n", cpu); 1303 dprintk("resuming cpu %u\n", cpu);
1346 1304
1347 /* As with the ->suspend method, all the code below is
1348 * only necessary because Powerbooks suck.
1349 * See commit 42d4dc3f4e1e for jokes. */
1350
1351 if (!cpu_online(cpu)) 1305 if (!cpu_online(cpu))
1352 return 0; 1306 return 0;
1353 1307
@@ -1373,45 +1327,10 @@ static int cpufreq_resume(struct sys_device *sysdev)
1373 } 1327 }
1374 } 1328 }
1375 1329
1376 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1377 unsigned int cur_freq = 0;
1378
1379 if (cpufreq_driver->get)
1380 cur_freq = cpufreq_driver->get(cpu_policy->cpu);
1381
1382 if (!cur_freq || !cpu_policy->cur) {
1383 printk(KERN_ERR "cpufreq: resume failed to assert "
1384 "current frequency is what timing core "
1385 "thinks it is.\n");
1386 goto out;
1387 }
1388
1389 if (unlikely(cur_freq != cpu_policy->cur)) {
1390 struct cpufreq_freqs freqs;
1391
1392 if (!(cpufreq_driver->flags & CPUFREQ_PM_NO_WARN))
1393 dprintk("Warning: CPU frequency "
1394 "is %u, cpufreq assumed %u kHz.\n",
1395 cur_freq, cpu_policy->cur);
1396
1397 freqs.cpu = cpu;
1398 freqs.old = cpu_policy->cur;
1399 freqs.new = cur_freq;
1400
1401 srcu_notifier_call_chain(
1402 &cpufreq_transition_notifier_list,
1403 CPUFREQ_RESUMECHANGE, &freqs);
1404 adjust_jiffies(CPUFREQ_RESUMECHANGE, &freqs);
1405
1406 cpu_policy->cur = cur_freq;
1407 }
1408 }
1409
1410out:
1411 schedule_work(&cpu_policy->update); 1330 schedule_work(&cpu_policy->update);
1331
1412fail: 1332fail:
1413 cpufreq_cpu_put(cpu_policy); 1333 cpufreq_cpu_put(cpu_policy);
1414#endif /* __powerpc__ */
1415 return ret; 1334 return ret;
1416} 1335}
1417 1336
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 5b27692372bf..b08403d7d1ca 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -13,7 +13,6 @@ if CRYPTO_HW
13config CRYPTO_DEV_PADLOCK 13config CRYPTO_DEV_PADLOCK
14 tristate "Support for VIA PadLock ACE" 14 tristate "Support for VIA PadLock ACE"
15 depends on X86 && !UML 15 depends on X86 && !UML
16 select CRYPTO_ALGAPI
17 help 16 help
18 Some VIA processors come with an integrated crypto engine 17 Some VIA processors come with an integrated crypto engine
19 (so called VIA PadLock ACE, Advanced Cryptography Engine) 18 (so called VIA PadLock ACE, Advanced Cryptography Engine)
@@ -39,6 +38,7 @@ config CRYPTO_DEV_PADLOCK_AES
39config CRYPTO_DEV_PADLOCK_SHA 38config CRYPTO_DEV_PADLOCK_SHA
40 tristate "PadLock driver for SHA1 and SHA256 algorithms" 39 tristate "PadLock driver for SHA1 and SHA256 algorithms"
41 depends on CRYPTO_DEV_PADLOCK 40 depends on CRYPTO_DEV_PADLOCK
41 select CRYPTO_HASH
42 select CRYPTO_SHA1 42 select CRYPTO_SHA1
43 select CRYPTO_SHA256 43 select CRYPTO_SHA256
44 help 44 help
@@ -157,6 +157,19 @@ config S390_PRNG
157 ANSI X9.17 standard. The PRNG is usable via the char device 157 ANSI X9.17 standard. The PRNG is usable via the char device
158 /dev/prandom. 158 /dev/prandom.
159 159
160config CRYPTO_DEV_MV_CESA
161 tristate "Marvell's Cryptographic Engine"
162 depends on PLAT_ORION
163 select CRYPTO_ALGAPI
164 select CRYPTO_AES
165 select CRYPTO_BLKCIPHER2
166 help
167 This driver allows you to utilize the Cryptographic Engines and
168 Security Accelerator (CESA) which can be found on the Marvell Orion
169 and Kirkwood SoCs, such as QNAP's TS-209.
170
171 Currently the driver supports AES in ECB and CBC mode without DMA.
172
160config CRYPTO_DEV_HIFN_795X 173config CRYPTO_DEV_HIFN_795X
161 tristate "Driver HIFN 795x crypto accelerator chips" 174 tristate "Driver HIFN 795x crypto accelerator chips"
162 select CRYPTO_DES 175 select CRYPTO_DES
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 9bf4a2bc8846..6ffcb3f7f942 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -2,6 +2,7 @@ obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
2obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o 2obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
3obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o 3obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
4obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o 4obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
5obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o
5obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o 6obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
6obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o 7obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
7obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/ 8obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
index 61b6e1bec8c6..a33243c17b00 100644
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -208,7 +208,8 @@ static int crypto4xx_hash_alg_init(struct crypto_tfm *tfm,
208 } 208 }
209 } 209 }
210 210
211 tfm->crt_ahash.reqsize = sizeof(struct crypto4xx_ctx); 211 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
212 sizeof(struct crypto4xx_ctx));
212 sa = (struct dynamic_sa_ctl *) ctx->sa_in; 213 sa = (struct dynamic_sa_ctl *) ctx->sa_in;
213 set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV, 214 set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
214 SA_NOT_LOAD_HASH, SA_LOAD_IV_FROM_SA, 215 SA_NOT_LOAD_HASH, SA_LOAD_IV_FROM_SA,
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 4c0dfb2b872e..46e899ac924e 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -31,8 +31,6 @@
31#include <asm/dcr.h> 31#include <asm/dcr.h>
32#include <asm/dcr-regs.h> 32#include <asm/dcr-regs.h>
33#include <asm/cacheflush.h> 33#include <asm/cacheflush.h>
34#include <crypto/internal/hash.h>
35#include <crypto/algapi.h>
36#include <crypto/aes.h> 34#include <crypto/aes.h>
37#include <crypto/sha.h> 35#include <crypto/sha.h>
38#include "crypto4xx_reg_def.h" 36#include "crypto4xx_reg_def.h"
@@ -998,10 +996,15 @@ static int crypto4xx_alg_init(struct crypto_tfm *tfm)
998 ctx->sa_out_dma_addr = 0; 996 ctx->sa_out_dma_addr = 0;
999 ctx->sa_len = 0; 997 ctx->sa_len = 0;
1000 998
1001 if (alg->cra_type == &crypto_ablkcipher_type) 999 switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
1000 default:
1002 tfm->crt_ablkcipher.reqsize = sizeof(struct crypto4xx_ctx); 1001 tfm->crt_ablkcipher.reqsize = sizeof(struct crypto4xx_ctx);
1003 else if (alg->cra_type == &crypto_ahash_type) 1002 break;
1004 tfm->crt_ahash.reqsize = sizeof(struct crypto4xx_ctx); 1003 case CRYPTO_ALG_TYPE_AHASH:
1004 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1005 sizeof(struct crypto4xx_ctx));
1006 break;
1007 }
1005 1008
1006 return 0; 1009 return 0;
1007} 1010}
@@ -1015,7 +1018,8 @@ static void crypto4xx_alg_exit(struct crypto_tfm *tfm)
1015} 1018}
1016 1019
1017int crypto4xx_register_alg(struct crypto4xx_device *sec_dev, 1020int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
1018 struct crypto_alg *crypto_alg, int array_size) 1021 struct crypto4xx_alg_common *crypto_alg,
1022 int array_size)
1019{ 1023{
1020 struct crypto4xx_alg *alg; 1024 struct crypto4xx_alg *alg;
1021 int i; 1025 int i;
@@ -1027,13 +1031,18 @@ int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
1027 return -ENOMEM; 1031 return -ENOMEM;
1028 1032
1029 alg->alg = crypto_alg[i]; 1033 alg->alg = crypto_alg[i];
1030 INIT_LIST_HEAD(&alg->alg.cra_list);
1031 if (alg->alg.cra_init == NULL)
1032 alg->alg.cra_init = crypto4xx_alg_init;
1033 if (alg->alg.cra_exit == NULL)
1034 alg->alg.cra_exit = crypto4xx_alg_exit;
1035 alg->dev = sec_dev; 1034 alg->dev = sec_dev;
1036 rc = crypto_register_alg(&alg->alg); 1035
1036 switch (alg->alg.type) {
1037 case CRYPTO_ALG_TYPE_AHASH:
1038 rc = crypto_register_ahash(&alg->alg.u.hash);
1039 break;
1040
1041 default:
1042 rc = crypto_register_alg(&alg->alg.u.cipher);
1043 break;
1044 }
1045
1037 if (rc) { 1046 if (rc) {
1038 list_del(&alg->entry); 1047 list_del(&alg->entry);
1039 kfree(alg); 1048 kfree(alg);
@@ -1051,7 +1060,14 @@ static void crypto4xx_unregister_alg(struct crypto4xx_device *sec_dev)
1051 1060
1052 list_for_each_entry_safe(alg, tmp, &sec_dev->alg_list, entry) { 1061 list_for_each_entry_safe(alg, tmp, &sec_dev->alg_list, entry) {
1053 list_del(&alg->entry); 1062 list_del(&alg->entry);
1054 crypto_unregister_alg(&alg->alg); 1063 switch (alg->alg.type) {
1064 case CRYPTO_ALG_TYPE_AHASH:
1065 crypto_unregister_ahash(&alg->alg.u.hash);
1066 break;
1067
1068 default:
1069 crypto_unregister_alg(&alg->alg.u.cipher);
1070 }
1055 kfree(alg); 1071 kfree(alg);
1056 } 1072 }
1057} 1073}
@@ -1104,17 +1120,18 @@ static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
1104/** 1120/**
1105 * Supported Crypto Algorithms 1121 * Supported Crypto Algorithms
1106 */ 1122 */
1107struct crypto_alg crypto4xx_alg[] = { 1123struct crypto4xx_alg_common crypto4xx_alg[] = {
1108 /* Crypto AES modes */ 1124 /* Crypto AES modes */
1109 { 1125 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
1110 .cra_name = "cbc(aes)", 1126 .cra_name = "cbc(aes)",
1111 .cra_driver_name = "cbc-aes-ppc4xx", 1127 .cra_driver_name = "cbc-aes-ppc4xx",
1112 .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY, 1128 .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
1113 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, 1129 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1114 .cra_blocksize = AES_BLOCK_SIZE, 1130 .cra_blocksize = AES_BLOCK_SIZE,
1115 .cra_ctxsize = sizeof(struct crypto4xx_ctx), 1131 .cra_ctxsize = sizeof(struct crypto4xx_ctx),
1116 .cra_alignmask = 0,
1117 .cra_type = &crypto_ablkcipher_type, 1132 .cra_type = &crypto_ablkcipher_type,
1133 .cra_init = crypto4xx_alg_init,
1134 .cra_exit = crypto4xx_alg_exit,
1118 .cra_module = THIS_MODULE, 1135 .cra_module = THIS_MODULE,
1119 .cra_u = { 1136 .cra_u = {
1120 .ablkcipher = { 1137 .ablkcipher = {
@@ -1126,29 +1143,7 @@ struct crypto_alg crypto4xx_alg[] = {
1126 .decrypt = crypto4xx_decrypt, 1143 .decrypt = crypto4xx_decrypt,
1127 } 1144 }
1128 } 1145 }
1129 }, 1146 }},
1130 /* Hash SHA1 */
1131 {
1132 .cra_name = "sha1",
1133 .cra_driver_name = "sha1-ppc4xx",
1134 .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
1135 .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
1136 .cra_blocksize = SHA1_BLOCK_SIZE,
1137 .cra_ctxsize = sizeof(struct crypto4xx_ctx),
1138 .cra_alignmask = 0,
1139 .cra_type = &crypto_ahash_type,
1140 .cra_init = crypto4xx_sha1_alg_init,
1141 .cra_module = THIS_MODULE,
1142 .cra_u = {
1143 .ahash = {
1144 .digestsize = SHA1_DIGEST_SIZE,
1145 .init = crypto4xx_hash_init,
1146 .update = crypto4xx_hash_update,
1147 .final = crypto4xx_hash_final,
1148 .digest = crypto4xx_hash_digest,
1149 }
1150 }
1151 },
1152}; 1147};
1153 1148
1154/** 1149/**
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h
index 1ef103449364..da9cbe3b9fc3 100644
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -22,6 +22,8 @@
22#ifndef __CRYPTO4XX_CORE_H__ 22#ifndef __CRYPTO4XX_CORE_H__
23#define __CRYPTO4XX_CORE_H__ 23#define __CRYPTO4XX_CORE_H__
24 24
25#include <crypto/internal/hash.h>
26
25#define PPC460SX_SDR0_SRST 0x201 27#define PPC460SX_SDR0_SRST 0x201
26#define PPC405EX_SDR0_SRST 0x200 28#define PPC405EX_SDR0_SRST 0x200
27#define PPC460EX_SDR0_SRST 0x201 29#define PPC460EX_SDR0_SRST 0x201
@@ -138,14 +140,31 @@ struct crypto4xx_req_ctx {
138 u16 sa_len; 140 u16 sa_len;
139}; 141};
140 142
143struct crypto4xx_alg_common {
144 u32 type;
145 union {
146 struct crypto_alg cipher;
147 struct ahash_alg hash;
148 } u;
149};
150
141struct crypto4xx_alg { 151struct crypto4xx_alg {
142 struct list_head entry; 152 struct list_head entry;
143 struct crypto_alg alg; 153 struct crypto4xx_alg_common alg;
144 struct crypto4xx_device *dev; 154 struct crypto4xx_device *dev;
145}; 155};
146 156
147#define crypto_alg_to_crypto4xx_alg(x) \ 157static inline struct crypto4xx_alg *crypto_alg_to_crypto4xx_alg(
148 container_of(x, struct crypto4xx_alg, alg) 158 struct crypto_alg *x)
159{
160 switch (x->cra_flags & CRYPTO_ALG_TYPE_MASK) {
161 case CRYPTO_ALG_TYPE_AHASH:
162 return container_of(__crypto_ahash_alg(x),
163 struct crypto4xx_alg, alg.u.hash);
164 }
165
166 return container_of(x, struct crypto4xx_alg, alg.u.cipher);
167}
149 168
150extern int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size); 169extern int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size);
151extern void crypto4xx_free_sa(struct crypto4xx_ctx *ctx); 170extern void crypto4xx_free_sa(struct crypto4xx_ctx *ctx);
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
new file mode 100644
index 000000000000..b21ef635f352
--- /dev/null
+++ b/drivers/crypto/mv_cesa.c
@@ -0,0 +1,606 @@
1/*
2 * Support for Marvell's crypto engine which can be found on some Orion5X
3 * boards.
4 *
5 * Author: Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
6 * License: GPLv2
7 *
8 */
9#include <crypto/aes.h>
10#include <crypto/algapi.h>
11#include <linux/crypto.h>
12#include <linux/interrupt.h>
13#include <linux/io.h>
14#include <linux/kthread.h>
15#include <linux/platform_device.h>
16#include <linux/scatterlist.h>
17
18#include "mv_cesa.h"
19/*
20 * STM:
21 * /---------------------------------------\
22 * | | request complete
23 * \./ |
24 * IDLE -> new request -> BUSY -> done -> DEQUEUE
25 * /°\ |
26 * | | more scatter entries
27 * \________________/
28 */
29enum engine_status {
30 ENGINE_IDLE,
31 ENGINE_BUSY,
32 ENGINE_W_DEQUEUE,
33};
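Editor's note: purely as a reading aid for the STM diagram above, here is the same life cycle collapsed into a single transition helper. The ev_* event names are invented; in the driver the transitions are actually spread across the queue thread, the interrupt handler and dequeue_complete_req() further down in this file.

enum engine_event { ev_new_request, ev_irq_done, ev_more_scatter, ev_complete };

static enum engine_status next_state(enum engine_status st, enum engine_event ev)
{
	switch (st) {
	case ENGINE_IDLE:               /* waiting for work */
		return ev == ev_new_request ? ENGINE_BUSY : st;
	case ENGINE_BUSY:               /* engine processing one chunk */
		return ev == ev_irq_done ? ENGINE_W_DEQUEUE : st;
	case ENGINE_W_DEQUEUE:          /* copy result out, decide what's next */
		if (ev == ev_more_scatter)
			return ENGINE_BUSY;
		if (ev == ev_complete)
			return ENGINE_IDLE;
		return st;
	}
	return st;
}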
34
35/**
36 * struct req_progress - used for every crypt request
37 * @src_sg_it: sg iterator for src
38 * @dst_sg_it: sg iterator for dst
39 * @sg_src_left: bytes left in src to process (scatter list)
40 * @src_start: offset to add to src start position (scatter list)
41 * @crypt_len: length of current crypt process
 42 * @sg_dst_left: bytes left in dst to process in this scatter list
43 * @dst_start: offset to add to dst start position (scatter list)
44 * @total_req_bytes: total number of bytes processed (request).
45 *
 46 * sg helpers are used to iterate over the scatterlist. Since the size of the
 47 * SRAM may be less than the scatter size, this struct is used to keep
48 * track of progress within current scatterlist.
49 */
50struct req_progress {
51 struct sg_mapping_iter src_sg_it;
52 struct sg_mapping_iter dst_sg_it;
53
54 /* src mostly */
55 int sg_src_left;
56 int src_start;
57 int crypt_len;
58 /* dst mostly */
59 int sg_dst_left;
60 int dst_start;
61 int total_req_bytes;
62};
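Editor's note: the kernel-doc above is the key to the driver's data flow: because the SRAM bounce buffer can be smaller than a single scatterlist segment, the sg_mapping_iter helpers walk the list while req_progress remembers how far the copy has come. A hedged, self-contained sketch of that chunked walk follows (the helper name and parameters are illustrative; the driver's real versions are setup_data_in() and dequeue_complete_req() below):

#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

/* Copy 'total' bytes from a scatterlist into 'bounce', at most
 * 'max_chunk' bytes per pass - the same bookkeeping req_progress does. */
static void copy_sg_in_chunks(struct scatterlist *sgl, unsigned int nents,
			      void *bounce, size_t max_chunk, size_t total)
{
	struct sg_mapping_iter it;
	size_t done = 0;

	sg_miter_start(&it, sgl, nents, SG_MITER_FROM_SG);
	while (done < total && sg_miter_next(&it)) {
		size_t off = 0;

		/* one scatterlist entry may need several passes */
		while (off < it.length && done < total) {
			size_t n = min(max_chunk,
				       min(it.length - off, total - done));

			memcpy(bounce, it.addr + off, n);
			/* ... here the engine would be run on 'n' bytes ... */
			off += n;
			done += n;
		}
	}
	sg_miter_stop(&it);
}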
63
64struct crypto_priv {
65 void __iomem *reg;
66 void __iomem *sram;
67 int irq;
68 struct task_struct *queue_th;
69
70 /* the lock protects queue and eng_st */
71 spinlock_t lock;
72 struct crypto_queue queue;
73 enum engine_status eng_st;
74 struct ablkcipher_request *cur_req;
75 struct req_progress p;
76 int max_req_size;
77 int sram_size;
78};
79
80static struct crypto_priv *cpg;
81
82struct mv_ctx {
83 u8 aes_enc_key[AES_KEY_LEN];
84 u32 aes_dec_key[8];
85 int key_len;
86 u32 need_calc_aes_dkey;
87};
88
89enum crypto_op {
90 COP_AES_ECB,
91 COP_AES_CBC,
92};
93
94struct mv_req_ctx {
95 enum crypto_op op;
96 int decrypt;
97};
98
99static void compute_aes_dec_key(struct mv_ctx *ctx)
100{
101 struct crypto_aes_ctx gen_aes_key;
102 int key_pos;
103
104 if (!ctx->need_calc_aes_dkey)
105 return;
106
107 crypto_aes_expand_key(&gen_aes_key, ctx->aes_enc_key, ctx->key_len);
108
109 key_pos = ctx->key_len + 24;
110 memcpy(ctx->aes_dec_key, &gen_aes_key.key_enc[key_pos], 4 * 4);
111 switch (ctx->key_len) {
112 case AES_KEYSIZE_256:
113 key_pos -= 2;
 114 /* fall through */
115 case AES_KEYSIZE_192:
116 key_pos -= 2;
117 memcpy(&ctx->aes_dec_key[4], &gen_aes_key.key_enc[key_pos],
118 4 * 4);
119 break;
120 }
121 ctx->need_calc_aes_dkey = 0;
122}
123
124static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
125 unsigned int len)
126{
127 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
128 struct mv_ctx *ctx = crypto_tfm_ctx(tfm);
129
130 switch (len) {
131 case AES_KEYSIZE_128:
132 case AES_KEYSIZE_192:
133 case AES_KEYSIZE_256:
134 break;
135 default:
136 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
137 return -EINVAL;
138 }
139 ctx->key_len = len;
140 ctx->need_calc_aes_dkey = 1;
141
142 memcpy(ctx->aes_enc_key, key, AES_KEY_LEN);
143 return 0;
144}
145
146static void setup_data_in(struct ablkcipher_request *req)
147{
148 int ret;
149 void *buf;
150
151 if (!cpg->p.sg_src_left) {
152 ret = sg_miter_next(&cpg->p.src_sg_it);
153 BUG_ON(!ret);
154 cpg->p.sg_src_left = cpg->p.src_sg_it.length;
155 cpg->p.src_start = 0;
156 }
157
158 cpg->p.crypt_len = min(cpg->p.sg_src_left, cpg->max_req_size);
159
160 buf = cpg->p.src_sg_it.addr;
161 buf += cpg->p.src_start;
162
163 memcpy(cpg->sram + SRAM_DATA_IN_START, buf, cpg->p.crypt_len);
164
165 cpg->p.sg_src_left -= cpg->p.crypt_len;
166 cpg->p.src_start += cpg->p.crypt_len;
167}
168
169static void mv_process_current_q(int first_block)
170{
171 struct ablkcipher_request *req = cpg->cur_req;
172 struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
173 struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
174 struct sec_accel_config op;
175
176 switch (req_ctx->op) {
177 case COP_AES_ECB:
178 op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB;
179 break;
180 case COP_AES_CBC:
181 op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC;
182 op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) |
183 ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF);
184 if (first_block)
185 memcpy(cpg->sram + SRAM_DATA_IV, req->info, 16);
186 break;
187 }
188 if (req_ctx->decrypt) {
189 op.config |= CFG_DIR_DEC;
190 memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_dec_key,
191 AES_KEY_LEN);
192 } else {
193 op.config |= CFG_DIR_ENC;
194 memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_enc_key,
195 AES_KEY_LEN);
196 }
197
198 switch (ctx->key_len) {
199 case AES_KEYSIZE_128:
200 op.config |= CFG_AES_LEN_128;
201 break;
202 case AES_KEYSIZE_192:
203 op.config |= CFG_AES_LEN_192;
204 break;
205 case AES_KEYSIZE_256:
206 op.config |= CFG_AES_LEN_256;
207 break;
208 }
209 op.enc_p = ENC_P_SRC(SRAM_DATA_IN_START) |
210 ENC_P_DST(SRAM_DATA_OUT_START);
211 op.enc_key_p = SRAM_DATA_KEY_P;
212
213 setup_data_in(req);
214 op.enc_len = cpg->p.crypt_len;
215 memcpy(cpg->sram + SRAM_CONFIG, &op,
216 sizeof(struct sec_accel_config));
217
218 writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
219 /* GO */
220 writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
221
222 /*
223 * XXX: add timer if the interrupt does not occur for some mystery
224 * reason
225 */
226}
227
228static void mv_crypto_algo_completion(void)
229{
230 struct ablkcipher_request *req = cpg->cur_req;
231 struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
232
233 if (req_ctx->op != COP_AES_CBC)
 234 return;
235
236 memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16);
237}
238
239static void dequeue_complete_req(void)
240{
241 struct ablkcipher_request *req = cpg->cur_req;
242 void *buf;
243 int ret;
244
245 cpg->p.total_req_bytes += cpg->p.crypt_len;
246 do {
247 int dst_copy;
248
249 if (!cpg->p.sg_dst_left) {
250 ret = sg_miter_next(&cpg->p.dst_sg_it);
251 BUG_ON(!ret);
252 cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
253 cpg->p.dst_start = 0;
254 }
255
256 buf = cpg->p.dst_sg_it.addr;
257 buf += cpg->p.dst_start;
258
259 dst_copy = min(cpg->p.crypt_len, cpg->p.sg_dst_left);
260
261 memcpy(buf, cpg->sram + SRAM_DATA_OUT_START, dst_copy);
262
263 cpg->p.sg_dst_left -= dst_copy;
264 cpg->p.crypt_len -= dst_copy;
265 cpg->p.dst_start += dst_copy;
266 } while (cpg->p.crypt_len > 0);
267
268 BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
269 if (cpg->p.total_req_bytes < req->nbytes) {
270 /* process next scatter list entry */
271 cpg->eng_st = ENGINE_BUSY;
272 mv_process_current_q(0);
273 } else {
274 sg_miter_stop(&cpg->p.src_sg_it);
275 sg_miter_stop(&cpg->p.dst_sg_it);
276 mv_crypto_algo_completion();
277 cpg->eng_st = ENGINE_IDLE;
278 req->base.complete(&req->base, 0);
279 }
280}
281
282static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
283{
284 int i = 0;
285
286 do {
287 total_bytes -= sl[i].length;
288 i++;
289
290 } while (total_bytes > 0);
291
292 return i;
293}
294
295static void mv_enqueue_new_req(struct ablkcipher_request *req)
296{
297 int num_sgs;
298
299 cpg->cur_req = req;
300 memset(&cpg->p, 0, sizeof(struct req_progress));
301
302 num_sgs = count_sgs(req->src, req->nbytes);
303 sg_miter_start(&cpg->p.src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
304
305 num_sgs = count_sgs(req->dst, req->nbytes);
306 sg_miter_start(&cpg->p.dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);
307 mv_process_current_q(1);
308}
309
310static int queue_manag(void *data)
311{
312 cpg->eng_st = ENGINE_IDLE;
313 do {
314 struct ablkcipher_request *req;
315 struct crypto_async_request *async_req = NULL;
316 struct crypto_async_request *backlog;
317
318 __set_current_state(TASK_INTERRUPTIBLE);
319
320 if (cpg->eng_st == ENGINE_W_DEQUEUE)
321 dequeue_complete_req();
322
323 spin_lock_irq(&cpg->lock);
324 if (cpg->eng_st == ENGINE_IDLE) {
325 backlog = crypto_get_backlog(&cpg->queue);
326 async_req = crypto_dequeue_request(&cpg->queue);
327 if (async_req) {
328 BUG_ON(cpg->eng_st != ENGINE_IDLE);
329 cpg->eng_st = ENGINE_BUSY;
330 }
331 }
332 spin_unlock_irq(&cpg->lock);
333
334 if (backlog) {
335 backlog->complete(backlog, -EINPROGRESS);
336 backlog = NULL;
337 }
338
339 if (async_req) {
340 req = container_of(async_req,
341 struct ablkcipher_request, base);
342 mv_enqueue_new_req(req);
343 async_req = NULL;
344 }
345
346 schedule();
347
348 } while (!kthread_should_stop());
349 return 0;
350}
351
352static int mv_handle_req(struct ablkcipher_request *req)
353{
354 unsigned long flags;
355 int ret;
356
357 spin_lock_irqsave(&cpg->lock, flags);
358 ret = ablkcipher_enqueue_request(&cpg->queue, req);
359 spin_unlock_irqrestore(&cpg->lock, flags);
360 wake_up_process(cpg->queue_th);
361 return ret;
362}
363
364static int mv_enc_aes_ecb(struct ablkcipher_request *req)
365{
366 struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
367
368 req_ctx->op = COP_AES_ECB;
369 req_ctx->decrypt = 0;
370
371 return mv_handle_req(req);
372}
373
374static int mv_dec_aes_ecb(struct ablkcipher_request *req)
375{
376 struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
377 struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
378
379 req_ctx->op = COP_AES_ECB;
380 req_ctx->decrypt = 1;
381
382 compute_aes_dec_key(ctx);
383 return mv_handle_req(req);
384}
385
386static int mv_enc_aes_cbc(struct ablkcipher_request *req)
387{
388 struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
389
390 req_ctx->op = COP_AES_CBC;
391 req_ctx->decrypt = 0;
392
393 return mv_handle_req(req);
394}
395
396static int mv_dec_aes_cbc(struct ablkcipher_request *req)
397{
398 struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
399 struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
400
401 req_ctx->op = COP_AES_CBC;
402 req_ctx->decrypt = 1;
403
404 compute_aes_dec_key(ctx);
405 return mv_handle_req(req);
406}
407
408static int mv_cra_init(struct crypto_tfm *tfm)
409{
410 tfm->crt_ablkcipher.reqsize = sizeof(struct mv_req_ctx);
411 return 0;
412}
413
414irqreturn_t crypto_int(int irq, void *priv)
415{
416 u32 val;
417
418 val = readl(cpg->reg + SEC_ACCEL_INT_STATUS);
419 if (!(val & SEC_INT_ACCEL0_DONE))
420 return IRQ_NONE;
421
422 val &= ~SEC_INT_ACCEL0_DONE;
423 writel(val, cpg->reg + FPGA_INT_STATUS);
424 writel(val, cpg->reg + SEC_ACCEL_INT_STATUS);
425 BUG_ON(cpg->eng_st != ENGINE_BUSY);
426 cpg->eng_st = ENGINE_W_DEQUEUE;
427 wake_up_process(cpg->queue_th);
428 return IRQ_HANDLED;
429}
430
431struct crypto_alg mv_aes_alg_ecb = {
432 .cra_name = "ecb(aes)",
433 .cra_driver_name = "mv-ecb-aes",
434 .cra_priority = 300,
435 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
436 .cra_blocksize = 16,
437 .cra_ctxsize = sizeof(struct mv_ctx),
438 .cra_alignmask = 0,
439 .cra_type = &crypto_ablkcipher_type,
440 .cra_module = THIS_MODULE,
441 .cra_init = mv_cra_init,
442 .cra_u = {
443 .ablkcipher = {
444 .min_keysize = AES_MIN_KEY_SIZE,
445 .max_keysize = AES_MAX_KEY_SIZE,
446 .setkey = mv_setkey_aes,
447 .encrypt = mv_enc_aes_ecb,
448 .decrypt = mv_dec_aes_ecb,
449 },
450 },
451};
452
453struct crypto_alg mv_aes_alg_cbc = {
454 .cra_name = "cbc(aes)",
455 .cra_driver_name = "mv-cbc-aes",
456 .cra_priority = 300,
457 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
458 .cra_blocksize = AES_BLOCK_SIZE,
459 .cra_ctxsize = sizeof(struct mv_ctx),
460 .cra_alignmask = 0,
461 .cra_type = &crypto_ablkcipher_type,
462 .cra_module = THIS_MODULE,
463 .cra_init = mv_cra_init,
464 .cra_u = {
465 .ablkcipher = {
466 .ivsize = AES_BLOCK_SIZE,
467 .min_keysize = AES_MIN_KEY_SIZE,
468 .max_keysize = AES_MAX_KEY_SIZE,
469 .setkey = mv_setkey_aes,
470 .encrypt = mv_enc_aes_cbc,
471 .decrypt = mv_dec_aes_cbc,
472 },
473 },
474};
475
476static int mv_probe(struct platform_device *pdev)
477{
478 struct crypto_priv *cp;
479 struct resource *res;
480 int irq;
481 int ret;
482
483 if (cpg) {
484 printk(KERN_ERR "Second crypto dev?\n");
485 return -EEXIST;
486 }
487
488 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
489 if (!res)
490 return -ENXIO;
491
492 cp = kzalloc(sizeof(*cp), GFP_KERNEL);
493 if (!cp)
494 return -ENOMEM;
495
496 spin_lock_init(&cp->lock);
497 crypto_init_queue(&cp->queue, 50);
498 cp->reg = ioremap(res->start, res->end - res->start + 1);
499 if (!cp->reg) {
500 ret = -ENOMEM;
501 goto err;
502 }
503
504 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
505 if (!res) {
506 ret = -ENXIO;
507 goto err_unmap_reg;
508 }
509 cp->sram_size = res->end - res->start + 1;
510 cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE;
511 cp->sram = ioremap(res->start, cp->sram_size);
512 if (!cp->sram) {
513 ret = -ENOMEM;
514 goto err_unmap_reg;
515 }
516
517 irq = platform_get_irq(pdev, 0);
518 if (irq < 0 || irq == NO_IRQ) {
519 ret = irq;
520 goto err_unmap_sram;
521 }
522 cp->irq = irq;
523
524 platform_set_drvdata(pdev, cp);
525 cpg = cp;
526
527 cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto");
528 if (IS_ERR(cp->queue_th)) {
529 ret = PTR_ERR(cp->queue_th);
530 goto err_thread;
531 }
532
533 ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev),
534 cp);
535 if (ret)
536 goto err_unmap_sram;
537
538 writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
539 writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);
540
541 ret = crypto_register_alg(&mv_aes_alg_ecb);
542 if (ret)
543 goto err_reg;
544
545 ret = crypto_register_alg(&mv_aes_alg_cbc);
546 if (ret)
547 goto err_unreg_ecb;
548 return 0;
549err_unreg_ecb:
550 crypto_unregister_alg(&mv_aes_alg_ecb);
551err_thread:
552 free_irq(irq, cp);
553err_reg:
554 kthread_stop(cp->queue_th);
555err_unmap_sram:
556 iounmap(cp->sram);
557err_unmap_reg:
558 iounmap(cp->reg);
559err:
560 kfree(cp);
561 cpg = NULL;
562 platform_set_drvdata(pdev, NULL);
563 return ret;
564}
565
566static int mv_remove(struct platform_device *pdev)
567{
568 struct crypto_priv *cp = platform_get_drvdata(pdev);
569
570 crypto_unregister_alg(&mv_aes_alg_ecb);
571 crypto_unregister_alg(&mv_aes_alg_cbc);
572 kthread_stop(cp->queue_th);
573 free_irq(cp->irq, cp);
574 memset(cp->sram, 0, cp->sram_size);
575 iounmap(cp->sram);
576 iounmap(cp->reg);
577 kfree(cp);
578 cpg = NULL;
579 return 0;
580}
581
582static struct platform_driver marvell_crypto = {
583 .probe = mv_probe,
584 .remove = mv_remove,
585 .driver = {
586 .owner = THIS_MODULE,
587 .name = "mv_crypto",
588 },
589};
590MODULE_ALIAS("platform:mv_crypto");
591
592static int __init mv_crypto_init(void)
593{
594 return platform_driver_register(&marvell_crypto);
595}
596module_init(mv_crypto_init);
597
598static void __exit mv_crypto_exit(void)
599{
600 platform_driver_unregister(&marvell_crypto);
601}
602module_exit(mv_crypto_exit);
603
604MODULE_AUTHOR("Sebastian Andrzej Siewior <sebastian@breakpoint.cc>");
605MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
606MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/mv_cesa.h b/drivers/crypto/mv_cesa.h
new file mode 100644
index 000000000000..c3e25d3bb171
--- /dev/null
+++ b/drivers/crypto/mv_cesa.h
@@ -0,0 +1,119 @@
 1#ifndef __MV_CRYPTO_H__
#define __MV_CRYPTO_H__
2
3#define DIGEST_INITIAL_VAL_A 0xdd00
4#define DES_CMD_REG 0xdd58
5
6#define SEC_ACCEL_CMD 0xde00
7#define SEC_CMD_EN_SEC_ACCL0 (1 << 0)
8#define SEC_CMD_EN_SEC_ACCL1 (1 << 1)
9#define SEC_CMD_DISABLE_SEC (1 << 2)
10
11#define SEC_ACCEL_DESC_P0 0xde04
12#define SEC_DESC_P0_PTR(x) (x)
13
14#define SEC_ACCEL_DESC_P1 0xde14
15#define SEC_DESC_P1_PTR(x) (x)
16
17#define SEC_ACCEL_CFG 0xde08
18#define SEC_CFG_STOP_DIG_ERR (1 << 0)
19#define SEC_CFG_CH0_W_IDMA (1 << 7)
20#define SEC_CFG_CH1_W_IDMA (1 << 8)
21#define SEC_CFG_ACT_CH0_IDMA (1 << 9)
22#define SEC_CFG_ACT_CH1_IDMA (1 << 10)
23
24#define SEC_ACCEL_STATUS 0xde0c
25#define SEC_ST_ACT_0 (1 << 0)
26#define SEC_ST_ACT_1 (1 << 1)
27
28/*
 29 * FPGA_INT_STATUS looks like an FPGA leftover and is documented only in Errata
 30 * 4.12. It looks like it was part of an IRQ controller in the FPGA and
31 * someone forgot to remove it while switching to the core and moving to
32 * SEC_ACCEL_INT_STATUS.
33 */
34#define FPGA_INT_STATUS 0xdd68
35#define SEC_ACCEL_INT_STATUS 0xde20
36#define SEC_INT_AUTH_DONE (1 << 0)
37#define SEC_INT_DES_E_DONE (1 << 1)
38#define SEC_INT_AES_E_DONE (1 << 2)
39#define SEC_INT_AES_D_DONE (1 << 3)
40#define SEC_INT_ENC_DONE (1 << 4)
41#define SEC_INT_ACCEL0_DONE (1 << 5)
42#define SEC_INT_ACCEL1_DONE (1 << 6)
43#define SEC_INT_ACC0_IDMA_DONE (1 << 7)
44#define SEC_INT_ACC1_IDMA_DONE (1 << 8)
45
46#define SEC_ACCEL_INT_MASK 0xde24
47
48#define AES_KEY_LEN (8 * 4)
49
50struct sec_accel_config {
51
52 u32 config;
53#define CFG_OP_MAC_ONLY 0
54#define CFG_OP_CRYPT_ONLY 1
55#define CFG_OP_MAC_CRYPT 2
56#define CFG_OP_CRYPT_MAC 3
57#define CFG_MACM_MD5 (4 << 4)
58#define CFG_MACM_SHA1 (5 << 4)
59#define CFG_MACM_HMAC_MD5 (6 << 4)
60#define CFG_MACM_HMAC_SHA1 (7 << 4)
61#define CFG_ENCM_DES (1 << 8)
62#define CFG_ENCM_3DES (2 << 8)
63#define CFG_ENCM_AES (3 << 8)
64#define CFG_DIR_ENC (0 << 12)
65#define CFG_DIR_DEC (1 << 12)
66#define CFG_ENC_MODE_ECB (0 << 16)
67#define CFG_ENC_MODE_CBC (1 << 16)
68#define CFG_3DES_EEE (0 << 20)
69#define CFG_3DES_EDE (1 << 20)
70#define CFG_AES_LEN_128 (0 << 24)
71#define CFG_AES_LEN_192 (1 << 24)
72#define CFG_AES_LEN_256 (2 << 24)
73
74 u32 enc_p;
75#define ENC_P_SRC(x) (x)
76#define ENC_P_DST(x) ((x) << 16)
77
78 u32 enc_len;
79#define ENC_LEN(x) (x)
80
81 u32 enc_key_p;
82#define ENC_KEY_P(x) (x)
83
84 u32 enc_iv;
85#define ENC_IV_POINT(x) ((x) << 0)
86#define ENC_IV_BUF_POINT(x) ((x) << 16)
87
88 u32 mac_src_p;
89#define MAC_SRC_DATA_P(x) (x)
90#define MAC_SRC_TOTAL_LEN(x) ((x) << 16)
91
92 u32 mac_digest;
93 u32 mac_iv;
94}__attribute__ ((packed));
95 /*
96 * /-----------\ 0
97 * | ACCEL CFG | 4 * 8
98 * |-----------| 0x20
99 * | CRYPT KEY | 8 * 4
100 * |-----------| 0x40
101 * | IV IN | 4 * 4
102 * |-----------| 0x40 (inplace)
103 * | IV BUF | 4 * 4
104 * |-----------| 0x50
105 * | DATA IN | 16 * x (max ->max_req_size)
106 * |-----------| 0x50 (inplace operation)
107 * | DATA OUT | 16 * x (max ->max_req_size)
108 * \-----------/ SRAM size
109 */
110#define SRAM_CONFIG 0x00
111#define SRAM_DATA_KEY_P 0x20
112#define SRAM_DATA_IV 0x40
113#define SRAM_DATA_IV_BUF 0x40
114#define SRAM_DATA_IN_START 0x50
115#define SRAM_DATA_OUT_START 0x50
116
117#define SRAM_CFG_SPACE 0x50
118
119#endif
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index a2c8e8514b63..76cb6b345e7b 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -12,81 +12,43 @@
12 * 12 *
13 */ 13 */
14 14
15#include <crypto/algapi.h> 15#include <crypto/internal/hash.h>
16#include <crypto/sha.h> 16#include <crypto/sha.h>
17#include <linux/err.h> 17#include <linux/err.h>
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/errno.h> 20#include <linux/errno.h>
21#include <linux/cryptohash.h>
22#include <linux/interrupt.h> 21#include <linux/interrupt.h>
23#include <linux/kernel.h> 22#include <linux/kernel.h>
24#include <linux/scatterlist.h> 23#include <linux/scatterlist.h>
25#include <asm/i387.h> 24#include <asm/i387.h>
26#include "padlock.h" 25#include "padlock.h"
27 26
28#define SHA1_DEFAULT_FALLBACK "sha1-generic" 27struct padlock_sha_desc {
29#define SHA256_DEFAULT_FALLBACK "sha256-generic" 28 struct shash_desc fallback;
29};
30 30
31struct padlock_sha_ctx { 31struct padlock_sha_ctx {
32 char *data; 32 struct crypto_shash *fallback;
33 size_t used;
34 int bypass;
35 void (*f_sha_padlock)(const char *in, char *out, int count);
36 struct hash_desc fallback;
37}; 33};
38 34
39static inline struct padlock_sha_ctx *ctx(struct crypto_tfm *tfm) 35static int padlock_sha_init(struct shash_desc *desc)
40{
41 return crypto_tfm_ctx(tfm);
42}
43
44/* We'll need aligned address on the stack */
45#define NEAREST_ALIGNED(ptr) \
46 ((void *)ALIGN((size_t)(ptr), PADLOCK_ALIGNMENT))
47
48static struct crypto_alg sha1_alg, sha256_alg;
49
50static void padlock_sha_bypass(struct crypto_tfm *tfm)
51{ 36{
52 if (ctx(tfm)->bypass) 37 struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
53 return; 38 struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);
54 39
55 crypto_hash_init(&ctx(tfm)->fallback); 40 dctx->fallback.tfm = ctx->fallback;
56 if (ctx(tfm)->data && ctx(tfm)->used) { 41 dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
57 struct scatterlist sg; 42 return crypto_shash_init(&dctx->fallback);
58
59 sg_init_one(&sg, ctx(tfm)->data, ctx(tfm)->used);
60 crypto_hash_update(&ctx(tfm)->fallback, &sg, sg.length);
61 }
62
63 ctx(tfm)->used = 0;
64 ctx(tfm)->bypass = 1;
65}
66
67static void padlock_sha_init(struct crypto_tfm *tfm)
68{
69 ctx(tfm)->used = 0;
70 ctx(tfm)->bypass = 0;
71} 43}
72 44
73static void padlock_sha_update(struct crypto_tfm *tfm, 45static int padlock_sha_update(struct shash_desc *desc,
74 const uint8_t *data, unsigned int length) 46 const u8 *data, unsigned int length)
75{ 47{
76 /* Our buffer is always one page. */ 48 struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
77 if (unlikely(!ctx(tfm)->bypass &&
78 (ctx(tfm)->used + length > PAGE_SIZE)))
79 padlock_sha_bypass(tfm);
80
81 if (unlikely(ctx(tfm)->bypass)) {
82 struct scatterlist sg;
83 sg_init_one(&sg, (uint8_t *)data, length);
84 crypto_hash_update(&ctx(tfm)->fallback, &sg, length);
85 return;
86 }
87 49
88 memcpy(ctx(tfm)->data + ctx(tfm)->used, data, length); 50 dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
89 ctx(tfm)->used += length; 51 return crypto_shash_update(&dctx->fallback, data, length);
90} 52}
91 53
92static inline void padlock_output_block(uint32_t *src, 54static inline void padlock_output_block(uint32_t *src,
@@ -96,165 +58,206 @@ static inline void padlock_output_block(uint32_t *src,
96 *dst++ = swab32(*src++); 58 *dst++ = swab32(*src++);
97} 59}
98 60
99static void padlock_do_sha1(const char *in, char *out, int count) 61static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,
62 unsigned int count, u8 *out)
100{ 63{
101 /* We can't store directly to *out as it may be unaligned. */ 64 /* We can't store directly to *out as it may be unaligned. */
102 /* BTW Don't reduce the buffer size below 128 Bytes! 65 /* BTW Don't reduce the buffer size below 128 Bytes!
103 * PadLock microcode needs it that big. */ 66 * PadLock microcode needs it that big. */
104 char buf[128+16]; 67 char result[128] __attribute__ ((aligned(PADLOCK_ALIGNMENT)));
105 char *result = NEAREST_ALIGNED(buf); 68 struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
69 struct sha1_state state;
70 unsigned int space;
71 unsigned int leftover;
106 int ts_state; 72 int ts_state;
73 int err;
74
75 dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
76 err = crypto_shash_export(&dctx->fallback, &state);
77 if (err)
78 goto out;
79
80 if (state.count + count > ULONG_MAX)
81 return crypto_shash_finup(&dctx->fallback, in, count, out);
82
83 leftover = ((state.count - 1) & (SHA1_BLOCK_SIZE - 1)) + 1;
84 space = SHA1_BLOCK_SIZE - leftover;
85 if (space) {
86 if (count > space) {
87 err = crypto_shash_update(&dctx->fallback, in, space) ?:
88 crypto_shash_export(&dctx->fallback, &state);
89 if (err)
90 goto out;
91 count -= space;
92 in += space;
93 } else {
94 memcpy(state.buffer + leftover, in, count);
95 in = state.buffer;
96 count += leftover;
97 state.count &= ~(SHA1_BLOCK_SIZE - 1);
98 }
99 }
100
101 memcpy(result, &state.state, SHA1_DIGEST_SIZE);
107 102
108 ((uint32_t *)result)[0] = SHA1_H0;
109 ((uint32_t *)result)[1] = SHA1_H1;
110 ((uint32_t *)result)[2] = SHA1_H2;
111 ((uint32_t *)result)[3] = SHA1_H3;
112 ((uint32_t *)result)[4] = SHA1_H4;
113
114 /* prevent taking the spurious DNA fault with padlock. */ 103 /* prevent taking the spurious DNA fault with padlock. */
115 ts_state = irq_ts_save(); 104 ts_state = irq_ts_save();
116 asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */ 105 asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
117 : "+S"(in), "+D"(result) 106 : \
118 : "c"(count), "a"(0)); 107 : "c"((unsigned long)state.count + count), \
108 "a"((unsigned long)state.count), \
109 "S"(in), "D"(result));
119 irq_ts_restore(ts_state); 110 irq_ts_restore(ts_state);
120 111
121 padlock_output_block((uint32_t *)result, (uint32_t *)out, 5); 112 padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);
113
114out:
115 return err;
122} 116}
123 117
124static void padlock_do_sha256(const char *in, char *out, int count) 118static int padlock_sha1_final(struct shash_desc *desc, u8 *out)
119{
120 u8 buf[4];
121
122 return padlock_sha1_finup(desc, buf, 0, out);
123}
124
125static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
126 unsigned int count, u8 *out)
125{ 127{
126 /* We can't store directly to *out as it may be unaligned. */ 128 /* We can't store directly to *out as it may be unaligned. */
127 /* BTW Don't reduce the buffer size below 128 Bytes! 129 /* BTW Don't reduce the buffer size below 128 Bytes!
128 * PadLock microcode needs it that big. */ 130 * PadLock microcode needs it that big. */
129 char buf[128+16]; 131 char result[128] __attribute__ ((aligned(PADLOCK_ALIGNMENT)));
130 char *result = NEAREST_ALIGNED(buf); 132 struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
133 struct sha256_state state;
134 unsigned int space;
135 unsigned int leftover;
131 int ts_state; 136 int ts_state;
137 int err;
138
139 dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
140 err = crypto_shash_export(&dctx->fallback, &state);
141 if (err)
142 goto out;
143
144 if (state.count + count > ULONG_MAX)
145 return crypto_shash_finup(&dctx->fallback, in, count, out);
146
147 leftover = ((state.count - 1) & (SHA256_BLOCK_SIZE - 1)) + 1;
148 space = SHA256_BLOCK_SIZE - leftover;
149 if (space) {
150 if (count > space) {
151 err = crypto_shash_update(&dctx->fallback, in, space) ?:
152 crypto_shash_export(&dctx->fallback, &state);
153 if (err)
154 goto out;
155 count -= space;
156 in += space;
157 } else {
158 memcpy(state.buf + leftover, in, count);
159 in = state.buf;
160 count += leftover;
161 state.count &= ~(SHA1_BLOCK_SIZE - 1);
162 }
163 }
132 164
133 ((uint32_t *)result)[0] = SHA256_H0; 165 memcpy(result, &state.state, SHA256_DIGEST_SIZE);
134 ((uint32_t *)result)[1] = SHA256_H1;
135 ((uint32_t *)result)[2] = SHA256_H2;
136 ((uint32_t *)result)[3] = SHA256_H3;
137 ((uint32_t *)result)[4] = SHA256_H4;
138 ((uint32_t *)result)[5] = SHA256_H5;
139 ((uint32_t *)result)[6] = SHA256_H6;
140 ((uint32_t *)result)[7] = SHA256_H7;
141 166
142 /* prevent taking the spurious DNA fault with padlock. */ 167 /* prevent taking the spurious DNA fault with padlock. */
143 ts_state = irq_ts_save(); 168 ts_state = irq_ts_save();
144 asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */ 169 asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
145 : "+S"(in), "+D"(result) 170 : \
146 : "c"(count), "a"(0)); 171 : "c"((unsigned long)state.count + count), \
172 "a"((unsigned long)state.count), \
173 "S"(in), "D"(result));
147 irq_ts_restore(ts_state); 174 irq_ts_restore(ts_state);
148 175
149 padlock_output_block((uint32_t *)result, (uint32_t *)out, 8); 176 padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);
177
178out:
179 return err;
150} 180}
151 181
152static void padlock_sha_final(struct crypto_tfm *tfm, uint8_t *out) 182static int padlock_sha256_final(struct shash_desc *desc, u8 *out)
153{ 183{
154 if (unlikely(ctx(tfm)->bypass)) { 184 u8 buf[4];
155 crypto_hash_final(&ctx(tfm)->fallback, out);
156 ctx(tfm)->bypass = 0;
157 return;
158 }
159 185
160 /* Pass the input buffer to PadLock microcode... */ 186 return padlock_sha256_finup(desc, buf, 0, out);
161 ctx(tfm)->f_sha_padlock(ctx(tfm)->data, out, ctx(tfm)->used);
162
163 ctx(tfm)->used = 0;
164} 187}
165 188
166static int padlock_cra_init(struct crypto_tfm *tfm) 189static int padlock_cra_init(struct crypto_tfm *tfm)
167{ 190{
191 struct crypto_shash *hash = __crypto_shash_cast(tfm);
168 const char *fallback_driver_name = tfm->__crt_alg->cra_name; 192 const char *fallback_driver_name = tfm->__crt_alg->cra_name;
169 struct crypto_hash *fallback_tfm; 193 struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);
170 194 struct crypto_shash *fallback_tfm;
171 /* For now we'll allocate one page. This 195 int err = -ENOMEM;
172 * could eventually be configurable one day. */
173 ctx(tfm)->data = (char *)__get_free_page(GFP_KERNEL);
174 if (!ctx(tfm)->data)
175 return -ENOMEM;
176 196
177 /* Allocate a fallback and abort if it failed. */ 197 /* Allocate a fallback and abort if it failed. */
178 fallback_tfm = crypto_alloc_hash(fallback_driver_name, 0, 198 fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
179 CRYPTO_ALG_ASYNC | 199 CRYPTO_ALG_NEED_FALLBACK);
180 CRYPTO_ALG_NEED_FALLBACK);
181 if (IS_ERR(fallback_tfm)) { 200 if (IS_ERR(fallback_tfm)) {
182 printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n", 201 printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n",
183 fallback_driver_name); 202 fallback_driver_name);
184 free_page((unsigned long)(ctx(tfm)->data)); 203 err = PTR_ERR(fallback_tfm);
185 return PTR_ERR(fallback_tfm); 204 goto out;
186 } 205 }
187 206
188 ctx(tfm)->fallback.tfm = fallback_tfm; 207 ctx->fallback = fallback_tfm;
208 hash->descsize += crypto_shash_descsize(fallback_tfm);
189 return 0; 209 return 0;
190}
191
192static int padlock_sha1_cra_init(struct crypto_tfm *tfm)
193{
194 ctx(tfm)->f_sha_padlock = padlock_do_sha1;
195 210
196 return padlock_cra_init(tfm); 211out:
197} 212 return err;
198
199static int padlock_sha256_cra_init(struct crypto_tfm *tfm)
200{
201 ctx(tfm)->f_sha_padlock = padlock_do_sha256;
202
203 return padlock_cra_init(tfm);
204} 213}
205 214
206static void padlock_cra_exit(struct crypto_tfm *tfm) 215static void padlock_cra_exit(struct crypto_tfm *tfm)
207{ 216{
208 if (ctx(tfm)->data) { 217 struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);
209 free_page((unsigned long)(ctx(tfm)->data));
210 ctx(tfm)->data = NULL;
211 }
212 218
213 crypto_free_hash(ctx(tfm)->fallback.tfm); 219 crypto_free_shash(ctx->fallback);
214 ctx(tfm)->fallback.tfm = NULL;
215} 220}
216 221
217static struct crypto_alg sha1_alg = { 222static struct shash_alg sha1_alg = {
218 .cra_name = "sha1", 223 .digestsize = SHA1_DIGEST_SIZE,
219 .cra_driver_name = "sha1-padlock", 224 .init = padlock_sha_init,
220 .cra_priority = PADLOCK_CRA_PRIORITY, 225 .update = padlock_sha_update,
221 .cra_flags = CRYPTO_ALG_TYPE_DIGEST | 226 .finup = padlock_sha1_finup,
222 CRYPTO_ALG_NEED_FALLBACK, 227 .final = padlock_sha1_final,
223 .cra_blocksize = SHA1_BLOCK_SIZE, 228 .descsize = sizeof(struct padlock_sha_desc),
224 .cra_ctxsize = sizeof(struct padlock_sha_ctx), 229 .base = {
225 .cra_module = THIS_MODULE, 230 .cra_name = "sha1",
226 .cra_list = LIST_HEAD_INIT(sha1_alg.cra_list), 231 .cra_driver_name = "sha1-padlock",
227 .cra_init = padlock_sha1_cra_init, 232 .cra_priority = PADLOCK_CRA_PRIORITY,
228 .cra_exit = padlock_cra_exit, 233 .cra_flags = CRYPTO_ALG_TYPE_SHASH |
229 .cra_u = { 234 CRYPTO_ALG_NEED_FALLBACK,
230 .digest = { 235 .cra_blocksize = SHA1_BLOCK_SIZE,
231 .dia_digestsize = SHA1_DIGEST_SIZE, 236 .cra_ctxsize = sizeof(struct padlock_sha_ctx),
232 .dia_init = padlock_sha_init, 237 .cra_module = THIS_MODULE,
233 .dia_update = padlock_sha_update, 238 .cra_init = padlock_cra_init,
234 .dia_final = padlock_sha_final, 239 .cra_exit = padlock_cra_exit,
235 }
236 } 240 }
237}; 241};
238 242
239static struct crypto_alg sha256_alg = { 243static struct shash_alg sha256_alg = {
240 .cra_name = "sha256", 244 .digestsize = SHA256_DIGEST_SIZE,
241 .cra_driver_name = "sha256-padlock", 245 .init = padlock_sha_init,
242 .cra_priority = PADLOCK_CRA_PRIORITY, 246 .update = padlock_sha_update,
243 .cra_flags = CRYPTO_ALG_TYPE_DIGEST | 247 .finup = padlock_sha256_finup,
244 CRYPTO_ALG_NEED_FALLBACK, 248 .final = padlock_sha256_final,
245 .cra_blocksize = SHA256_BLOCK_SIZE, 249 .descsize = sizeof(struct padlock_sha_desc),
246 .cra_ctxsize = sizeof(struct padlock_sha_ctx), 250 .base = {
247 .cra_module = THIS_MODULE, 251 .cra_name = "sha256",
248 .cra_list = LIST_HEAD_INIT(sha256_alg.cra_list), 252 .cra_driver_name = "sha256-padlock",
249 .cra_init = padlock_sha256_cra_init, 253 .cra_priority = PADLOCK_CRA_PRIORITY,
250 .cra_exit = padlock_cra_exit, 254 .cra_flags = CRYPTO_ALG_TYPE_SHASH |
251 .cra_u = { 255 CRYPTO_ALG_NEED_FALLBACK,
252 .digest = { 256 .cra_blocksize = SHA256_BLOCK_SIZE,
253 .dia_digestsize = SHA256_DIGEST_SIZE, 257 .cra_ctxsize = sizeof(struct padlock_sha_ctx),
254 .dia_init = padlock_sha_init, 258 .cra_module = THIS_MODULE,
255 .dia_update = padlock_sha_update, 259 .cra_init = padlock_cra_init,
256 .dia_final = padlock_sha_final, 260 .cra_exit = padlock_cra_exit,
257 }
258 } 261 }
259}; 262};
260 263
@@ -272,11 +275,11 @@ static int __init padlock_init(void)
272 return -ENODEV; 275 return -ENODEV;
273 } 276 }
274 277
275 rc = crypto_register_alg(&sha1_alg); 278 rc = crypto_register_shash(&sha1_alg);
276 if (rc) 279 if (rc)
277 goto out; 280 goto out;
278 281
279 rc = crypto_register_alg(&sha256_alg); 282 rc = crypto_register_shash(&sha256_alg);
280 if (rc) 283 if (rc)
281 goto out_unreg1; 284 goto out_unreg1;
282 285
@@ -285,7 +288,7 @@ static int __init padlock_init(void)
285 return 0; 288 return 0;
286 289
287out_unreg1: 290out_unreg1:
288 crypto_unregister_alg(&sha1_alg); 291 crypto_unregister_shash(&sha1_alg);
289out: 292out:
290 printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n"); 293 printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n");
291 return rc; 294 return rc;
@@ -293,8 +296,8 @@ out:
293 296
294static void __exit padlock_fini(void) 297static void __exit padlock_fini(void)
295{ 298{
296 crypto_unregister_alg(&sha1_alg); 299 crypto_unregister_shash(&sha1_alg);
297 crypto_unregister_alg(&sha256_alg); 300 crypto_unregister_shash(&sha256_alg);
298} 301}
299 302
300module_init(padlock_init); 303module_init(padlock_init);
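
Note on the padlock-sha hunks above: the shash finup paths hand the complete remaining message to the PadLock "rep xsha1"/"rep xsha256" microcode in one call, so they first reconcile the byte count already absorbed by the fallback shash with the new chunk and the 64-byte block boundary. A minimal stand-alone sketch of that leftover/space arithmetic (the names and the free-standing form are illustrative only, not part of the patch):

    #include <stdint.h>

    #define BLOCK_SIZE 64 /* SHA-1 and SHA-256 block size in bytes */

    /*
     * Bytes sitting in the current, partially filled block after 'total'
     * bytes have been hashed, mapped into 1..BLOCK_SIZE: a completely full
     * (or still empty) buffer counts as BLOCK_SIZE, leaving zero space to
     * top up. This is the same trick used by padlock_sha1_finup() and
     * padlock_sha256_finup() above.
     */
    static unsigned int block_leftover(uint64_t total)
    {
            return ((total - 1) & (BLOCK_SIZE - 1)) + 1;
    }

    /* Free space left in that block; 0 means the next byte opens a new block. */
    static unsigned int block_space(uint64_t total)
    {
            return BLOCK_SIZE - block_leftover(total);
    }

When the new chunk does not fill that space, the driver copies it into the exported state buffer and hashes the combined partial block instead, which is why the else branch above does "count += leftover" and realigns state.count to a block boundary.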
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index c70775fd3ce2..c47ffe8a73ef 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -86,6 +86,25 @@ struct talitos_request {
86 void *context; 86 void *context;
87}; 87};
88 88
89/* per-channel fifo management */
90struct talitos_channel {
91 /* request fifo */
92 struct talitos_request *fifo;
93
94 /* number of requests pending in channel h/w fifo */
95 atomic_t submit_count ____cacheline_aligned;
96
97 /* request submission (head) lock */
98 spinlock_t head_lock ____cacheline_aligned;
99 /* index to next free descriptor request */
100 int head;
101
102 /* request release (tail) lock */
103 spinlock_t tail_lock ____cacheline_aligned;
104 /* index to next in-progress/done descriptor request */
105 int tail;
106};
107
89struct talitos_private { 108struct talitos_private {
90 struct device *dev; 109 struct device *dev;
91 struct of_device *ofdev; 110 struct of_device *ofdev;
@@ -101,15 +120,6 @@ struct talitos_private {
101 /* SEC Compatibility info */ 120 /* SEC Compatibility info */
102 unsigned long features; 121 unsigned long features;
103 122
104 /* next channel to be assigned next incoming descriptor */
105 atomic_t last_chan;
106
107 /* per-channel number of requests pending in channel h/w fifo */
108 atomic_t *submit_count;
109
110 /* per-channel request fifo */
111 struct talitos_request **fifo;
112
113 /* 123 /*
114 * length of the request fifo 124 * length of the request fifo
115 * fifo_len is chfifo_len rounded up to next power of 2 125 * fifo_len is chfifo_len rounded up to next power of 2
@@ -117,15 +127,10 @@ struct talitos_private {
117 */ 127 */
118 unsigned int fifo_len; 128 unsigned int fifo_len;
119 129
120 /* per-channel index to next free descriptor request */ 130 struct talitos_channel *chan;
121 int *head;
122
123 /* per-channel index to next in-progress/done descriptor request */
124 int *tail;
125 131
126 /* per-channel request submission (head) and release (tail) locks */ 132 /* next channel to be assigned next incoming descriptor */
127 spinlock_t *head_lock; 133 atomic_t last_chan ____cacheline_aligned;
128 spinlock_t *tail_lock;
129 134
130 /* request callback tasklet */ 135 /* request callback tasklet */
131 struct tasklet_struct done_task; 136 struct tasklet_struct done_task;
@@ -141,6 +146,12 @@ struct talitos_private {
141#define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001 146#define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001
142#define TALITOS_FTR_HW_AUTH_CHECK 0x00000002 147#define TALITOS_FTR_HW_AUTH_CHECK 0x00000002
143 148
149static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr)
150{
151 talitos_ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
152 talitos_ptr->eptr = cpu_to_be32(upper_32_bits(dma_addr));
153}
154
144/* 155/*
145 * map virtual single (contiguous) pointer to h/w descriptor pointer 156 * map virtual single (contiguous) pointer to h/w descriptor pointer
146 */ 157 */
@@ -150,8 +161,10 @@ static void map_single_talitos_ptr(struct device *dev,
150 unsigned char extent, 161 unsigned char extent,
151 enum dma_data_direction dir) 162 enum dma_data_direction dir)
152{ 163{
164 dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
165
153 talitos_ptr->len = cpu_to_be16(len); 166 talitos_ptr->len = cpu_to_be16(len);
154 talitos_ptr->ptr = cpu_to_be32(dma_map_single(dev, data, len, dir)); 167 to_talitos_ptr(talitos_ptr, dma_addr);
155 talitos_ptr->j_extent = extent; 168 talitos_ptr->j_extent = extent;
156} 169}
157 170
@@ -182,9 +195,9 @@ static int reset_channel(struct device *dev, int ch)
182 return -EIO; 195 return -EIO;
183 } 196 }
184 197
185 /* set done writeback and IRQ */ 198 /* set 36-bit addressing, done writeback enable and done IRQ enable */
186 setbits32(priv->reg + TALITOS_CCCR_LO(ch), TALITOS_CCCR_LO_CDWE | 199 setbits32(priv->reg + TALITOS_CCCR_LO(ch), TALITOS_CCCR_LO_EAE |
187 TALITOS_CCCR_LO_CDIE); 200 TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
188 201
189 /* and ICCR writeback, if available */ 202 /* and ICCR writeback, if available */
190 if (priv->features & TALITOS_FTR_HW_AUTH_CHECK) 203 if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
@@ -282,16 +295,16 @@ static int talitos_submit(struct device *dev, struct talitos_desc *desc,
282 /* emulate SEC's round-robin channel fifo polling scheme */ 295 /* emulate SEC's round-robin channel fifo polling scheme */
283 ch = atomic_inc_return(&priv->last_chan) & (priv->num_channels - 1); 296 ch = atomic_inc_return(&priv->last_chan) & (priv->num_channels - 1);
284 297
285 spin_lock_irqsave(&priv->head_lock[ch], flags); 298 spin_lock_irqsave(&priv->chan[ch].head_lock, flags);
286 299
287 if (!atomic_inc_not_zero(&priv->submit_count[ch])) { 300 if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
288 /* h/w fifo is full */ 301 /* h/w fifo is full */
289 spin_unlock_irqrestore(&priv->head_lock[ch], flags); 302 spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
290 return -EAGAIN; 303 return -EAGAIN;
291 } 304 }
292 305
293 head = priv->head[ch]; 306 head = priv->chan[ch].head;
294 request = &priv->fifo[ch][head]; 307 request = &priv->chan[ch].fifo[head];
295 308
296 /* map descriptor and save caller data */ 309 /* map descriptor and save caller data */
297 request->dma_desc = dma_map_single(dev, desc, sizeof(*desc), 310 request->dma_desc = dma_map_single(dev, desc, sizeof(*desc),
@@ -300,16 +313,19 @@ static int talitos_submit(struct device *dev, struct talitos_desc *desc,
300 request->context = context; 313 request->context = context;
301 314
302 /* increment fifo head */ 315 /* increment fifo head */
303 priv->head[ch] = (priv->head[ch] + 1) & (priv->fifo_len - 1); 316 priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);
304 317
305 smp_wmb(); 318 smp_wmb();
306 request->desc = desc; 319 request->desc = desc;
307 320
308 /* GO! */ 321 /* GO! */
309 wmb(); 322 wmb();
310 out_be32(priv->reg + TALITOS_FF_LO(ch), request->dma_desc); 323 out_be32(priv->reg + TALITOS_FF(ch),
324 cpu_to_be32(upper_32_bits(request->dma_desc)));
325 out_be32(priv->reg + TALITOS_FF_LO(ch),
326 cpu_to_be32(lower_32_bits(request->dma_desc)));
311 327
312 spin_unlock_irqrestore(&priv->head_lock[ch], flags); 328 spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
313 329
314 return -EINPROGRESS; 330 return -EINPROGRESS;
315} 331}
@@ -324,11 +340,11 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
324 unsigned long flags; 340 unsigned long flags;
325 int tail, status; 341 int tail, status;
326 342
327 spin_lock_irqsave(&priv->tail_lock[ch], flags); 343 spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
328 344
329 tail = priv->tail[ch]; 345 tail = priv->chan[ch].tail;
330 while (priv->fifo[ch][tail].desc) { 346 while (priv->chan[ch].fifo[tail].desc) {
331 request = &priv->fifo[ch][tail]; 347 request = &priv->chan[ch].fifo[tail];
332 348
333 /* descriptors with their done bits set don't get the error */ 349 /* descriptors with their done bits set don't get the error */
334 rmb(); 350 rmb();
@@ -354,22 +370,22 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
354 request->desc = NULL; 370 request->desc = NULL;
355 371
356 /* increment fifo tail */ 372 /* increment fifo tail */
357 priv->tail[ch] = (tail + 1) & (priv->fifo_len - 1); 373 priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);
358 374
359 spin_unlock_irqrestore(&priv->tail_lock[ch], flags); 375 spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
360 376
361 atomic_dec(&priv->submit_count[ch]); 377 atomic_dec(&priv->chan[ch].submit_count);
362 378
363 saved_req.callback(dev, saved_req.desc, saved_req.context, 379 saved_req.callback(dev, saved_req.desc, saved_req.context,
364 status); 380 status);
365 /* channel may resume processing in single desc error case */ 381 /* channel may resume processing in single desc error case */
366 if (error && !reset_ch && status == error) 382 if (error && !reset_ch && status == error)
367 return; 383 return;
368 spin_lock_irqsave(&priv->tail_lock[ch], flags); 384 spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
369 tail = priv->tail[ch]; 385 tail = priv->chan[ch].tail;
370 } 386 }
371 387
372 spin_unlock_irqrestore(&priv->tail_lock[ch], flags); 388 spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
373} 389}
374 390
375/* 391/*
@@ -397,20 +413,20 @@ static void talitos_done(unsigned long data)
397static struct talitos_desc *current_desc(struct device *dev, int ch) 413static struct talitos_desc *current_desc(struct device *dev, int ch)
398{ 414{
399 struct talitos_private *priv = dev_get_drvdata(dev); 415 struct talitos_private *priv = dev_get_drvdata(dev);
400 int tail = priv->tail[ch]; 416 int tail = priv->chan[ch].tail;
401 dma_addr_t cur_desc; 417 dma_addr_t cur_desc;
402 418
403 cur_desc = in_be32(priv->reg + TALITOS_CDPR_LO(ch)); 419 cur_desc = in_be32(priv->reg + TALITOS_CDPR_LO(ch));
404 420
405 while (priv->fifo[ch][tail].dma_desc != cur_desc) { 421 while (priv->chan[ch].fifo[tail].dma_desc != cur_desc) {
406 tail = (tail + 1) & (priv->fifo_len - 1); 422 tail = (tail + 1) & (priv->fifo_len - 1);
407 if (tail == priv->tail[ch]) { 423 if (tail == priv->chan[ch].tail) {
408 dev_err(dev, "couldn't locate current descriptor\n"); 424 dev_err(dev, "couldn't locate current descriptor\n");
409 return NULL; 425 return NULL;
410 } 426 }
411 } 427 }
412 428
413 return priv->fifo[ch][tail].desc; 429 return priv->chan[ch].fifo[tail].desc;
414} 430}
415 431
416/* 432/*
@@ -929,7 +945,7 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
929 int n_sg = sg_count; 945 int n_sg = sg_count;
930 946
931 while (n_sg--) { 947 while (n_sg--) {
932 link_tbl_ptr->ptr = cpu_to_be32(sg_dma_address(sg)); 948 to_talitos_ptr(link_tbl_ptr, sg_dma_address(sg));
933 link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg)); 949 link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg));
934 link_tbl_ptr->j_extent = 0; 950 link_tbl_ptr->j_extent = 0;
935 link_tbl_ptr++; 951 link_tbl_ptr++;
@@ -970,7 +986,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
970 struct talitos_desc *desc = &edesc->desc; 986 struct talitos_desc *desc = &edesc->desc;
971 unsigned int cryptlen = areq->cryptlen; 987 unsigned int cryptlen = areq->cryptlen;
972 unsigned int authsize = ctx->authsize; 988 unsigned int authsize = ctx->authsize;
973 unsigned int ivsize; 989 unsigned int ivsize = crypto_aead_ivsize(aead);
974 int sg_count, ret; 990 int sg_count, ret;
975 int sg_link_tbl_len; 991 int sg_link_tbl_len;
976 992
@@ -978,11 +994,9 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
978 map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key, 994 map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
979 0, DMA_TO_DEVICE); 995 0, DMA_TO_DEVICE);
980 /* hmac data */ 996 /* hmac data */
981 map_single_talitos_ptr(dev, &desc->ptr[1], sg_virt(areq->src) - 997 map_single_talitos_ptr(dev, &desc->ptr[1], areq->assoclen + ivsize,
982 sg_virt(areq->assoc), sg_virt(areq->assoc), 0, 998 sg_virt(areq->assoc), 0, DMA_TO_DEVICE);
983 DMA_TO_DEVICE);
984 /* cipher iv */ 999 /* cipher iv */
985 ivsize = crypto_aead_ivsize(aead);
986 map_single_talitos_ptr(dev, &desc->ptr[2], ivsize, giv ?: areq->iv, 0, 1000 map_single_talitos_ptr(dev, &desc->ptr[2], ivsize, giv ?: areq->iv, 0,
987 DMA_TO_DEVICE); 1001 DMA_TO_DEVICE);
988 1002
@@ -1006,7 +1020,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1006 edesc->src_is_chained); 1020 edesc->src_is_chained);
1007 1021
1008 if (sg_count == 1) { 1022 if (sg_count == 1) {
1009 desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src)); 1023 to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src));
1010 } else { 1024 } else {
1011 sg_link_tbl_len = cryptlen; 1025 sg_link_tbl_len = cryptlen;
1012 1026
@@ -1017,14 +1031,14 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1017 &edesc->link_tbl[0]); 1031 &edesc->link_tbl[0]);
1018 if (sg_count > 1) { 1032 if (sg_count > 1) {
1019 desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP; 1033 desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
1020 desc->ptr[4].ptr = cpu_to_be32(edesc->dma_link_tbl); 1034 to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl);
1021 dma_sync_single_for_device(dev, edesc->dma_link_tbl, 1035 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1022 edesc->dma_len, 1036 edesc->dma_len,
1023 DMA_BIDIRECTIONAL); 1037 DMA_BIDIRECTIONAL);
1024 } else { 1038 } else {
1025 /* Only one segment now, so no link tbl needed */ 1039 /* Only one segment now, so no link tbl needed */
1026 desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq-> 1040 to_talitos_ptr(&desc->ptr[4],
1027 src)); 1041 sg_dma_address(areq->src));
1028 } 1042 }
1029 } 1043 }
1030 1044
@@ -1039,14 +1053,14 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1039 edesc->dst_is_chained); 1053 edesc->dst_is_chained);
1040 1054
1041 if (sg_count == 1) { 1055 if (sg_count == 1) {
1042 desc->ptr[5].ptr = cpu_to_be32(sg_dma_address(areq->dst)); 1056 to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst));
1043 } else { 1057 } else {
1044 struct talitos_ptr *link_tbl_ptr = 1058 struct talitos_ptr *link_tbl_ptr =
1045 &edesc->link_tbl[edesc->src_nents + 1]; 1059 &edesc->link_tbl[edesc->src_nents + 1];
1046 1060
1047 desc->ptr[5].ptr = cpu_to_be32((struct talitos_ptr *) 1061 to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
1048 edesc->dma_link_tbl + 1062 (edesc->src_nents + 1) *
1049 edesc->src_nents + 1); 1063 sizeof(struct talitos_ptr));
1050 sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen, 1064 sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
1051 link_tbl_ptr); 1065 link_tbl_ptr);
1052 1066
@@ -1059,11 +1073,9 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1059 link_tbl_ptr->len = cpu_to_be16(authsize); 1073 link_tbl_ptr->len = cpu_to_be16(authsize);
1060 1074
1061 /* icv data follows link tables */ 1075 /* icv data follows link tables */
1062 link_tbl_ptr->ptr = cpu_to_be32((struct talitos_ptr *) 1076 to_talitos_ptr(link_tbl_ptr, edesc->dma_link_tbl +
1063 edesc->dma_link_tbl + 1077 (edesc->src_nents + edesc->dst_nents + 2) *
1064 edesc->src_nents + 1078 sizeof(struct talitos_ptr));
1065 edesc->dst_nents + 2);
1066
1067 desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP; 1079 desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
1068 dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, 1080 dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
1069 edesc->dma_len, DMA_BIDIRECTIONAL); 1081 edesc->dma_len, DMA_BIDIRECTIONAL);
@@ -1338,7 +1350,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
1338 1350
1339 /* first DWORD empty */ 1351 /* first DWORD empty */
1340 desc->ptr[0].len = 0; 1352 desc->ptr[0].len = 0;
1341 desc->ptr[0].ptr = 0; 1353 to_talitos_ptr(&desc->ptr[0], 0);
1342 desc->ptr[0].j_extent = 0; 1354 desc->ptr[0].j_extent = 0;
1343 1355
1344 /* cipher iv */ 1356 /* cipher iv */
@@ -1362,20 +1374,20 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
1362 edesc->src_is_chained); 1374 edesc->src_is_chained);
1363 1375
1364 if (sg_count == 1) { 1376 if (sg_count == 1) {
1365 desc->ptr[3].ptr = cpu_to_be32(sg_dma_address(areq->src)); 1377 to_talitos_ptr(&desc->ptr[3], sg_dma_address(areq->src));
1366 } else { 1378 } else {
1367 sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen, 1379 sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen,
1368 &edesc->link_tbl[0]); 1380 &edesc->link_tbl[0]);
1369 if (sg_count > 1) { 1381 if (sg_count > 1) {
1382 to_talitos_ptr(&desc->ptr[3], edesc->dma_link_tbl);
1370 desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP; 1383 desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP;
1371 desc->ptr[3].ptr = cpu_to_be32(edesc->dma_link_tbl);
1372 dma_sync_single_for_device(dev, edesc->dma_link_tbl, 1384 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1373 edesc->dma_len, 1385 edesc->dma_len,
1374 DMA_BIDIRECTIONAL); 1386 DMA_BIDIRECTIONAL);
1375 } else { 1387 } else {
1376 /* Only one segment now, so no link tbl needed */ 1388 /* Only one segment now, so no link tbl needed */
1377 desc->ptr[3].ptr = cpu_to_be32(sg_dma_address(areq-> 1389 to_talitos_ptr(&desc->ptr[3],
1378 src)); 1390 sg_dma_address(areq->src));
1379 } 1391 }
1380 } 1392 }
1381 1393
@@ -1390,15 +1402,15 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
1390 edesc->dst_is_chained); 1402 edesc->dst_is_chained);
1391 1403
1392 if (sg_count == 1) { 1404 if (sg_count == 1) {
1393 desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->dst)); 1405 to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->dst));
1394 } else { 1406 } else {
1395 struct talitos_ptr *link_tbl_ptr = 1407 struct talitos_ptr *link_tbl_ptr =
1396 &edesc->link_tbl[edesc->src_nents + 1]; 1408 &edesc->link_tbl[edesc->src_nents + 1];
1397 1409
1410 to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
1411 (edesc->src_nents + 1) *
1412 sizeof(struct talitos_ptr));
1398 desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP; 1413 desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
1399 desc->ptr[4].ptr = cpu_to_be32((struct talitos_ptr *)
1400 edesc->dma_link_tbl +
1401 edesc->src_nents + 1);
1402 sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen, 1414 sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
1403 link_tbl_ptr); 1415 link_tbl_ptr);
1404 dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, 1416 dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
@@ -1411,7 +1423,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
1411 1423
1412 /* last DWORD empty */ 1424 /* last DWORD empty */
1413 desc->ptr[6].len = 0; 1425 desc->ptr[6].len = 0;
1414 desc->ptr[6].ptr = 0; 1426 to_talitos_ptr(&desc->ptr[6], 0);
1415 desc->ptr[6].j_extent = 0; 1427 desc->ptr[6].j_extent = 0;
1416 1428
1417 ret = talitos_submit(dev, desc, callback, areq); 1429 ret = talitos_submit(dev, desc, callback, areq);
@@ -1742,17 +1754,11 @@ static int talitos_remove(struct of_device *ofdev)
1742 if (hw_supports(dev, DESC_HDR_SEL0_RNG)) 1754 if (hw_supports(dev, DESC_HDR_SEL0_RNG))
1743 talitos_unregister_rng(dev); 1755 talitos_unregister_rng(dev);
1744 1756
1745 kfree(priv->submit_count); 1757 for (i = 0; i < priv->num_channels; i++)
1746 kfree(priv->tail); 1758 if (priv->chan[i].fifo)
1747 kfree(priv->head); 1759 kfree(priv->chan[i].fifo);
1748
1749 if (priv->fifo)
1750 for (i = 0; i < priv->num_channels; i++)
1751 kfree(priv->fifo[i]);
1752 1760
1753 kfree(priv->fifo); 1761 kfree(priv->chan);
1754 kfree(priv->head_lock);
1755 kfree(priv->tail_lock);
1756 1762
1757 if (priv->irq != NO_IRQ) { 1763 if (priv->irq != NO_IRQ) {
1758 free_irq(priv->irq, dev); 1764 free_irq(priv->irq, dev);
@@ -1872,58 +1878,36 @@ static int talitos_probe(struct of_device *ofdev,
1872 if (of_device_is_compatible(np, "fsl,sec2.1")) 1878 if (of_device_is_compatible(np, "fsl,sec2.1"))
1873 priv->features |= TALITOS_FTR_HW_AUTH_CHECK; 1879 priv->features |= TALITOS_FTR_HW_AUTH_CHECK;
1874 1880
1875 priv->head_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels, 1881 priv->chan = kzalloc(sizeof(struct talitos_channel) *
1876 GFP_KERNEL); 1882 priv->num_channels, GFP_KERNEL);
1877 priv->tail_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels, 1883 if (!priv->chan) {
1878 GFP_KERNEL); 1884 dev_err(dev, "failed to allocate channel management space\n");
1879 if (!priv->head_lock || !priv->tail_lock) {
1880 dev_err(dev, "failed to allocate fifo locks\n");
1881 err = -ENOMEM; 1885 err = -ENOMEM;
1882 goto err_out; 1886 goto err_out;
1883 } 1887 }
1884 1888
1885 for (i = 0; i < priv->num_channels; i++) { 1889 for (i = 0; i < priv->num_channels; i++) {
1886 spin_lock_init(&priv->head_lock[i]); 1890 spin_lock_init(&priv->chan[i].head_lock);
1887 spin_lock_init(&priv->tail_lock[i]); 1891 spin_lock_init(&priv->chan[i].tail_lock);
1888 }
1889
1890 priv->fifo = kmalloc(sizeof(struct talitos_request *) *
1891 priv->num_channels, GFP_KERNEL);
1892 if (!priv->fifo) {
1893 dev_err(dev, "failed to allocate request fifo\n");
1894 err = -ENOMEM;
1895 goto err_out;
1896 } 1892 }
1897 1893
1898 priv->fifo_len = roundup_pow_of_two(priv->chfifo_len); 1894 priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
1899 1895
1900 for (i = 0; i < priv->num_channels; i++) { 1896 for (i = 0; i < priv->num_channels; i++) {
1901 priv->fifo[i] = kzalloc(sizeof(struct talitos_request) * 1897 priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) *
1902 priv->fifo_len, GFP_KERNEL); 1898 priv->fifo_len, GFP_KERNEL);
1903 if (!priv->fifo[i]) { 1899 if (!priv->chan[i].fifo) {
1904 dev_err(dev, "failed to allocate request fifo %d\n", i); 1900 dev_err(dev, "failed to allocate request fifo %d\n", i);
1905 err = -ENOMEM; 1901 err = -ENOMEM;
1906 goto err_out; 1902 goto err_out;
1907 } 1903 }
1908 } 1904 }
1909 1905
1910 priv->submit_count = kmalloc(sizeof(atomic_t) * priv->num_channels,
1911 GFP_KERNEL);
1912 if (!priv->submit_count) {
1913 dev_err(dev, "failed to allocate fifo submit count space\n");
1914 err = -ENOMEM;
1915 goto err_out;
1916 }
1917 for (i = 0; i < priv->num_channels; i++) 1906 for (i = 0; i < priv->num_channels; i++)
1918 atomic_set(&priv->submit_count[i], -(priv->chfifo_len - 1)); 1907 atomic_set(&priv->chan[i].submit_count,
1908 -(priv->chfifo_len - 1));
1919 1909
1920 priv->head = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL); 1910 dma_set_mask(dev, DMA_BIT_MASK(36));
1921 priv->tail = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL);
1922 if (!priv->head || !priv->tail) {
1923 dev_err(dev, "failed to allocate request index space\n");
1924 err = -ENOMEM;
1925 goto err_out;
1926 }
1927 1911
1928 /* reset and initialize the h/w */ 1912 /* reset and initialize the h/w */
1929 err = init_device(dev); 1913 err = init_device(dev);
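
The talitos hunks above extend descriptor pointers to the SEC's 36-bit DMA addressing: to_talitos_ptr() splits every dma_addr_t into a 32-bit low word plus an extended pointer, and reset_channel() sets TALITOS_CCCR_LO_EAE so the hardware honours the extra bits. A free-standing illustration of the split (the struct layout, field widths and lack of byte swapping here are assumptions for the example, not the driver's definitions):

    #include <stdint.h>

    struct example_sec_ptr {
            uint16_t len;
            uint8_t  j_extent;
            uint8_t  eptr;  /* bits 35:32 of the DMA address */
            uint32_t ptr;   /* bits 31:0  of the DMA address */
    };

    static void example_set_ptr(struct example_sec_ptr *p, uint64_t dma_addr)
    {
            p->ptr  = (uint32_t)(dma_addr & 0xffffffffu);  /* like lower_32_bits() */
            p->eptr = (uint8_t)((dma_addr >> 32) & 0xf);   /* like upper_32_bits() */
    }

The real driver additionally byte-swaps both halves with cpu_to_be32() before they reach the hardware, and when launching a descriptor writes the upper half to TALITOS_FF(ch) and the lower half to TALITOS_FF_LO(ch), as shown in the talitos_submit() hunk.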
diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h
index 575981f0cfda..ff5a1450e145 100644
--- a/drivers/crypto/talitos.h
+++ b/drivers/crypto/talitos.h
@@ -57,6 +57,7 @@
57#define TALITOS_CCCR_RESET 0x1 /* channel reset */ 57#define TALITOS_CCCR_RESET 0x1 /* channel reset */
58#define TALITOS_CCCR_LO(ch) (ch * TALITOS_CH_STRIDE + 0x110c) 58#define TALITOS_CCCR_LO(ch) (ch * TALITOS_CH_STRIDE + 0x110c)
59#define TALITOS_CCCR_LO_IWSE 0x80 /* chan. ICCR writeback enab. */ 59#define TALITOS_CCCR_LO_IWSE 0x80 /* chan. ICCR writeback enab. */
60#define TALITOS_CCCR_LO_EAE 0x20 /* extended address enable */
60#define TALITOS_CCCR_LO_CDWE 0x10 /* chan. done writeback enab. */ 61#define TALITOS_CCCR_LO_CDWE 0x10 /* chan. done writeback enab. */
61#define TALITOS_CCCR_LO_NT 0x4 /* notification type */ 62#define TALITOS_CCCR_LO_NT 0x4 /* notification type */
62#define TALITOS_CCCR_LO_CDIE 0x2 /* channel done IRQ enable */ 63#define TALITOS_CCCR_LO_CDIE 0x2 /* channel done IRQ enable */
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index 110e731f5574..1c0b504a42f3 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -196,7 +196,7 @@ static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
196 switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP, 196 switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
197 irm_id, generation, SCODE_100, 197 irm_id, generation, SCODE_100,
198 CSR_REGISTER_BASE + CSR_BANDWIDTH_AVAILABLE, 198 CSR_REGISTER_BASE + CSR_BANDWIDTH_AVAILABLE,
199 data, sizeof(data))) { 199 data, 8)) {
200 case RCODE_GENERATION: 200 case RCODE_GENERATION:
201 /* A generation change frees all bandwidth. */ 201 /* A generation change frees all bandwidth. */
202 return allocate ? -EAGAIN : bandwidth; 202 return allocate ? -EAGAIN : bandwidth;
@@ -233,7 +233,7 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation,
233 data[1] = old ^ c; 233 data[1] = old ^ c;
234 switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP, 234 switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
235 irm_id, generation, SCODE_100, 235 irm_id, generation, SCODE_100,
236 offset, data, sizeof(data))) { 236 offset, data, 8)) {
237 case RCODE_GENERATION: 237 case RCODE_GENERATION:
238 /* A generation change frees all channels. */ 238 /* A generation change frees all channels. */
239 return allocate ? -EAGAIN : i; 239 return allocate ? -EAGAIN : i;
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index ecddd11b797a..76b321bb73f9 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -34,6 +34,7 @@
34#include <linux/module.h> 34#include <linux/module.h>
35#include <linux/moduleparam.h> 35#include <linux/moduleparam.h>
36#include <linux/pci.h> 36#include <linux/pci.h>
37#include <linux/pci_ids.h>
37#include <linux/spinlock.h> 38#include <linux/spinlock.h>
38#include <linux/string.h> 39#include <linux/string.h>
39 40
@@ -2372,6 +2373,9 @@ static void ohci_pmac_off(struct pci_dev *dev)
2372#define ohci_pmac_off(dev) 2373#define ohci_pmac_off(dev)
2373#endif /* CONFIG_PPC_PMAC */ 2374#endif /* CONFIG_PPC_PMAC */
2374 2375
2376#define PCI_VENDOR_ID_AGERE PCI_VENDOR_ID_ATT
2377#define PCI_DEVICE_ID_AGERE_FW643 0x5901
2378
2375static int __devinit pci_probe(struct pci_dev *dev, 2379static int __devinit pci_probe(struct pci_dev *dev,
2376 const struct pci_device_id *ent) 2380 const struct pci_device_id *ent)
2377{ 2381{
@@ -2422,6 +2426,16 @@ static int __devinit pci_probe(struct pci_dev *dev,
2422 version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff; 2426 version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
2423 ohci->use_dualbuffer = version >= OHCI_VERSION_1_1; 2427 ohci->use_dualbuffer = version >= OHCI_VERSION_1_1;
2424 2428
2429 /* dual-buffer mode is broken if more than one IR context is active */
2430 if (dev->vendor == PCI_VENDOR_ID_AGERE &&
2431 dev->device == PCI_DEVICE_ID_AGERE_FW643)
2432 ohci->use_dualbuffer = false;
2433
2434 /* dual-buffer mode is broken */
2435 if (dev->vendor == PCI_VENDOR_ID_RICOH &&
2436 dev->device == PCI_DEVICE_ID_RICOH_R5C832)
2437 ohci->use_dualbuffer = false;
2438
2425/* x86-32 currently doesn't use highmem for dma_alloc_coherent */ 2439/* x86-32 currently doesn't use highmem for dma_alloc_coherent */
2426#if !defined(CONFIG_X86_32) 2440#if !defined(CONFIG_X86_32)
2427 /* dual-buffer mode is broken with descriptor addresses above 2G */ 2441 /* dual-buffer mode is broken with descriptor addresses above 2G */
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 8d51568ee143..e5df822a8130 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -456,12 +456,12 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
456 } 456 }
457 spin_unlock_irqrestore(&card->lock, flags); 457 spin_unlock_irqrestore(&card->lock, flags);
458 458
459 if (&orb->link != &lu->orb_list) 459 if (&orb->link != &lu->orb_list) {
460 orb->callback(orb, &status); 460 orb->callback(orb, &status);
461 else 461 kref_put(&orb->kref, free_orb);
462 } else {
462 fw_error("status write for unknown orb\n"); 463 fw_error("status write for unknown orb\n");
463 464 }
464 kref_put(&orb->kref, free_orb);
465 465
466 fw_send_response(card, request, RCODE_COMPLETE); 466 fw_send_response(card, request, RCODE_COMPLETE);
467} 467}
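
The sbp2 change above drops the ORB reference only in the branch that actually matched a queued ORB; a status write for an unknown ORB no longer puts a reference it never owned. The general kref pattern being preserved, sketched with made-up names rather than the driver's types:

    #include <linux/kernel.h>
    #include <linux/kref.h>
    #include <linux/slab.h>

    struct demo_orb {
            struct kref kref;
            /* ... request state ... */
    };

    static void demo_orb_release(struct kref *kref)
    {
            kfree(container_of(kref, struct demo_orb, kref));
    }

    /* Completion side: only the path that found the object puts its reference. */
    static void demo_status_write(struct demo_orb *orb, bool found)
    {
            if (found) {
                    /* run the callback, then drop the reference the queue held */
                    kref_put(&orb->kref, demo_orb_release);
            }
            /* an unmatched status write owns no reference, so it puts none */
    }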
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 24c84ae81527..938100f14b16 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -568,35 +568,76 @@ const struct dmi_device * dmi_find_device(int type, const char *name,
568EXPORT_SYMBOL(dmi_find_device); 568EXPORT_SYMBOL(dmi_find_device);
569 569
570/** 570/**
571 * dmi_get_year - Return year of a DMI date 571 * dmi_get_date - parse a DMI date
572 * @field: data index (like dmi_get_system_info) 572 * @field: data index (see enum dmi_field)
573 * @yearp: optional out parameter for the year
574 * @monthp: optional out parameter for the month
575 * @dayp: optional out parameter for the day
573 * 576 *
574 * Returns -1 when the field doesn't exist. 0 when it is broken. 577 * The date field is assumed to be in the form resembling
578 * [mm[/dd]]/yy[yy] and the result is stored in the out
579 * parameters any or all of which can be omitted.
580 *
581 * If the field doesn't exist, all out parameters are set to zero
582 * and false is returned. Otherwise, true is returned with any
583 * invalid part of date set to zero.
584 *
585 * On return, year, month and day are guaranteed to be in the
586 * range of [0,9999], [0,12] and [0,31] respectively.
575 */ 587 */
576int dmi_get_year(int field) 588bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp)
577{ 589{
578 int year; 590 int year = 0, month = 0, day = 0;
579 const char *s = dmi_get_system_info(field); 591 bool exists;
592 const char *s, *y;
593 char *e;
580 594
581 if (!s) 595 s = dmi_get_system_info(field);
582 return -1; 596 exists = s;
583 if (*s == '\0') 597 if (!exists)
584 return 0; 598 goto out;
585 s = strrchr(s, '/');
586 if (!s)
587 return 0;
588 599
589 s += 1; 600 /*
590 year = simple_strtoul(s, NULL, 0); 601 * Determine year first. We assume the date string resembles
591 if (year && year < 100) { /* 2-digit year */ 602 * mm/dd/yy[yy] but the original code extracted only the year
603 * from the end. Keep the behavior in the spirit of no
604 * surprises.
605 */
606 y = strrchr(s, '/');
607 if (!y)
608 goto out;
609
610 y++;
611 year = simple_strtoul(y, &e, 10);
612 if (y != e && year < 100) { /* 2-digit year */
592 year += 1900; 613 year += 1900;
593 if (year < 1996) /* no dates < spec 1.0 */ 614 if (year < 1996) /* no dates < spec 1.0 */
594 year += 100; 615 year += 100;
595 } 616 }
617 if (year > 9999) /* year should fit in %04d */
618 year = 0;
619
620 /* parse the mm and dd */
621 month = simple_strtoul(s, &e, 10);
622 if (s == e || *e != '/' || !month || month > 12) {
623 month = 0;
624 goto out;
625 }
596 626
597 return year; 627 s = e + 1;
628 day = simple_strtoul(s, &e, 10);
629 if (s == y || s == e || *e != '/' || day > 31)
630 day = 0;
631out:
632 if (yearp)
633 *yearp = year;
634 if (monthp)
635 *monthp = month;
636 if (dayp)
637 *dayp = day;
638 return exists;
598} 639}
599EXPORT_SYMBOL(dmi_get_year); 640EXPORT_SYMBOL(dmi_get_date);
600 641
601/** 642/**
602 * dmi_walk - Walk the DMI table and get called back for every record 643 * dmi_walk - Walk the DMI table and get called back for every record
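
dmi_get_date() above replaces the year-only helper with a parser for the whole [mm[/dd]]/yy[yy] DMI date, returning false when the field is missing and zeroing any part it cannot parse. A hypothetical caller, for instance a driver gating a quirk on BIOS age (the 2006 cut-off is an arbitrary example, not taken from this patch):

    #include <linux/dmi.h>

    static bool bios_from_2006_or_later(void)
    {
            int year;

            /* false: the DMI field does not exist at all */
            if (!dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL))
                    return false;

            /* year == 0: the field existed but the year part was unusable */
            return year >= 2006;
    }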
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0c07a755b3a3..80e5ba490dc2 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2267,8 +2267,6 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
2267 fence_list) { 2267 fence_list) {
2268 old_obj = old_obj_priv->obj; 2268 old_obj = old_obj_priv->obj;
2269 2269
2270 reg = &dev_priv->fence_regs[old_obj_priv->fence_reg];
2271
2272 if (old_obj_priv->pin_count) 2270 if (old_obj_priv->pin_count)
2273 continue; 2271 continue;
2274 2272
@@ -2290,8 +2288,11 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
2290 */ 2288 */
2291 i915_gem_object_flush_gpu_write_domain(old_obj); 2289 i915_gem_object_flush_gpu_write_domain(old_obj);
2292 ret = i915_gem_object_wait_rendering(old_obj); 2290 ret = i915_gem_object_wait_rendering(old_obj);
2293 if (ret != 0) 2291 if (ret != 0) {
2292 drm_gem_object_unreference(old_obj);
2294 return ret; 2293 return ret;
2294 }
2295
2295 break; 2296 break;
2296 } 2297 }
2297 2298
@@ -2299,10 +2300,14 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
2299 * Zap this virtual mapping so we can set up a fence again 2300 * Zap this virtual mapping so we can set up a fence again
2300 * for this object next time we need it. 2301 * for this object next time we need it.
2301 */ 2302 */
2302 i915_gem_release_mmap(reg->obj); 2303 i915_gem_release_mmap(old_obj);
2304
2303 i = old_obj_priv->fence_reg; 2305 i = old_obj_priv->fence_reg;
2306 reg = &dev_priv->fence_regs[i];
2307
2304 old_obj_priv->fence_reg = I915_FENCE_REG_NONE; 2308 old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
2305 list_del_init(&old_obj_priv->fence_list); 2309 list_del_init(&old_obj_priv->fence_list);
2310
2306 drm_gem_object_unreference(old_obj); 2311 drm_gem_object_unreference(old_obj);
2307 } 2312 }
2308 2313
@@ -4227,15 +4232,11 @@ int
4227i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, 4232i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4228 struct drm_file *file_priv) 4233 struct drm_file *file_priv)
4229{ 4234{
4230 int ret;
4231
4232 if (drm_core_check_feature(dev, DRIVER_MODESET)) 4235 if (drm_core_check_feature(dev, DRIVER_MODESET))
4233 return 0; 4236 return 0;
4234 4237
4235 ret = i915_gem_idle(dev);
4236 drm_irq_uninstall(dev); 4238 drm_irq_uninstall(dev);
4237 4239 return i915_gem_idle(dev);
4238 return ret;
4239} 4240}
4240 4241
4241void 4242void
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3fadb5358858..748ed50c55ca 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2005,7 +2005,21 @@ static void igd_enable_cxsr(struct drm_device *dev, unsigned long clock,
2005 return; 2005 return;
2006} 2006}
2007 2007
2008const static int latency_ns = 3000; /* default for non-igd platforms */ 2008/*
2009 * Latency for FIFO fetches is dependent on several factors:
2010 * - memory configuration (speed, channels)
2011 * - chipset
2012 * - current MCH state
2013 * It can be fairly high in some situations, so here we assume a fairly
2014 * pessimal value. It's a tradeoff between extra memory fetches (if we
2015 * set this value too high, the FIFO will fetch frequently to stay full)
2016 * and power consumption (set it too low to save power and we might see
2017 * FIFO underruns and display "flicker").
2018 *
2019 * A value of 5us seems to be a good balance; safe for very low end
2020 * platforms but not overly aggressive on lower latency configs.
2021 */
2022const static int latency_ns = 5000;
2009 2023
2010static int intel_get_fifo_size(struct drm_device *dev, int plane) 2024static int intel_get_fifo_size(struct drm_device *dev, int plane)
2011{ 2025{
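
The comment block added above explains why latency_ns is raised from 3000 to 5000: the display FIFO must buffer enough data to ride out a worst-case memory fetch. A rough, driver-independent illustration of how that latency figure feeds a watermark calculation (the formula and names are a simplification for this note, not the i915 code):

    /*
     * FIFO entries that scanout drains while one memory fetch of
     * 'latency_ns' is outstanding, and therefore how early the hardware
     * must start refilling.
     */
    static unsigned long example_watermark(unsigned long pixel_clock_khz,
                                           unsigned long bytes_per_pixel,
                                           unsigned long latency_ns,
                                           unsigned long fifo_entries,
                                           unsigned long entry_size)
    {
            unsigned long bytes_drained, entries_drained;

            /* pixels per microsecond * bytes per pixel * microseconds of latency */
            bytes_drained = pixel_clock_khz / 1000 * bytes_per_pixel
                            * latency_ns / 1000;
            entries_drained = (bytes_drained + entry_size - 1) / entry_size;

            return fifo_entries > entries_drained ?
                    fifo_entries - entries_drained : 1;
    }

A larger assumed latency grows entries_drained, so the FIFO refills earlier: fewer underruns at the cost of more frequent memory fetches, which is exactly the tradeoff the new comment spells out.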
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index f2afc4af4bc9..2b914d732076 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1263,7 +1263,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
1263 1263
1264 if (IS_eDP(intel_output)) { 1264 if (IS_eDP(intel_output)) {
1265 intel_output->crtc_mask = (1 << 1); 1265 intel_output->crtc_mask = (1 << 1);
1266 intel_output->clone_mask = (1 << INTEL_OUTPUT_EDP); 1266 intel_output->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
1267 } else 1267 } else
1268 intel_output->crtc_mask = (1 << 0) | (1 << 1); 1268 intel_output->crtc_mask = (1 << 0) | (1 << 1);
1269 connector->interlace_allowed = true; 1269 connector->interlace_allowed = true;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 25aa6facc12d..26a6227c15fe 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -74,6 +74,7 @@
74#define INTEL_LVDS_CLONE_BIT 14 74#define INTEL_LVDS_CLONE_BIT 14
75#define INTEL_DVO_TMDS_CLONE_BIT 15 75#define INTEL_DVO_TMDS_CLONE_BIT 15
76#define INTEL_DVO_LVDS_CLONE_BIT 16 76#define INTEL_DVO_LVDS_CLONE_BIT 16
77#define INTEL_EDP_CLONE_BIT 17
77 78
78#define INTEL_DVO_CHIP_NONE 0 79#define INTEL_DVO_CHIP_NONE 0
79#define INTEL_DVO_CHIP_LVDS 1 80#define INTEL_DVO_CHIP_LVDS 1
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 2fbe13a0de81..5b1c9e9fdba0 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1730,6 +1730,7 @@ intel_tv_init(struct drm_device *dev)
1730 drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); 1730 drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc);
1731 tv_priv = (struct intel_tv_priv *)(intel_output + 1); 1731 tv_priv = (struct intel_tv_priv *)(intel_output + 1);
1732 intel_output->type = INTEL_OUTPUT_TVOUT; 1732 intel_output->type = INTEL_OUTPUT_TVOUT;
1733 intel_output->crtc_mask = (1 << 0) | (1 << 1);
1733 intel_output->clone_mask = (1 << INTEL_TV_CLONE_BIT); 1734 intel_output->clone_mask = (1 << INTEL_TV_CLONE_BIT);
1734 intel_output->enc.possible_crtcs = ((1 << 0) | (1 << 1)); 1735 intel_output->enc.possible_crtcs = ((1 << 0) | (1 << 1));
1735 intel_output->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT); 1736 intel_output->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 053f4ec397f7..051bca6e3a4f 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -995,7 +995,7 @@ static const unsigned r300_reg_safe_bm[159] = {
995 0x00000000, 0x00000000, 0x00000000, 0x00000000, 995 0x00000000, 0x00000000, 0x00000000, 0x00000000,
996 0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFF80FFFF, 996 0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFF80FFFF,
997 0x00000000, 0x00000000, 0x00000000, 0x00000000, 997 0x00000000, 0x00000000, 0x00000000, 0x00000000,
998 0x0003FC01, 0xFFFFFFF8, 0xFE800B19, 998 0x0003FC01, 0xFFFFFCF8, 0xFF800B19,
999}; 999};
1000 1000
1001static int r300_packet0_check(struct radeon_cs_parser *p, 1001static int r300_packet0_check(struct radeon_cs_parser *p,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 7ca6c13569b5..93d8f8889302 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -266,6 +266,7 @@ static struct radeon_asic rs400_asic = {
266/* 266/*
267 * rs600. 267 * rs600.
268 */ 268 */
269int rs600_init(struct radeon_device *dev);
269void rs600_errata(struct radeon_device *rdev); 270void rs600_errata(struct radeon_device *rdev);
270void rs600_vram_info(struct radeon_device *rdev); 271void rs600_vram_info(struct radeon_device *rdev);
271int rs600_mc_init(struct radeon_device *rdev); 272int rs600_mc_init(struct radeon_device *rdev);
@@ -281,7 +282,7 @@ uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
281void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 282void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
282void rs600_bandwidth_update(struct radeon_device *rdev); 283void rs600_bandwidth_update(struct radeon_device *rdev);
283static struct radeon_asic rs600_asic = { 284static struct radeon_asic rs600_asic = {
284 .init = &r300_init, 285 .init = &rs600_init,
285 .errata = &rs600_errata, 286 .errata = &rs600_errata,
286 .vram_info = &rs600_vram_info, 287 .vram_info = &rs600_vram_info,
287 .gpu_reset = &r300_gpu_reset, 288 .gpu_reset = &r300_gpu_reset,
@@ -316,7 +317,6 @@ static struct radeon_asic rs600_asic = {
316/* 317/*
317 * rs690,rs740 318 * rs690,rs740
318 */ 319 */
319int rs690_init(struct radeon_device *rdev);
320void rs690_errata(struct radeon_device *rdev); 320void rs690_errata(struct radeon_device *rdev);
321void rs690_vram_info(struct radeon_device *rdev); 321void rs690_vram_info(struct radeon_device *rdev);
322int rs690_mc_init(struct radeon_device *rdev); 322int rs690_mc_init(struct radeon_device *rdev);
@@ -325,7 +325,7 @@ uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg);
325void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 325void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
326void rs690_bandwidth_update(struct radeon_device *rdev); 326void rs690_bandwidth_update(struct radeon_device *rdev);
327static struct radeon_asic rs690_asic = { 327static struct radeon_asic rs690_asic = {
328 .init = &rs690_init, 328 .init = &rs600_init,
329 .errata = &rs690_errata, 329 .errata = &rs690_errata,
330 .vram_info = &rs690_vram_info, 330 .vram_info = &rs690_vram_info,
331 .gpu_reset = &r300_gpu_reset, 331 .gpu_reset = &r300_gpu_reset,
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 7e8ce983a908..02fd11aad6a2 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -409,3 +409,68 @@ void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
409 ((reg) & RS600_MC_ADDR_MASK)); 409 ((reg) & RS600_MC_ADDR_MASK));
410 WREG32(RS600_MC_DATA, v); 410 WREG32(RS600_MC_DATA, v);
411} 411}
412
413static const unsigned rs600_reg_safe_bm[219] = {
414 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
415 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
416 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
417 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
418 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
419 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
420 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
421 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
422 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
423 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
424 0x17FF1FFF, 0xFFFFFFFC, 0xFFFFFFFF, 0xFF30FFBF,
425 0xFFFFFFF8, 0xC3E6FFFF, 0xFFFFF6DF, 0xFFFFFFFF,
426 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
427 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
428 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF03F,
429 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
430 0xFFFFFFFF, 0xFFFFEFCE, 0xF00EBFFF, 0x007C0000,
431 0xF0000078, 0xFF000009, 0xFFFFFFFF, 0xFFFFFFFF,
432 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
433 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
434 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
435 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
436 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
437 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
438 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
439 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
440 0xFFFFF7FF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
441 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
442 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
443 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
444 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
445 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
446 0xFFFFFC78, 0xFFFFFFFF, 0xFFFFFFFE, 0xFFFFFFFF,
447 0x38FF8F50, 0xFFF88082, 0xF000000C, 0xFAE009FF,
448 0x0000FFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000,
449 0x00000000, 0x0000C100, 0x00000000, 0x00000000,
450 0x00000000, 0x00000000, 0x00000000, 0x00000000,
451 0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFF80FFFF,
452 0x00000000, 0x00000000, 0x00000000, 0x00000000,
453 0x0003FC01, 0xFFFFFCF8, 0xFF800B19, 0xFFFFFFFF,
454 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
455 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
456 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
457 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
458 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
459 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
460 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
461 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
462 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
463 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
464 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
465 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
466 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
467 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
468 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
469};
470
471int rs600_init(struct radeon_device *rdev)
472{
473 rdev->config.r300.reg_safe_bm = rs600_reg_safe_bm;
474 rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rs600_reg_safe_bm);
475 return 0;
476}
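
rs600_init() above points the command-stream checker at rs600_reg_safe_bm[], a bitmap with one bit per register where only registers userspace may touch directly are set. A sketch of how such a permission bitmap is typically consulted (the indexing below assumes 4-byte registers, 32 per bitmap word; it is an illustration, not a quote of the radeon checker):

    #include <stdbool.h>
    #include <stdint.h>

    static bool reg_is_safe(const unsigned int *bm, unsigned int bm_words,
                            uint32_t reg_offset)
    {
            uint32_t word = reg_offset >> 7;             /* 32 regs x 4 bytes per word */
            uint32_t mask = 1u << ((reg_offset >> 2) & 31);

            if (word >= bm_words)
                    return false;                        /* past the table: reject */
            return (bm[word] & mask) != 0;
    }

With the indexing assumed here, a 219-word table spans register offsets up to roughly 0x6D80, versus about 0x4F80 for the 159-word r300 table, which is why rs600/rs690 carry their own maps while reusing the r300 checker.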
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index bc6b7c5339bc..879882533e45 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -653,67 +653,3 @@ void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
653 WREG32(RS690_MC_INDEX, RS690_MC_INDEX_WR_ACK); 653 WREG32(RS690_MC_INDEX, RS690_MC_INDEX_WR_ACK);
654} 654}
655 655
656static const unsigned rs690_reg_safe_bm[219] = {
657 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
658 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
659 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
660 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
661 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
662 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
663 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
664 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
665 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
666 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
667 0x17FF1FFF,0xFFFFFFFC,0xFFFFFFFF,0xFF30FFBF,
668 0xFFFFFFF8,0xC3E6FFFF,0xFFFFF6DF,0xFFFFFFFF,
669 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
670 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
671 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFF03F,
672 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
673 0xFFFFFFFF,0xFFFFEFCE,0xF00EBFFF,0x007C0000,
674 0xF0000078,0xFF000009,0xFFFFFFFF,0xFFFFFFFF,
675 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
676 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
677 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
678 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
679 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
680 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
681 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
682 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
683 0xFFFFF7FF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
684 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
685 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
686 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
687 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
688 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
689 0xFFFFFC78,0xFFFFFFFF,0xFFFFFFFE,0xFFFFFFFF,
690 0x38FF8F50,0xFFF88082,0xF000000C,0xFAE009FF,
691 0x0000FFFF,0xFFFFFFFF,0xFFFFFFFF,0x00000000,
692 0x00000000,0x0000C100,0x00000000,0x00000000,
693 0x00000000,0x00000000,0x00000000,0x00000000,
694 0x00000000,0xFFFF0000,0xFFFFFFFF,0xFF80FFFF,
695 0x00000000,0x00000000,0x00000000,0x00000000,
696 0x0003FC01,0xFFFFFFF8,0xFE800B19,0xFFFFFFFF,
697 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
698 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
699 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
700 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
701 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
702 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
703 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
704 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
705 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
706 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
707 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
708 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
709 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
710 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
711 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
712};
713
714int rs690_init(struct radeon_device *rdev)
715{
716 rdev->config.r300.reg_safe_bm = rs690_reg_safe_bm;
717 rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rs690_reg_safe_bm);
718 return 0;
719}
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 31a7f668ae5a..0566fb67e460 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -508,7 +508,7 @@ static const unsigned r500_reg_safe_bm[219] = {
508 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 508 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
509 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF80FFFF, 509 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF80FFFF,
510 0x00000000, 0x00000000, 0x00000000, 0x00000000, 510 0x00000000, 0x00000000, 0x00000000, 0x00000000,
511 0x0003FC01, 0x3FFFFCF8, 0xFE800B19, 0xFFFFFFFF, 511 0x0003FC01, 0x3FFFFCF8, 0xFF800B19, 0xFFDFFFFF,
512 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 512 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
513 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 513 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
514 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 514 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
index 923cbfe259d3..6396c3ad3252 100644
--- a/drivers/ide/atiixp.c
+++ b/drivers/ide/atiixp.c
@@ -177,6 +177,7 @@ static const struct pci_device_id atiixp_pci_tbl[] = {
177 { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP400_IDE), 0 }, 177 { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP400_IDE), 0 },
178 { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP600_IDE), 1 }, 178 { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP600_IDE), 1 },
179 { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP700_IDE), 0 }, 179 { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP700_IDE), 0 },
180 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_SB900_IDE), 0 },
180 { 0, }, 181 { 0, },
181}; 182};
182MODULE_DEVICE_TABLE(pci, atiixp_pci_tbl); 183MODULE_DEVICE_TABLE(pci, atiixp_pci_tbl);
diff --git a/drivers/ide/ide-cs.c b/drivers/ide/ide-cs.c
index 527908ff298c..063b933d864a 100644
--- a/drivers/ide/ide-cs.c
+++ b/drivers/ide/ide-cs.c
@@ -408,6 +408,7 @@ static struct pcmcia_device_id ide_ids[] = {
408 PCMCIA_DEVICE_PROD_ID123("PCMCIA", "IDE CARD", "F1", 0x281f1c5d, 0x1907960c, 0xf7fde8b9), 408 PCMCIA_DEVICE_PROD_ID123("PCMCIA", "IDE CARD", "F1", 0x281f1c5d, 0x1907960c, 0xf7fde8b9),
409 PCMCIA_DEVICE_PROD_ID12("ARGOSY", "CD-ROM", 0x78f308dc, 0x66536591), 409 PCMCIA_DEVICE_PROD_ID12("ARGOSY", "CD-ROM", 0x78f308dc, 0x66536591),
410 PCMCIA_DEVICE_PROD_ID12("ARGOSY", "PnPIDE", 0x78f308dc, 0x0c694728), 410 PCMCIA_DEVICE_PROD_ID12("ARGOSY", "PnPIDE", 0x78f308dc, 0x0c694728),
411 PCMCIA_DEVICE_PROD_ID12("CNF ", "CD-ROM", 0x46d7db81, 0x66536591),
411 PCMCIA_DEVICE_PROD_ID12("CNF CD-M", "CD-ROM", 0x7d93b852, 0x66536591), 412 PCMCIA_DEVICE_PROD_ID12("CNF CD-M", "CD-ROM", 0x7d93b852, 0x66536591),
412 PCMCIA_DEVICE_PROD_ID12("Creative Technology Ltd.", "PCMCIA CD-ROM Interface Card", 0xff8c8a45, 0xfe8020c4), 413 PCMCIA_DEVICE_PROD_ID12("Creative Technology Ltd.", "PCMCIA CD-ROM Interface Card", 0xff8c8a45, 0xfe8020c4),
413 PCMCIA_DEVICE_PROD_ID12("Digital Equipment Corporation.", "Digital Mobile Media CD-ROM", 0x17692a66, 0xef1dcbde), 414 PCMCIA_DEVICE_PROD_ID12("Digital Equipment Corporation.", "Digital Mobile Media CD-ROM", 0x17692a66, 0xef1dcbde),
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index 8f9509e1ebf7..55d093a36ae4 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -362,6 +362,7 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
362 * In either case, must tell the provider to reject. 362 * In either case, must tell the provider to reject.
363 */ 363 */
364 cm_id_priv->state = IW_CM_STATE_DESTROYING; 364 cm_id_priv->state = IW_CM_STATE_DESTROYING;
365 cm_id->device->iwcm->reject(cm_id, NULL, 0);
365 break; 366 break;
366 case IW_CM_STATE_CONN_SENT: 367 case IW_CM_STATE_CONN_SENT:
367 case IW_CM_STATE_DESTROYING: 368 case IW_CM_STATE_DESTROYING:
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index de922a04ca2d..7522008fda86 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -2,6 +2,7 @@
2 * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved. 2 * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
3 * Copyright (c) 2005 Intel Corporation. All rights reserved. 3 * Copyright (c) 2005 Intel Corporation. All rights reserved.
4 * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved. 4 * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
5 * Copyright (c) 2009 HNR Consulting. All rights reserved.
5 * 6 *
6 * This software is available to you under a choice of one of two 7 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU 8 * licenses. You may choose to be licensed under the terms of the GNU
@@ -45,14 +46,21 @@ MODULE_DESCRIPTION("kernel IB MAD API");
45MODULE_AUTHOR("Hal Rosenstock"); 46MODULE_AUTHOR("Hal Rosenstock");
46MODULE_AUTHOR("Sean Hefty"); 47MODULE_AUTHOR("Sean Hefty");
47 48
49int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
50int mad_recvq_size = IB_MAD_QP_RECV_SIZE;
51
52module_param_named(send_queue_size, mad_sendq_size, int, 0444);
53MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
54module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
55MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");
56
48static struct kmem_cache *ib_mad_cache; 57static struct kmem_cache *ib_mad_cache;
49 58
50static struct list_head ib_mad_port_list; 59static struct list_head ib_mad_port_list;
51static u32 ib_mad_client_id = 0; 60static u32 ib_mad_client_id = 0;
52 61
53/* Port list lock */ 62/* Port list lock */
54static spinlock_t ib_mad_port_list_lock; 63static DEFINE_SPINLOCK(ib_mad_port_list_lock);
55
56 64
57/* Forward declarations */ 65/* Forward declarations */
58static int method_in_use(struct ib_mad_mgmt_method_table **method, 66static int method_in_use(struct ib_mad_mgmt_method_table **method,
@@ -1974,7 +1982,7 @@ static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
1974 unsigned long delay; 1982 unsigned long delay;
1975 1983
1976 if (list_empty(&mad_agent_priv->wait_list)) { 1984 if (list_empty(&mad_agent_priv->wait_list)) {
1977 cancel_delayed_work(&mad_agent_priv->timed_work); 1985 __cancel_delayed_work(&mad_agent_priv->timed_work);
1978 } else { 1986 } else {
1979 mad_send_wr = list_entry(mad_agent_priv->wait_list.next, 1987 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
1980 struct ib_mad_send_wr_private, 1988 struct ib_mad_send_wr_private,
@@ -1983,7 +1991,7 @@ static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
1983 if (time_after(mad_agent_priv->timeout, 1991 if (time_after(mad_agent_priv->timeout,
1984 mad_send_wr->timeout)) { 1992 mad_send_wr->timeout)) {
1985 mad_agent_priv->timeout = mad_send_wr->timeout; 1993 mad_agent_priv->timeout = mad_send_wr->timeout;
1986 cancel_delayed_work(&mad_agent_priv->timed_work); 1994 __cancel_delayed_work(&mad_agent_priv->timed_work);
1987 delay = mad_send_wr->timeout - jiffies; 1995 delay = mad_send_wr->timeout - jiffies;
1988 if ((long)delay <= 0) 1996 if ((long)delay <= 0)
1989 delay = 1; 1997 delay = 1;
@@ -2023,7 +2031,7 @@ static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
2023 2031
2024 /* Reschedule a work item if we have a shorter timeout */ 2032 /* Reschedule a work item if we have a shorter timeout */
2025 if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) { 2033 if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) {
2026 cancel_delayed_work(&mad_agent_priv->timed_work); 2034 __cancel_delayed_work(&mad_agent_priv->timed_work);
2027 queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq, 2035 queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2028 &mad_agent_priv->timed_work, delay); 2036 &mad_agent_priv->timed_work, delay);
2029 } 2037 }
@@ -2736,8 +2744,8 @@ static int create_mad_qp(struct ib_mad_qp_info *qp_info,
2736 qp_init_attr.send_cq = qp_info->port_priv->cq; 2744 qp_init_attr.send_cq = qp_info->port_priv->cq;
2737 qp_init_attr.recv_cq = qp_info->port_priv->cq; 2745 qp_init_attr.recv_cq = qp_info->port_priv->cq;
2738 qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR; 2746 qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
2739 qp_init_attr.cap.max_send_wr = IB_MAD_QP_SEND_SIZE; 2747 qp_init_attr.cap.max_send_wr = mad_sendq_size;
2740 qp_init_attr.cap.max_recv_wr = IB_MAD_QP_RECV_SIZE; 2748 qp_init_attr.cap.max_recv_wr = mad_recvq_size;
2741 qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG; 2749 qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
2742 qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG; 2750 qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
2743 qp_init_attr.qp_type = qp_type; 2751 qp_init_attr.qp_type = qp_type;
@@ -2752,8 +2760,8 @@ static int create_mad_qp(struct ib_mad_qp_info *qp_info,
2752 goto error; 2760 goto error;
2753 } 2761 }
2754 /* Use minimum queue sizes unless the CQ is resized */ 2762 /* Use minimum queue sizes unless the CQ is resized */
2755 qp_info->send_queue.max_active = IB_MAD_QP_SEND_SIZE; 2763 qp_info->send_queue.max_active = mad_sendq_size;
2756 qp_info->recv_queue.max_active = IB_MAD_QP_RECV_SIZE; 2764 qp_info->recv_queue.max_active = mad_recvq_size;
2757 return 0; 2765 return 0;
2758 2766
2759error: 2767error:
@@ -2792,7 +2800,7 @@ static int ib_mad_port_open(struct ib_device *device,
2792 init_mad_qp(port_priv, &port_priv->qp_info[0]); 2800 init_mad_qp(port_priv, &port_priv->qp_info[0]);
2793 init_mad_qp(port_priv, &port_priv->qp_info[1]); 2801 init_mad_qp(port_priv, &port_priv->qp_info[1]);
2794 2802
2795 cq_size = (IB_MAD_QP_SEND_SIZE + IB_MAD_QP_RECV_SIZE) * 2; 2803 cq_size = (mad_sendq_size + mad_recvq_size) * 2;
2796 port_priv->cq = ib_create_cq(port_priv->device, 2804 port_priv->cq = ib_create_cq(port_priv->device,
2797 ib_mad_thread_completion_handler, 2805 ib_mad_thread_completion_handler,
2798 NULL, port_priv, cq_size, 0); 2806 NULL, port_priv, cq_size, 0);
@@ -2984,7 +2992,11 @@ static int __init ib_mad_init_module(void)
2984{ 2992{
2985 int ret; 2993 int ret;
2986 2994
2987 spin_lock_init(&ib_mad_port_list_lock); 2995 mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
2996 mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);
2997
2998 mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
2999 mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);
2988 3000
2989 ib_mad_cache = kmem_cache_create("ib_mad", 3001 ib_mad_cache = kmem_cache_create("ib_mad",
2990 sizeof(struct ib_mad_private), 3002 sizeof(struct ib_mad_private),
@@ -3021,4 +3033,3 @@ static void __exit ib_mad_cleanup_module(void)
3021 3033
3022module_init(ib_mad_init_module); 3034module_init(ib_mad_init_module);
3023module_exit(ib_mad_cleanup_module); 3035module_exit(ib_mad_cleanup_module);
3024
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
index 05ce331733b0..9430ab4969c5 100644
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -2,6 +2,7 @@
2 * Copyright (c) 2004, 2005, Voltaire, Inc. All rights reserved. 2 * Copyright (c) 2004, 2005, Voltaire, Inc. All rights reserved.
3 * Copyright (c) 2005 Intel Corporation. All rights reserved. 3 * Copyright (c) 2005 Intel Corporation. All rights reserved.
4 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 4 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
5 * Copyright (c) 2009 HNR Consulting. All rights reserved.
5 * 6 *
6 * This software is available to you under a choice of one of two 7 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU 8 * licenses. You may choose to be licensed under the terms of the GNU
@@ -49,6 +50,8 @@
49/* QP and CQ parameters */ 50/* QP and CQ parameters */
50#define IB_MAD_QP_SEND_SIZE 128 51#define IB_MAD_QP_SEND_SIZE 128
51#define IB_MAD_QP_RECV_SIZE 512 52#define IB_MAD_QP_RECV_SIZE 512
53#define IB_MAD_QP_MIN_SIZE 64
54#define IB_MAD_QP_MAX_SIZE 8192
52#define IB_MAD_SEND_REQ_MAX_SG 2 55#define IB_MAD_SEND_REQ_MAX_SG 2
53#define IB_MAD_RECV_REQ_MAX_SG 1 56#define IB_MAD_RECV_REQ_MAX_SG 1
54 57
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
index 107f170c57cd..8d82ba171353 100644
--- a/drivers/infiniband/core/multicast.c
+++ b/drivers/infiniband/core/multicast.c
@@ -106,6 +106,8 @@ struct mcast_group {
106 struct ib_sa_query *query; 106 struct ib_sa_query *query;
107 int query_id; 107 int query_id;
108 u16 pkey_index; 108 u16 pkey_index;
109 u8 leave_state;
110 int retries;
109}; 111};
110 112
111struct mcast_member { 113struct mcast_member {
@@ -350,6 +352,7 @@ static int send_leave(struct mcast_group *group, u8 leave_state)
350 352
351 rec = group->rec; 353 rec = group->rec;
352 rec.join_state = leave_state; 354 rec.join_state = leave_state;
355 group->leave_state = leave_state;
353 356
354 ret = ib_sa_mcmember_rec_query(&sa_client, port->dev->device, 357 ret = ib_sa_mcmember_rec_query(&sa_client, port->dev->device,
355 port->port_num, IB_SA_METHOD_DELETE, &rec, 358 port->port_num, IB_SA_METHOD_DELETE, &rec,
@@ -542,7 +545,11 @@ static void leave_handler(int status, struct ib_sa_mcmember_rec *rec,
542{ 545{
543 struct mcast_group *group = context; 546 struct mcast_group *group = context;
544 547
545 mcast_work_handler(&group->work); 548 if (status && group->retries > 0 &&
549 !send_leave(group, group->leave_state))
550 group->retries--;
551 else
552 mcast_work_handler(&group->work);
546} 553}
547 554
548static struct mcast_group *acquire_group(struct mcast_port *port, 555static struct mcast_group *acquire_group(struct mcast_port *port,
@@ -565,6 +572,7 @@ static struct mcast_group *acquire_group(struct mcast_port *port,
565 if (!group) 572 if (!group)
566 return NULL; 573 return NULL;
567 574
575 group->retries = 3;
568 group->port = port; 576 group->port = port;
569 group->rec.mgid = *mgid; 577 group->rec.mgid = *mgid;
570 group->pkey_index = MCAST_INVALID_PKEY_INDEX; 578 group->pkey_index = MCAST_INVALID_PKEY_INDEX;
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 1865049e80f7..82543716d59e 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -109,10 +109,10 @@ static struct ib_client sa_client = {
109 .remove = ib_sa_remove_one 109 .remove = ib_sa_remove_one
110}; 110};
111 111
112static spinlock_t idr_lock; 112static DEFINE_SPINLOCK(idr_lock);
113static DEFINE_IDR(query_idr); 113static DEFINE_IDR(query_idr);
114 114
115static spinlock_t tid_lock; 115static DEFINE_SPINLOCK(tid_lock);
116static u32 tid; 116static u32 tid;
117 117
118#define PATH_REC_FIELD(field) \ 118#define PATH_REC_FIELD(field) \
@@ -1077,9 +1077,6 @@ static int __init ib_sa_init(void)
1077{ 1077{
1078 int ret; 1078 int ret;
1079 1079
1080 spin_lock_init(&idr_lock);
1081 spin_lock_init(&tid_lock);
1082
1083 get_random_bytes(&tid, sizeof tid); 1080 get_random_bytes(&tid, sizeof tid);
1084 1081
1085 ret = ib_register_client(&sa_client); 1082 ret = ib_register_client(&sa_client);
diff --git a/drivers/infiniband/core/smi.c b/drivers/infiniband/core/smi.c
index 87236753bce9..5855e4405d9b 100644
--- a/drivers/infiniband/core/smi.c
+++ b/drivers/infiniband/core/smi.c
@@ -52,6 +52,10 @@ enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
52 hop_cnt = smp->hop_cnt; 52 hop_cnt = smp->hop_cnt;
53 53
54 /* See section 14.2.2.2, Vol 1 IB spec */ 54 /* See section 14.2.2.2, Vol 1 IB spec */
55 /* C14-6 -- valid hop_cnt values are from 0 to 63 */
56 if (hop_cnt >= IB_SMP_MAX_PATH_HOPS)
57 return IB_SMI_DISCARD;
58
55 if (!ib_get_smp_direction(smp)) { 59 if (!ib_get_smp_direction(smp)) {
56 /* C14-9:1 */ 60 /* C14-9:1 */
57 if (hop_cnt && hop_ptr == 0) { 61 if (hop_cnt && hop_ptr == 0) {
@@ -133,6 +137,10 @@ enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type,
133 hop_cnt = smp->hop_cnt; 137 hop_cnt = smp->hop_cnt;
134 138
135 /* See section 14.2.2.2, Vol 1 IB spec */ 139 /* See section 14.2.2.2, Vol 1 IB spec */
140 /* C14-6 -- valid hop_cnt values are from 0 to 63 */
141 if (hop_cnt >= IB_SMP_MAX_PATH_HOPS)
142 return IB_SMI_DISCARD;
143
136 if (!ib_get_smp_direction(smp)) { 144 if (!ib_get_smp_direction(smp)) {
137 /* C14-9:1 -- sender should have incremented hop_ptr */ 145 /* C14-9:1 -- sender should have incremented hop_ptr */
138 if (hop_cnt && hop_ptr == 0) 146 if (hop_cnt && hop_ptr == 0)
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index eb36a81dd09b..d3fff9e008a3 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -73,7 +73,7 @@ DEFINE_IDR(ib_uverbs_cq_idr);
73DEFINE_IDR(ib_uverbs_qp_idr); 73DEFINE_IDR(ib_uverbs_qp_idr);
74DEFINE_IDR(ib_uverbs_srq_idr); 74DEFINE_IDR(ib_uverbs_srq_idr);
75 75
76static spinlock_t map_lock; 76static DEFINE_SPINLOCK(map_lock);
77static struct ib_uverbs_device *dev_table[IB_UVERBS_MAX_DEVICES]; 77static struct ib_uverbs_device *dev_table[IB_UVERBS_MAX_DEVICES];
78static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES); 78static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES);
79 79
@@ -584,14 +584,16 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
584 584
585 if (hdr.command < 0 || 585 if (hdr.command < 0 ||
586 hdr.command >= ARRAY_SIZE(uverbs_cmd_table) || 586 hdr.command >= ARRAY_SIZE(uverbs_cmd_table) ||
587 !uverbs_cmd_table[hdr.command] || 587 !uverbs_cmd_table[hdr.command])
588 !(file->device->ib_dev->uverbs_cmd_mask & (1ull << hdr.command)))
589 return -EINVAL; 588 return -EINVAL;
590 589
591 if (!file->ucontext && 590 if (!file->ucontext &&
592 hdr.command != IB_USER_VERBS_CMD_GET_CONTEXT) 591 hdr.command != IB_USER_VERBS_CMD_GET_CONTEXT)
593 return -EINVAL; 592 return -EINVAL;
594 593
594 if (!(file->device->ib_dev->uverbs_cmd_mask & (1ull << hdr.command)))
595 return -ENOSYS;
596
595 return uverbs_cmd_table[hdr.command](file, buf + sizeof hdr, 597 return uverbs_cmd_table[hdr.command](file, buf + sizeof hdr,
596 hdr.in_words * 4, hdr.out_words * 4); 598 hdr.in_words * 4, hdr.out_words * 4);
597} 599}
@@ -836,8 +838,6 @@ static int __init ib_uverbs_init(void)
836{ 838{
837 int ret; 839 int ret;
838 840
839 spin_lock_init(&map_lock);
840
841 ret = register_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES, 841 ret = register_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES,
842 "infiniband_verbs"); 842 "infiniband_verbs");
843 if (ret) { 843 if (ret) {
diff --git a/drivers/infiniband/hw/amso1100/c2.c b/drivers/infiniband/hw/amso1100/c2.c
index 0cfbb6d2f762..8250740c94b0 100644
--- a/drivers/infiniband/hw/amso1100/c2.c
+++ b/drivers/infiniband/hw/amso1100/c2.c
@@ -86,11 +86,7 @@ MODULE_DEVICE_TABLE(pci, c2_pci_table);
86 86
87static void c2_print_macaddr(struct net_device *netdev) 87static void c2_print_macaddr(struct net_device *netdev)
88{ 88{
89 pr_debug("%s: MAC %02X:%02X:%02X:%02X:%02X:%02X, " 89 pr_debug("%s: MAC %pM, IRQ %u\n", netdev->name, netdev->dev_addr, netdev->irq);
90 "IRQ %u\n", netdev->name,
91 netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
92 netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5],
93 netdev->irq);
94} 90}
95 91
96static void c2_set_rxbufsize(struct c2_port *c2_port) 92static void c2_set_rxbufsize(struct c2_port *c2_port)
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c
index f1948fad85d7..ad723bd8bf49 100644
--- a/drivers/infiniband/hw/amso1100/c2_provider.c
+++ b/drivers/infiniband/hw/amso1100/c2_provider.c
@@ -780,11 +780,11 @@ int c2_register_device(struct c2_dev *dev)
780 /* Register pseudo network device */ 780 /* Register pseudo network device */
781 dev->pseudo_netdev = c2_pseudo_netdev_init(dev); 781 dev->pseudo_netdev = c2_pseudo_netdev_init(dev);
782 if (!dev->pseudo_netdev) 782 if (!dev->pseudo_netdev)
783 goto out3; 783 goto out;
784 784
785 ret = register_netdev(dev->pseudo_netdev); 785 ret = register_netdev(dev->pseudo_netdev);
786 if (ret) 786 if (ret)
787 goto out2; 787 goto out_free_netdev;
788 788
789 pr_debug("%s:%u\n", __func__, __LINE__); 789 pr_debug("%s:%u\n", __func__, __LINE__);
790 strlcpy(dev->ibdev.name, "amso%d", IB_DEVICE_NAME_MAX); 790 strlcpy(dev->ibdev.name, "amso%d", IB_DEVICE_NAME_MAX);
@@ -851,6 +851,10 @@ int c2_register_device(struct c2_dev *dev)
851 dev->ibdev.post_recv = c2_post_receive; 851 dev->ibdev.post_recv = c2_post_receive;
852 852
853 dev->ibdev.iwcm = kmalloc(sizeof(*dev->ibdev.iwcm), GFP_KERNEL); 853 dev->ibdev.iwcm = kmalloc(sizeof(*dev->ibdev.iwcm), GFP_KERNEL);
854 if (dev->ibdev.iwcm == NULL) {
855 ret = -ENOMEM;
856 goto out_unregister_netdev;
857 }
854 dev->ibdev.iwcm->add_ref = c2_add_ref; 858 dev->ibdev.iwcm->add_ref = c2_add_ref;
855 dev->ibdev.iwcm->rem_ref = c2_rem_ref; 859 dev->ibdev.iwcm->rem_ref = c2_rem_ref;
856 dev->ibdev.iwcm->get_qp = c2_get_qp; 860 dev->ibdev.iwcm->get_qp = c2_get_qp;
@@ -862,23 +866,25 @@ int c2_register_device(struct c2_dev *dev)
862 866
863 ret = ib_register_device(&dev->ibdev); 867 ret = ib_register_device(&dev->ibdev);
864 if (ret) 868 if (ret)
865 goto out1; 869 goto out_free_iwcm;
866 870
867 for (i = 0; i < ARRAY_SIZE(c2_dev_attributes); ++i) { 871 for (i = 0; i < ARRAY_SIZE(c2_dev_attributes); ++i) {
868 ret = device_create_file(&dev->ibdev.dev, 872 ret = device_create_file(&dev->ibdev.dev,
869 c2_dev_attributes[i]); 873 c2_dev_attributes[i]);
870 if (ret) 874 if (ret)
871 goto out0; 875 goto out_unregister_ibdev;
872 } 876 }
873 goto out3; 877 goto out;
874 878
875out0: 879out_unregister_ibdev:
876 ib_unregister_device(&dev->ibdev); 880 ib_unregister_device(&dev->ibdev);
877out1: 881out_free_iwcm:
882 kfree(dev->ibdev.iwcm);
883out_unregister_netdev:
878 unregister_netdev(dev->pseudo_netdev); 884 unregister_netdev(dev->pseudo_netdev);
879out2: 885out_free_netdev:
880 free_netdev(dev->pseudo_netdev); 886 free_netdev(dev->pseudo_netdev);
881out3: 887out:
882 pr_debug("%s:%u ret=%d\n", __func__, __LINE__, ret); 888 pr_debug("%s:%u ret=%d\n", __func__, __LINE__, ret);
883 return ret; 889 return ret;
884} 890}
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index 62f9cf2f94ec..72ed3396b721 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -852,7 +852,9 @@ int cxio_rdma_init(struct cxio_rdev *rdev_p, struct t3_rdma_init_attr *attr)
852 wqe->qpcaps = attr->qpcaps; 852 wqe->qpcaps = attr->qpcaps;
853 wqe->ulpdu_size = cpu_to_be16(attr->tcp_emss); 853 wqe->ulpdu_size = cpu_to_be16(attr->tcp_emss);
854 wqe->rqe_count = cpu_to_be16(attr->rqe_count); 854 wqe->rqe_count = cpu_to_be16(attr->rqe_count);
855 wqe->flags_rtr_type = cpu_to_be16(attr->flags|V_RTR_TYPE(attr->rtr_type)); 855 wqe->flags_rtr_type = cpu_to_be16(attr->flags |
856 V_RTR_TYPE(attr->rtr_type) |
857 V_CHAN(attr->chan));
856 wqe->ord = cpu_to_be32(attr->ord); 858 wqe->ord = cpu_to_be32(attr->ord);
857 wqe->ird = cpu_to_be32(attr->ird); 859 wqe->ird = cpu_to_be32(attr->ird);
858 wqe->qp_dma_addr = cpu_to_be64(attr->qp_dma_addr); 860 wqe->qp_dma_addr = cpu_to_be64(attr->qp_dma_addr);
@@ -1032,6 +1034,7 @@ err3:
1032err2: 1034err2:
1033 cxio_hal_destroy_ctrl_qp(rdev_p); 1035 cxio_hal_destroy_ctrl_qp(rdev_p);
1034err1: 1036err1:
1037 rdev_p->t3cdev_p->ulp = NULL;
1035 list_del(&rdev_p->entry); 1038 list_del(&rdev_p->entry);
1036 return err; 1039 return err;
1037} 1040}
diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h
index 32e3b1461d81..a197a5b7ac7f 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_wr.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_wr.h
@@ -327,6 +327,11 @@ enum rdma_init_rtr_types {
327#define V_RTR_TYPE(x) ((x) << S_RTR_TYPE) 327#define V_RTR_TYPE(x) ((x) << S_RTR_TYPE)
328#define G_RTR_TYPE(x) ((((x) >> S_RTR_TYPE)) & M_RTR_TYPE) 328#define G_RTR_TYPE(x) ((((x) >> S_RTR_TYPE)) & M_RTR_TYPE)
329 329
330#define S_CHAN 4
331#define M_CHAN 0x3
332#define V_CHAN(x) ((x) << S_CHAN)
333#define G_CHAN(x) ((((x) >> S_CHAN)) & M_CHAN)
334
330struct t3_rdma_init_attr { 335struct t3_rdma_init_attr {
331 u32 tid; 336 u32 tid;
332 u32 qpid; 337 u32 qpid;
@@ -346,6 +351,7 @@ struct t3_rdma_init_attr {
346 u16 flags; 351 u16 flags;
347 u16 rqe_count; 352 u16 rqe_count;
348 u32 irs; 353 u32 irs;
354 u32 chan;
349}; 355};
350 356
351struct t3_rdma_init_wr { 357struct t3_rdma_init_wr {
diff --git a/drivers/infiniband/hw/cxgb3/iwch.c b/drivers/infiniband/hw/cxgb3/iwch.c
index 26fc0a4eaa74..b0ea0105ddf6 100644
--- a/drivers/infiniband/hw/cxgb3/iwch.c
+++ b/drivers/infiniband/hw/cxgb3/iwch.c
@@ -51,7 +51,7 @@ cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS];
51 51
52static void open_rnic_dev(struct t3cdev *); 52static void open_rnic_dev(struct t3cdev *);
53static void close_rnic_dev(struct t3cdev *); 53static void close_rnic_dev(struct t3cdev *);
54static void iwch_err_handler(struct t3cdev *, u32, u32); 54static void iwch_event_handler(struct t3cdev *, u32, u32);
55 55
56struct cxgb3_client t3c_client = { 56struct cxgb3_client t3c_client = {
57 .name = "iw_cxgb3", 57 .name = "iw_cxgb3",
@@ -59,7 +59,7 @@ struct cxgb3_client t3c_client = {
59 .remove = close_rnic_dev, 59 .remove = close_rnic_dev,
60 .handlers = t3c_handlers, 60 .handlers = t3c_handlers,
61 .redirect = iwch_ep_redirect, 61 .redirect = iwch_ep_redirect,
62 .err_handler = iwch_err_handler 62 .event_handler = iwch_event_handler
63}; 63};
64 64
65static LIST_HEAD(dev_list); 65static LIST_HEAD(dev_list);
@@ -105,11 +105,9 @@ static void rnic_init(struct iwch_dev *rnicp)
105static void open_rnic_dev(struct t3cdev *tdev) 105static void open_rnic_dev(struct t3cdev *tdev)
106{ 106{
107 struct iwch_dev *rnicp; 107 struct iwch_dev *rnicp;
108 static int vers_printed;
109 108
110 PDBG("%s t3cdev %p\n", __func__, tdev); 109 PDBG("%s t3cdev %p\n", __func__, tdev);
111 if (!vers_printed++) 110 printk_once(KERN_INFO MOD "Chelsio T3 RDMA Driver - version %s\n",
112 printk(KERN_INFO MOD "Chelsio T3 RDMA Driver - version %s\n",
113 DRV_VERSION); 111 DRV_VERSION);
114 rnicp = (struct iwch_dev *)ib_alloc_device(sizeof(*rnicp)); 112 rnicp = (struct iwch_dev *)ib_alloc_device(sizeof(*rnicp));
115 if (!rnicp) { 113 if (!rnicp) {
@@ -162,21 +160,36 @@ static void close_rnic_dev(struct t3cdev *tdev)
162 mutex_unlock(&dev_mutex); 160 mutex_unlock(&dev_mutex);
163} 161}
164 162
165static void iwch_err_handler(struct t3cdev *tdev, u32 status, u32 error) 163static void iwch_event_handler(struct t3cdev *tdev, u32 evt, u32 port_id)
166{ 164{
167 struct cxio_rdev *rdev = tdev->ulp; 165 struct cxio_rdev *rdev = tdev->ulp;
168 struct iwch_dev *rnicp = rdev_to_iwch_dev(rdev); 166 struct iwch_dev *rnicp;
169 struct ib_event event; 167 struct ib_event event;
168 u32 portnum = port_id + 1;
170 169
171 if (status == OFFLOAD_STATUS_DOWN) { 170 if (!rdev)
171 return;
172 rnicp = rdev_to_iwch_dev(rdev);
173 switch (evt) {
174 case OFFLOAD_STATUS_DOWN: {
172 rdev->flags = CXIO_ERROR_FATAL; 175 rdev->flags = CXIO_ERROR_FATAL;
173
174 event.device = &rnicp->ibdev;
175 event.event = IB_EVENT_DEVICE_FATAL; 176 event.event = IB_EVENT_DEVICE_FATAL;
176 event.element.port_num = 0; 177 break;
177 ib_dispatch_event(&event); 178 }
179 case OFFLOAD_PORT_DOWN: {
180 event.event = IB_EVENT_PORT_ERR;
181 break;
182 }
183 case OFFLOAD_PORT_UP: {
184 event.event = IB_EVENT_PORT_ACTIVE;
185 break;
186 }
178 } 187 }
179 188
189 event.device = &rnicp->ibdev;
190 event.element.port_num = portnum;
191 ib_dispatch_event(&event);
192
180 return; 193 return;
181} 194}
182 195
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 52d7bb0c2a12..66b41351910a 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -286,7 +286,7 @@ void __free_ep(struct kref *kref)
286 ep = container_of(container_of(kref, struct iwch_ep_common, kref), 286 ep = container_of(container_of(kref, struct iwch_ep_common, kref),
287 struct iwch_ep, com); 287 struct iwch_ep, com);
288 PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]); 288 PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
289 if (ep->com.flags & RELEASE_RESOURCES) { 289 if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
290 cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid); 290 cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid);
291 dst_release(ep->dst); 291 dst_release(ep->dst);
292 l2t_release(L2DATA(ep->com.tdev), ep->l2t); 292 l2t_release(L2DATA(ep->com.tdev), ep->l2t);
@@ -297,7 +297,7 @@ void __free_ep(struct kref *kref)
297static void release_ep_resources(struct iwch_ep *ep) 297static void release_ep_resources(struct iwch_ep *ep)
298{ 298{
299 PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid); 299 PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
300 ep->com.flags |= RELEASE_RESOURCES; 300 set_bit(RELEASE_RESOURCES, &ep->com.flags);
301 put_ep(&ep->com); 301 put_ep(&ep->com);
302} 302}
303 303
@@ -786,10 +786,12 @@ static void connect_request_upcall(struct iwch_ep *ep)
786 event.private_data_len = ep->plen; 786 event.private_data_len = ep->plen;
787 event.private_data = ep->mpa_pkt + sizeof(struct mpa_message); 787 event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
788 event.provider_data = ep; 788 event.provider_data = ep;
789 if (state_read(&ep->parent_ep->com) != DEAD) 789 if (state_read(&ep->parent_ep->com) != DEAD) {
790 get_ep(&ep->com);
790 ep->parent_ep->com.cm_id->event_handler( 791 ep->parent_ep->com.cm_id->event_handler(
791 ep->parent_ep->com.cm_id, 792 ep->parent_ep->com.cm_id,
792 &event); 793 &event);
794 }
793 put_ep(&ep->parent_ep->com); 795 put_ep(&ep->parent_ep->com);
794 ep->parent_ep = NULL; 796 ep->parent_ep = NULL;
795} 797}
@@ -1156,8 +1158,7 @@ static int abort_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1156 * We get 2 abort replies from the HW. The first one must 1158 * We get 2 abort replies from the HW. The first one must
1157 * be ignored except for scribbling that we need one more. 1159 * be ignored except for scribbling that we need one more.
1158 */ 1160 */
1159 if (!(ep->com.flags & ABORT_REQ_IN_PROGRESS)) { 1161 if (!test_and_set_bit(ABORT_REQ_IN_PROGRESS, &ep->com.flags)) {
1160 ep->com.flags |= ABORT_REQ_IN_PROGRESS;
1161 return CPL_RET_BUF_DONE; 1162 return CPL_RET_BUF_DONE;
1162 } 1163 }
1163 1164
@@ -1477,10 +1478,14 @@ static int peer_close(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1477 /* 1478 /*
1478 * We're gonna mark this puppy DEAD, but keep 1479 * We're gonna mark this puppy DEAD, but keep
1479 * the reference on it until the ULP accepts or 1480 * the reference on it until the ULP accepts or
1480 * rejects the CR. 1481 * rejects the CR. Also wake up anyone waiting
1482 * in rdma connection migration (see iwch_accept_cr()).
1481 */ 1483 */
1482 __state_set(&ep->com, CLOSING); 1484 __state_set(&ep->com, CLOSING);
1483 get_ep(&ep->com); 1485 ep->com.rpl_done = 1;
1486 ep->com.rpl_err = -ECONNRESET;
1487 PDBG("waking up ep %p\n", ep);
1488 wake_up(&ep->com.waitq);
1484 break; 1489 break;
1485 case MPA_REP_SENT: 1490 case MPA_REP_SENT:
1486 __state_set(&ep->com, CLOSING); 1491 __state_set(&ep->com, CLOSING);
@@ -1561,8 +1566,7 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1561 * We get 2 peer aborts from the HW. The first one must 1566 * We get 2 peer aborts from the HW. The first one must
1562 * be ignored except for scribbling that we need one more. 1567 * be ignored except for scribbling that we need one more.
1563 */ 1568 */
1564 if (!(ep->com.flags & PEER_ABORT_IN_PROGRESS)) { 1569 if (!test_and_set_bit(PEER_ABORT_IN_PROGRESS, &ep->com.flags)) {
1565 ep->com.flags |= PEER_ABORT_IN_PROGRESS;
1566 return CPL_RET_BUF_DONE; 1570 return CPL_RET_BUF_DONE;
1567 } 1571 }
1568 1572
@@ -1589,9 +1593,13 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1589 /* 1593 /*
1590 * We're gonna mark this puppy DEAD, but keep 1594 * We're gonna mark this puppy DEAD, but keep
1591 * the reference on it until the ULP accepts or 1595 * the reference on it until the ULP accepts or
1592 * rejects the CR. 1596 * rejects the CR. Also wake up anyone waiting
1597 * in rdma connection migration (see iwch_accept_cr()).
1593 */ 1598 */
1594 get_ep(&ep->com); 1599 ep->com.rpl_done = 1;
1600 ep->com.rpl_err = -ECONNRESET;
1601 PDBG("waking up ep %p\n", ep);
1602 wake_up(&ep->com.waitq);
1595 break; 1603 break;
1596 case MORIBUND: 1604 case MORIBUND:
1597 case CLOSING: 1605 case CLOSING:
@@ -1797,6 +1805,7 @@ int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
1797 err = send_mpa_reject(ep, pdata, pdata_len); 1805 err = send_mpa_reject(ep, pdata, pdata_len);
1798 err = iwch_ep_disconnect(ep, 0, GFP_KERNEL); 1806 err = iwch_ep_disconnect(ep, 0, GFP_KERNEL);
1799 } 1807 }
1808 put_ep(&ep->com);
1800 return 0; 1809 return 0;
1801} 1810}
1802 1811
@@ -1810,8 +1819,10 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1810 struct iwch_qp *qp = get_qhp(h, conn_param->qpn); 1819 struct iwch_qp *qp = get_qhp(h, conn_param->qpn);
1811 1820
1812 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1821 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1813 if (state_read(&ep->com) == DEAD) 1822 if (state_read(&ep->com) == DEAD) {
1814 return -ECONNRESET; 1823 err = -ECONNRESET;
1824 goto err;
1825 }
1815 1826
1816 BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD); 1827 BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
1817 BUG_ON(!qp); 1828 BUG_ON(!qp);
@@ -1819,15 +1830,14 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1819 if ((conn_param->ord > qp->rhp->attr.max_rdma_read_qp_depth) || 1830 if ((conn_param->ord > qp->rhp->attr.max_rdma_read_qp_depth) ||
1820 (conn_param->ird > qp->rhp->attr.max_rdma_reads_per_qp)) { 1831 (conn_param->ird > qp->rhp->attr.max_rdma_reads_per_qp)) {
1821 abort_connection(ep, NULL, GFP_KERNEL); 1832 abort_connection(ep, NULL, GFP_KERNEL);
1822 return -EINVAL; 1833 err = -EINVAL;
1834 goto err;
1823 } 1835 }
1824 1836
1825 cm_id->add_ref(cm_id); 1837 cm_id->add_ref(cm_id);
1826 ep->com.cm_id = cm_id; 1838 ep->com.cm_id = cm_id;
1827 ep->com.qp = qp; 1839 ep->com.qp = qp;
1828 1840
1829 ep->com.rpl_done = 0;
1830 ep->com.rpl_err = 0;
1831 ep->ird = conn_param->ird; 1841 ep->ird = conn_param->ird;
1832 ep->ord = conn_param->ord; 1842 ep->ord = conn_param->ord;
1833 1843
@@ -1836,8 +1846,6 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1836 1846
1837 PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord); 1847 PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);
1838 1848
1839 get_ep(&ep->com);
1840
1841 /* bind QP to EP and move to RTS */ 1849 /* bind QP to EP and move to RTS */
1842 attrs.mpa_attr = ep->mpa_attr; 1850 attrs.mpa_attr = ep->mpa_attr;
1843 attrs.max_ird = ep->ird; 1851 attrs.max_ird = ep->ird;
@@ -1855,30 +1863,31 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1855 err = iwch_modify_qp(ep->com.qp->rhp, 1863 err = iwch_modify_qp(ep->com.qp->rhp,
1856 ep->com.qp, mask, &attrs, 1); 1864 ep->com.qp, mask, &attrs, 1);
1857 if (err) 1865 if (err)
1858 goto err; 1866 goto err1;
1859 1867
1860 /* if needed, wait for wr_ack */ 1868 /* if needed, wait for wr_ack */
1861 if (iwch_rqes_posted(qp)) { 1869 if (iwch_rqes_posted(qp)) {
1862 wait_event(ep->com.waitq, ep->com.rpl_done); 1870 wait_event(ep->com.waitq, ep->com.rpl_done);
1863 err = ep->com.rpl_err; 1871 err = ep->com.rpl_err;
1864 if (err) 1872 if (err)
1865 goto err; 1873 goto err1;
1866 } 1874 }
1867 1875
1868 err = send_mpa_reply(ep, conn_param->private_data, 1876 err = send_mpa_reply(ep, conn_param->private_data,
1869 conn_param->private_data_len); 1877 conn_param->private_data_len);
1870 if (err) 1878 if (err)
1871 goto err; 1879 goto err1;
1872 1880
1873 1881
1874 state_set(&ep->com, FPDU_MODE); 1882 state_set(&ep->com, FPDU_MODE);
1875 established_upcall(ep); 1883 established_upcall(ep);
1876 put_ep(&ep->com); 1884 put_ep(&ep->com);
1877 return 0; 1885 return 0;
1878err: 1886err1:
1879 ep->com.cm_id = NULL; 1887 ep->com.cm_id = NULL;
1880 ep->com.qp = NULL; 1888 ep->com.qp = NULL;
1881 cm_id->rem_ref(cm_id); 1889 cm_id->rem_ref(cm_id);
1890err:
1882 put_ep(&ep->com); 1891 put_ep(&ep->com);
1883 return err; 1892 return err;
1884} 1893}
@@ -2097,14 +2106,17 @@ int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp)
2097 ep->com.state = CLOSING; 2106 ep->com.state = CLOSING;
2098 start_ep_timer(ep); 2107 start_ep_timer(ep);
2099 } 2108 }
2109 set_bit(CLOSE_SENT, &ep->com.flags);
2100 break; 2110 break;
2101 case CLOSING: 2111 case CLOSING:
2102 close = 1; 2112 if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
2103 if (abrupt) { 2113 close = 1;
2104 stop_ep_timer(ep); 2114 if (abrupt) {
2105 ep->com.state = ABORTING; 2115 stop_ep_timer(ep);
2106 } else 2116 ep->com.state = ABORTING;
2107 ep->com.state = MORIBUND; 2117 } else
2118 ep->com.state = MORIBUND;
2119 }
2108 break; 2120 break;
2109 case MORIBUND: 2121 case MORIBUND:
2110 case ABORTING: 2122 case ABORTING:
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.h b/drivers/infiniband/hw/cxgb3/iwch_cm.h
index 43c0aea7eadc..b9efadfffb4f 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.h
@@ -145,9 +145,10 @@ enum iwch_ep_state {
145}; 145};
146 146
147enum iwch_ep_flags { 147enum iwch_ep_flags {
148 PEER_ABORT_IN_PROGRESS = (1 << 0), 148 PEER_ABORT_IN_PROGRESS = 0,
149 ABORT_REQ_IN_PROGRESS = (1 << 1), 149 ABORT_REQ_IN_PROGRESS = 1,
150 RELEASE_RESOURCES = (1 << 2), 150 RELEASE_RESOURCES = 2,
151 CLOSE_SENT = 3,
151}; 152};
152 153
153struct iwch_ep_common { 154struct iwch_ep_common {
@@ -162,7 +163,7 @@ struct iwch_ep_common {
162 wait_queue_head_t waitq; 163 wait_queue_head_t waitq;
163 int rpl_done; 164 int rpl_done;
164 int rpl_err; 165 int rpl_err;
165 u32 flags; 166 unsigned long flags;
166}; 167};
167 168
168struct iwch_listen_ep { 169struct iwch_listen_ep {
diff --git a/drivers/infiniband/hw/cxgb3/iwch_mem.c b/drivers/infiniband/hw/cxgb3/iwch_mem.c
index ec49a5cbdebb..e1ec65ebb016 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_mem.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_mem.c
@@ -39,7 +39,7 @@
39#include "iwch.h" 39#include "iwch.h"
40#include "iwch_provider.h" 40#include "iwch_provider.h"
41 41
42static void iwch_finish_mem_reg(struct iwch_mr *mhp, u32 stag) 42static int iwch_finish_mem_reg(struct iwch_mr *mhp, u32 stag)
43{ 43{
44 u32 mmid; 44 u32 mmid;
45 45
@@ -47,14 +47,15 @@ static void iwch_finish_mem_reg(struct iwch_mr *mhp, u32 stag)
47 mhp->attr.stag = stag; 47 mhp->attr.stag = stag;
48 mmid = stag >> 8; 48 mmid = stag >> 8;
49 mhp->ibmr.rkey = mhp->ibmr.lkey = stag; 49 mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
50 insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
51 PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp); 50 PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
51 return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
52} 52}
53 53
54int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php, 54int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
55 struct iwch_mr *mhp, int shift) 55 struct iwch_mr *mhp, int shift)
56{ 56{
57 u32 stag; 57 u32 stag;
58 int ret;
58 59
59 if (cxio_register_phys_mem(&rhp->rdev, 60 if (cxio_register_phys_mem(&rhp->rdev,
60 &stag, mhp->attr.pdid, 61 &stag, mhp->attr.pdid,
@@ -66,9 +67,11 @@ int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
66 mhp->attr.pbl_size, mhp->attr.pbl_addr)) 67 mhp->attr.pbl_size, mhp->attr.pbl_addr))
67 return -ENOMEM; 68 return -ENOMEM;
68 69
69 iwch_finish_mem_reg(mhp, stag); 70 ret = iwch_finish_mem_reg(mhp, stag);
70 71 if (ret)
71 return 0; 72 cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
73 mhp->attr.pbl_addr);
74 return ret;
72} 75}
73 76
74int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php, 77int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php,
@@ -77,6 +80,7 @@ int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php,
77 int npages) 80 int npages)
78{ 81{
79 u32 stag; 82 u32 stag;
83 int ret;
80 84
81 /* We could support this... */ 85 /* We could support this... */
82 if (npages > mhp->attr.pbl_size) 86 if (npages > mhp->attr.pbl_size)
@@ -93,9 +97,12 @@ int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php,
93 mhp->attr.pbl_size, mhp->attr.pbl_addr)) 97 mhp->attr.pbl_size, mhp->attr.pbl_addr))
94 return -ENOMEM; 98 return -ENOMEM;
95 99
96 iwch_finish_mem_reg(mhp, stag); 100 ret = iwch_finish_mem_reg(mhp, stag);
101 if (ret)
102 cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
103 mhp->attr.pbl_addr);
97 104
98 return 0; 105 return ret;
99} 106}
100 107
101int iwch_alloc_pbl(struct iwch_mr *mhp, int npages) 108int iwch_alloc_pbl(struct iwch_mr *mhp, int npages)
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index e2a63214008a..6895523779d0 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -195,7 +195,11 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int ve
195 spin_lock_init(&chp->lock); 195 spin_lock_init(&chp->lock);
196 atomic_set(&chp->refcnt, 1); 196 atomic_set(&chp->refcnt, 1);
197 init_waitqueue_head(&chp->wait); 197 init_waitqueue_head(&chp->wait);
198 insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid); 198 if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) {
199 cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
200 kfree(chp);
201 return ERR_PTR(-ENOMEM);
202 }
199 203
200 if (ucontext) { 204 if (ucontext) {
201 struct iwch_mm_entry *mm; 205 struct iwch_mm_entry *mm;
@@ -750,7 +754,11 @@ static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd)
750 mhp->attr.stag = stag; 754 mhp->attr.stag = stag;
751 mmid = (stag) >> 8; 755 mmid = (stag) >> 8;
752 mhp->ibmw.rkey = stag; 756 mhp->ibmw.rkey = stag;
753 insert_handle(rhp, &rhp->mmidr, mhp, mmid); 757 if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
758 cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
759 kfree(mhp);
760 return ERR_PTR(-ENOMEM);
761 }
754 PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag); 762 PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
755 return &(mhp->ibmw); 763 return &(mhp->ibmw);
756} 764}
@@ -778,37 +786,43 @@ static struct ib_mr *iwch_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
778 struct iwch_mr *mhp; 786 struct iwch_mr *mhp;
779 u32 mmid; 787 u32 mmid;
780 u32 stag = 0; 788 u32 stag = 0;
781 int ret; 789 int ret = 0;
782 790
783 php = to_iwch_pd(pd); 791 php = to_iwch_pd(pd);
784 rhp = php->rhp; 792 rhp = php->rhp;
785 mhp = kzalloc(sizeof(*mhp), GFP_KERNEL); 793 mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
786 if (!mhp) 794 if (!mhp)
787 return ERR_PTR(-ENOMEM); 795 goto err;
788 796
789 mhp->rhp = rhp; 797 mhp->rhp = rhp;
790 ret = iwch_alloc_pbl(mhp, pbl_depth); 798 ret = iwch_alloc_pbl(mhp, pbl_depth);
791 if (ret) { 799 if (ret)
792 kfree(mhp); 800 goto err1;
793 return ERR_PTR(ret);
794 }
795 mhp->attr.pbl_size = pbl_depth; 801 mhp->attr.pbl_size = pbl_depth;
796 ret = cxio_allocate_stag(&rhp->rdev, &stag, php->pdid, 802 ret = cxio_allocate_stag(&rhp->rdev, &stag, php->pdid,
797 mhp->attr.pbl_size, mhp->attr.pbl_addr); 803 mhp->attr.pbl_size, mhp->attr.pbl_addr);
798 if (ret) { 804 if (ret)
799 iwch_free_pbl(mhp); 805 goto err2;
800 kfree(mhp);
801 return ERR_PTR(ret);
802 }
803 mhp->attr.pdid = php->pdid; 806 mhp->attr.pdid = php->pdid;
804 mhp->attr.type = TPT_NON_SHARED_MR; 807 mhp->attr.type = TPT_NON_SHARED_MR;
805 mhp->attr.stag = stag; 808 mhp->attr.stag = stag;
806 mhp->attr.state = 1; 809 mhp->attr.state = 1;
807 mmid = (stag) >> 8; 810 mmid = (stag) >> 8;
808 mhp->ibmr.rkey = mhp->ibmr.lkey = stag; 811 mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
809 insert_handle(rhp, &rhp->mmidr, mhp, mmid); 812 if (insert_handle(rhp, &rhp->mmidr, mhp, mmid))
813 goto err3;
814
810 PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag); 815 PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
811 return &(mhp->ibmr); 816 return &(mhp->ibmr);
817err3:
818 cxio_dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
819 mhp->attr.pbl_addr);
820err2:
821 iwch_free_pbl(mhp);
822err1:
823 kfree(mhp);
824err:
825 return ERR_PTR(ret);
812} 826}
813 827
814static struct ib_fast_reg_page_list *iwch_alloc_fastreg_pbl( 828static struct ib_fast_reg_page_list *iwch_alloc_fastreg_pbl(
@@ -961,7 +975,13 @@ static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
961 spin_lock_init(&qhp->lock); 975 spin_lock_init(&qhp->lock);
962 init_waitqueue_head(&qhp->wait); 976 init_waitqueue_head(&qhp->wait);
963 atomic_set(&qhp->refcnt, 1); 977 atomic_set(&qhp->refcnt, 1);
964 insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid); 978
979 if (insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid)) {
980 cxio_destroy_qp(&rhp->rdev, &qhp->wq,
981 ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
982 kfree(qhp);
983 return ERR_PTR(-ENOMEM);
984 }
965 985
966 if (udata) { 986 if (udata) {
967 987
@@ -1418,6 +1438,7 @@ int iwch_register_device(struct iwch_dev *dev)
1418bail2: 1438bail2:
1419 ib_unregister_device(&dev->ibdev); 1439 ib_unregister_device(&dev->ibdev);
1420bail1: 1440bail1:
1441 kfree(dev->ibdev.iwcm);
1421 return ret; 1442 return ret;
1422} 1443}
1423 1444
@@ -1430,5 +1451,6 @@ void iwch_unregister_device(struct iwch_dev *dev)
1430 device_remove_file(&dev->ibdev.dev, 1451 device_remove_file(&dev->ibdev.dev,
1431 iwch_class_attributes[i]); 1452 iwch_class_attributes[i]);
1432 ib_unregister_device(&dev->ibdev); 1453 ib_unregister_device(&dev->ibdev);
1454 kfree(dev->ibdev.iwcm);
1433 return; 1455 return;
1434} 1456}
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 27bbdc8e773a..6e8653471941 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -889,6 +889,7 @@ static int rdma_init(struct iwch_dev *rhp, struct iwch_qp *qhp,
889 init_attr.qp_dma_size = (1UL << qhp->wq.size_log2); 889 init_attr.qp_dma_size = (1UL << qhp->wq.size_log2);
890 init_attr.rqe_count = iwch_rqes_posted(qhp); 890 init_attr.rqe_count = iwch_rqes_posted(qhp);
891 init_attr.flags = qhp->attr.mpa_attr.initiator ? MPA_INITIATOR : 0; 891 init_attr.flags = qhp->attr.mpa_attr.initiator ? MPA_INITIATOR : 0;
892 init_attr.chan = qhp->ep->l2t->smt_idx;
892 if (peer2peer) { 893 if (peer2peer) {
893 init_attr.rtr_type = RTR_READ; 894 init_attr.rtr_type = RTR_READ;
894 if (init_attr.ord == 0 && qhp->attr.mpa_attr.initiator) 895 if (init_attr.ord == 0 && qhp->attr.mpa_attr.initiator)
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index fab18a2c74a8..5b635aa5947e 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -52,7 +52,7 @@
52#include "ehca_tools.h" 52#include "ehca_tools.h"
53#include "hcp_if.h" 53#include "hcp_if.h"
54 54
55#define HCAD_VERSION "0028" 55#define HCAD_VERSION "0029"
56 56
57MODULE_LICENSE("Dual BSD/GPL"); 57MODULE_LICENSE("Dual BSD/GPL");
58MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>"); 58MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
@@ -64,7 +64,7 @@ static int ehca_hw_level = 0;
64static int ehca_poll_all_eqs = 1; 64static int ehca_poll_all_eqs = 1;
65 65
66int ehca_debug_level = 0; 66int ehca_debug_level = 0;
67int ehca_nr_ports = 2; 67int ehca_nr_ports = -1;
68int ehca_use_hp_mr = 0; 68int ehca_use_hp_mr = 0;
69int ehca_port_act_time = 30; 69int ehca_port_act_time = 30;
70int ehca_static_rate = -1; 70int ehca_static_rate = -1;
@@ -95,8 +95,8 @@ MODULE_PARM_DESC(hw_level,
95 "Hardware level (0: autosensing (default), " 95 "Hardware level (0: autosensing (default), "
96 "0x10..0x14: eHCA, 0x20..0x23: eHCA2)"); 96 "0x10..0x14: eHCA, 0x20..0x23: eHCA2)");
97MODULE_PARM_DESC(nr_ports, 97MODULE_PARM_DESC(nr_ports,
98 "number of connected ports (-1: autodetect, 1: port one only, " 98 "number of connected ports (-1: autodetect (default), "
99 "2: two ports (default)"); 99 "1: port one only, 2: two ports)");
100MODULE_PARM_DESC(use_hp_mr, 100MODULE_PARM_DESC(use_hp_mr,
101 "Use high performance MRs (default: no)"); 101 "Use high performance MRs (default: no)");
102MODULE_PARM_DESC(port_act_time, 102MODULE_PARM_DESC(port_act_time,
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
index 5a3d96f84c79..8fd88cd828fd 100644
--- a/drivers/infiniband/hw/ehca/ehca_reqs.c
+++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
@@ -786,7 +786,11 @@ repoll:
786 wc->slid = cqe->rlid; 786 wc->slid = cqe->rlid;
787 wc->dlid_path_bits = cqe->dlid; 787 wc->dlid_path_bits = cqe->dlid;
788 wc->src_qp = cqe->remote_qp_number; 788 wc->src_qp = cqe->remote_qp_number;
789 wc->wc_flags = cqe->w_completion_flags; 789 /*
790 * HW has "Immed data present" and "GRH present" in bits 6 and 5.
791 * SW defines those in bits 1 and 0, so we can just shift and mask.
792 */
793 wc->wc_flags = (cqe->w_completion_flags >> 5) & 3;
790 wc->ex.imm_data = cpu_to_be32(cqe->immediate_data); 794 wc->ex.imm_data = cpu_to_be32(cqe->immediate_data);
791 wc->sl = cqe->service_level; 795 wc->sl = cqe->service_level;
792 796
diff --git a/drivers/infiniband/hw/ehca/ehca_sqp.c b/drivers/infiniband/hw/ehca/ehca_sqp.c
index c568b28f4e20..8c1213f8916a 100644
--- a/drivers/infiniband/hw/ehca/ehca_sqp.c
+++ b/drivers/infiniband/hw/ehca/ehca_sqp.c
@@ -125,14 +125,30 @@ struct ib_perf {
125 u8 data[192]; 125 u8 data[192];
126} __attribute__ ((packed)); 126} __attribute__ ((packed));
127 127
128/* TC/SL/FL packed into 32 bits, as in ClassPortInfo */
129struct tcslfl {
130 u32 tc:8;
131 u32 sl:4;
132 u32 fl:20;
133} __attribute__ ((packed));
134
135/* IP Version/TC/FL packed into 32 bits, as in GRH */
136struct vertcfl {
137 u32 ver:4;
138 u32 tc:8;
139 u32 fl:20;
140} __attribute__ ((packed));
128 141
129static int ehca_process_perf(struct ib_device *ibdev, u8 port_num, 142static int ehca_process_perf(struct ib_device *ibdev, u8 port_num,
143 struct ib_wc *in_wc, struct ib_grh *in_grh,
130 struct ib_mad *in_mad, struct ib_mad *out_mad) 144 struct ib_mad *in_mad, struct ib_mad *out_mad)
131{ 145{
132 struct ib_perf *in_perf = (struct ib_perf *)in_mad; 146 struct ib_perf *in_perf = (struct ib_perf *)in_mad;
133 struct ib_perf *out_perf = (struct ib_perf *)out_mad; 147 struct ib_perf *out_perf = (struct ib_perf *)out_mad;
134 struct ib_class_port_info *poi = 148 struct ib_class_port_info *poi =
135 (struct ib_class_port_info *)out_perf->data; 149 (struct ib_class_port_info *)out_perf->data;
150 struct tcslfl *tcslfl =
151 (struct tcslfl *)&poi->redirect_tcslfl;
136 struct ehca_shca *shca = 152 struct ehca_shca *shca =
137 container_of(ibdev, struct ehca_shca, ib_device); 153 container_of(ibdev, struct ehca_shca, ib_device);
138 struct ehca_sport *sport = &shca->sport[port_num - 1]; 154 struct ehca_sport *sport = &shca->sport[port_num - 1];
@@ -158,10 +174,29 @@ static int ehca_process_perf(struct ib_device *ibdev, u8 port_num,
158 poi->base_version = 1; 174 poi->base_version = 1;
159 poi->class_version = 1; 175 poi->class_version = 1;
160 poi->resp_time_value = 18; 176 poi->resp_time_value = 18;
161 poi->redirect_lid = sport->saved_attr.lid; 177
162 poi->redirect_qp = sport->pma_qp_nr; 178 /* copy local routing information from WC where applicable */
179 tcslfl->sl = in_wc->sl;
180 poi->redirect_lid =
181 sport->saved_attr.lid | in_wc->dlid_path_bits;
182 poi->redirect_qp = sport->pma_qp_nr;
163 poi->redirect_qkey = IB_QP1_QKEY; 183 poi->redirect_qkey = IB_QP1_QKEY;
164 poi->redirect_pkey = IB_DEFAULT_PKEY_FULL; 184
185 ehca_query_pkey(ibdev, port_num, in_wc->pkey_index,
186 &poi->redirect_pkey);
187
188 /* if request was globally routed, copy route info */
189 if (in_grh) {
190 struct vertcfl *vertcfl =
191 (struct vertcfl *)&in_grh->version_tclass_flow;
192 memcpy(poi->redirect_gid, in_grh->dgid.raw,
193 sizeof(poi->redirect_gid));
194 tcslfl->tc = vertcfl->tc;
195 tcslfl->fl = vertcfl->fl;
196 } else
197 /* else only fill in default GID */
198 ehca_query_gid(ibdev, port_num, 0,
199 (union ib_gid *)&poi->redirect_gid);
165 200
166 ehca_dbg(ibdev, "ehca_pma_lid=%x ehca_pma_qp=%x", 201 ehca_dbg(ibdev, "ehca_pma_lid=%x ehca_pma_qp=%x",
167 sport->saved_attr.lid, sport->pma_qp_nr); 202 sport->saved_attr.lid, sport->pma_qp_nr);
@@ -183,8 +218,7 @@ perf_reply:
183 218
184int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, 219int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
185 struct ib_wc *in_wc, struct ib_grh *in_grh, 220 struct ib_wc *in_wc, struct ib_grh *in_grh,
186 struct ib_mad *in_mad, 221 struct ib_mad *in_mad, struct ib_mad *out_mad)
187 struct ib_mad *out_mad)
188{ 222{
189 int ret; 223 int ret;
190 224
@@ -196,7 +230,8 @@ int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
196 return IB_MAD_RESULT_SUCCESS; 230 return IB_MAD_RESULT_SUCCESS;
197 231
198 ehca_dbg(ibdev, "port_num=%x src_qp=%x", port_num, in_wc->src_qp); 232 ehca_dbg(ibdev, "port_num=%x src_qp=%x", port_num, in_wc->src_qp);
199 ret = ehca_process_perf(ibdev, port_num, in_mad, out_mad); 233 ret = ehca_process_perf(ibdev, port_num, in_wc, in_grh,
234 in_mad, out_mad);
200 235
201 return ret; 236 return ret;
202} 237}
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index 23173982b32c..38a287006612 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -1616,7 +1616,7 @@ static int try_alloc_port(struct ipath_devdata *dd, int port,
1616 pd->port_cnt = 1; 1616 pd->port_cnt = 1;
1617 port_fp(fp) = pd; 1617 port_fp(fp) = pd;
1618 pd->port_pid = get_pid(task_pid(current)); 1618 pd->port_pid = get_pid(task_pid(current));
1619 strncpy(pd->port_comm, current->comm, sizeof(pd->port_comm)); 1619 strlcpy(pd->port_comm, current->comm, sizeof(pd->port_comm));
1620 ipath_stats.sps_ports++; 1620 ipath_stats.sps_ports++;
1621 ret = 0; 1621 ret = 0;
1622 } else 1622 } else
diff --git a/drivers/infiniband/hw/ipath/ipath_mad.c b/drivers/infiniband/hw/ipath/ipath_mad.c
index 16a702d46018..ceb98ee78666 100644
--- a/drivers/infiniband/hw/ipath/ipath_mad.c
+++ b/drivers/infiniband/hw/ipath/ipath_mad.c
@@ -60,7 +60,7 @@ static int recv_subn_get_nodedescription(struct ib_smp *smp,
60 if (smp->attr_mod) 60 if (smp->attr_mod)
61 smp->status |= IB_SMP_INVALID_FIELD; 61 smp->status |= IB_SMP_INVALID_FIELD;
62 62
63 strncpy(smp->data, ibdev->node_desc, sizeof(smp->data)); 63 memcpy(smp->data, ibdev->node_desc, sizeof(smp->data));
64 64
65 return reply(smp); 65 return reply(smp);
66} 66}
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index ae3d7590346e..3cb3f47a10b8 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -342,6 +342,9 @@ static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
342 struct mlx4_ib_alloc_ucontext_resp resp; 342 struct mlx4_ib_alloc_ucontext_resp resp;
343 int err; 343 int err;
344 344
345 if (!dev->ib_active)
346 return ERR_PTR(-EAGAIN);
347
345 resp.qp_tab_size = dev->dev->caps.num_qps; 348 resp.qp_tab_size = dev->dev->caps.num_qps;
346 resp.bf_reg_size = dev->dev->caps.bf_reg_size; 349 resp.bf_reg_size = dev->dev->caps.bf_reg_size;
347 resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page; 350 resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
@@ -540,15 +543,11 @@ static struct device_attribute *mlx4_class_attributes[] = {
540 543
541static void *mlx4_ib_add(struct mlx4_dev *dev) 544static void *mlx4_ib_add(struct mlx4_dev *dev)
542{ 545{
543 static int mlx4_ib_version_printed;
544 struct mlx4_ib_dev *ibdev; 546 struct mlx4_ib_dev *ibdev;
545 int num_ports = 0; 547 int num_ports = 0;
546 int i; 548 int i;
547 549
548 if (!mlx4_ib_version_printed) { 550 printk_once(KERN_INFO "%s", mlx4_ib_version);
549 printk(KERN_INFO "%s", mlx4_ib_version);
550 ++mlx4_ib_version_printed;
551 }
552 551
553 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) 552 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
554 num_ports++; 553 num_ports++;
@@ -673,6 +672,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
673 goto err_reg; 672 goto err_reg;
674 } 673 }
675 674
675 ibdev->ib_active = true;
676
676 return ibdev; 677 return ibdev;
677 678
678err_reg: 679err_reg:
@@ -729,6 +730,7 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
729 break; 730 break;
730 731
731 case MLX4_DEV_EVENT_CATASTROPHIC_ERROR: 732 case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
733 ibdev->ib_active = false;
732 ibev.event = IB_EVENT_DEVICE_FATAL; 734 ibev.event = IB_EVENT_DEVICE_FATAL;
733 break; 735 break;
734 736
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 8a7dd6795fa0..3486d7675e56 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -175,6 +175,7 @@ struct mlx4_ib_dev {
175 spinlock_t sm_lock; 175 spinlock_t sm_lock;
176 176
177 struct mutex cap_mask_mutex; 177 struct mutex cap_mask_mutex;
178 bool ib_active;
178}; 179};
179 180
180static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev) 181static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev)
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index c4a02648c8af..219b10397b4d 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -615,10 +615,12 @@ static enum mlx4_qp_state to_mlx4_state(enum ib_qp_state state)
615} 615}
616 616
617static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq) 617static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
618 __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
618{ 619{
619 if (send_cq == recv_cq) 620 if (send_cq == recv_cq) {
620 spin_lock_irq(&send_cq->lock); 621 spin_lock_irq(&send_cq->lock);
621 else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { 622 __acquire(&recv_cq->lock);
623 } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
622 spin_lock_irq(&send_cq->lock); 624 spin_lock_irq(&send_cq->lock);
623 spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING); 625 spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
624 } else { 626 } else {
@@ -628,10 +630,12 @@ static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv
628} 630}
629 631
630static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq) 632static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
633 __releases(&send_cq->lock) __releases(&recv_cq->lock)
631{ 634{
632 if (send_cq == recv_cq) 635 if (send_cq == recv_cq) {
636 __release(&recv_cq->lock);
633 spin_unlock_irq(&send_cq->lock); 637 spin_unlock_irq(&send_cq->lock);
634 else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { 638 } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
635 spin_unlock(&recv_cq->lock); 639 spin_unlock(&recv_cq->lock);
636 spin_unlock_irq(&send_cq->lock); 640 spin_unlock_irq(&send_cq->lock);
637 } else { 641 } else {
diff --git a/drivers/infiniband/hw/mthca/mthca_catas.c b/drivers/infiniband/hw/mthca/mthca_catas.c
index 65ad359fdf16..056b2a4c6970 100644
--- a/drivers/infiniband/hw/mthca/mthca_catas.c
+++ b/drivers/infiniband/hw/mthca/mthca_catas.c
@@ -88,6 +88,7 @@ static void handle_catas(struct mthca_dev *dev)
88 event.device = &dev->ib_dev; 88 event.device = &dev->ib_dev;
89 event.event = IB_EVENT_DEVICE_FATAL; 89 event.event = IB_EVENT_DEVICE_FATAL;
90 event.element.port_num = 0; 90 event.element.port_num = 0;
91 dev->active = false;
91 92
92 ib_dispatch_event(&event); 93 ib_dispatch_event(&event);
93 94
diff --git a/drivers/infiniband/hw/mthca/mthca_config_reg.h b/drivers/infiniband/hw/mthca/mthca_config_reg.h
index 75671f75cac4..155bc66395be 100644
--- a/drivers/infiniband/hw/mthca/mthca_config_reg.h
+++ b/drivers/infiniband/hw/mthca/mthca_config_reg.h
@@ -34,8 +34,6 @@
34#ifndef MTHCA_CONFIG_REG_H 34#ifndef MTHCA_CONFIG_REG_H
35#define MTHCA_CONFIG_REG_H 35#define MTHCA_CONFIG_REG_H
36 36
37#include <asm/page.h>
38
39#define MTHCA_HCR_BASE 0x80680 37#define MTHCA_HCR_BASE 0x80680
40#define MTHCA_HCR_SIZE 0x0001c 38#define MTHCA_HCR_SIZE 0x0001c
41#define MTHCA_ECR_BASE 0x80700 39#define MTHCA_ECR_BASE 0x80700
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index 9ef611f6dd36..7e6a6d64ad4e 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -357,6 +357,7 @@ struct mthca_dev {
357 struct ib_ah *sm_ah[MTHCA_MAX_PORTS]; 357 struct ib_ah *sm_ah[MTHCA_MAX_PORTS];
358 spinlock_t sm_lock; 358 spinlock_t sm_lock;
359 u8 rate[MTHCA_MAX_PORTS]; 359 u8 rate[MTHCA_MAX_PORTS];
360 bool active;
360}; 361};
361 362
362#ifdef CONFIG_INFINIBAND_MTHCA_DEBUG 363#ifdef CONFIG_INFINIBAND_MTHCA_DEBUG
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index 90e4e450a120..8c31fa36e95e 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -829,27 +829,34 @@ int mthca_init_eq_table(struct mthca_dev *dev)
829 829
830 if (dev->mthca_flags & MTHCA_FLAG_MSI_X) { 830 if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {
831 static const char *eq_name[] = { 831 static const char *eq_name[] = {
832 [MTHCA_EQ_COMP] = DRV_NAME " (comp)", 832 [MTHCA_EQ_COMP] = DRV_NAME "-comp",
833 [MTHCA_EQ_ASYNC] = DRV_NAME " (async)", 833 [MTHCA_EQ_ASYNC] = DRV_NAME "-async",
834 [MTHCA_EQ_CMD] = DRV_NAME " (cmd)" 834 [MTHCA_EQ_CMD] = DRV_NAME "-cmd"
835 }; 835 };
836 836
837 for (i = 0; i < MTHCA_NUM_EQ; ++i) { 837 for (i = 0; i < MTHCA_NUM_EQ; ++i) {
838 snprintf(dev->eq_table.eq[i].irq_name,
839 IB_DEVICE_NAME_MAX,
840 "%s@pci:%s", eq_name[i],
841 pci_name(dev->pdev));
838 err = request_irq(dev->eq_table.eq[i].msi_x_vector, 842 err = request_irq(dev->eq_table.eq[i].msi_x_vector,
839 mthca_is_memfree(dev) ? 843 mthca_is_memfree(dev) ?
840 mthca_arbel_msi_x_interrupt : 844 mthca_arbel_msi_x_interrupt :
841 mthca_tavor_msi_x_interrupt, 845 mthca_tavor_msi_x_interrupt,
842 0, eq_name[i], dev->eq_table.eq + i); 846 0, dev->eq_table.eq[i].irq_name,
847 dev->eq_table.eq + i);
843 if (err) 848 if (err)
844 goto err_out_cmd; 849 goto err_out_cmd;
845 dev->eq_table.eq[i].have_irq = 1; 850 dev->eq_table.eq[i].have_irq = 1;
846 } 851 }
847 } else { 852 } else {
853 snprintf(dev->eq_table.eq[0].irq_name, IB_DEVICE_NAME_MAX,
854 DRV_NAME "@pci:%s", pci_name(dev->pdev));
848 err = request_irq(dev->pdev->irq, 855 err = request_irq(dev->pdev->irq,
849 mthca_is_memfree(dev) ? 856 mthca_is_memfree(dev) ?
850 mthca_arbel_interrupt : 857 mthca_arbel_interrupt :
851 mthca_tavor_interrupt, 858 mthca_tavor_interrupt,
852 IRQF_SHARED, DRV_NAME, dev); 859 IRQF_SHARED, dev->eq_table.eq[0].irq_name, dev);
853 if (err) 860 if (err)
854 goto err_out_cmd; 861 goto err_out_cmd;
855 dev->eq_table.have_irq = 1; 862 dev->eq_table.have_irq = 1;
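
The mthca_eq.c hunk gives each MSI-X event-queue vector a unique IRQ name that embeds the PCI address, so /proc/interrupts stays readable when several adapters are present. A hedged sketch of the idea follows; the structure, buffer size, and helper name are illustrative (the patch itself sizes the buffer with IB_DEVICE_NAME_MAX):

	#include <linux/interrupt.h>
	#include <linux/pci.h>

	struct example_eq {
		int  msi_x_vector;
		char irq_name[32];
	};

	static int example_request_eq_irq(struct example_eq *eq, const char *base,
					  struct pci_dev *pdev, irq_handler_t handler)
	{
		/* produces e.g. "mthca-comp@pci:0000:04:00.0" */
		snprintf(eq->irq_name, sizeof(eq->irq_name), "%s@pci:%s",
			 base, pci_name(pdev));
		return request_irq(eq->msi_x_vector, handler, 0, eq->irq_name, eq);
	}

Storing the name in the EQ structure rather than a stack buffer matters: request_irq() keeps the name pointer for as long as the handler is registered.
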
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index 13da9f1d24c0..b01b28987874 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -1116,6 +1116,8 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
1116 pci_set_drvdata(pdev, mdev); 1116 pci_set_drvdata(pdev, mdev);
1117 mdev->hca_type = hca_type; 1117 mdev->hca_type = hca_type;
1118 1118
1119 mdev->active = true;
1120
1119 return 0; 1121 return 0;
1120 1122
1121err_unregister: 1123err_unregister:
@@ -1215,15 +1217,11 @@ int __mthca_restart_one(struct pci_dev *pdev)
1215static int __devinit mthca_init_one(struct pci_dev *pdev, 1217static int __devinit mthca_init_one(struct pci_dev *pdev,
1216 const struct pci_device_id *id) 1218 const struct pci_device_id *id)
1217{ 1219{
1218 static int mthca_version_printed = 0;
1219 int ret; 1220 int ret;
1220 1221
1221 mutex_lock(&mthca_device_mutex); 1222 mutex_lock(&mthca_device_mutex);
1222 1223
1223 if (!mthca_version_printed) { 1224 printk_once(KERN_INFO "%s", mthca_version);
1224 printk(KERN_INFO "%s", mthca_version);
1225 ++mthca_version_printed;
1226 }
1227 1225
1228 if (id->driver_data >= ARRAY_SIZE(mthca_hca_table)) { 1226 if (id->driver_data >= ARRAY_SIZE(mthca_hca_table)) {
1229 printk(KERN_ERR PFX "%s has invalid driver data %lx\n", 1227 printk(KERN_ERR PFX "%s has invalid driver data %lx\n",
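
The mthca_main.c hunk above replaces an open-coded "print the version string only once" counter with printk_once(). A small sketch comparing the two forms, assuming the message only needs to appear on the first probe (the version string is a placeholder):

	static const char example_version[] = "example driver v1.0\n";

	static void example_print_version(void)
	{
		/* before: manual one-shot guard */
		static int version_printed;

		if (!version_printed) {
			printk(KERN_INFO "%s", example_version);
			++version_printed;
		}

		/* after: printk_once() hides an equivalent static flag */
		printk_once(KERN_INFO "%s", example_version);
	}
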
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 87ad889e367b..bcf7a4014820 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -334,6 +334,9 @@ static struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev,
334 struct mthca_ucontext *context; 334 struct mthca_ucontext *context;
335 int err; 335 int err;
336 336
337 if (!(to_mdev(ibdev)->active))
338 return ERR_PTR(-EAGAIN);
339
337 memset(&uresp, 0, sizeof uresp); 340 memset(&uresp, 0, sizeof uresp);
338 341
339 uresp.qp_tab_size = to_mdev(ibdev)->limits.num_qps; 342 uresp.qp_tab_size = to_mdev(ibdev)->limits.num_qps;
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h
index c621f8794b88..90f4c4d2e983 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.h
+++ b/drivers/infiniband/hw/mthca/mthca_provider.h
@@ -113,6 +113,7 @@ struct mthca_eq {
113 int nent; 113 int nent;
114 struct mthca_buf_list *page_list; 114 struct mthca_buf_list *page_list;
115 struct mthca_mr mr; 115 struct mthca_mr mr;
116 char irq_name[IB_DEVICE_NAME_MAX];
116}; 117};
117 118
118struct mthca_av; 119struct mthca_av;
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index f5081bfde6db..c10576fa60c1 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1319,10 +1319,12 @@ int mthca_alloc_qp(struct mthca_dev *dev,
1319} 1319}
1320 1320
1321static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq) 1321static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
1322 __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
1322{ 1323{
1323 if (send_cq == recv_cq) 1324 if (send_cq == recv_cq) {
1324 spin_lock_irq(&send_cq->lock); 1325 spin_lock_irq(&send_cq->lock);
1325 else if (send_cq->cqn < recv_cq->cqn) { 1326 __acquire(&recv_cq->lock);
1327 } else if (send_cq->cqn < recv_cq->cqn) {
1326 spin_lock_irq(&send_cq->lock); 1328 spin_lock_irq(&send_cq->lock);
1327 spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING); 1329 spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
1328 } else { 1330 } else {
@@ -1332,10 +1334,12 @@ static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
1332} 1334}
1333 1335
1334static void mthca_unlock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq) 1336static void mthca_unlock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
1337 __releases(&send_cq->lock) __releases(&recv_cq->lock)
1335{ 1338{
1336 if (send_cq == recv_cq) 1339 if (send_cq == recv_cq) {
1340 __release(&recv_cq->lock);
1337 spin_unlock_irq(&send_cq->lock); 1341 spin_unlock_irq(&send_cq->lock);
1338 else if (send_cq->cqn < recv_cq->cqn) { 1342 } else if (send_cq->cqn < recv_cq->cqn) {
1339 spin_unlock(&recv_cq->lock); 1343 spin_unlock(&recv_cq->lock);
1340 spin_unlock_irq(&send_cq->lock); 1344 spin_unlock_irq(&send_cq->lock);
1341 } else { 1345 } else {
diff --git a/drivers/infiniband/hw/mthca/mthca_reset.c b/drivers/infiniband/hw/mthca/mthca_reset.c
index acb6817f6060..2a13a163d337 100644
--- a/drivers/infiniband/hw/mthca/mthca_reset.c
+++ b/drivers/infiniband/hw/mthca/mthca_reset.c
@@ -30,7 +30,6 @@
30 * SOFTWARE. 30 * SOFTWARE.
31 */ 31 */
32 32
33#include <linux/init.h>
34#include <linux/errno.h> 33#include <linux/errno.h>
35#include <linux/pci.h> 34#include <linux/pci.h>
36#include <linux/delay.h> 35#include <linux/delay.h>
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
index bf1720f7f35f..bcc6abc4faff 100644
--- a/drivers/infiniband/hw/nes/nes.h
+++ b/drivers/infiniband/hw/nes/nes.h
@@ -523,7 +523,7 @@ int nes_cm_disconn(struct nes_qp *);
523void nes_cm_disconn_worker(void *); 523void nes_cm_disconn_worker(void *);
524 524
525/* nes_verbs.c */ 525/* nes_verbs.c */
526int nes_hw_modify_qp(struct nes_device *, struct nes_qp *, u32, u32); 526int nes_hw_modify_qp(struct nes_device *, struct nes_qp *, u32, u32, u32);
527int nes_modify_qp(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *); 527int nes_modify_qp(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *);
528struct nes_ib_device *nes_init_ofa_device(struct net_device *); 528struct nes_ib_device *nes_init_ofa_device(struct net_device *);
529void nes_destroy_ofa_device(struct nes_ib_device *); 529void nes_destroy_ofa_device(struct nes_ib_device *);
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 114b802771ad..73473db19863 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -2450,19 +2450,16 @@ static int nes_cm_init_tsa_conn(struct nes_qp *nesqp, struct nes_cm_node *cm_nod
2450 */ 2450 */
2451int nes_cm_disconn(struct nes_qp *nesqp) 2451int nes_cm_disconn(struct nes_qp *nesqp)
2452{ 2452{
2453 unsigned long flags; 2453 struct disconn_work *work;
2454
2455 spin_lock_irqsave(&nesqp->lock, flags);
2456 if (nesqp->disconn_pending == 0) {
2457 nesqp->disconn_pending++;
2458 spin_unlock_irqrestore(&nesqp->lock, flags);
2459 /* init our disconnect work element, to */
2460 INIT_WORK(&nesqp->disconn_work, nes_disconnect_worker);
2461 2454
2462 queue_work(g_cm_core->disconn_wq, &nesqp->disconn_work); 2455 work = kzalloc(sizeof *work, GFP_ATOMIC);
2463 } else 2456 if (!work)
2464 spin_unlock_irqrestore(&nesqp->lock, flags); 2457 return -ENOMEM; /* Timer will clean up */
2465 2458
2459 nes_add_ref(&nesqp->ibqp);
2460 work->nesqp = nesqp;
2461 INIT_WORK(&work->work, nes_disconnect_worker);
2462 queue_work(g_cm_core->disconn_wq, &work->work);
2466 return 0; 2463 return 0;
2467} 2464}
2468 2465
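
The nes_cm_disconn() rewrite above moves the disconnect from a work_struct embedded in the QP, which could only be queued once and needed the disconn_pending guard, to a small dynamically allocated work item that carries its own QP reference. A minimal sketch of that pattern with hypothetical names (my_qp, handle_disconnect, and the refcount helpers stand in for the driver's own):

	#include <linux/workqueue.h>
	#include <linux/slab.h>

	struct my_qp;						/* placeholder QP type */
	extern void handle_disconnect(struct my_qp *qp);	/* hypothetical teardown */
	extern void get_qp_ref(struct my_qp *qp);
	extern void put_qp_ref(struct my_qp *qp);

	struct disconn_work {
		struct work_struct work;
		struct my_qp *qp;
	};

	static void disconn_worker(struct work_struct *work)
	{
		struct disconn_work *dwork =
			container_of(work, struct disconn_work, work);
		struct my_qp *qp = dwork->qp;

		kfree(dwork);			/* work item is single-use */
		handle_disconnect(qp);
		put_qp_ref(qp);			/* drop the reference taken at queue time */
	}

	static int queue_disconnect(struct workqueue_struct *wq, struct my_qp *qp)
	{
		struct disconn_work *dwork = kzalloc(sizeof(*dwork), GFP_ATOMIC);

		if (!dwork)
			return -ENOMEM;		/* caller falls back to timer cleanup */

		get_qp_ref(qp);			/* keep the QP alive until the worker runs */
		dwork->qp = qp;
		INIT_WORK(&dwork->work, disconn_worker);
		queue_work(wq, &dwork->work);
		return 0;
	}

Because each request allocates its own work item and pins the QP, concurrent AEQ events can no longer race on a single embedded work_struct.
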
@@ -2472,11 +2469,14 @@ int nes_cm_disconn(struct nes_qp *nesqp)
2472 */ 2469 */
2473static void nes_disconnect_worker(struct work_struct *work) 2470static void nes_disconnect_worker(struct work_struct *work)
2474{ 2471{
2475 struct nes_qp *nesqp = container_of(work, struct nes_qp, disconn_work); 2472 struct disconn_work *dwork = container_of(work, struct disconn_work, work);
2473 struct nes_qp *nesqp = dwork->nesqp;
2476 2474
2475 kfree(dwork);
2477 nes_debug(NES_DBG_CM, "processing AEQE id 0x%04X for QP%u.\n", 2476 nes_debug(NES_DBG_CM, "processing AEQE id 0x%04X for QP%u.\n",
2478 nesqp->last_aeq, nesqp->hwqp.qp_id); 2477 nesqp->last_aeq, nesqp->hwqp.qp_id);
2479 nes_cm_disconn_true(nesqp); 2478 nes_cm_disconn_true(nesqp);
2479 nes_rem_ref(&nesqp->ibqp);
2480} 2480}
2481 2481
2482 2482
@@ -2493,7 +2493,12 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
2493 u16 last_ae; 2493 u16 last_ae;
2494 u8 original_hw_tcp_state; 2494 u8 original_hw_tcp_state;
2495 u8 original_ibqp_state; 2495 u8 original_ibqp_state;
2496 u8 issued_disconnect_reset = 0; 2496 enum iw_cm_event_type disconn_status = IW_CM_EVENT_STATUS_OK;
2497 int issue_disconn = 0;
2498 int issue_close = 0;
2499 int issue_flush = 0;
2500 u32 flush_q = NES_CQP_FLUSH_RQ;
2501 struct ib_event ibevent;
2497 2502
2498 if (!nesqp) { 2503 if (!nesqp) {
2499 nes_debug(NES_DBG_CM, "disconnect_worker nesqp is NULL\n"); 2504 nes_debug(NES_DBG_CM, "disconnect_worker nesqp is NULL\n");
@@ -2517,24 +2522,55 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
2517 original_ibqp_state = nesqp->ibqp_state; 2522 original_ibqp_state = nesqp->ibqp_state;
2518 last_ae = nesqp->last_aeq; 2523 last_ae = nesqp->last_aeq;
2519 2524
2525 if (nesqp->term_flags) {
2526 issue_disconn = 1;
2527 issue_close = 1;
2528 nesqp->cm_id = NULL;
2529 if (nesqp->flush_issued == 0) {
2530 nesqp->flush_issued = 1;
2531 issue_flush = 1;
2532 }
2533 } else if ((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) ||
2534 ((original_ibqp_state == IB_QPS_RTS) &&
2535 (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
2536 issue_disconn = 1;
2537 if (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET)
2538 disconn_status = IW_CM_EVENT_STATUS_RESET;
2539 }
2540
2541 if (((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSED) ||
2542 (original_hw_tcp_state == NES_AEQE_TCP_STATE_TIME_WAIT) ||
2543 (last_ae == NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) ||
2544 (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
2545 issue_close = 1;
2546 nesqp->cm_id = NULL;
2547 if (nesqp->flush_issued == 0) {
2548 nesqp->flush_issued = 1;
2549 issue_flush = 1;
2550 }
2551 }
2552
2553 spin_unlock_irqrestore(&nesqp->lock, flags);
2520 2554
2521 nes_debug(NES_DBG_CM, "set ibqp_state=%u\n", nesqp->ibqp_state); 2555 if ((issue_flush) && (nesqp->destroyed == 0)) {
2556 /* Flush the queue(s) */
2557 if (nesqp->hw_iwarp_state >= NES_AEQE_IWARP_STATE_TERMINATE)
2558 flush_q |= NES_CQP_FLUSH_SQ;
2559 flush_wqes(nesvnic->nesdev, nesqp, flush_q, 1);
2522 2560
2523 if ((nesqp->cm_id) && (cm_id->event_handler)) { 2561 if (nesqp->term_flags) {
2524 if ((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) || 2562 ibevent.device = nesqp->ibqp.device;
2525 ((original_ibqp_state == IB_QPS_RTS) && 2563 ibevent.event = nesqp->terminate_eventtype;
2526 (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) { 2564 ibevent.element.qp = &nesqp->ibqp;
2565 nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
2566 }
2567 }
2568
2569 if ((cm_id) && (cm_id->event_handler)) {
2570 if (issue_disconn) {
2527 atomic_inc(&cm_disconnects); 2571 atomic_inc(&cm_disconnects);
2528 cm_event.event = IW_CM_EVENT_DISCONNECT; 2572 cm_event.event = IW_CM_EVENT_DISCONNECT;
2529 if (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET) { 2573 cm_event.status = disconn_status;
2530 cm_event.status = IW_CM_EVENT_STATUS_RESET;
2531 nes_debug(NES_DBG_CM, "Generating a CM "
2532 "Disconnect Event (status reset) for "
2533 "QP%u, cm_id = %p. \n",
2534 nesqp->hwqp.qp_id, cm_id);
2535 } else
2536 cm_event.status = IW_CM_EVENT_STATUS_OK;
2537
2538 cm_event.local_addr = cm_id->local_addr; 2574 cm_event.local_addr = cm_id->local_addr;
2539 cm_event.remote_addr = cm_id->remote_addr; 2575 cm_event.remote_addr = cm_id->remote_addr;
2540 cm_event.private_data = NULL; 2576 cm_event.private_data = NULL;
@@ -2547,29 +2583,14 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
2547 nesqp->hwqp.sq_tail, cm_id, 2583 nesqp->hwqp.sq_tail, cm_id,
2548 atomic_read(&nesqp->refcount)); 2584 atomic_read(&nesqp->refcount));
2549 2585
2550 spin_unlock_irqrestore(&nesqp->lock, flags);
2551 ret = cm_id->event_handler(cm_id, &cm_event); 2586 ret = cm_id->event_handler(cm_id, &cm_event);
2552 if (ret) 2587 if (ret)
2553 nes_debug(NES_DBG_CM, "OFA CM event_handler " 2588 nes_debug(NES_DBG_CM, "OFA CM event_handler "
2554 "returned, ret=%d\n", ret); 2589 "returned, ret=%d\n", ret);
2555 spin_lock_irqsave(&nesqp->lock, flags);
2556 } 2590 }
2557 2591
2558 nesqp->disconn_pending = 0; 2592 if (issue_close) {
2559 /* There might have been another AE while the lock was released */
2560 original_hw_tcp_state = nesqp->hw_tcp_state;
2561 original_ibqp_state = nesqp->ibqp_state;
2562 last_ae = nesqp->last_aeq;
2563
2564 if ((issued_disconnect_reset == 0) && (nesqp->cm_id) &&
2565 ((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSED) ||
2566 (original_hw_tcp_state == NES_AEQE_TCP_STATE_TIME_WAIT) ||
2567 (last_ae == NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) ||
2568 (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
2569 atomic_inc(&cm_closes); 2593 atomic_inc(&cm_closes);
2570 nesqp->cm_id = NULL;
2571 nesqp->in_disconnect = 0;
2572 spin_unlock_irqrestore(&nesqp->lock, flags);
2573 nes_disconnect(nesqp, 1); 2594 nes_disconnect(nesqp, 1);
2574 2595
2575 cm_id->provider_data = nesqp; 2596 cm_id->provider_data = nesqp;
@@ -2588,28 +2609,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
2588 } 2609 }
2589 2610
2590 cm_id->rem_ref(cm_id); 2611 cm_id->rem_ref(cm_id);
2591
2592 spin_lock_irqsave(&nesqp->lock, flags);
2593 if (nesqp->flush_issued == 0) {
2594 nesqp->flush_issued = 1;
2595 spin_unlock_irqrestore(&nesqp->lock, flags);
2596 flush_wqes(nesvnic->nesdev, nesqp,
2597 NES_CQP_FLUSH_RQ, 1);
2598 } else
2599 spin_unlock_irqrestore(&nesqp->lock, flags);
2600 } else {
2601 cm_id = nesqp->cm_id;
2602 spin_unlock_irqrestore(&nesqp->lock, flags);
2603 /* check to see if the inbound reset beat the outbound reset */
2604 if ((!cm_id) && (last_ae==NES_AEQE_AEID_RESET_SENT)) {
2605 nes_debug(NES_DBG_CM, "QP%u: Decing refcount "
2606 "due to inbound reset beating the "
2607 "outbound reset.\n", nesqp->hwqp.qp_id);
2608 }
2609 } 2612 }
2610 } else {
2611 nesqp->disconn_pending = 0;
2612 spin_unlock_irqrestore(&nesqp->lock, flags);
2613 } 2613 }
2614 2614
2615 return 0; 2615 return 0;
diff --git a/drivers/infiniband/hw/nes/nes_cm.h b/drivers/infiniband/hw/nes/nes_cm.h
index 8b7e7c0e496e..90e8e4d8a5ce 100644
--- a/drivers/infiniband/hw/nes/nes_cm.h
+++ b/drivers/infiniband/hw/nes/nes_cm.h
@@ -410,8 +410,6 @@ struct nes_cm_ops {
410int schedule_nes_timer(struct nes_cm_node *, struct sk_buff *, 410int schedule_nes_timer(struct nes_cm_node *, struct sk_buff *,
411 enum nes_timer_type, int, int); 411 enum nes_timer_type, int, int);
412 412
413int nes_cm_disconn(struct nes_qp *);
414
415int nes_accept(struct iw_cm_id *, struct iw_cm_conn_param *); 413int nes_accept(struct iw_cm_id *, struct iw_cm_conn_param *);
416int nes_reject(struct iw_cm_id *, const void *, u8); 414int nes_reject(struct iw_cm_id *, const void *, u8);
417int nes_connect(struct iw_cm_id *, struct iw_cm_conn_param *); 415int nes_connect(struct iw_cm_id *, struct iw_cm_conn_param *);
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 4a84d02ece06..63a1a8e1e8a3 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -74,6 +74,8 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
74static void process_critical_error(struct nes_device *nesdev); 74static void process_critical_error(struct nes_device *nesdev);
75static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number); 75static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number);
76static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_Mode); 76static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_Mode);
77static void nes_terminate_timeout(unsigned long context);
78static void nes_terminate_start_timer(struct nes_qp *nesqp);
77 79
78#ifdef CONFIG_INFINIBAND_NES_DEBUG 80#ifdef CONFIG_INFINIBAND_NES_DEBUG
79static unsigned char *nes_iwarp_state_str[] = { 81static unsigned char *nes_iwarp_state_str[] = {
@@ -2903,6 +2905,417 @@ static void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq)
2903} 2905}
2904 2906
2905 2907
2908static u8 *locate_mpa(u8 *pkt, u32 aeq_info)
2909{
2910 u16 pkt_len;
2911
2912 if (aeq_info & NES_AEQE_Q2_DATA_ETHERNET) {
2913 /* skip over ethernet header */
2914 pkt_len = be16_to_cpu(*(u16 *)(pkt + ETH_HLEN - 2));
2915 pkt += ETH_HLEN;
2916
2917 /* Skip over IP and TCP headers */
2918 pkt += 4 * (pkt[0] & 0x0f);
2919 pkt += 4 * ((pkt[12] >> 4) & 0x0f);
2920 }
2921 return pkt;
2922}
2923
2924/* Determine if incoming error pkt is rdma layer */
2925static u32 iwarp_opcode(struct nes_qp *nesqp, u32 aeq_info)
2926{
2927 u8 *pkt;
2928 u16 *mpa;
2929 u32 opcode = 0xffffffff;
2930
2931 if (aeq_info & NES_AEQE_Q2_DATA_WRITTEN) {
2932 pkt = nesqp->hwqp.q2_vbase + BAD_FRAME_OFFSET;
2933 mpa = (u16 *)locate_mpa(pkt, aeq_info);
2934 opcode = be16_to_cpu(mpa[1]) & 0xf;
2935 }
2936
2937 return opcode;
2938}
2939
2940/* Build iWARP terminate header */
2941static int nes_bld_terminate_hdr(struct nes_qp *nesqp, u16 async_event_id, u32 aeq_info)
2942{
2943 u8 *pkt = nesqp->hwqp.q2_vbase + BAD_FRAME_OFFSET;
2944 u16 ddp_seg_len;
2945 int copy_len = 0;
2946 u8 is_tagged = 0;
2947 u8 flush_code = 0;
2948 struct nes_terminate_hdr *termhdr;
2949
2950 termhdr = (struct nes_terminate_hdr *)nesqp->hwqp.q2_vbase;
2951 memset(termhdr, 0, 64);
2952
2953 if (aeq_info & NES_AEQE_Q2_DATA_WRITTEN) {
2954
2955 /* Use data from offending packet to fill in ddp & rdma hdrs */
2956 pkt = locate_mpa(pkt, aeq_info);
2957 ddp_seg_len = be16_to_cpu(*(u16 *)pkt);
2958 if (ddp_seg_len) {
2959 copy_len = 2;
2960 termhdr->hdrct = DDP_LEN_FLAG;
2961 if (pkt[2] & 0x80) {
2962 is_tagged = 1;
2963 if (ddp_seg_len >= TERM_DDP_LEN_TAGGED) {
2964 copy_len += TERM_DDP_LEN_TAGGED;
2965 termhdr->hdrct |= DDP_HDR_FLAG;
2966 }
2967 } else {
2968 if (ddp_seg_len >= TERM_DDP_LEN_UNTAGGED) {
2969 copy_len += TERM_DDP_LEN_UNTAGGED;
2970 termhdr->hdrct |= DDP_HDR_FLAG;
2971 }
2972
2973 if (ddp_seg_len >= (TERM_DDP_LEN_UNTAGGED + TERM_RDMA_LEN)) {
2974 if ((pkt[3] & RDMA_OPCODE_MASK) == RDMA_READ_REQ_OPCODE) {
2975 copy_len += TERM_RDMA_LEN;
2976 termhdr->hdrct |= RDMA_HDR_FLAG;
2977 }
2978 }
2979 }
2980 }
2981 }
2982
2983 switch (async_event_id) {
2984 case NES_AEQE_AEID_AMP_UNALLOCATED_STAG:
2985 switch (iwarp_opcode(nesqp, aeq_info)) {
2986 case IWARP_OPCODE_WRITE:
2987 flush_code = IB_WC_LOC_PROT_ERR;
2988 termhdr->layer_etype = (LAYER_DDP << 4) | DDP_TAGGED_BUFFER;
2989 termhdr->error_code = DDP_TAGGED_INV_STAG;
2990 break;
2991 default:
2992 flush_code = IB_WC_REM_ACCESS_ERR;
2993 termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
2994 termhdr->error_code = RDMAP_INV_STAG;
2995 }
2996 break;
2997 case NES_AEQE_AEID_AMP_INVALID_STAG:
2998 flush_code = IB_WC_REM_ACCESS_ERR;
2999 termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
3000 termhdr->error_code = RDMAP_INV_STAG;
3001 break;
3002 case NES_AEQE_AEID_AMP_BAD_QP:
3003 flush_code = IB_WC_LOC_QP_OP_ERR;
3004 termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
3005 termhdr->error_code = DDP_UNTAGGED_INV_QN;
3006 break;
3007 case NES_AEQE_AEID_AMP_BAD_STAG_KEY:
3008 case NES_AEQE_AEID_AMP_BAD_STAG_INDEX:
3009 switch (iwarp_opcode(nesqp, aeq_info)) {
3010 case IWARP_OPCODE_SEND_INV:
3011 case IWARP_OPCODE_SEND_SE_INV:
3012 flush_code = IB_WC_REM_OP_ERR;
3013 termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_OP;
3014 termhdr->error_code = RDMAP_CANT_INV_STAG;
3015 break;
3016 default:
3017 flush_code = IB_WC_REM_ACCESS_ERR;
3018 termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
3019 termhdr->error_code = RDMAP_INV_STAG;
3020 }
3021 break;
3022 case NES_AEQE_AEID_AMP_BOUNDS_VIOLATION:
3023 if (aeq_info & (NES_AEQE_Q2_DATA_ETHERNET | NES_AEQE_Q2_DATA_MPA)) {
3024 flush_code = IB_WC_LOC_PROT_ERR;
3025 termhdr->layer_etype = (LAYER_DDP << 4) | DDP_TAGGED_BUFFER;
3026 termhdr->error_code = DDP_TAGGED_BOUNDS;
3027 } else {
3028 flush_code = IB_WC_REM_ACCESS_ERR;
3029 termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
3030 termhdr->error_code = RDMAP_INV_BOUNDS;
3031 }
3032 break;
3033 case NES_AEQE_AEID_AMP_RIGHTS_VIOLATION:
3034 case NES_AEQE_AEID_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
3035 case NES_AEQE_AEID_PRIV_OPERATION_DENIED:
3036 flush_code = IB_WC_REM_ACCESS_ERR;
3037 termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
3038 termhdr->error_code = RDMAP_ACCESS;
3039 break;
3040 case NES_AEQE_AEID_AMP_TO_WRAP:
3041 flush_code = IB_WC_REM_ACCESS_ERR;
3042 termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
3043 termhdr->error_code = RDMAP_TO_WRAP;
3044 break;
3045 case NES_AEQE_AEID_AMP_BAD_PD:
3046 switch (iwarp_opcode(nesqp, aeq_info)) {
3047 case IWARP_OPCODE_WRITE:
3048 flush_code = IB_WC_LOC_PROT_ERR;
3049 termhdr->layer_etype = (LAYER_DDP << 4) | DDP_TAGGED_BUFFER;
3050 termhdr->error_code = DDP_TAGGED_UNASSOC_STAG;
3051 break;
3052 case IWARP_OPCODE_SEND_INV:
3053 case IWARP_OPCODE_SEND_SE_INV:
3054 flush_code = IB_WC_REM_ACCESS_ERR;
3055 termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
3056 termhdr->error_code = RDMAP_CANT_INV_STAG;
3057 break;
3058 default:
3059 flush_code = IB_WC_REM_ACCESS_ERR;
3060 termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
3061 termhdr->error_code = RDMAP_UNASSOC_STAG;
3062 }
3063 break;
3064 case NES_AEQE_AEID_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH:
3065 flush_code = IB_WC_LOC_LEN_ERR;
3066 termhdr->layer_etype = (LAYER_MPA << 4) | DDP_LLP;
3067 termhdr->error_code = MPA_MARKER;
3068 break;
3069 case NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR:
3070 flush_code = IB_WC_GENERAL_ERR;
3071 termhdr->layer_etype = (LAYER_MPA << 4) | DDP_LLP;
3072 termhdr->error_code = MPA_CRC;
3073 break;
3074 case NES_AEQE_AEID_LLP_SEGMENT_TOO_LARGE:
3075 case NES_AEQE_AEID_LLP_SEGMENT_TOO_SMALL:
3076 flush_code = IB_WC_LOC_LEN_ERR;
3077 termhdr->layer_etype = (LAYER_DDP << 4) | DDP_CATASTROPHIC;
3078 termhdr->error_code = DDP_CATASTROPHIC_LOCAL;
3079 break;
3080 case NES_AEQE_AEID_DDP_LCE_LOCAL_CATASTROPHIC:
3081 case NES_AEQE_AEID_DDP_NO_L_BIT:
3082 flush_code = IB_WC_FATAL_ERR;
3083 termhdr->layer_etype = (LAYER_DDP << 4) | DDP_CATASTROPHIC;
3084 termhdr->error_code = DDP_CATASTROPHIC_LOCAL;
3085 break;
3086 case NES_AEQE_AEID_DDP_INVALID_MSN_GAP_IN_MSN:
3087 case NES_AEQE_AEID_DDP_INVALID_MSN_RANGE_IS_NOT_VALID:
3088 flush_code = IB_WC_GENERAL_ERR;
3089 termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
3090 termhdr->error_code = DDP_UNTAGGED_INV_MSN_RANGE;
3091 break;
3092 case NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
3093 flush_code = IB_WC_LOC_LEN_ERR;
3094 termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
3095 termhdr->error_code = DDP_UNTAGGED_INV_TOO_LONG;
3096 break;
3097 case NES_AEQE_AEID_DDP_UBE_INVALID_DDP_VERSION:
3098 flush_code = IB_WC_GENERAL_ERR;
3099 if (is_tagged) {
3100 termhdr->layer_etype = (LAYER_DDP << 4) | DDP_TAGGED_BUFFER;
3101 termhdr->error_code = DDP_TAGGED_INV_DDP_VER;
3102 } else {
3103 termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
3104 termhdr->error_code = DDP_UNTAGGED_INV_DDP_VER;
3105 }
3106 break;
3107 case NES_AEQE_AEID_DDP_UBE_INVALID_MO:
3108 flush_code = IB_WC_GENERAL_ERR;
3109 termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
3110 termhdr->error_code = DDP_UNTAGGED_INV_MO;
3111 break;
3112 case NES_AEQE_AEID_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:
3113 flush_code = IB_WC_REM_OP_ERR;
3114 termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
3115 termhdr->error_code = DDP_UNTAGGED_INV_MSN_NO_BUF;
3116 break;
3117 case NES_AEQE_AEID_DDP_UBE_INVALID_QN:
3118 flush_code = IB_WC_GENERAL_ERR;
3119 termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
3120 termhdr->error_code = DDP_UNTAGGED_INV_QN;
3121 break;
3122 case NES_AEQE_AEID_RDMAP_ROE_INVALID_RDMAP_VERSION:
3123 flush_code = IB_WC_GENERAL_ERR;
3124 termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_OP;
3125 termhdr->error_code = RDMAP_INV_RDMAP_VER;
3126 break;
3127 case NES_AEQE_AEID_RDMAP_ROE_UNEXPECTED_OPCODE:
3128 flush_code = IB_WC_LOC_QP_OP_ERR;
3129 termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_OP;
3130 termhdr->error_code = RDMAP_UNEXPECTED_OP;
3131 break;
3132 default:
3133 flush_code = IB_WC_FATAL_ERR;
3134 termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_OP;
3135 termhdr->error_code = RDMAP_UNSPECIFIED;
3136 break;
3137 }
3138
3139 if (copy_len)
3140 memcpy(termhdr + 1, pkt, copy_len);
3141
3142 if ((flush_code) && ((NES_AEQE_INBOUND_RDMA & aeq_info) == 0)) {
3143 if (aeq_info & NES_AEQE_SQ)
3144 nesqp->term_sq_flush_code = flush_code;
3145 else
3146 nesqp->term_rq_flush_code = flush_code;
3147 }
3148
3149 return sizeof(struct nes_terminate_hdr) + copy_len;
3150}
3151
3152static void nes_terminate_connection(struct nes_device *nesdev, struct nes_qp *nesqp,
3153 struct nes_hw_aeqe *aeqe, enum ib_event_type eventtype)
3154{
3155 u64 context;
3156 unsigned long flags;
3157 u32 aeq_info;
3158 u16 async_event_id;
3159 u8 tcp_state;
3160 u8 iwarp_state;
3161 u32 termlen = 0;
3162 u32 mod_qp_flags = NES_CQP_QP_IWARP_STATE_TERMINATE |
3163 NES_CQP_QP_TERM_DONT_SEND_FIN;
3164 struct nes_adapter *nesadapter = nesdev->nesadapter;
3165
3166 if (nesqp->term_flags & NES_TERM_SENT)
3167 return; /* Sanity check */
3168
3169 aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]);
3170 tcp_state = (aeq_info & NES_AEQE_TCP_STATE_MASK) >> NES_AEQE_TCP_STATE_SHIFT;
3171 iwarp_state = (aeq_info & NES_AEQE_IWARP_STATE_MASK) >> NES_AEQE_IWARP_STATE_SHIFT;
3172 async_event_id = (u16)aeq_info;
3173
3174 context = (unsigned long)nesadapter->qp_table[le32_to_cpu(
3175 aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]) - NES_FIRST_QPN];
3176 if (!context) {
3177 WARN_ON(!context);
3178 return;
3179 }
3180
3181 nesqp = (struct nes_qp *)(unsigned long)context;
3182 spin_lock_irqsave(&nesqp->lock, flags);
3183 nesqp->hw_iwarp_state = iwarp_state;
3184 nesqp->hw_tcp_state = tcp_state;
3185 nesqp->last_aeq = async_event_id;
3186 nesqp->terminate_eventtype = eventtype;
3187 spin_unlock_irqrestore(&nesqp->lock, flags);
3188
3189 if (nesadapter->send_term_ok)
3190 termlen = nes_bld_terminate_hdr(nesqp, async_event_id, aeq_info);
3191 else
3192 mod_qp_flags |= NES_CQP_QP_TERM_DONT_SEND_TERM_MSG;
3193
3194 nes_terminate_start_timer(nesqp);
3195 nesqp->term_flags |= NES_TERM_SENT;
3196 nes_hw_modify_qp(nesdev, nesqp, mod_qp_flags, termlen, 0);
3197}
3198
3199static void nes_terminate_send_fin(struct nes_device *nesdev,
3200 struct nes_qp *nesqp, struct nes_hw_aeqe *aeqe)
3201{
3202 u32 aeq_info;
3203 u16 async_event_id;
3204 u8 tcp_state;
3205 u8 iwarp_state;
3206 unsigned long flags;
3207
3208 aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]);
3209 tcp_state = (aeq_info & NES_AEQE_TCP_STATE_MASK) >> NES_AEQE_TCP_STATE_SHIFT;
3210 iwarp_state = (aeq_info & NES_AEQE_IWARP_STATE_MASK) >> NES_AEQE_IWARP_STATE_SHIFT;
3211 async_event_id = (u16)aeq_info;
3212
3213 spin_lock_irqsave(&nesqp->lock, flags);
3214 nesqp->hw_iwarp_state = iwarp_state;
3215 nesqp->hw_tcp_state = tcp_state;
3216 nesqp->last_aeq = async_event_id;
3217 spin_unlock_irqrestore(&nesqp->lock, flags);
3218
3219 /* Send the fin only */
3220 nes_hw_modify_qp(nesdev, nesqp, NES_CQP_QP_IWARP_STATE_TERMINATE |
3221 NES_CQP_QP_TERM_DONT_SEND_TERM_MSG, 0, 0);
3222}
3223
3224/* Cleanup after a terminate sent or received */
3225static void nes_terminate_done(struct nes_qp *nesqp, int timeout_occurred)
3226{
3227 u32 next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR;
3228 unsigned long flags;
3229 struct nes_vnic *nesvnic = to_nesvnic(nesqp->ibqp.device);
3230 struct nes_device *nesdev = nesvnic->nesdev;
3231 u8 first_time = 0;
3232
3233 spin_lock_irqsave(&nesqp->lock, flags);
3234 if (nesqp->hte_added) {
3235 nesqp->hte_added = 0;
3236 next_iwarp_state |= NES_CQP_QP_DEL_HTE;
3237 }
3238
3239 first_time = (nesqp->term_flags & NES_TERM_DONE) == 0;
3240 nesqp->term_flags |= NES_TERM_DONE;
3241 spin_unlock_irqrestore(&nesqp->lock, flags);
3242
3243 /* Make sure we go through this only once */
3244 if (first_time) {
3245 if (timeout_occurred == 0)
3246 del_timer(&nesqp->terminate_timer);
3247 else
3248 next_iwarp_state |= NES_CQP_QP_RESET;
3249
3250 nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 0);
3251 nes_cm_disconn(nesqp);
3252 }
3253}
3254
3255static void nes_terminate_received(struct nes_device *nesdev,
3256 struct nes_qp *nesqp, struct nes_hw_aeqe *aeqe)
3257{
3258 u32 aeq_info;
3259 u8 *pkt;
3260 u32 *mpa;
3261 u8 ddp_ctl;
3262 u8 rdma_ctl;
3263 u16 aeq_id = 0;
3264
3265 aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]);
3266 if (aeq_info & NES_AEQE_Q2_DATA_WRITTEN) {
3267 /* Terminate is not a performance path so the silicon */
3268 /* did not validate the frame - do it now */
3269 pkt = nesqp->hwqp.q2_vbase + BAD_FRAME_OFFSET;
3270 mpa = (u32 *)locate_mpa(pkt, aeq_info);
3271 ddp_ctl = (be32_to_cpu(mpa[0]) >> 8) & 0xff;
3272 rdma_ctl = be32_to_cpu(mpa[0]) & 0xff;
3273 if ((ddp_ctl & 0xc0) != 0x40)
3274 aeq_id = NES_AEQE_AEID_DDP_LCE_LOCAL_CATASTROPHIC;
3275 else if ((ddp_ctl & 0x03) != 1)
3276 aeq_id = NES_AEQE_AEID_DDP_UBE_INVALID_DDP_VERSION;
3277 else if (be32_to_cpu(mpa[2]) != 2)
3278 aeq_id = NES_AEQE_AEID_DDP_UBE_INVALID_QN;
3279 else if (be32_to_cpu(mpa[3]) != 1)
3280 aeq_id = NES_AEQE_AEID_DDP_INVALID_MSN_GAP_IN_MSN;
3281 else if (be32_to_cpu(mpa[4]) != 0)
3282 aeq_id = NES_AEQE_AEID_DDP_UBE_INVALID_MO;
3283 else if ((rdma_ctl & 0xc0) != 0x40)
3284 aeq_id = NES_AEQE_AEID_RDMAP_ROE_INVALID_RDMAP_VERSION;
3285
3286 if (aeq_id) {
3287 /* Bad terminate recvd - send back a terminate */
3288 aeq_info = (aeq_info & 0xffff0000) | aeq_id;
3289 aeqe->aeqe_words[NES_AEQE_MISC_IDX] = cpu_to_le32(aeq_info);
3290 nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_FATAL);
3291 return;
3292 }
3293 }
3294
3295 nesqp->term_flags |= NES_TERM_RCVD;
3296 nesqp->terminate_eventtype = IB_EVENT_QP_FATAL;
3297 nes_terminate_start_timer(nesqp);
3298 nes_terminate_send_fin(nesdev, nesqp, aeqe);
3299}
3300
3301/* Timeout routine in case terminate fails to complete */
3302static void nes_terminate_timeout(unsigned long context)
3303{
3304 struct nes_qp *nesqp = (struct nes_qp *)(unsigned long)context;
3305
3306 nes_terminate_done(nesqp, 1);
3307}
3308
3309/* Set a timer in case hw cannot complete the terminate sequence */
3310static void nes_terminate_start_timer(struct nes_qp *nesqp)
3311{
3312 init_timer(&nesqp->terminate_timer);
3313 nesqp->terminate_timer.function = nes_terminate_timeout;
3314 nesqp->terminate_timer.expires = jiffies + HZ;
3315 nesqp->terminate_timer.data = (unsigned long)nesqp;
3316 add_timer(&nesqp->terminate_timer);
3317}
3318
2906/** 3319/**
2907 * nes_process_iwarp_aeqe 3320 * nes_process_iwarp_aeqe
2908 */ 3321 */
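
The terminate state machine added in the hunk above arms a one-second timer when a terminate is sent or received, so the QP is still torn down if the hardware never signals completion. A sketch of that timer setup with placeholder names, using the init_timer()-era API that matches the kernel version of this patch (newer kernels would use timer_setup()/from_timer()):

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	struct my_qp {					/* placeholder QP type */
		struct timer_list terminate_timer;
		/* ... */
	};

	extern void example_terminate_done(struct my_qp *qp, int timeout_occurred);

	static void example_terminate_timeout(unsigned long context)
	{
		struct my_qp *qp = (struct my_qp *)context;

		example_terminate_done(qp, 1);		/* 1 => timeout path fired */
	}

	static void example_terminate_start_timer(struct my_qp *qp)
	{
		init_timer(&qp->terminate_timer);
		qp->terminate_timer.function = example_terminate_timeout;
		qp->terminate_timer.data     = (unsigned long)qp;
		qp->terminate_timer.expires  = jiffies + HZ;	/* ~1 second */
		add_timer(&qp->terminate_timer);
	}

On the normal path nes_terminate_done() cancels the timer with del_timer(); if the timer fires instead, the QP is forced to the error/reset state, as the code above shows.
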
@@ -2910,28 +3323,27 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
2910 struct nes_hw_aeqe *aeqe) 3323 struct nes_hw_aeqe *aeqe)
2911{ 3324{
2912 u64 context; 3325 u64 context;
2913 u64 aeqe_context = 0;
2914 unsigned long flags; 3326 unsigned long flags;
2915 struct nes_qp *nesqp; 3327 struct nes_qp *nesqp;
3328 struct nes_hw_cq *hw_cq;
3329 struct nes_cq *nescq;
2916 int resource_allocated; 3330 int resource_allocated;
2917 /* struct iw_cm_id *cm_id; */
2918 struct nes_adapter *nesadapter = nesdev->nesadapter; 3331 struct nes_adapter *nesadapter = nesdev->nesadapter;
2919 struct ib_event ibevent;
2920 /* struct iw_cm_event cm_event; */
2921 u32 aeq_info; 3332 u32 aeq_info;
2922 u32 next_iwarp_state = 0; 3333 u32 next_iwarp_state = 0;
2923 u16 async_event_id; 3334 u16 async_event_id;
2924 u8 tcp_state; 3335 u8 tcp_state;
2925 u8 iwarp_state; 3336 u8 iwarp_state;
3337 int must_disconn = 1;
3338 int must_terminate = 0;
3339 struct ib_event ibevent;
2926 3340
2927 nes_debug(NES_DBG_AEQ, "\n"); 3341 nes_debug(NES_DBG_AEQ, "\n");
2928 aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]); 3342 aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]);
2929 if ((NES_AEQE_INBOUND_RDMA&aeq_info) || (!(NES_AEQE_QP&aeq_info))) { 3343 if ((NES_AEQE_INBOUND_RDMA & aeq_info) || (!(NES_AEQE_QP & aeq_info))) {
2930 context = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_LOW_IDX]); 3344 context = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_LOW_IDX]);
2931 context += ((u64)le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_HIGH_IDX])) << 32; 3345 context += ((u64)le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_HIGH_IDX])) << 32;
2932 } else { 3346 } else {
2933 aeqe_context = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_LOW_IDX]);
2934 aeqe_context += ((u64)le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_HIGH_IDX])) << 32;
2935 context = (unsigned long)nesadapter->qp_table[le32_to_cpu( 3347 context = (unsigned long)nesadapter->qp_table[le32_to_cpu(
2936 aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]) - NES_FIRST_QPN]; 3348 aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]) - NES_FIRST_QPN];
2937 BUG_ON(!context); 3349 BUG_ON(!context);
@@ -2948,7 +3360,11 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
2948 3360
2949 switch (async_event_id) { 3361 switch (async_event_id) {
2950 case NES_AEQE_AEID_LLP_FIN_RECEIVED: 3362 case NES_AEQE_AEID_LLP_FIN_RECEIVED:
2951 nesqp = *((struct nes_qp **)&context); 3363 nesqp = (struct nes_qp *)(unsigned long)context;
3364
3365 if (nesqp->term_flags)
3366 return; /* Ignore it, wait for close complete */
3367
2952 if (atomic_inc_return(&nesqp->close_timer_started) == 1) { 3368 if (atomic_inc_return(&nesqp->close_timer_started) == 1) {
2953 nesqp->cm_id->add_ref(nesqp->cm_id); 3369 nesqp->cm_id->add_ref(nesqp->cm_id);
2954 schedule_nes_timer(nesqp->cm_node, (struct sk_buff *)nesqp, 3370 schedule_nes_timer(nesqp->cm_node, (struct sk_buff *)nesqp,
@@ -2959,18 +3375,24 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
2959 nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount), 3375 nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount),
2960 async_event_id, nesqp->last_aeq, tcp_state); 3376 async_event_id, nesqp->last_aeq, tcp_state);
2961 } 3377 }
3378
2962 if ((tcp_state != NES_AEQE_TCP_STATE_CLOSE_WAIT) || 3379 if ((tcp_state != NES_AEQE_TCP_STATE_CLOSE_WAIT) ||
2963 (nesqp->ibqp_state != IB_QPS_RTS)) { 3380 (nesqp->ibqp_state != IB_QPS_RTS)) {
2964 /* FIN Received but tcp state or IB state moved on, 3381 /* FIN Received but tcp state or IB state moved on,
2965 should expect a close complete */ 3382 should expect a close complete */
2966 return; 3383 return;
2967 } 3384 }
3385
2968 case NES_AEQE_AEID_LLP_CLOSE_COMPLETE: 3386 case NES_AEQE_AEID_LLP_CLOSE_COMPLETE:
3387 nesqp = (struct nes_qp *)(unsigned long)context;
3388 if (nesqp->term_flags) {
3389 nes_terminate_done(nesqp, 0);
3390 return;
3391 }
3392
2969 case NES_AEQE_AEID_LLP_CONNECTION_RESET: 3393 case NES_AEQE_AEID_LLP_CONNECTION_RESET:
2970 case NES_AEQE_AEID_TERMINATE_SENT:
2971 case NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE:
2972 case NES_AEQE_AEID_RESET_SENT: 3394 case NES_AEQE_AEID_RESET_SENT:
2973 nesqp = *((struct nes_qp **)&context); 3395 nesqp = (struct nes_qp *)(unsigned long)context;
2974 if (async_event_id == NES_AEQE_AEID_RESET_SENT) { 3396 if (async_event_id == NES_AEQE_AEID_RESET_SENT) {
2975 tcp_state = NES_AEQE_TCP_STATE_CLOSED; 3397 tcp_state = NES_AEQE_TCP_STATE_CLOSED;
2976 } 3398 }
@@ -2982,12 +3404,7 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
2982 if ((tcp_state == NES_AEQE_TCP_STATE_CLOSED) || 3404 if ((tcp_state == NES_AEQE_TCP_STATE_CLOSED) ||
2983 (tcp_state == NES_AEQE_TCP_STATE_TIME_WAIT)) { 3405 (tcp_state == NES_AEQE_TCP_STATE_TIME_WAIT)) {
2984 nesqp->hte_added = 0; 3406 nesqp->hte_added = 0;
2985 spin_unlock_irqrestore(&nesqp->lock, flags); 3407 next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR | NES_CQP_QP_DEL_HTE;
2986 nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u to remove hte\n",
2987 nesqp->hwqp.qp_id);
2988 nes_hw_modify_qp(nesdev, nesqp,
2989 NES_CQP_QP_IWARP_STATE_ERROR | NES_CQP_QP_DEL_HTE, 0);
2990 spin_lock_irqsave(&nesqp->lock, flags);
2991 } 3408 }
2992 3409
2993 if ((nesqp->ibqp_state == IB_QPS_RTS) && 3410 if ((nesqp->ibqp_state == IB_QPS_RTS) &&
@@ -2999,151 +3416,106 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
2999 nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING; 3416 nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING;
3000 break; 3417 break;
3001 case NES_AEQE_IWARP_STATE_TERMINATE: 3418 case NES_AEQE_IWARP_STATE_TERMINATE:
3002 next_iwarp_state = NES_CQP_QP_IWARP_STATE_TERMINATE; 3419 must_disconn = 0; /* terminate path takes care of disconn */
3003 nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_TERMINATE; 3420 if (nesqp->term_flags == 0)
3004 if (async_event_id == NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) { 3421 must_terminate = 1;
3005 next_iwarp_state |= 0x02000000;
3006 nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
3007 }
3008 break; 3422 break;
3009 default:
3010 next_iwarp_state = 0;
3011 }
3012 spin_unlock_irqrestore(&nesqp->lock, flags);
3013 if (next_iwarp_state) {
3014 nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u. next state = 0x%08X,"
3015 " also added another reference\n",
3016 nesqp->hwqp.qp_id, next_iwarp_state);
3017 nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0);
3018 } 3423 }
3019 nes_cm_disconn(nesqp);
3020 } else { 3424 } else {
3021 if (async_event_id == NES_AEQE_AEID_LLP_FIN_RECEIVED) { 3425 if (async_event_id == NES_AEQE_AEID_LLP_FIN_RECEIVED) {
3022 /* FIN Received but ib state not RTS, 3426 /* FIN Received but ib state not RTS,
3023 close complete will be on its way */ 3427 close complete will be on its way */
3024 spin_unlock_irqrestore(&nesqp->lock, flags); 3428 must_disconn = 0;
3025 return;
3026 }
3027 spin_unlock_irqrestore(&nesqp->lock, flags);
3028 if (async_event_id == NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) {
3029 next_iwarp_state = NES_CQP_QP_IWARP_STATE_TERMINATE | 0x02000000;
3030 nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
3031 nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u. next state = 0x%08X,"
3032 " also added another reference\n",
3033 nesqp->hwqp.qp_id, next_iwarp_state);
3034 nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0);
3035 } 3429 }
3036 nes_cm_disconn(nesqp);
3037 } 3430 }
3038 break;
3039 case NES_AEQE_AEID_LLP_TERMINATE_RECEIVED:
3040 nesqp = *((struct nes_qp **)&context);
3041 spin_lock_irqsave(&nesqp->lock, flags);
3042 nesqp->hw_iwarp_state = iwarp_state;
3043 nesqp->hw_tcp_state = tcp_state;
3044 nesqp->last_aeq = async_event_id;
3045 spin_unlock_irqrestore(&nesqp->lock, flags); 3431 spin_unlock_irqrestore(&nesqp->lock, flags);
3046 nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_LLP_TERMINATE_RECEIVED" 3432
3047 " event on QP%u \n Q2 Data:\n", 3433 if (must_terminate)
3048 nesqp->hwqp.qp_id); 3434 nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_FATAL);
3049 if (nesqp->ibqp.event_handler) { 3435 else if (must_disconn) {
3050 ibevent.device = nesqp->ibqp.device; 3436 if (next_iwarp_state) {
3051 ibevent.element.qp = &nesqp->ibqp; 3437 nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u. next state = 0x%08X\n",
3052 ibevent.event = IB_EVENT_QP_FATAL; 3438 nesqp->hwqp.qp_id, next_iwarp_state);
3053 nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context); 3439 nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 0);
3054 } 3440 }
3055 if ((tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) ||
3056 ((nesqp->ibqp_state == IB_QPS_RTS)&&
3057 (async_event_id == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
3058 nes_cm_disconn(nesqp); 3441 nes_cm_disconn(nesqp);
3059 } else {
3060 nesqp->in_disconnect = 0;
3061 wake_up(&nesqp->kick_waitq);
3062 } 3442 }
3063 break; 3443 break;
3064 case NES_AEQE_AEID_LLP_TOO_MANY_RETRIES: 3444
3065 nesqp = *((struct nes_qp **)&context); 3445 case NES_AEQE_AEID_TERMINATE_SENT:
3066 spin_lock_irqsave(&nesqp->lock, flags); 3446 nesqp = (struct nes_qp *)(unsigned long)context;
3067 nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_ERROR; 3447 nes_terminate_send_fin(nesdev, nesqp, aeqe);
3068 nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
3069 nesqp->last_aeq = async_event_id;
3070 if (nesqp->cm_id) {
3071 nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_LLP_TOO_MANY_RETRIES"
3072 " event on QP%u, remote IP = 0x%08X \n",
3073 nesqp->hwqp.qp_id,
3074 ntohl(nesqp->cm_id->remote_addr.sin_addr.s_addr));
3075 } else {
3076 nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_LLP_TOO_MANY_RETRIES"
3077 " event on QP%u \n",
3078 nesqp->hwqp.qp_id);
3079 }
3080 spin_unlock_irqrestore(&nesqp->lock, flags);
3081 next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR | NES_CQP_QP_RESET;
3082 nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0);
3083 if (nesqp->ibqp.event_handler) {
3084 ibevent.device = nesqp->ibqp.device;
3085 ibevent.element.qp = &nesqp->ibqp;
3086 ibevent.event = IB_EVENT_QP_FATAL;
3087 nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
3088 }
3089 break; 3448 break;
3090 case NES_AEQE_AEID_AMP_BAD_STAG_INDEX: 3449
3091 if (NES_AEQE_INBOUND_RDMA&aeq_info) { 3450 case NES_AEQE_AEID_LLP_TERMINATE_RECEIVED:
3092 nesqp = nesadapter->qp_table[le32_to_cpu( 3451 nesqp = (struct nes_qp *)(unsigned long)context;
3093 aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX])-NES_FIRST_QPN]; 3452 nes_terminate_received(nesdev, nesqp, aeqe);
3094 } else {
3095 /* TODO: get the actual WQE and mask off wqe index */
3096 context &= ~((u64)511);
3097 nesqp = *((struct nes_qp **)&context);
3098 }
3099 spin_lock_irqsave(&nesqp->lock, flags);
3100 nesqp->hw_iwarp_state = iwarp_state;
3101 nesqp->hw_tcp_state = tcp_state;
3102 nesqp->last_aeq = async_event_id;
3103 spin_unlock_irqrestore(&nesqp->lock, flags);
3104 nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_AMP_BAD_STAG_INDEX event on QP%u\n",
3105 nesqp->hwqp.qp_id);
3106 if (nesqp->ibqp.event_handler) {
3107 ibevent.device = nesqp->ibqp.device;
3108 ibevent.element.qp = &nesqp->ibqp;
3109 ibevent.event = IB_EVENT_QP_ACCESS_ERR;
3110 nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
3111 }
3112 break; 3453 break;
3454
3455 case NES_AEQE_AEID_AMP_BAD_STAG_KEY:
3456 case NES_AEQE_AEID_AMP_BAD_STAG_INDEX:
3113 case NES_AEQE_AEID_AMP_UNALLOCATED_STAG: 3457 case NES_AEQE_AEID_AMP_UNALLOCATED_STAG:
3114 nesqp = *((struct nes_qp **)&context); 3458 case NES_AEQE_AEID_AMP_INVALID_STAG:
3115 spin_lock_irqsave(&nesqp->lock, flags); 3459 case NES_AEQE_AEID_AMP_RIGHTS_VIOLATION:
3116 nesqp->hw_iwarp_state = iwarp_state; 3460 case NES_AEQE_AEID_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
3117 nesqp->hw_tcp_state = tcp_state;
3118 nesqp->last_aeq = async_event_id;
3119 spin_unlock_irqrestore(&nesqp->lock, flags);
3120 nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_AMP_UNALLOCATED_STAG event on QP%u\n",
3121 nesqp->hwqp.qp_id);
3122 if (nesqp->ibqp.event_handler) {
3123 ibevent.device = nesqp->ibqp.device;
3124 ibevent.element.qp = &nesqp->ibqp;
3125 ibevent.event = IB_EVENT_QP_ACCESS_ERR;
3126 nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
3127 }
3128 break;
3129 case NES_AEQE_AEID_PRIV_OPERATION_DENIED: 3461 case NES_AEQE_AEID_PRIV_OPERATION_DENIED:
3130 nesqp = nesadapter->qp_table[le32_to_cpu(aeqe->aeqe_words 3462 case NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
3131 [NES_AEQE_COMP_QP_CQ_ID_IDX])-NES_FIRST_QPN]; 3463 case NES_AEQE_AEID_AMP_BOUNDS_VIOLATION:
3132 spin_lock_irqsave(&nesqp->lock, flags); 3464 case NES_AEQE_AEID_AMP_TO_WRAP:
3133 nesqp->hw_iwarp_state = iwarp_state; 3465 nesqp = (struct nes_qp *)(unsigned long)context;
3134 nesqp->hw_tcp_state = tcp_state; 3466 nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_ACCESS_ERR);
3135 nesqp->last_aeq = async_event_id; 3467 break;
3136 spin_unlock_irqrestore(&nesqp->lock, flags); 3468
3137 nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_PRIV_OPERATION_DENIED event on QP%u," 3469 case NES_AEQE_AEID_LLP_SEGMENT_TOO_LARGE:
3138 " nesqp = %p, AE reported %p\n", 3470 case NES_AEQE_AEID_LLP_SEGMENT_TOO_SMALL:
3139 nesqp->hwqp.qp_id, nesqp, *((struct nes_qp **)&context)); 3471 case NES_AEQE_AEID_DDP_UBE_INVALID_MO:
3140 if (nesqp->ibqp.event_handler) { 3472 case NES_AEQE_AEID_DDP_UBE_INVALID_QN:
3141 ibevent.device = nesqp->ibqp.device; 3473 nesqp = (struct nes_qp *)(unsigned long)context;
3142 ibevent.element.qp = &nesqp->ibqp; 3474 if (iwarp_opcode(nesqp, aeq_info) > IWARP_OPCODE_TERM) {
3143 ibevent.event = IB_EVENT_QP_ACCESS_ERR; 3475 aeq_info &= 0xffff0000;
3144 nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context); 3476 aeq_info |= NES_AEQE_AEID_RDMAP_ROE_UNEXPECTED_OPCODE;
3477 aeqe->aeqe_words[NES_AEQE_MISC_IDX] = cpu_to_le32(aeq_info);
3145 } 3478 }
3479
3480 case NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE:
3481 case NES_AEQE_AEID_LLP_TOO_MANY_RETRIES:
3482 case NES_AEQE_AEID_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:
3483 case NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR:
3484 case NES_AEQE_AEID_AMP_BAD_QP:
3485 case NES_AEQE_AEID_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH:
3486 case NES_AEQE_AEID_DDP_LCE_LOCAL_CATASTROPHIC:
3487 case NES_AEQE_AEID_DDP_NO_L_BIT:
3488 case NES_AEQE_AEID_DDP_INVALID_MSN_GAP_IN_MSN:
3489 case NES_AEQE_AEID_DDP_INVALID_MSN_RANGE_IS_NOT_VALID:
3490 case NES_AEQE_AEID_DDP_UBE_INVALID_DDP_VERSION:
3491 case NES_AEQE_AEID_RDMAP_ROE_INVALID_RDMAP_VERSION:
3492 case NES_AEQE_AEID_RDMAP_ROE_UNEXPECTED_OPCODE:
3493 case NES_AEQE_AEID_AMP_BAD_PD:
3494 case NES_AEQE_AEID_AMP_FASTREG_SHARED:
3495 case NES_AEQE_AEID_AMP_FASTREG_VALID_STAG:
3496 case NES_AEQE_AEID_AMP_FASTREG_MW_STAG:
3497 case NES_AEQE_AEID_AMP_FASTREG_INVALID_RIGHTS:
3498 case NES_AEQE_AEID_AMP_FASTREG_PBL_TABLE_OVERFLOW:
3499 case NES_AEQE_AEID_AMP_FASTREG_INVALID_LENGTH:
3500 case NES_AEQE_AEID_AMP_INVALIDATE_SHARED:
3501 case NES_AEQE_AEID_AMP_INVALIDATE_MR_WITH_BOUND_WINDOWS:
3502 case NES_AEQE_AEID_AMP_MWBIND_VALID_STAG:
3503 case NES_AEQE_AEID_AMP_MWBIND_OF_MR_STAG:
3504 case NES_AEQE_AEID_AMP_MWBIND_TO_ZERO_BASED_STAG:
3505 case NES_AEQE_AEID_AMP_MWBIND_TO_MW_STAG:
3506 case NES_AEQE_AEID_AMP_MWBIND_INVALID_RIGHTS:
3507 case NES_AEQE_AEID_AMP_MWBIND_INVALID_BOUNDS:
3508 case NES_AEQE_AEID_AMP_MWBIND_TO_INVALID_PARENT:
3509 case NES_AEQE_AEID_AMP_MWBIND_BIND_DISABLED:
3510 case NES_AEQE_AEID_BAD_CLOSE:
3511 case NES_AEQE_AEID_RDMA_READ_WHILE_ORD_ZERO:
3512 case NES_AEQE_AEID_STAG_ZERO_INVALID:
3513 case NES_AEQE_AEID_ROE_INVALID_RDMA_READ_REQUEST:
3514 case NES_AEQE_AEID_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
3515 nesqp = (struct nes_qp *)(unsigned long)context;
3516 nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_FATAL);
3146 break; 3517 break;
3518
3147 case NES_AEQE_AEID_CQ_OPERATION_ERROR: 3519 case NES_AEQE_AEID_CQ_OPERATION_ERROR:
3148 context <<= 1; 3520 context <<= 1;
3149 nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_CQ_OPERATION_ERROR event on CQ%u, %p\n", 3521 nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_CQ_OPERATION_ERROR event on CQ%u, %p\n",
@@ -3153,83 +3525,19 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
3153 if (resource_allocated) { 3525 if (resource_allocated) {
3154 printk(KERN_ERR PFX "%s: Processing an NES_AEQE_AEID_CQ_OPERATION_ERROR event on CQ%u\n", 3526 printk(KERN_ERR PFX "%s: Processing an NES_AEQE_AEID_CQ_OPERATION_ERROR event on CQ%u\n",
3155 __func__, le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX])); 3527 __func__, le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]));
3528 hw_cq = (struct nes_hw_cq *)(unsigned long)context;
3529 if (hw_cq) {
3530 nescq = container_of(hw_cq, struct nes_cq, hw_cq);
3531 if (nescq->ibcq.event_handler) {
3532 ibevent.device = nescq->ibcq.device;
3533 ibevent.event = IB_EVENT_CQ_ERR;
3534 ibevent.element.cq = &nescq->ibcq;
3535 nescq->ibcq.event_handler(&ibevent, nescq->ibcq.cq_context);
3536 }
3537 }
3156 } 3538 }
3157 break; 3539 break;
3158 case NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER: 3540
3159 nesqp = nesadapter->qp_table[le32_to_cpu(
3160 aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX])-NES_FIRST_QPN];
3161 spin_lock_irqsave(&nesqp->lock, flags);
3162 nesqp->hw_iwarp_state = iwarp_state;
3163 nesqp->hw_tcp_state = tcp_state;
3164 nesqp->last_aeq = async_event_id;
3165 spin_unlock_irqrestore(&nesqp->lock, flags);
3166 nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG"
3167 "_FOR_AVAILABLE_BUFFER event on QP%u\n",
3168 nesqp->hwqp.qp_id);
3169 if (nesqp->ibqp.event_handler) {
3170 ibevent.device = nesqp->ibqp.device;
3171 ibevent.element.qp = &nesqp->ibqp;
3172 ibevent.event = IB_EVENT_QP_ACCESS_ERR;
3173 nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
3174 }
3175 /* tell cm to disconnect, cm will queue work to thread */
3176 nes_cm_disconn(nesqp);
3177 break;
3178 case NES_AEQE_AEID_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:
3179 nesqp = *((struct nes_qp **)&context);
3180 spin_lock_irqsave(&nesqp->lock, flags);
3181 nesqp->hw_iwarp_state = iwarp_state;
3182 nesqp->hw_tcp_state = tcp_state;
3183 nesqp->last_aeq = async_event_id;
3184 spin_unlock_irqrestore(&nesqp->lock, flags);
3185 nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_DDP_UBE_INVALID_MSN"
3186 "_NO_BUFFER_AVAILABLE event on QP%u\n",
3187 nesqp->hwqp.qp_id);
3188 if (nesqp->ibqp.event_handler) {
3189 ibevent.device = nesqp->ibqp.device;
3190 ibevent.element.qp = &nesqp->ibqp;
3191 ibevent.event = IB_EVENT_QP_FATAL;
3192 nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
3193 }
3194 /* tell cm to disconnect, cm will queue work to thread */
3195 nes_cm_disconn(nesqp);
3196 break;
3197 case NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR:
3198 nesqp = *((struct nes_qp **)&context);
3199 spin_lock_irqsave(&nesqp->lock, flags);
3200 nesqp->hw_iwarp_state = iwarp_state;
3201 nesqp->hw_tcp_state = tcp_state;
3202 nesqp->last_aeq = async_event_id;
3203 spin_unlock_irqrestore(&nesqp->lock, flags);
3204 nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR"
3205 " event on QP%u \n Q2 Data:\n",
3206 nesqp->hwqp.qp_id);
3207 if (nesqp->ibqp.event_handler) {
3208 ibevent.device = nesqp->ibqp.device;
3209 ibevent.element.qp = &nesqp->ibqp;
3210 ibevent.event = IB_EVENT_QP_FATAL;
3211 nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
3212 }
3213 /* tell cm to disconnect, cm will queue work to thread */
3214 nes_cm_disconn(nesqp);
3215 break;
3216 /* TODO: additional AEs need to be here */
3217 case NES_AEQE_AEID_AMP_BOUNDS_VIOLATION:
3218 nesqp = *((struct nes_qp **)&context);
3219 spin_lock_irqsave(&nesqp->lock, flags);
3220 nesqp->hw_iwarp_state = iwarp_state;
3221 nesqp->hw_tcp_state = tcp_state;
3222 nesqp->last_aeq = async_event_id;
3223 spin_unlock_irqrestore(&nesqp->lock, flags);
3224 if (nesqp->ibqp.event_handler) {
3225 ibevent.device = nesqp->ibqp.device;
3226 ibevent.element.qp = &nesqp->ibqp;
3227 ibevent.event = IB_EVENT_QP_ACCESS_ERR;
3228 nesqp->ibqp.event_handler(&ibevent,
3229 nesqp->ibqp.qp_context);
3230 }
3231 nes_cm_disconn(nesqp);
3232 break;
3233 default: 3541 default:
3234 nes_debug(NES_DBG_AEQ, "Processing an iWARP related AE for QP, misc = 0x%04X\n", 3542 nes_debug(NES_DBG_AEQ, "Processing an iWARP related AE for QP, misc = 0x%04X\n",
3235 async_event_id); 3543 async_event_id);
@@ -3238,7 +3546,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
3238 3546
3239} 3547}
3240 3548
3241
3242/** 3549/**
3243 * nes_iwarp_ce_handler 3550 * nes_iwarp_ce_handler
3244 */ 3551 */
@@ -3373,6 +3680,8 @@ void flush_wqes(struct nes_device *nesdev, struct nes_qp *nesqp,
3373{ 3680{
3374 struct nes_cqp_request *cqp_request; 3681 struct nes_cqp_request *cqp_request;
3375 struct nes_hw_cqp_wqe *cqp_wqe; 3682 struct nes_hw_cqp_wqe *cqp_wqe;
3683 u32 sq_code = (NES_IWARP_CQE_MAJOR_FLUSH << 16) | NES_IWARP_CQE_MINOR_FLUSH;
3684 u32 rq_code = (NES_IWARP_CQE_MAJOR_FLUSH << 16) | NES_IWARP_CQE_MINOR_FLUSH;
3376 int ret; 3685 int ret;
3377 3686
3378 cqp_request = nes_get_cqp_request(nesdev); 3687 cqp_request = nes_get_cqp_request(nesdev);
@@ -3389,6 +3698,24 @@ void flush_wqes(struct nes_device *nesdev, struct nes_qp *nesqp,
3389 cqp_wqe = &cqp_request->cqp_wqe; 3698 cqp_wqe = &cqp_request->cqp_wqe;
3390 nes_fill_init_cqp_wqe(cqp_wqe, nesdev); 3699 nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
3391 3700
3701 /* If wqe in error was identified, set code to be put into cqe */
3702 if ((nesqp->term_sq_flush_code) && (which_wq & NES_CQP_FLUSH_SQ)) {
3703 which_wq |= NES_CQP_FLUSH_MAJ_MIN;
3704 sq_code = (CQE_MAJOR_DRV << 16) | nesqp->term_sq_flush_code;
3705 nesqp->term_sq_flush_code = 0;
3706 }
3707
3708 if ((nesqp->term_rq_flush_code) && (which_wq & NES_CQP_FLUSH_RQ)) {
3709 which_wq |= NES_CQP_FLUSH_MAJ_MIN;
3710 rq_code = (CQE_MAJOR_DRV << 16) | nesqp->term_rq_flush_code;
3711 nesqp->term_rq_flush_code = 0;
3712 }
3713
3714 if (which_wq & NES_CQP_FLUSH_MAJ_MIN) {
3715 cqp_wqe->wqe_words[NES_CQP_QP_WQE_FLUSH_SQ_CODE] = cpu_to_le32(sq_code);
3716 cqp_wqe->wqe_words[NES_CQP_QP_WQE_FLUSH_RQ_CODE] = cpu_to_le32(rq_code);
3717 }
3718
3392 cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = 3719 cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] =
3393 cpu_to_le32(NES_CQP_FLUSH_WQES | which_wq); 3720 cpu_to_le32(NES_CQP_FLUSH_WQES | which_wq);
3394 cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(nesqp->hwqp.qp_id); 3721 cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(nesqp->hwqp.qp_id);
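
The flush_wqes() change above lets a terminate-initiated flush override the default flush completion status: the flush code chosen by the terminate handler goes into the low 16 bits of the CQP WQE word, with a "driver-defined" major code in the high 16 bits, instead of the generic FLUSH major/minor pair. A minimal sketch of that packing (constant names and the helper are illustrative; the real values come from the nes_iwarp_cqe_major_code/minor_code enums added in nes_hw.h below):

	#include <linux/types.h>

	#define EX_CQE_MAJOR_FLUSH	1	/* generic "flushed" completion */
	#define EX_CQE_MAJOR_DRV	0x8000	/* driver supplies the minor code */
	#define EX_CQE_MINOR_FLUSH	1

	static u32 example_flush_code(u8 term_flush_code)
	{
		if (term_flush_code)	/* e.g. a work-completion error picked by the terminate handler */
			return (EX_CQE_MAJOR_DRV << 16) | term_flush_code;

		return (EX_CQE_MAJOR_FLUSH << 16) | EX_CQE_MINOR_FLUSH;
	}
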
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
index c3654c6383fe..f28a41ba9fa1 100644
--- a/drivers/infiniband/hw/nes/nes_hw.h
+++ b/drivers/infiniband/hw/nes/nes_hw.h
@@ -241,6 +241,7 @@ enum nes_cqp_stag_wqeword_idx {
241}; 241};
242 242
243#define NES_CQP_OP_IWARP_STATE_SHIFT 28 243#define NES_CQP_OP_IWARP_STATE_SHIFT 28
244#define NES_CQP_OP_TERMLEN_SHIFT 28
244 245
245enum nes_cqp_qp_bits { 246enum nes_cqp_qp_bits {
246 NES_CQP_QP_ARP_VALID = (1<<8), 247 NES_CQP_QP_ARP_VALID = (1<<8),
@@ -265,12 +266,16 @@ enum nes_cqp_qp_bits {
265 NES_CQP_QP_IWARP_STATE_TERMINATE = (5<<NES_CQP_OP_IWARP_STATE_SHIFT), 266 NES_CQP_QP_IWARP_STATE_TERMINATE = (5<<NES_CQP_OP_IWARP_STATE_SHIFT),
266 NES_CQP_QP_IWARP_STATE_ERROR = (6<<NES_CQP_OP_IWARP_STATE_SHIFT), 267 NES_CQP_QP_IWARP_STATE_ERROR = (6<<NES_CQP_OP_IWARP_STATE_SHIFT),
267 NES_CQP_QP_IWARP_STATE_MASK = (7<<NES_CQP_OP_IWARP_STATE_SHIFT), 268 NES_CQP_QP_IWARP_STATE_MASK = (7<<NES_CQP_OP_IWARP_STATE_SHIFT),
269 NES_CQP_QP_TERM_DONT_SEND_FIN = (1<<24),
270 NES_CQP_QP_TERM_DONT_SEND_TERM_MSG = (1<<25),
268 NES_CQP_QP_RESET = (1<<31), 271 NES_CQP_QP_RESET = (1<<31),
269}; 272};
270 273
271enum nes_cqp_qp_wqe_word_idx { 274enum nes_cqp_qp_wqe_word_idx {
272 NES_CQP_QP_WQE_CONTEXT_LOW_IDX = 6, 275 NES_CQP_QP_WQE_CONTEXT_LOW_IDX = 6,
273 NES_CQP_QP_WQE_CONTEXT_HIGH_IDX = 7, 276 NES_CQP_QP_WQE_CONTEXT_HIGH_IDX = 7,
277 NES_CQP_QP_WQE_FLUSH_SQ_CODE = 8,
278 NES_CQP_QP_WQE_FLUSH_RQ_CODE = 9,
274 NES_CQP_QP_WQE_NEW_MSS_IDX = 15, 279 NES_CQP_QP_WQE_NEW_MSS_IDX = 15,
275}; 280};
276 281
@@ -361,6 +366,7 @@ enum nes_cqp_arp_bits {
361enum nes_cqp_flush_bits { 366enum nes_cqp_flush_bits {
362 NES_CQP_FLUSH_SQ = (1<<30), 367 NES_CQP_FLUSH_SQ = (1<<30),
363 NES_CQP_FLUSH_RQ = (1<<31), 368 NES_CQP_FLUSH_RQ = (1<<31),
369 NES_CQP_FLUSH_MAJ_MIN = (1<<28),
364}; 370};
365 371
366enum nes_cqe_opcode_bits { 372enum nes_cqe_opcode_bits {
@@ -633,11 +639,14 @@ enum nes_aeqe_bits {
633 NES_AEQE_INBOUND_RDMA = (1<<19), 639 NES_AEQE_INBOUND_RDMA = (1<<19),
634 NES_AEQE_IWARP_STATE_MASK = (7<<20), 640 NES_AEQE_IWARP_STATE_MASK = (7<<20),
635 NES_AEQE_TCP_STATE_MASK = (0xf<<24), 641 NES_AEQE_TCP_STATE_MASK = (0xf<<24),
642 NES_AEQE_Q2_DATA_WRITTEN = (0x3<<28),
636 NES_AEQE_VALID = (1<<31), 643 NES_AEQE_VALID = (1<<31),
637}; 644};
638 645
639#define NES_AEQE_IWARP_STATE_SHIFT 20 646#define NES_AEQE_IWARP_STATE_SHIFT 20
640#define NES_AEQE_TCP_STATE_SHIFT 24 647#define NES_AEQE_TCP_STATE_SHIFT 24
648#define NES_AEQE_Q2_DATA_ETHERNET (1<<28)
649#define NES_AEQE_Q2_DATA_MPA (1<<29)
641 650
642enum nes_aeqe_iwarp_state { 651enum nes_aeqe_iwarp_state {
643 NES_AEQE_IWARP_STATE_NON_EXISTANT = 0, 652 NES_AEQE_IWARP_STATE_NON_EXISTANT = 0,
@@ -751,6 +760,15 @@ enum nes_iwarp_sq_wqe_bits {
751 NES_IWARP_SQ_OP_NOP = 12, 760 NES_IWARP_SQ_OP_NOP = 12,
752}; 761};
753 762
763enum nes_iwarp_cqe_major_code {
764 NES_IWARP_CQE_MAJOR_FLUSH = 1,
765 NES_IWARP_CQE_MAJOR_DRV = 0x8000
766};
767
768enum nes_iwarp_cqe_minor_code {
769 NES_IWARP_CQE_MINOR_FLUSH = 1
770};
771
754#define NES_EEPROM_READ_REQUEST (1<<16) 772#define NES_EEPROM_READ_REQUEST (1<<16)
755#define NES_MAC_ADDR_VALID (1<<20) 773#define NES_MAC_ADDR_VALID (1<<20)
756 774
@@ -1119,6 +1137,7 @@ struct nes_adapter {
1119 u8 netdev_max; /* from host nic address count in EEPROM */ 1137 u8 netdev_max; /* from host nic address count in EEPROM */
1120 u8 port_count; 1138 u8 port_count;
1121 u8 virtwq; 1139 u8 virtwq;
1140 u8 send_term_ok;
1122 u8 et_use_adaptive_rx_coalesce; 1141 u8 et_use_adaptive_rx_coalesce;
1123 u8 adapter_fcn_count; 1142 u8 adapter_fcn_count;
1124 u8 pft_mcast_map[NES_PFT_SIZE]; 1143 u8 pft_mcast_map[NES_PFT_SIZE];
@@ -1217,6 +1236,90 @@ struct nes_ib_device {
1217 u32 num_pd; 1236 u32 num_pd;
1218}; 1237};
1219 1238
1239enum nes_hdrct_flags {
1240 DDP_LEN_FLAG = 0x80,
1241 DDP_HDR_FLAG = 0x40,
1242 RDMA_HDR_FLAG = 0x20
1243};
1244
1245enum nes_term_layers {
1246 LAYER_RDMA = 0,
1247 LAYER_DDP = 1,
1248 LAYER_MPA = 2
1249};
1250
1251enum nes_term_error_types {
1252 RDMAP_CATASTROPHIC = 0,
1253 RDMAP_REMOTE_PROT = 1,
1254 RDMAP_REMOTE_OP = 2,
1255 DDP_CATASTROPHIC = 0,
1256 DDP_TAGGED_BUFFER = 1,
1257 DDP_UNTAGGED_BUFFER = 2,
1258 DDP_LLP = 3
1259};
1260
1261enum nes_term_rdma_errors {
1262 RDMAP_INV_STAG = 0x00,
1263 RDMAP_INV_BOUNDS = 0x01,
1264 RDMAP_ACCESS = 0x02,
1265 RDMAP_UNASSOC_STAG = 0x03,
1266 RDMAP_TO_WRAP = 0x04,
1267 RDMAP_INV_RDMAP_VER = 0x05,
1268 RDMAP_UNEXPECTED_OP = 0x06,
1269 RDMAP_CATASTROPHIC_LOCAL = 0x07,
1270 RDMAP_CATASTROPHIC_GLOBAL = 0x08,
1271 RDMAP_CANT_INV_STAG = 0x09,
1272 RDMAP_UNSPECIFIED = 0xff
1273};
1274
1275enum nes_term_ddp_errors {
1276 DDP_CATASTROPHIC_LOCAL = 0x00,
1277 DDP_TAGGED_INV_STAG = 0x00,
1278 DDP_TAGGED_BOUNDS = 0x01,
1279 DDP_TAGGED_UNASSOC_STAG = 0x02,
1280 DDP_TAGGED_TO_WRAP = 0x03,
1281 DDP_TAGGED_INV_DDP_VER = 0x04,
1282 DDP_UNTAGGED_INV_QN = 0x01,
1283 DDP_UNTAGGED_INV_MSN_NO_BUF = 0x02,
1284 DDP_UNTAGGED_INV_MSN_RANGE = 0x03,
1285 DDP_UNTAGGED_INV_MO = 0x04,
1286 DDP_UNTAGGED_INV_TOO_LONG = 0x05,
1287 DDP_UNTAGGED_INV_DDP_VER = 0x06
1288};
1289
1290enum nes_term_mpa_errors {
1291 MPA_CLOSED = 0x01,
1292 MPA_CRC = 0x02,
1293 MPA_MARKER = 0x03,
1294 MPA_REQ_RSP = 0x04,
1295};
1296
1297struct nes_terminate_hdr {
1298 u8 layer_etype;
1299 u8 error_code;
1300 u8 hdrct;
1301 u8 rsvd;
1302};
1303
1304/* Used to determine how to fill in terminate error codes */
1305#define IWARP_OPCODE_WRITE 0
1306#define IWARP_OPCODE_READREQ 1
1307#define IWARP_OPCODE_READRSP 2
1308#define IWARP_OPCODE_SEND 3
1309#define IWARP_OPCODE_SEND_INV 4
1310#define IWARP_OPCODE_SEND_SE 5
1311#define IWARP_OPCODE_SEND_SE_INV 6
1312#define IWARP_OPCODE_TERM 7
1313
1314/* These values are used only during terminate processing */
1315#define TERM_DDP_LEN_TAGGED 14
1316#define TERM_DDP_LEN_UNTAGGED 18
1317#define TERM_RDMA_LEN 28
1318#define RDMA_OPCODE_MASK 0x0f
1319#define RDMA_READ_REQ_OPCODE 1
1320#define BAD_FRAME_OFFSET 64
1321#define CQE_MAJOR_DRV 0x8000
1322
1220#define nes_vlan_rx vlan_hwaccel_receive_skb 1323#define nes_vlan_rx vlan_hwaccel_receive_skb
1221#define nes_netif_rx netif_receive_skb 1324#define nes_netif_rx netif_receive_skb
1222 1325
diff --git a/drivers/infiniband/hw/nes/nes_utils.c b/drivers/infiniband/hw/nes/nes_utils.c
index a282031d15c7..9687c397ce1a 100644
--- a/drivers/infiniband/hw/nes/nes_utils.c
+++ b/drivers/infiniband/hw/nes/nes_utils.c
@@ -183,6 +183,9 @@ int nes_read_eeprom_values(struct nes_device *nesdev, struct nes_adapter *nesada
183 } else if (((major_ver == 2) && (minor_ver > 21)) || ((major_ver > 2) && (major_ver != 255))) { 183 } else if (((major_ver == 2) && (minor_ver > 21)) || ((major_ver > 2) && (major_ver != 255))) {
184 nesadapter->virtwq = 1; 184 nesadapter->virtwq = 1;
185 } 185 }
186 if (((major_ver == 3) && (minor_ver >= 16)) || (major_ver > 3))
187 nesadapter->send_term_ok = 1;
188
186 nesadapter->firmware_version = (((u32)(u8)(eeprom_data>>8)) << 16) + 189 nesadapter->firmware_version = (((u32)(u8)(eeprom_data>>8)) << 16) +
187 (u32)((u8)eeprom_data); 190 (u32)((u8)eeprom_data);
188 191
@@ -548,7 +551,7 @@ struct nes_cqp_request *nes_get_cqp_request(struct nes_device *nesdev)
548 spin_unlock_irqrestore(&nesdev->cqp.lock, flags); 551 spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
549 } 552 }
550 if (cqp_request == NULL) { 553 if (cqp_request == NULL) {
551 cqp_request = kzalloc(sizeof(struct nes_cqp_request), GFP_KERNEL); 554 cqp_request = kzalloc(sizeof(struct nes_cqp_request), GFP_ATOMIC);
552 if (cqp_request) { 555 if (cqp_request) {
553 cqp_request->dynamic = 1; 556 cqp_request->dynamic = 1;
554 INIT_LIST_HEAD(&cqp_request->list); 557 INIT_LIST_HEAD(&cqp_request->list);
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 21e0fd336cf7..a680c42d6e8c 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -667,15 +667,32 @@ static int nes_query_device(struct ib_device *ibdev, struct ib_device_attr *prop
667 */ 667 */
668static int nes_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *props) 668static int nes_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *props)
669{ 669{
670 struct nes_vnic *nesvnic = to_nesvnic(ibdev);
671 struct net_device *netdev = nesvnic->netdev;
672
670 memset(props, 0, sizeof(*props)); 673 memset(props, 0, sizeof(*props));
671 674
672 props->max_mtu = IB_MTU_2048; 675 props->max_mtu = IB_MTU_4096;
673 props->active_mtu = IB_MTU_2048; 676
677 if (netdev->mtu >= 4096)
678 props->active_mtu = IB_MTU_4096;
679 else if (netdev->mtu >= 2048)
680 props->active_mtu = IB_MTU_2048;
681 else if (netdev->mtu >= 1024)
682 props->active_mtu = IB_MTU_1024;
683 else if (netdev->mtu >= 512)
684 props->active_mtu = IB_MTU_512;
685 else
686 props->active_mtu = IB_MTU_256;
687
674 props->lid = 1; 688 props->lid = 1;
675 props->lmc = 0; 689 props->lmc = 0;
676 props->sm_lid = 0; 690 props->sm_lid = 0;
677 props->sm_sl = 0; 691 props->sm_sl = 0;
678 props->state = IB_PORT_ACTIVE; 692 if (nesvnic->linkup)
693 props->state = IB_PORT_ACTIVE;
694 else
695 props->state = IB_PORT_DOWN;
679 props->phys_state = 0; 696 props->phys_state = 0;
680 props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP | 697 props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
681 IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP; 698 IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
@@ -1506,12 +1523,45 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
1506 1523
1507 1524
1508/** 1525/**
1526 * nes_clean_cq
1527 */
1528static void nes_clean_cq(struct nes_qp *nesqp, struct nes_cq *nescq)
1529{
1530 u32 cq_head;
1531 u32 lo;
1532 u32 hi;
1533 u64 u64temp;
1534 unsigned long flags = 0;
1535
1536 spin_lock_irqsave(&nescq->lock, flags);
1537
1538 cq_head = nescq->hw_cq.cq_head;
1539 while (le32_to_cpu(nescq->hw_cq.cq_vbase[cq_head].cqe_words[NES_CQE_OPCODE_IDX]) & NES_CQE_VALID) {
1540 rmb();
1541 lo = le32_to_cpu(nescq->hw_cq.cq_vbase[cq_head].cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]);
1542 hi = le32_to_cpu(nescq->hw_cq.cq_vbase[cq_head].cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX]);
1543 u64temp = (((u64)hi) << 32) | ((u64)lo);
1544 u64temp &= ~(NES_SW_CONTEXT_ALIGN-1);
1545 if (u64temp == (u64)(unsigned long)nesqp) {
1546 /* Zero the context value so cqe will be ignored */
1547 nescq->hw_cq.cq_vbase[cq_head].cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX] = 0;
1548 nescq->hw_cq.cq_vbase[cq_head].cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX] = 0;
1549 }
1550
1551 if (++cq_head >= nescq->hw_cq.cq_size)
1552 cq_head = 0;
1553 }
1554
1555 spin_unlock_irqrestore(&nescq->lock, flags);
1556}
1557
1558
1559/**
1509 * nes_destroy_qp 1560 * nes_destroy_qp
1510 */ 1561 */
1511static int nes_destroy_qp(struct ib_qp *ibqp) 1562static int nes_destroy_qp(struct ib_qp *ibqp)
1512{ 1563{
1513 struct nes_qp *nesqp = to_nesqp(ibqp); 1564 struct nes_qp *nesqp = to_nesqp(ibqp);
1514 /* struct nes_vnic *nesvnic = to_nesvnic(ibqp->device); */
1515 struct nes_ucontext *nes_ucontext; 1565 struct nes_ucontext *nes_ucontext;
1516 struct ib_qp_attr attr; 1566 struct ib_qp_attr attr;
1517 struct iw_cm_id *cm_id; 1567 struct iw_cm_id *cm_id;
@@ -1548,7 +1598,6 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
1548 nes_debug(NES_DBG_QP, "OFA CM event_handler returned, ret=%d\n", ret); 1598 nes_debug(NES_DBG_QP, "OFA CM event_handler returned, ret=%d\n", ret);
1549 } 1599 }
1550 1600
1551
1552 if (nesqp->user_mode) { 1601 if (nesqp->user_mode) {
1553 if ((ibqp->uobject)&&(ibqp->uobject->context)) { 1602 if ((ibqp->uobject)&&(ibqp->uobject->context)) {
1554 nes_ucontext = to_nesucontext(ibqp->uobject->context); 1603 nes_ucontext = to_nesucontext(ibqp->uobject->context);
@@ -1560,6 +1609,13 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
1560 } 1609 }
1561 if (nesqp->pbl_pbase) 1610 if (nesqp->pbl_pbase)
1562 kunmap(nesqp->page); 1611 kunmap(nesqp->page);
1612 } else {
1613 /* Clean any pending completions from the cq(s) */
1614 if (nesqp->nesscq)
1615 nes_clean_cq(nesqp, nesqp->nesscq);
1616
1617 if ((nesqp->nesrcq) && (nesqp->nesrcq != nesqp->nesscq))
1618 nes_clean_cq(nesqp, nesqp->nesrcq);
1563 } 1619 }
1564 1620
1565 nes_rem_ref(&nesqp->ibqp); 1621 nes_rem_ref(&nesqp->ibqp);
@@ -2884,7 +2940,7 @@ static int nes_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
2884 * nes_hw_modify_qp 2940 * nes_hw_modify_qp
2885 */ 2941 */
2886int nes_hw_modify_qp(struct nes_device *nesdev, struct nes_qp *nesqp, 2942int nes_hw_modify_qp(struct nes_device *nesdev, struct nes_qp *nesqp,
2887 u32 next_iwarp_state, u32 wait_completion) 2943 u32 next_iwarp_state, u32 termlen, u32 wait_completion)
2888{ 2944{
2889 struct nes_hw_cqp_wqe *cqp_wqe; 2945 struct nes_hw_cqp_wqe *cqp_wqe;
2890 /* struct iw_cm_id *cm_id = nesqp->cm_id; */ 2946 /* struct iw_cm_id *cm_id = nesqp->cm_id; */
@@ -2916,6 +2972,13 @@ int nes_hw_modify_qp(struct nes_device *nesdev, struct nes_qp *nesqp,
2916 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, nesqp->hwqp.qp_id); 2972 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, nesqp->hwqp.qp_id);
2917 set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, (u64)nesqp->nesqp_context_pbase); 2973 set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, (u64)nesqp->nesqp_context_pbase);
2918 2974
2975 /* If sending a terminate message, fill in the length (in words) */
2976 if (((next_iwarp_state & NES_CQP_QP_IWARP_STATE_MASK) == NES_CQP_QP_IWARP_STATE_TERMINATE) &&
2977 !(next_iwarp_state & NES_CQP_QP_TERM_DONT_SEND_TERM_MSG)) {
2978 termlen = ((termlen + 3) >> 2) << NES_CQP_OP_TERMLEN_SHIFT;
2979 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_NEW_MSS_IDX, termlen);
2980 }
2981
2919 atomic_set(&cqp_request->refcount, 2); 2982 atomic_set(&cqp_request->refcount, 2);
2920 nes_post_cqp_request(nesdev, cqp_request); 2983 nes_post_cqp_request(nesdev, cqp_request);
2921 2984
@@ -3086,6 +3149,9 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
3086 } 3149 }
3087 nes_debug(NES_DBG_MOD_QP, "QP%u: new state = error\n", 3150 nes_debug(NES_DBG_MOD_QP, "QP%u: new state = error\n",
3088 nesqp->hwqp.qp_id); 3151 nesqp->hwqp.qp_id);
3152 if (nesqp->term_flags)
3153 del_timer(&nesqp->terminate_timer);
3154
3089 next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR; 3155 next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR;
3090 /* next_iwarp_state = (NES_CQP_QP_IWARP_STATE_TERMINATE | 0x02000000); */ 3156 /* next_iwarp_state = (NES_CQP_QP_IWARP_STATE_TERMINATE | 0x02000000); */
3091 if (nesqp->hte_added) { 3157 if (nesqp->hte_added) {
@@ -3163,7 +3229,7 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
3163 3229
3164 if (issue_modify_qp) { 3230 if (issue_modify_qp) {
3165 nes_debug(NES_DBG_MOD_QP, "call nes_hw_modify_qp\n"); 3231 nes_debug(NES_DBG_MOD_QP, "call nes_hw_modify_qp\n");
3166 ret = nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 1); 3232 ret = nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 1);
3167 if (ret) 3233 if (ret)
3168 nes_debug(NES_DBG_MOD_QP, "nes_hw_modify_qp (next_iwarp_state = 0x%08X)" 3234 nes_debug(NES_DBG_MOD_QP, "nes_hw_modify_qp (next_iwarp_state = 0x%08X)"
3169 " failed for QP%u.\n", 3235 " failed for QP%u.\n",
@@ -3328,6 +3394,12 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
3328 head = nesqp->hwqp.sq_head; 3394 head = nesqp->hwqp.sq_head;
3329 3395
3330 while (ib_wr) { 3396 while (ib_wr) {
3397 /* Check for QP error */
3398 if (nesqp->term_flags) {
3399 err = -EINVAL;
3400 break;
3401 }
3402
3331 /* Check for SQ overflow */ 3403 /* Check for SQ overflow */
3332 if (((head + (2 * qsize) - nesqp->hwqp.sq_tail) % qsize) == (qsize - 1)) { 3404 if (((head + (2 * qsize) - nesqp->hwqp.sq_tail) % qsize) == (qsize - 1)) {
3333 err = -EINVAL; 3405 err = -EINVAL;
@@ -3484,6 +3556,12 @@ static int nes_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
3484 head = nesqp->hwqp.rq_head; 3556 head = nesqp->hwqp.rq_head;
3485 3557
3486 while (ib_wr) { 3558 while (ib_wr) {
3559 /* Check for QP error */
3560 if (nesqp->term_flags) {
3561 err = -EINVAL;
3562 break;
3563 }
3564
3487 if (ib_wr->num_sge > nesdev->nesadapter->max_sge) { 3565 if (ib_wr->num_sge > nesdev->nesadapter->max_sge) {
3488 err = -EINVAL; 3566 err = -EINVAL;
3489 break; 3567 break;
@@ -3547,7 +3625,6 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
3547{ 3625{
3548 u64 u64temp; 3626 u64 u64temp;
3549 u64 wrid; 3627 u64 wrid;
3550 /* u64 u64temp; */
3551 unsigned long flags = 0; 3628 unsigned long flags = 0;
3552 struct nes_vnic *nesvnic = to_nesvnic(ibcq->device); 3629 struct nes_vnic *nesvnic = to_nesvnic(ibcq->device);
3553 struct nes_device *nesdev = nesvnic->nesdev; 3630 struct nes_device *nesdev = nesvnic->nesdev;
@@ -3555,12 +3632,13 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
3555 struct nes_qp *nesqp; 3632 struct nes_qp *nesqp;
3556 struct nes_hw_cqe cqe; 3633 struct nes_hw_cqe cqe;
3557 u32 head; 3634 u32 head;
3558 u32 wq_tail; 3635 u32 wq_tail = 0;
3559 u32 cq_size; 3636 u32 cq_size;
3560 u32 cqe_count = 0; 3637 u32 cqe_count = 0;
3561 u32 wqe_index; 3638 u32 wqe_index;
3562 u32 u32temp; 3639 u32 u32temp;
3563 /* u32 counter; */ 3640 u32 move_cq_head = 1;
3641 u32 err_code;
3564 3642
3565 nes_debug(NES_DBG_CQ, "\n"); 3643 nes_debug(NES_DBG_CQ, "\n");
3566 3644
@@ -3570,29 +3648,40 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
3570 cq_size = nescq->hw_cq.cq_size; 3648 cq_size = nescq->hw_cq.cq_size;
3571 3649
3572 while (cqe_count < num_entries) { 3650 while (cqe_count < num_entries) {
3573 if (le32_to_cpu(nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX]) & 3651 if ((le32_to_cpu(nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX]) &
3574 NES_CQE_VALID) { 3652 NES_CQE_VALID) == 0)
3575 /* 3653 break;
3576 * Make sure we read CQ entry contents *after* 3654
3577 * we've checked the valid bit. 3655 /*
3578 */ 3656 * Make sure we read CQ entry contents *after*
3579 rmb(); 3657 * we've checked the valid bit.
3580 3658 */
3581 cqe = nescq->hw_cq.cq_vbase[head]; 3659 rmb();
3582 nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX] = 0; 3660
3583 u32temp = le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]); 3661 cqe = nescq->hw_cq.cq_vbase[head];
3584 wqe_index = u32temp & 3662 u32temp = le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]);
3585 (nesdev->nesadapter->max_qp_wr - 1); 3663 wqe_index = u32temp & (nesdev->nesadapter->max_qp_wr - 1);
3586 u32temp &= ~(NES_SW_CONTEXT_ALIGN-1); 3664 u32temp &= ~(NES_SW_CONTEXT_ALIGN-1);
3587 /* parse CQE, get completion context from WQE (either rq or sq */ 3665 /* parse CQE, get completion context from WQE (either rq or sq) */
3588 u64temp = (((u64)(le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX])))<<32) | 3666 u64temp = (((u64)(le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX])))<<32) |
3589 ((u64)u32temp); 3667 ((u64)u32temp);
3590 nesqp = *((struct nes_qp **)&u64temp); 3668
3669 if (u64temp) {
3670 nesqp = (struct nes_qp *)(unsigned long)u64temp;
3591 memset(entry, 0, sizeof *entry); 3671 memset(entry, 0, sizeof *entry);
3592 if (cqe.cqe_words[NES_CQE_ERROR_CODE_IDX] == 0) { 3672 if (cqe.cqe_words[NES_CQE_ERROR_CODE_IDX] == 0) {
3593 entry->status = IB_WC_SUCCESS; 3673 entry->status = IB_WC_SUCCESS;
3594 } else { 3674 } else {
3595 entry->status = IB_WC_WR_FLUSH_ERR; 3675 err_code = le32_to_cpu(cqe.cqe_words[NES_CQE_ERROR_CODE_IDX]);
3676 if (NES_IWARP_CQE_MAJOR_DRV == (err_code >> 16)) {
3677 entry->status = err_code & 0x0000ffff;
3678
3679 /* The rest of the cqe's will be marked as flushed */
3680 nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_ERROR_CODE_IDX] =
3681 cpu_to_le32((NES_IWARP_CQE_MAJOR_FLUSH << 16) |
3682 NES_IWARP_CQE_MINOR_FLUSH);
3683 } else
3684 entry->status = IB_WC_WR_FLUSH_ERR;
3596 } 3685 }
3597 3686
3598 entry->qp = &nesqp->ibqp; 3687 entry->qp = &nesqp->ibqp;
@@ -3601,20 +3690,18 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
3601 if (le32_to_cpu(cqe.cqe_words[NES_CQE_OPCODE_IDX]) & NES_CQE_SQ) { 3690 if (le32_to_cpu(cqe.cqe_words[NES_CQE_OPCODE_IDX]) & NES_CQE_SQ) {
3602 if (nesqp->skip_lsmm) { 3691 if (nesqp->skip_lsmm) {
3603 nesqp->skip_lsmm = 0; 3692 nesqp->skip_lsmm = 0;
3604 wq_tail = nesqp->hwqp.sq_tail++; 3693 nesqp->hwqp.sq_tail++;
3605 } 3694 }
3606 3695
3607 /* Working on a SQ Completion*/ 3696 /* Working on a SQ Completion*/
3608 wq_tail = wqe_index; 3697 wrid = (((u64)(cpu_to_le32((u32)nesqp->hwqp.sq_vbase[wqe_index].
3609 nesqp->hwqp.sq_tail = (wqe_index+1)&(nesqp->hwqp.sq_size - 1);
3610 wrid = (((u64)(cpu_to_le32((u32)nesqp->hwqp.sq_vbase[wq_tail].
3611 wqe_words[NES_IWARP_SQ_WQE_COMP_SCRATCH_HIGH_IDX]))) << 32) | 3698 wqe_words[NES_IWARP_SQ_WQE_COMP_SCRATCH_HIGH_IDX]))) << 32) |
3612 ((u64)(cpu_to_le32((u32)nesqp->hwqp.sq_vbase[wq_tail]. 3699 ((u64)(cpu_to_le32((u32)nesqp->hwqp.sq_vbase[wqe_index].
3613 wqe_words[NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX]))); 3700 wqe_words[NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX])));
3614 entry->byte_len = le32_to_cpu(nesqp->hwqp.sq_vbase[wq_tail]. 3701 entry->byte_len = le32_to_cpu(nesqp->hwqp.sq_vbase[wqe_index].
3615 wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX]); 3702 wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX]);
3616 3703
3617 switch (le32_to_cpu(nesqp->hwqp.sq_vbase[wq_tail]. 3704 switch (le32_to_cpu(nesqp->hwqp.sq_vbase[wqe_index].
3618 wqe_words[NES_IWARP_SQ_WQE_MISC_IDX]) & 0x3f) { 3705 wqe_words[NES_IWARP_SQ_WQE_MISC_IDX]) & 0x3f) {
3619 case NES_IWARP_SQ_OP_RDMAW: 3706 case NES_IWARP_SQ_OP_RDMAW:
3620 nes_debug(NES_DBG_CQ, "Operation = RDMA WRITE.\n"); 3707 nes_debug(NES_DBG_CQ, "Operation = RDMA WRITE.\n");
@@ -3623,7 +3710,7 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
3623 case NES_IWARP_SQ_OP_RDMAR: 3710 case NES_IWARP_SQ_OP_RDMAR:
3624 nes_debug(NES_DBG_CQ, "Operation = RDMA READ.\n"); 3711 nes_debug(NES_DBG_CQ, "Operation = RDMA READ.\n");
3625 entry->opcode = IB_WC_RDMA_READ; 3712 entry->opcode = IB_WC_RDMA_READ;
3626 entry->byte_len = le32_to_cpu(nesqp->hwqp.sq_vbase[wq_tail]. 3713 entry->byte_len = le32_to_cpu(nesqp->hwqp.sq_vbase[wqe_index].
3627 wqe_words[NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX]); 3714 wqe_words[NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX]);
3628 break; 3715 break;
3629 case NES_IWARP_SQ_OP_SENDINV: 3716 case NES_IWARP_SQ_OP_SENDINV:
@@ -3634,33 +3721,54 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
3634 entry->opcode = IB_WC_SEND; 3721 entry->opcode = IB_WC_SEND;
3635 break; 3722 break;
3636 } 3723 }
3724
3725 nesqp->hwqp.sq_tail = (wqe_index+1)&(nesqp->hwqp.sq_size - 1);
3726 if ((entry->status != IB_WC_SUCCESS) && (nesqp->hwqp.sq_tail != nesqp->hwqp.sq_head)) {
3727 move_cq_head = 0;
3728 wq_tail = nesqp->hwqp.sq_tail;
3729 }
3637 } else { 3730 } else {
3638 /* Working on a RQ Completion*/ 3731 /* Working on a RQ Completion*/
3639 wq_tail = wqe_index;
3640 nesqp->hwqp.rq_tail = (wqe_index+1)&(nesqp->hwqp.rq_size - 1);
3641 entry->byte_len = le32_to_cpu(cqe.cqe_words[NES_CQE_PAYLOAD_LENGTH_IDX]); 3732 entry->byte_len = le32_to_cpu(cqe.cqe_words[NES_CQE_PAYLOAD_LENGTH_IDX]);
3642 wrid = ((u64)(le32_to_cpu(nesqp->hwqp.rq_vbase[wq_tail].wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_LOW_IDX]))) | 3733 wrid = ((u64)(le32_to_cpu(nesqp->hwqp.rq_vbase[wqe_index].wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_LOW_IDX]))) |
3643 ((u64)(le32_to_cpu(nesqp->hwqp.rq_vbase[wq_tail].wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_HIGH_IDX]))<<32); 3734 ((u64)(le32_to_cpu(nesqp->hwqp.rq_vbase[wqe_index].wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_HIGH_IDX]))<<32);
3644 entry->opcode = IB_WC_RECV; 3735 entry->opcode = IB_WC_RECV;
3736
3737 nesqp->hwqp.rq_tail = (wqe_index+1)&(nesqp->hwqp.rq_size - 1);
3738 if ((entry->status != IB_WC_SUCCESS) && (nesqp->hwqp.rq_tail != nesqp->hwqp.rq_head)) {
3739 move_cq_head = 0;
3740 wq_tail = nesqp->hwqp.rq_tail;
3741 }
3645 } 3742 }
3743
3646 entry->wr_id = wrid; 3744 entry->wr_id = wrid;
3745 entry++;
3746 cqe_count++;
3747 }
3647 3748
3749 if (move_cq_head) {
3750 nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX] = 0;
3648 if (++head >= cq_size) 3751 if (++head >= cq_size)
3649 head = 0; 3752 head = 0;
3650 cqe_count++;
3651 nescq->polled_completions++; 3753 nescq->polled_completions++;
3754
3652 if ((nescq->polled_completions > (cq_size / 2)) || 3755 if ((nescq->polled_completions > (cq_size / 2)) ||
3653 (nescq->polled_completions == 255)) { 3756 (nescq->polled_completions == 255)) {
3654 nes_debug(NES_DBG_CQ, "CQ%u Issuing CQE Allocate since more than half of cqes" 3757 nes_debug(NES_DBG_CQ, "CQ%u Issuing CQE Allocate since more than half of cqes"
3655 " are pending %u of %u.\n", 3758 " are pending %u of %u.\n",
3656 nescq->hw_cq.cq_number, nescq->polled_completions, cq_size); 3759 nescq->hw_cq.cq_number, nescq->polled_completions, cq_size);
3657 nes_write32(nesdev->regs+NES_CQE_ALLOC, 3760 nes_write32(nesdev->regs+NES_CQE_ALLOC,
3658 nescq->hw_cq.cq_number | (nescq->polled_completions << 16)); 3761 nescq->hw_cq.cq_number | (nescq->polled_completions << 16));
3659 nescq->polled_completions = 0; 3762 nescq->polled_completions = 0;
3660 } 3763 }
3661 entry++; 3764 } else {
3662 } else 3765 /* Update the wqe index and set status to flush */
3663 break; 3766 wqe_index = le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]);
3767 wqe_index = (wqe_index & (~(nesdev->nesadapter->max_qp_wr - 1))) | wq_tail;
3768 nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX] =
3769 cpu_to_le32(wqe_index);
3770 move_cq_head = 1; /* ready for next pass */
3771 }
3664 } 3772 }
3665 3773
3666 if (nescq->polled_completions) { 3774 if (nescq->polled_completions) {
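
For illustration only (not part of the patch): the active_mtu selection added to nes_query_port() above is a simple threshold mapping from the netdev MTU to the nearest supported IB MTU. A minimal standalone C sketch of that mapping — the enum values below are stand-ins for the kernel's IB_MTU_* constants and the function name is made up:

#include <stdio.h>

/* Stand-ins for the kernel's enum ib_mtu values. */
enum ib_mtu_sketch { MTU_256 = 256, MTU_512 = 512, MTU_1024 = 1024,
		     MTU_2048 = 2048, MTU_4096 = 4096 };

/* Same threshold logic as the nes_query_port() hunk above. */
static enum ib_mtu_sketch active_mtu_for(unsigned int netdev_mtu)
{
	if (netdev_mtu >= 4096)
		return MTU_4096;
	if (netdev_mtu >= 2048)
		return MTU_2048;
	if (netdev_mtu >= 1024)
		return MTU_1024;
	if (netdev_mtu >= 512)
		return MTU_512;
	return MTU_256;
}

int main(void)
{
	unsigned int mtus[] = { 1500, 9000, 600, 256 };
	for (unsigned int i = 0; i < sizeof(mtus) / sizeof(mtus[0]); i++)
		printf("netdev mtu %u -> active_mtu %d\n",
		       mtus[i], active_mtu_for(mtus[i]));
	return 0;
}
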
diff --git a/drivers/infiniband/hw/nes/nes_verbs.h b/drivers/infiniband/hw/nes/nes_verbs.h
index 41c07f29f7c9..89822d75f82e 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.h
+++ b/drivers/infiniband/hw/nes/nes_verbs.h
@@ -40,6 +40,10 @@ struct nes_device;
40#define NES_MAX_USER_DB_REGIONS 4096 40#define NES_MAX_USER_DB_REGIONS 4096
41#define NES_MAX_USER_WQ_REGIONS 4096 41#define NES_MAX_USER_WQ_REGIONS 4096
42 42
43#define NES_TERM_SENT 0x01
44#define NES_TERM_RCVD 0x02
45#define NES_TERM_DONE 0x04
46
43struct nes_ucontext { 47struct nes_ucontext {
44 struct ib_ucontext ibucontext; 48 struct ib_ucontext ibucontext;
45 struct nes_device *nesdev; 49 struct nes_device *nesdev;
@@ -119,6 +123,11 @@ struct nes_wq {
119 spinlock_t lock; 123 spinlock_t lock;
120}; 124};
121 125
126struct disconn_work {
127 struct work_struct work;
128 struct nes_qp *nesqp;
129};
130
122struct iw_cm_id; 131struct iw_cm_id;
123struct ietf_mpa_frame; 132struct ietf_mpa_frame;
124 133
@@ -127,7 +136,6 @@ struct nes_qp {
127 void *allocated_buffer; 136 void *allocated_buffer;
128 struct iw_cm_id *cm_id; 137 struct iw_cm_id *cm_id;
129 struct workqueue_struct *wq; 138 struct workqueue_struct *wq;
130 struct work_struct disconn_work;
131 struct nes_cq *nesscq; 139 struct nes_cq *nesscq;
132 struct nes_cq *nesrcq; 140 struct nes_cq *nesrcq;
133 struct nes_pd *nespd; 141 struct nes_pd *nespd;
@@ -155,9 +163,13 @@ struct nes_qp {
155 void *pbl_vbase; 163 void *pbl_vbase;
156 dma_addr_t pbl_pbase; 164 dma_addr_t pbl_pbase;
157 struct page *page; 165 struct page *page;
166 struct timer_list terminate_timer;
167 enum ib_event_type terminate_eventtype;
158 wait_queue_head_t kick_waitq; 168 wait_queue_head_t kick_waitq;
159 u16 in_disconnect; 169 u16 in_disconnect;
160 u16 private_data_len; 170 u16 private_data_len;
171 u16 term_sq_flush_code;
172 u16 term_rq_flush_code;
161 u8 active_conn; 173 u8 active_conn;
162 u8 skip_lsmm; 174 u8 skip_lsmm;
163 u8 user_mode; 175 u8 user_mode;
@@ -165,7 +177,7 @@ struct nes_qp {
165 u8 hw_iwarp_state; 177 u8 hw_iwarp_state;
166 u8 flush_issued; 178 u8 flush_issued;
167 u8 hw_tcp_state; 179 u8 hw_tcp_state;
168 u8 disconn_pending; 180 u8 term_flags;
169 u8 destroyed; 181 u8 destroyed;
170}; 182};
171#endif /* NES_VERBS_H */ 183#endif /* NES_VERBS_H */
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 181b1f32325f..8f4b4fca2a1d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -31,7 +31,6 @@
31 */ 31 */
32 32
33#include <rdma/ib_cm.h> 33#include <rdma/ib_cm.h>
34#include <rdma/ib_cache.h>
35#include <net/dst.h> 34#include <net/dst.h>
36#include <net/icmp.h> 35#include <net/icmp.h>
37#include <linux/icmpv6.h> 36#include <linux/icmpv6.h>
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index e7e5adf84e84..e35f4a0ea9d5 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -36,7 +36,6 @@
36#include <linux/delay.h> 36#include <linux/delay.h>
37#include <linux/dma-mapping.h> 37#include <linux/dma-mapping.h>
38 38
39#include <rdma/ib_cache.h>
40#include <linux/ip.h> 39#include <linux/ip.h>
41#include <linux/tcp.h> 40#include <linux/tcp.h>
42 41
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index e319d91f60a6..2bf5116deec4 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -604,8 +604,11 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
604 skb_queue_len(&neigh->queue)); 604 skb_queue_len(&neigh->queue));
605 goto err_drop; 605 goto err_drop;
606 } 606 }
607 } else 607 } else {
608 spin_unlock_irqrestore(&priv->lock, flags);
608 ipoib_send(dev, skb, path->ah, IPOIB_QPN(skb_dst(skb)->neighbour->ha)); 609 ipoib_send(dev, skb, path->ah, IPOIB_QPN(skb_dst(skb)->neighbour->ha));
610 return;
611 }
609 } else { 612 } else {
610 neigh->ah = NULL; 613 neigh->ah = NULL;
611 614
@@ -688,7 +691,9 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
688 ipoib_dbg(priv, "Send unicast ARP to %04x\n", 691 ipoib_dbg(priv, "Send unicast ARP to %04x\n",
689 be16_to_cpu(path->pathrec.dlid)); 692 be16_to_cpu(path->pathrec.dlid));
690 693
694 spin_unlock_irqrestore(&priv->lock, flags);
691 ipoib_send(dev, skb, path->ah, IPOIB_QPN(phdr->hwaddr)); 695 ipoib_send(dev, skb, path->ah, IPOIB_QPN(phdr->hwaddr));
696 return;
692 } else if ((path->query || !path_rec_start(dev, path)) && 697 } else if ((path->query || !path_rec_start(dev, path)) &&
693 skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) { 698 skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
694 /* put pseudoheader back on for next time */ 699 /* put pseudoheader back on for next time */
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index a0e97532e714..25874fc680c9 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -720,7 +720,9 @@ out:
720 } 720 }
721 } 721 }
722 722
723 spin_unlock_irqrestore(&priv->lock, flags);
723 ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN); 724 ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN);
725 return;
724 } 726 }
725 727
726unlock: 728unlock:
@@ -758,6 +760,20 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
758 } 760 }
759} 761}
760 762
763static int ipoib_mcast_addr_is_valid(const u8 *addr, unsigned int addrlen,
764 const u8 *broadcast)
765{
766 if (addrlen != INFINIBAND_ALEN)
767 return 0;
768 /* reserved QPN, prefix, scope */
769 if (memcmp(addr, broadcast, 6))
770 return 0;
771 /* signature lower, pkey */
772 if (memcmp(addr + 7, broadcast + 7, 3))
773 return 0;
774 return 1;
775}
776
761void ipoib_mcast_restart_task(struct work_struct *work) 777void ipoib_mcast_restart_task(struct work_struct *work)
762{ 778{
763 struct ipoib_dev_priv *priv = 779 struct ipoib_dev_priv *priv =
@@ -791,6 +807,11 @@ void ipoib_mcast_restart_task(struct work_struct *work)
791 for (mclist = dev->mc_list; mclist; mclist = mclist->next) { 807 for (mclist = dev->mc_list; mclist; mclist = mclist->next) {
792 union ib_gid mgid; 808 union ib_gid mgid;
793 809
810 if (!ipoib_mcast_addr_is_valid(mclist->dmi_addr,
811 mclist->dmi_addrlen,
812 dev->broadcast))
813 continue;
814
794 memcpy(mgid.raw, mclist->dmi_addr + 4, sizeof mgid); 815 memcpy(mgid.raw, mclist->dmi_addr + 4, sizeof mgid);
795 816
796 mcast = __ipoib_mcast_find(dev, &mgid); 817 mcast = __ipoib_mcast_find(dev, &mgid);
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index 95fe0452dae4..6c6a09b1c0fe 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -880,6 +880,14 @@ static unsigned int atkbd_hp_zv6100_forced_release_keys[] = {
880}; 880};
881 881
882/* 882/*
883 * Perform fixup for HP (Compaq) Presario R4000 R4100 R4200 that don't generate
884 * release for their volume buttons
885 */
886static unsigned int atkbd_hp_r4000_forced_release_keys[] = {
887 0xae, 0xb0, -1U
888};
889
890/*
883 * Samsung NC10,NC20 with Fn+F? key release not working 891 * Samsung NC10,NC20 with Fn+F? key release not working
884 */ 892 */
885static unsigned int atkbd_samsung_forced_release_keys[] = { 893static unsigned int atkbd_samsung_forced_release_keys[] = {
@@ -1537,6 +1545,33 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = {
1537 .driver_data = atkbd_hp_zv6100_forced_release_keys, 1545 .driver_data = atkbd_hp_zv6100_forced_release_keys,
1538 }, 1546 },
1539 { 1547 {
1548 .ident = "HP Presario R4000",
1549 .matches = {
1550 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
1551 DMI_MATCH(DMI_PRODUCT_NAME, "Presario R4000"),
1552 },
1553 .callback = atkbd_setup_forced_release,
1554 .driver_data = atkbd_hp_r4000_forced_release_keys,
1555 },
1556 {
1557 .ident = "HP Presario R4100",
1558 .matches = {
1559 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
1560 DMI_MATCH(DMI_PRODUCT_NAME, "Presario R4100"),
1561 },
1562 .callback = atkbd_setup_forced_release,
1563 .driver_data = atkbd_hp_r4000_forced_release_keys,
1564 },
1565 {
1566 .ident = "HP Presario R4200",
1567 .matches = {
1568 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
1569 DMI_MATCH(DMI_PRODUCT_NAME, "Presario R4200"),
1570 },
1571 .callback = atkbd_setup_forced_release,
1572 .driver_data = atkbd_hp_r4000_forced_release_keys,
1573 },
1574 {
1540 .ident = "Inventec Symphony", 1575 .ident = "Inventec Symphony",
1541 .matches = { 1576 .matches = {
1542 DMI_MATCH(DMI_SYS_VENDOR, "INVENTEC"), 1577 DMI_MATCH(DMI_SYS_VENDOR, "INVENTEC"),
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index ae04d8a494e5..ccbf23ece8e3 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -382,6 +382,14 @@ static struct dmi_system_id __initdata i8042_dmi_nomux_table[] = {
382 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro1510"), 382 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro1510"),
383 }, 383 },
384 }, 384 },
385 {
386 .ident = "Acer Aspire 5536",
387 .matches = {
388 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
389 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5536"),
390 DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
391 },
392 },
385 { } 393 { }
386}; 394};
387 395
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index 3710ff88fc10..556acff3952f 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -171,6 +171,14 @@ static int set_chunk_size(struct dm_exception_store *store,
171 */ 171 */
172 chunk_size_ulong = round_up(chunk_size_ulong, PAGE_SIZE >> 9); 172 chunk_size_ulong = round_up(chunk_size_ulong, PAGE_SIZE >> 9);
173 173
174 return dm_exception_store_set_chunk_size(store, chunk_size_ulong,
175 error);
176}
177
178int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
179 unsigned long chunk_size_ulong,
180 char **error)
181{
174 /* Check chunk_size is a power of 2 */ 182 /* Check chunk_size is a power of 2 */
175 if (!is_power_of_2(chunk_size_ulong)) { 183 if (!is_power_of_2(chunk_size_ulong)) {
176 *error = "Chunk size is not a power of 2"; 184 *error = "Chunk size is not a power of 2";
@@ -183,6 +191,11 @@ static int set_chunk_size(struct dm_exception_store *store,
183 return -EINVAL; 191 return -EINVAL;
184 } 192 }
185 193
194 if (chunk_size_ulong > INT_MAX >> SECTOR_SHIFT) {
195 *error = "Chunk size is too high";
196 return -EINVAL;
197 }
198
186 store->chunk_size = chunk_size_ulong; 199 store->chunk_size = chunk_size_ulong;
187 store->chunk_mask = chunk_size_ulong - 1; 200 store->chunk_mask = chunk_size_ulong - 1;
188 store->chunk_shift = ffs(chunk_size_ulong) - 1; 201 store->chunk_shift = ffs(chunk_size_ulong) - 1;
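
For illustration only (not part of the patch): the dm-exception-store hunks above move the validation into a reusable dm_exception_store_set_chunk_size() and add an upper bound of INT_MAX >> SECTOR_SHIFT sectors. A minimal C sketch of just those two checks in isolation — SECTOR_SHIFT is 9 in the kernel, and the function name here is made up:

#include <limits.h>
#include <stdio.h>

#define SECTOR_SHIFT 9

/* Mirror of the power-of-2 and range checks applied to the chunk size. */
static const char *check_chunk_size(unsigned long chunk_sectors)
{
	if (!chunk_sectors || (chunk_sectors & (chunk_sectors - 1)))
		return "Chunk size is not a power of 2";
	if (chunk_sectors > (unsigned long)(INT_MAX >> SECTOR_SHIFT))
		return "Chunk size is too high";
	return NULL; /* acceptable */
}

int main(void)
{
	unsigned long sizes[] = { 16, 24, 1UL << 23 };
	for (int i = 0; i < 3; i++) {
		const char *err = check_chunk_size(sizes[i]);
		printf("%lu sectors: %s\n", sizes[i], err ? err : "ok");
	}
	return 0;
}
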
diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h
index 2442c8c07898..812c71872ba0 100644
--- a/drivers/md/dm-exception-store.h
+++ b/drivers/md/dm-exception-store.h
@@ -168,6 +168,10 @@ static inline chunk_t sector_to_chunk(struct dm_exception_store *store,
168int dm_exception_store_type_register(struct dm_exception_store_type *type); 168int dm_exception_store_type_register(struct dm_exception_store_type *type);
169int dm_exception_store_type_unregister(struct dm_exception_store_type *type); 169int dm_exception_store_type_unregister(struct dm_exception_store_type *type);
170 170
171int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
172 unsigned long chunk_size_ulong,
173 char **error);
174
171int dm_exception_store_create(struct dm_target *ti, int argc, char **argv, 175int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
172 unsigned *args_used, 176 unsigned *args_used,
173 struct dm_exception_store **store); 177 struct dm_exception_store **store);
diff --git a/drivers/md/dm-log-userspace-base.c b/drivers/md/dm-log-userspace-base.c
index e69b96560997..652bd33109e3 100644
--- a/drivers/md/dm-log-userspace-base.c
+++ b/drivers/md/dm-log-userspace-base.c
@@ -21,6 +21,7 @@ struct log_c {
21 struct dm_target *ti; 21 struct dm_target *ti;
22 uint32_t region_size; 22 uint32_t region_size;
23 region_t region_count; 23 region_t region_count;
24 uint64_t luid;
24 char uuid[DM_UUID_LEN]; 25 char uuid[DM_UUID_LEN];
25 26
26 char *usr_argv_str; 27 char *usr_argv_str;
@@ -63,7 +64,7 @@ static int userspace_do_request(struct log_c *lc, const char *uuid,
63 * restored. 64 * restored.
64 */ 65 */
65retry: 66retry:
66 r = dm_consult_userspace(uuid, request_type, data, 67 r = dm_consult_userspace(uuid, lc->luid, request_type, data,
67 data_size, rdata, rdata_size); 68 data_size, rdata, rdata_size);
68 69
69 if (r != -ESRCH) 70 if (r != -ESRCH)
@@ -74,14 +75,15 @@ retry:
74 set_current_state(TASK_INTERRUPTIBLE); 75 set_current_state(TASK_INTERRUPTIBLE);
75 schedule_timeout(2*HZ); 76 schedule_timeout(2*HZ);
76 DMWARN("Attempting to contact userspace log server..."); 77 DMWARN("Attempting to contact userspace log server...");
77 r = dm_consult_userspace(uuid, DM_ULOG_CTR, lc->usr_argv_str, 78 r = dm_consult_userspace(uuid, lc->luid, DM_ULOG_CTR,
79 lc->usr_argv_str,
78 strlen(lc->usr_argv_str) + 1, 80 strlen(lc->usr_argv_str) + 1,
79 NULL, NULL); 81 NULL, NULL);
80 if (!r) 82 if (!r)
81 break; 83 break;
82 } 84 }
83 DMINFO("Reconnected to userspace log server... DM_ULOG_CTR complete"); 85 DMINFO("Reconnected to userspace log server... DM_ULOG_CTR complete");
84 r = dm_consult_userspace(uuid, DM_ULOG_RESUME, NULL, 86 r = dm_consult_userspace(uuid, lc->luid, DM_ULOG_RESUME, NULL,
85 0, NULL, NULL); 87 0, NULL, NULL);
86 if (!r) 88 if (!r)
87 goto retry; 89 goto retry;
@@ -111,10 +113,9 @@ static int build_constructor_string(struct dm_target *ti,
111 return -ENOMEM; 113 return -ENOMEM;
112 } 114 }
113 115
114 for (i = 0, str_size = 0; i < argc; i++) 116 str_size = sprintf(str, "%llu", (unsigned long long)ti->len);
115 str_size += sprintf(str + str_size, "%s ", argv[i]); 117 for (i = 0; i < argc; i++)
116 str_size += sprintf(str + str_size, "%llu", 118 str_size += sprintf(str + str_size, " %s", argv[i]);
117 (unsigned long long)ti->len);
118 119
119 *ctr_str = str; 120 *ctr_str = str;
120 return str_size; 121 return str_size;
@@ -154,6 +155,9 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
154 return -ENOMEM; 155 return -ENOMEM;
155 } 156 }
156 157
158 /* The ptr value is sufficient for local unique id */
159 lc->luid = (uint64_t)lc;
160
157 lc->ti = ti; 161 lc->ti = ti;
158 162
159 if (strlen(argv[0]) > (DM_UUID_LEN - 1)) { 163 if (strlen(argv[0]) > (DM_UUID_LEN - 1)) {
@@ -173,7 +177,7 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
173 } 177 }
174 178
175 /* Send table string */ 179 /* Send table string */
176 r = dm_consult_userspace(lc->uuid, DM_ULOG_CTR, 180 r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_CTR,
177 ctr_str, str_size, NULL, NULL); 181 ctr_str, str_size, NULL, NULL);
178 182
179 if (r == -ESRCH) { 183 if (r == -ESRCH) {
@@ -183,7 +187,7 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
183 187
184 /* Since the region size does not change, get it now */ 188 /* Since the region size does not change, get it now */
185 rdata_size = sizeof(rdata); 189 rdata_size = sizeof(rdata);
186 r = dm_consult_userspace(lc->uuid, DM_ULOG_GET_REGION_SIZE, 190 r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_GET_REGION_SIZE,
187 NULL, 0, (char *)&rdata, &rdata_size); 191 NULL, 0, (char *)&rdata, &rdata_size);
188 192
189 if (r) { 193 if (r) {
@@ -212,7 +216,7 @@ static void userspace_dtr(struct dm_dirty_log *log)
212 int r; 216 int r;
213 struct log_c *lc = log->context; 217 struct log_c *lc = log->context;
214 218
215 r = dm_consult_userspace(lc->uuid, DM_ULOG_DTR, 219 r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_DTR,
216 NULL, 0, 220 NULL, 0,
217 NULL, NULL); 221 NULL, NULL);
218 222
@@ -227,7 +231,7 @@ static int userspace_presuspend(struct dm_dirty_log *log)
227 int r; 231 int r;
228 struct log_c *lc = log->context; 232 struct log_c *lc = log->context;
229 233
230 r = dm_consult_userspace(lc->uuid, DM_ULOG_PRESUSPEND, 234 r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_PRESUSPEND,
231 NULL, 0, 235 NULL, 0,
232 NULL, NULL); 236 NULL, NULL);
233 237
@@ -239,7 +243,7 @@ static int userspace_postsuspend(struct dm_dirty_log *log)
239 int r; 243 int r;
240 struct log_c *lc = log->context; 244 struct log_c *lc = log->context;
241 245
242 r = dm_consult_userspace(lc->uuid, DM_ULOG_POSTSUSPEND, 246 r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_POSTSUSPEND,
243 NULL, 0, 247 NULL, 0,
244 NULL, NULL); 248 NULL, NULL);
245 249
@@ -252,7 +256,7 @@ static int userspace_resume(struct dm_dirty_log *log)
252 struct log_c *lc = log->context; 256 struct log_c *lc = log->context;
253 257
254 lc->in_sync_hint = 0; 258 lc->in_sync_hint = 0;
255 r = dm_consult_userspace(lc->uuid, DM_ULOG_RESUME, 259 r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_RESUME,
256 NULL, 0, 260 NULL, 0,
257 NULL, NULL); 261 NULL, NULL);
258 262
@@ -561,6 +565,7 @@ static int userspace_status(struct dm_dirty_log *log, status_type_t status_type,
561 char *result, unsigned maxlen) 565 char *result, unsigned maxlen)
562{ 566{
563 int r = 0; 567 int r = 0;
568 char *table_args;
564 size_t sz = (size_t)maxlen; 569 size_t sz = (size_t)maxlen;
565 struct log_c *lc = log->context; 570 struct log_c *lc = log->context;
566 571
@@ -577,8 +582,12 @@ static int userspace_status(struct dm_dirty_log *log, status_type_t status_type,
577 break; 582 break;
578 case STATUSTYPE_TABLE: 583 case STATUSTYPE_TABLE:
579 sz = 0; 584 sz = 0;
580 DMEMIT("%s %u %s %s", log->type->name, lc->usr_argc + 1, 585 table_args = strchr(lc->usr_argv_str, ' ');
581 lc->uuid, lc->usr_argv_str); 586 BUG_ON(!table_args); /* There will always be a ' ' */
587 table_args++;
588
589 DMEMIT("%s %u %s %s ", log->type->name, lc->usr_argc,
590 lc->uuid, table_args);
582 break; 591 break;
583 } 592 }
584 return (r) ? 0 : (int)sz; 593 return (r) ? 0 : (int)sz;
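
For illustration only (not part of the patch): the dm-log-userspace hunks above reorder the constructor string so the device length comes first, which is why userspace_status() can later recover the original table arguments by skipping everything up to the first space. A small standalone C sketch of that round trip — the argument values are made up:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Build "<len> <arg0> <arg1> ..." as build_constructor_string() now does. */
	const char *argv[] = { "myuuid", "clustered-disk" };
	unsigned long long ti_len = 204800;
	char str[128];
	int str_size = sprintf(str, "%llu", ti_len);
	for (int i = 0; i < 2; i++)
		str_size += sprintf(str + str_size, " %s", argv[i]);
	printf("ctr string: \"%s\" (%d bytes)\n", str, str_size);

	/* Recover the table arguments the way userspace_status() does. */
	char *table_args = strchr(str, ' ');	/* a ' ' always follows the length */
	table_args = table_args ? table_args + 1 : str;
	printf("table args: \"%s\"\n", table_args);
	return 0;
}
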
diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c
index 8ce74d95ae4d..ba0edad2d048 100644
--- a/drivers/md/dm-log-userspace-transfer.c
+++ b/drivers/md/dm-log-userspace-transfer.c
@@ -147,7 +147,8 @@ static void cn_ulog_callback(void *data)
147 147
148/** 148/**
149 * dm_consult_userspace 149 * dm_consult_userspace
150 * @uuid: log's uuid (must be DM_UUID_LEN in size) 150 * @uuid: log's universal unique identifier (must be DM_UUID_LEN in size)
151 * @luid: log's local unique identifier
151 * @request_type: found in include/linux/dm-log-userspace.h 152 * @request_type: found in include/linux/dm-log-userspace.h
152 * @data: data to tx to the server 153 * @data: data to tx to the server
153 * @data_size: size of data in bytes 154 * @data_size: size of data in bytes
@@ -163,7 +164,7 @@ static void cn_ulog_callback(void *data)
163 * 164 *
164 * Returns: 0 on success, -EXXX on failure 165 * Returns: 0 on success, -EXXX on failure
165 **/ 166 **/
166int dm_consult_userspace(const char *uuid, int request_type, 167int dm_consult_userspace(const char *uuid, uint64_t luid, int request_type,
167 char *data, size_t data_size, 168 char *data, size_t data_size,
168 char *rdata, size_t *rdata_size) 169 char *rdata, size_t *rdata_size)
169{ 170{
@@ -190,6 +191,7 @@ resend:
190 191
191 memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - overhead_size); 192 memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - overhead_size);
192 memcpy(tfr->uuid, uuid, DM_UUID_LEN); 193 memcpy(tfr->uuid, uuid, DM_UUID_LEN);
194 tfr->luid = luid;
193 tfr->seq = dm_ulog_seq++; 195 tfr->seq = dm_ulog_seq++;
194 196
195 /* 197 /*
diff --git a/drivers/md/dm-log-userspace-transfer.h b/drivers/md/dm-log-userspace-transfer.h
index c26d8e4e2710..04ee874f9153 100644
--- a/drivers/md/dm-log-userspace-transfer.h
+++ b/drivers/md/dm-log-userspace-transfer.h
@@ -11,7 +11,7 @@
11 11
12int dm_ulog_tfr_init(void); 12int dm_ulog_tfr_init(void);
13void dm_ulog_tfr_exit(void); 13void dm_ulog_tfr_exit(void);
14int dm_consult_userspace(const char *uuid, int request_type, 14int dm_consult_userspace(const char *uuid, uint64_t luid, int request_type,
15 char *data, size_t data_size, 15 char *data, size_t data_size,
16 char *rdata, size_t *rdata_size); 16 char *rdata, size_t *rdata_size);
17 17
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 9726577cde49..33f179e66bf5 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -648,7 +648,13 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
648 */ 648 */
649 dm_rh_inc_pending(ms->rh, &sync); 649 dm_rh_inc_pending(ms->rh, &sync);
650 dm_rh_inc_pending(ms->rh, &nosync); 650 dm_rh_inc_pending(ms->rh, &nosync);
651 ms->log_failure = dm_rh_flush(ms->rh) ? 1 : 0; 651
652 /*
653 * If the flush fails on a previous call and succeeds here,
654 * we must not reset the log_failure variable. We need
655 * userspace interaction to do that.
656 */
657 ms->log_failure = dm_rh_flush(ms->rh) ? 1 : ms->log_failure;
652 658
653 /* 659 /*
654 * Dispatch io. 660 * Dispatch io.
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 6e3fe4f14934..d5b2e08750d5 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -106,6 +106,13 @@ struct pstore {
106 void *zero_area; 106 void *zero_area;
107 107
108 /* 108 /*
109 * An area used for header. The header can be written
110 * concurrently with metadata (when invalidating the snapshot),
111 * so it needs a separate buffer.
112 */
113 void *header_area;
114
115 /*
109 * Used to keep track of which metadata area the data in 116 * Used to keep track of which metadata area the data in
110 * 'chunk' refers to. 117 * 'chunk' refers to.
111 */ 118 */
@@ -148,16 +155,27 @@ static int alloc_area(struct pstore *ps)
148 */ 155 */
149 ps->area = vmalloc(len); 156 ps->area = vmalloc(len);
150 if (!ps->area) 157 if (!ps->area)
151 return r; 158 goto err_area;
152 159
153 ps->zero_area = vmalloc(len); 160 ps->zero_area = vmalloc(len);
154 if (!ps->zero_area) { 161 if (!ps->zero_area)
155 vfree(ps->area); 162 goto err_zero_area;
156 return r;
157 }
158 memset(ps->zero_area, 0, len); 163 memset(ps->zero_area, 0, len);
159 164
165 ps->header_area = vmalloc(len);
166 if (!ps->header_area)
167 goto err_header_area;
168
160 return 0; 169 return 0;
170
171err_header_area:
172 vfree(ps->zero_area);
173
174err_zero_area:
175 vfree(ps->area);
176
177err_area:
178 return r;
161} 179}
162 180
163static void free_area(struct pstore *ps) 181static void free_area(struct pstore *ps)
@@ -169,6 +187,10 @@ static void free_area(struct pstore *ps)
169 if (ps->zero_area) 187 if (ps->zero_area)
170 vfree(ps->zero_area); 188 vfree(ps->zero_area);
171 ps->zero_area = NULL; 189 ps->zero_area = NULL;
190
191 if (ps->header_area)
192 vfree(ps->header_area);
193 ps->header_area = NULL;
172} 194}
173 195
174struct mdata_req { 196struct mdata_req {
@@ -188,7 +210,8 @@ static void do_metadata(struct work_struct *work)
188/* 210/*
189 * Read or write a chunk aligned and sized block of data from a device. 211 * Read or write a chunk aligned and sized block of data from a device.
190 */ 212 */
191static int chunk_io(struct pstore *ps, chunk_t chunk, int rw, int metadata) 213static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
214 int metadata)
192{ 215{
193 struct dm_io_region where = { 216 struct dm_io_region where = {
194 .bdev = ps->store->cow->bdev, 217 .bdev = ps->store->cow->bdev,
@@ -198,7 +221,7 @@ static int chunk_io(struct pstore *ps, chunk_t chunk, int rw, int metadata)
198 struct dm_io_request io_req = { 221 struct dm_io_request io_req = {
199 .bi_rw = rw, 222 .bi_rw = rw,
200 .mem.type = DM_IO_VMA, 223 .mem.type = DM_IO_VMA,
201 .mem.ptr.vma = ps->area, 224 .mem.ptr.vma = area,
202 .client = ps->io_client, 225 .client = ps->io_client,
203 .notify.fn = NULL, 226 .notify.fn = NULL,
204 }; 227 };
@@ -240,7 +263,7 @@ static int area_io(struct pstore *ps, int rw)
240 263
241 chunk = area_location(ps, ps->current_area); 264 chunk = area_location(ps, ps->current_area);
242 265
243 r = chunk_io(ps, chunk, rw, 0); 266 r = chunk_io(ps, ps->area, chunk, rw, 0);
244 if (r) 267 if (r)
245 return r; 268 return r;
246 269
@@ -254,20 +277,7 @@ static void zero_memory_area(struct pstore *ps)
254 277
255static int zero_disk_area(struct pstore *ps, chunk_t area) 278static int zero_disk_area(struct pstore *ps, chunk_t area)
256{ 279{
257 struct dm_io_region where = { 280 return chunk_io(ps, ps->zero_area, area_location(ps, area), WRITE, 0);
258 .bdev = ps->store->cow->bdev,
259 .sector = ps->store->chunk_size * area_location(ps, area),
260 .count = ps->store->chunk_size,
261 };
262 struct dm_io_request io_req = {
263 .bi_rw = WRITE,
264 .mem.type = DM_IO_VMA,
265 .mem.ptr.vma = ps->zero_area,
266 .client = ps->io_client,
267 .notify.fn = NULL,
268 };
269
270 return dm_io(&io_req, 1, &where, NULL);
271} 281}
272 282
273static int read_header(struct pstore *ps, int *new_snapshot) 283static int read_header(struct pstore *ps, int *new_snapshot)
@@ -276,6 +286,7 @@ static int read_header(struct pstore *ps, int *new_snapshot)
276 struct disk_header *dh; 286 struct disk_header *dh;
277 chunk_t chunk_size; 287 chunk_t chunk_size;
278 int chunk_size_supplied = 1; 288 int chunk_size_supplied = 1;
289 char *chunk_err;
279 290
280 /* 291 /*
281 * Use default chunk size (or hardsect_size, if larger) if none supplied 292 * Use default chunk size (or hardsect_size, if larger) if none supplied
@@ -297,11 +308,11 @@ static int read_header(struct pstore *ps, int *new_snapshot)
297 if (r) 308 if (r)
298 return r; 309 return r;
299 310
300 r = chunk_io(ps, 0, READ, 1); 311 r = chunk_io(ps, ps->header_area, 0, READ, 1);
301 if (r) 312 if (r)
302 goto bad; 313 goto bad;
303 314
304 dh = (struct disk_header *) ps->area; 315 dh = ps->header_area;
305 316
306 if (le32_to_cpu(dh->magic) == 0) { 317 if (le32_to_cpu(dh->magic) == 0) {
307 *new_snapshot = 1; 318 *new_snapshot = 1;
@@ -319,20 +330,25 @@ static int read_header(struct pstore *ps, int *new_snapshot)
319 ps->version = le32_to_cpu(dh->version); 330 ps->version = le32_to_cpu(dh->version);
320 chunk_size = le32_to_cpu(dh->chunk_size); 331 chunk_size = le32_to_cpu(dh->chunk_size);
321 332
322 if (!chunk_size_supplied || ps->store->chunk_size == chunk_size) 333 if (ps->store->chunk_size == chunk_size)
323 return 0; 334 return 0;
324 335
325 DMWARN("chunk size %llu in device metadata overrides " 336 if (chunk_size_supplied)
326 "table chunk size of %llu.", 337 DMWARN("chunk size %llu in device metadata overrides "
327 (unsigned long long)chunk_size, 338 "table chunk size of %llu.",
328 (unsigned long long)ps->store->chunk_size); 339 (unsigned long long)chunk_size,
340 (unsigned long long)ps->store->chunk_size);
329 341
330 /* We had a bogus chunk_size. Fix stuff up. */ 342 /* We had a bogus chunk_size. Fix stuff up. */
331 free_area(ps); 343 free_area(ps);
332 344
333 ps->store->chunk_size = chunk_size; 345 r = dm_exception_store_set_chunk_size(ps->store, chunk_size,
334 ps->store->chunk_mask = chunk_size - 1; 346 &chunk_err);
335 ps->store->chunk_shift = ffs(chunk_size) - 1; 347 if (r) {
348 DMERR("invalid on-disk chunk size %llu: %s.",
349 (unsigned long long)chunk_size, chunk_err);
350 return r;
351 }
336 352
337 r = dm_io_client_resize(sectors_to_pages(ps->store->chunk_size), 353 r = dm_io_client_resize(sectors_to_pages(ps->store->chunk_size),
338 ps->io_client); 354 ps->io_client);
@@ -351,15 +367,15 @@ static int write_header(struct pstore *ps)
351{ 367{
352 struct disk_header *dh; 368 struct disk_header *dh;
353 369
354 memset(ps->area, 0, ps->store->chunk_size << SECTOR_SHIFT); 370 memset(ps->header_area, 0, ps->store->chunk_size << SECTOR_SHIFT);
355 371
356 dh = (struct disk_header *) ps->area; 372 dh = ps->header_area;
357 dh->magic = cpu_to_le32(SNAP_MAGIC); 373 dh->magic = cpu_to_le32(SNAP_MAGIC);
358 dh->valid = cpu_to_le32(ps->valid); 374 dh->valid = cpu_to_le32(ps->valid);
359 dh->version = cpu_to_le32(ps->version); 375 dh->version = cpu_to_le32(ps->version);
360 dh->chunk_size = cpu_to_le32(ps->store->chunk_size); 376 dh->chunk_size = cpu_to_le32(ps->store->chunk_size);
361 377
362 return chunk_io(ps, 0, WRITE, 1); 378 return chunk_io(ps, ps->header_area, 0, WRITE, 1);
363} 379}
364 380
365/* 381/*
@@ -679,6 +695,8 @@ static int persistent_ctr(struct dm_exception_store *store,
679 ps->valid = 1; 695 ps->valid = 1;
680 ps->version = SNAPSHOT_DISK_VERSION; 696 ps->version = SNAPSHOT_DISK_VERSION;
681 ps->area = NULL; 697 ps->area = NULL;
698 ps->zero_area = NULL;
699 ps->header_area = NULL;
682 ps->next_free = 2; /* skipping the header and first area */ 700 ps->next_free = 2; /* skipping the header and first area */
683 ps->current_committed = 0; 701 ps->current_committed = 0;
684 702
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index d573165cd2b7..57f1bf7f3b7a 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1176,6 +1176,15 @@ static int snapshot_status(struct dm_target *ti, status_type_t type,
1176 return 0; 1176 return 0;
1177} 1177}
1178 1178
1179static int snapshot_iterate_devices(struct dm_target *ti,
1180 iterate_devices_callout_fn fn, void *data)
1181{
1182 struct dm_snapshot *snap = ti->private;
1183
1184 return fn(ti, snap->origin, 0, ti->len, data);
1185}
1186
1187
1179/*----------------------------------------------------------------- 1188/*-----------------------------------------------------------------
1180 * Origin methods 1189 * Origin methods
1181 *---------------------------------------------------------------*/ 1190 *---------------------------------------------------------------*/
@@ -1410,20 +1419,29 @@ static int origin_status(struct dm_target *ti, status_type_t type, char *result,
1410 return 0; 1419 return 0;
1411} 1420}
1412 1421
1422static int origin_iterate_devices(struct dm_target *ti,
1423 iterate_devices_callout_fn fn, void *data)
1424{
1425 struct dm_dev *dev = ti->private;
1426
1427 return fn(ti, dev, 0, ti->len, data);
1428}
1429
1413static struct target_type origin_target = { 1430static struct target_type origin_target = {
1414 .name = "snapshot-origin", 1431 .name = "snapshot-origin",
1415 .version = {1, 6, 0}, 1432 .version = {1, 7, 0},
1416 .module = THIS_MODULE, 1433 .module = THIS_MODULE,
1417 .ctr = origin_ctr, 1434 .ctr = origin_ctr,
1418 .dtr = origin_dtr, 1435 .dtr = origin_dtr,
1419 .map = origin_map, 1436 .map = origin_map,
1420 .resume = origin_resume, 1437 .resume = origin_resume,
1421 .status = origin_status, 1438 .status = origin_status,
1439 .iterate_devices = origin_iterate_devices,
1422}; 1440};
1423 1441
1424static struct target_type snapshot_target = { 1442static struct target_type snapshot_target = {
1425 .name = "snapshot", 1443 .name = "snapshot",
1426 .version = {1, 6, 0}, 1444 .version = {1, 7, 0},
1427 .module = THIS_MODULE, 1445 .module = THIS_MODULE,
1428 .ctr = snapshot_ctr, 1446 .ctr = snapshot_ctr,
1429 .dtr = snapshot_dtr, 1447 .dtr = snapshot_dtr,
@@ -1431,6 +1449,7 @@ static struct target_type snapshot_target = {
1431 .end_io = snapshot_end_io, 1449 .end_io = snapshot_end_io,
1432 .resume = snapshot_resume, 1450 .resume = snapshot_resume,
1433 .status = snapshot_status, 1451 .status = snapshot_status,
1452 .iterate_devices = snapshot_iterate_devices,
1434}; 1453};
1435 1454
1436static int __init dm_snapshot_init(void) 1455static int __init dm_snapshot_init(void)
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 4e0e5937e42a..3e563d251733 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -329,9 +329,19 @@ static int stripe_iterate_devices(struct dm_target *ti,
329 return ret; 329 return ret;
330} 330}
331 331
332static void stripe_io_hints(struct dm_target *ti,
333 struct queue_limits *limits)
334{
335 struct stripe_c *sc = ti->private;
336 unsigned chunk_size = (sc->chunk_mask + 1) << 9;
337
338 blk_limits_io_min(limits, chunk_size);
339 limits->io_opt = chunk_size * sc->stripes;
340}
341
332static struct target_type stripe_target = { 342static struct target_type stripe_target = {
333 .name = "striped", 343 .name = "striped",
334 .version = {1, 2, 0}, 344 .version = {1, 3, 0},
335 .module = THIS_MODULE, 345 .module = THIS_MODULE,
336 .ctr = stripe_ctr, 346 .ctr = stripe_ctr,
337 .dtr = stripe_dtr, 347 .dtr = stripe_dtr,
@@ -339,6 +349,7 @@ static struct target_type stripe_target = {
339 .end_io = stripe_end_io, 349 .end_io = stripe_end_io,
340 .status = stripe_status, 350 .status = stripe_status,
341 .iterate_devices = stripe_iterate_devices, 351 .iterate_devices = stripe_iterate_devices,
352 .io_hints = stripe_io_hints,
342}; 353};
343 354
344int __init dm_stripe_init(void) 355int __init dm_stripe_init(void)
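stripe_io_hints() turns the stripe geometry into block-layer hints: sc->chunk_mask + 1 is the chunk size in sectors, << 9 converts it to bytes, blk_limits_io_min() makes that the minimum preferred I/O size, and io_opt advertises one full stripe. With illustrative numbers: a 128-sector chunk (chunk_mask = 127) across 4 stripes gives chunk_size = 128 << 9 = 64 KiB, so io_min = 64 KiB and io_opt = 256 KiB.
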
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index d952b3441913..1a6cb3c7822e 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -343,10 +343,10 @@ static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
343} 343}
344 344
345/* 345/*
346 * If possible, this checks an area of a destination device is valid. 346 * If possible, this checks an area of a destination device is invalid.
347 */ 347 */
348static int device_area_is_valid(struct dm_target *ti, struct dm_dev *dev, 348static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
349 sector_t start, sector_t len, void *data) 349 sector_t start, sector_t len, void *data)
350{ 350{
351 struct queue_limits *limits = data; 351 struct queue_limits *limits = data;
352 struct block_device *bdev = dev->bdev; 352 struct block_device *bdev = dev->bdev;
@@ -357,36 +357,40 @@ static int device_area_is_valid(struct dm_target *ti, struct dm_dev *dev,
357 char b[BDEVNAME_SIZE]; 357 char b[BDEVNAME_SIZE];
358 358
359 if (!dev_size) 359 if (!dev_size)
360 return 1; 360 return 0;
361 361
362 if ((start >= dev_size) || (start + len > dev_size)) { 362 if ((start >= dev_size) || (start + len > dev_size)) {
363 DMWARN("%s: %s too small for target", 363 DMWARN("%s: %s too small for target: "
364 dm_device_name(ti->table->md), bdevname(bdev, b)); 364 "start=%llu, len=%llu, dev_size=%llu",
365 return 0; 365 dm_device_name(ti->table->md), bdevname(bdev, b),
366 (unsigned long long)start,
367 (unsigned long long)len,
368 (unsigned long long)dev_size);
369 return 1;
366 } 370 }
367 371
368 if (logical_block_size_sectors <= 1) 372 if (logical_block_size_sectors <= 1)
369 return 1; 373 return 0;
370 374
371 if (start & (logical_block_size_sectors - 1)) { 375 if (start & (logical_block_size_sectors - 1)) {
372 DMWARN("%s: start=%llu not aligned to h/w " 376 DMWARN("%s: start=%llu not aligned to h/w "
373 "logical block size %hu of %s", 377 "logical block size %u of %s",
374 dm_device_name(ti->table->md), 378 dm_device_name(ti->table->md),
375 (unsigned long long)start, 379 (unsigned long long)start,
376 limits->logical_block_size, bdevname(bdev, b)); 380 limits->logical_block_size, bdevname(bdev, b));
377 return 0; 381 return 1;
378 } 382 }
379 383
380 if (len & (logical_block_size_sectors - 1)) { 384 if (len & (logical_block_size_sectors - 1)) {
381 DMWARN("%s: len=%llu not aligned to h/w " 385 DMWARN("%s: len=%llu not aligned to h/w "
382 "logical block size %hu of %s", 386 "logical block size %u of %s",
383 dm_device_name(ti->table->md), 387 dm_device_name(ti->table->md),
384 (unsigned long long)len, 388 (unsigned long long)len,
385 limits->logical_block_size, bdevname(bdev, b)); 389 limits->logical_block_size, bdevname(bdev, b));
386 return 0; 390 return 1;
387 } 391 }
388 392
389 return 1; 393 return 0;
390} 394}
391 395
392/* 396/*
@@ -496,8 +500,15 @@ int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
496 } 500 }
497 501
498 if (blk_stack_limits(limits, &q->limits, start << 9) < 0) 502 if (blk_stack_limits(limits, &q->limits, start << 9) < 0)
499 DMWARN("%s: target device %s is misaligned", 503 DMWARN("%s: target device %s is misaligned: "
500 dm_device_name(ti->table->md), bdevname(bdev, b)); 504 "physical_block_size=%u, logical_block_size=%u, "
505 "alignment_offset=%u, start=%llu",
506 dm_device_name(ti->table->md), bdevname(bdev, b),
507 q->limits.physical_block_size,
508 q->limits.logical_block_size,
509 q->limits.alignment_offset,
510 (unsigned long long) start << 9);
511
501 512
502 /* 513 /*
503 * Check if merge fn is supported. 514 * Check if merge fn is supported.
@@ -698,7 +709,7 @@ static int validate_hardware_logical_block_alignment(struct dm_table *table,
698 709
699 if (remaining) { 710 if (remaining) {
700 DMWARN("%s: table line %u (start sect %llu len %llu) " 711 DMWARN("%s: table line %u (start sect %llu len %llu) "
701 "not aligned to h/w logical block size %hu", 712 "not aligned to h/w logical block size %u",
702 dm_device_name(table->md), i, 713 dm_device_name(table->md), i,
703 (unsigned long long) ti->begin, 714 (unsigned long long) ti->begin,
704 (unsigned long long) ti->len, 715 (unsigned long long) ti->len,
@@ -996,12 +1007,16 @@ int dm_calculate_queue_limits(struct dm_table *table,
996 ti->type->iterate_devices(ti, dm_set_device_limits, 1007 ti->type->iterate_devices(ti, dm_set_device_limits,
997 &ti_limits); 1008 &ti_limits);
998 1009
1010 /* Set I/O hints portion of queue limits */
1011 if (ti->type->io_hints)
1012 ti->type->io_hints(ti, &ti_limits);
1013
999 /* 1014 /*
1000 * Check each device area is consistent with the target's 1015 * Check each device area is consistent with the target's
1001 * overall queue limits. 1016 * overall queue limits.
1002 */ 1017 */
1003 if (!ti->type->iterate_devices(ti, device_area_is_valid, 1018 if (ti->type->iterate_devices(ti, device_area_is_invalid,
1004 &ti_limits)) 1019 &ti_limits))
1005 return -EINVAL; 1020 return -EINVAL;
1006 1021
1007combine_limits: 1022combine_limits:
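Renaming device_area_is_valid() to device_area_is_invalid() and inverting its return value matches how the targets' iterate_devices methods aggregate results: iteration stops as soon as the callout returns non-zero, so a predicate that is non-zero only on failure gets applied to every device, whereas a "valid" predicate stops after the first device that passes. The loop shape this relies on, condensed from stripe_iterate_devices() (field names recalled from that driver, so treat them as approximate):

	int ret = 0;
	unsigned i = 0;

	do
		/* a non-zero return from the callout ends the walk early */
		ret = fn(ti, sc->stripe[i].dev, sc->stripe[i].physical_start,
			 sc->stripe_width, data);
	while (!ret && ++i < sc->stripes);

	return ret;
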
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 8a311ea0d441..b4845b14740d 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -738,16 +738,22 @@ static void rq_completed(struct mapped_device *md, int run_queue)
738 dm_put(md); 738 dm_put(md);
739} 739}
740 740
741static void free_rq_clone(struct request *clone)
742{
743 struct dm_rq_target_io *tio = clone->end_io_data;
744
745 blk_rq_unprep_clone(clone);
746 free_rq_tio(tio);
747}
748
741static void dm_unprep_request(struct request *rq) 749static void dm_unprep_request(struct request *rq)
742{ 750{
743 struct request *clone = rq->special; 751 struct request *clone = rq->special;
744 struct dm_rq_target_io *tio = clone->end_io_data;
745 752
746 rq->special = NULL; 753 rq->special = NULL;
747 rq->cmd_flags &= ~REQ_DONTPREP; 754 rq->cmd_flags &= ~REQ_DONTPREP;
748 755
749 blk_rq_unprep_clone(clone); 756 free_rq_clone(clone);
750 free_rq_tio(tio);
751} 757}
752 758
753/* 759/*
@@ -825,8 +831,7 @@ static void dm_end_request(struct request *clone, int error)
825 rq->sense_len = clone->sense_len; 831 rq->sense_len = clone->sense_len;
826 } 832 }
827 833
828 BUG_ON(clone->bio); 834 free_rq_clone(clone);
829 free_rq_tio(tio);
830 835
831 blk_end_request_all(rq, error); 836 blk_end_request_all(rq, error);
832 837
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index ae5fe91867e1..10ed195c0c1c 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -736,7 +736,7 @@ static int __devinit m25p_probe(struct spi_device *spi)
736 flash->partitioned = 1; 736 flash->partitioned = 1;
737 return add_mtd_partitions(&flash->mtd, parts, nr_parts); 737 return add_mtd_partitions(&flash->mtd, parts, nr_parts);
738 } 738 }
739 } else if (data->nr_parts) 739 } else if (data && data->nr_parts)
740 dev_warn(&spi->dev, "ignoring %d default partitions on %s\n", 740 dev_warn(&spi->dev, "ignoring %d default partitions on %s\n",
741 data->nr_parts, data->name); 741 data->nr_parts, data->name);
742 742
diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
index fb86cacd5bdb..1002e1882996 100644
--- a/drivers/mtd/nftlcore.c
+++ b/drivers/mtd/nftlcore.c
@@ -135,16 +135,17 @@ static void nftl_remove_dev(struct mtd_blktrans_dev *dev)
135int nftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len, 135int nftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len,
136 size_t *retlen, uint8_t *buf) 136 size_t *retlen, uint8_t *buf)
137{ 137{
138 loff_t mask = mtd->writesize - 1;
138 struct mtd_oob_ops ops; 139 struct mtd_oob_ops ops;
139 int res; 140 int res;
140 141
141 ops.mode = MTD_OOB_PLACE; 142 ops.mode = MTD_OOB_PLACE;
142 ops.ooboffs = offs & (mtd->writesize - 1); 143 ops.ooboffs = offs & mask;
143 ops.ooblen = len; 144 ops.ooblen = len;
144 ops.oobbuf = buf; 145 ops.oobbuf = buf;
145 ops.datbuf = NULL; 146 ops.datbuf = NULL;
146 147
147 res = mtd->read_oob(mtd, offs & ~(mtd->writesize - 1), &ops); 148 res = mtd->read_oob(mtd, offs & ~mask, &ops);
148 *retlen = ops.oobretlen; 149 *retlen = ops.oobretlen;
149 return res; 150 return res;
150} 151}
@@ -155,16 +156,17 @@ int nftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len,
155int nftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len, 156int nftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len,
156 size_t *retlen, uint8_t *buf) 157 size_t *retlen, uint8_t *buf)
157{ 158{
159 loff_t mask = mtd->writesize - 1;
158 struct mtd_oob_ops ops; 160 struct mtd_oob_ops ops;
159 int res; 161 int res;
160 162
161 ops.mode = MTD_OOB_PLACE; 163 ops.mode = MTD_OOB_PLACE;
162 ops.ooboffs = offs & (mtd->writesize - 1); 164 ops.ooboffs = offs & mask;
163 ops.ooblen = len; 165 ops.ooblen = len;
164 ops.oobbuf = buf; 166 ops.oobbuf = buf;
165 ops.datbuf = NULL; 167 ops.datbuf = NULL;
166 168
167 res = mtd->write_oob(mtd, offs & ~(mtd->writesize - 1), &ops); 169 res = mtd->write_oob(mtd, offs & ~mask, &ops);
168 *retlen = ops.oobretlen; 170 *retlen = ops.oobretlen;
169 return res; 171 return res;
170} 172}
@@ -177,17 +179,18 @@ int nftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len,
177static int nftl_write(struct mtd_info *mtd, loff_t offs, size_t len, 179static int nftl_write(struct mtd_info *mtd, loff_t offs, size_t len,
178 size_t *retlen, uint8_t *buf, uint8_t *oob) 180 size_t *retlen, uint8_t *buf, uint8_t *oob)
179{ 181{
182 loff_t mask = mtd->writesize - 1;
180 struct mtd_oob_ops ops; 183 struct mtd_oob_ops ops;
181 int res; 184 int res;
182 185
183 ops.mode = MTD_OOB_PLACE; 186 ops.mode = MTD_OOB_PLACE;
184 ops.ooboffs = offs; 187 ops.ooboffs = offs & mask;
185 ops.ooblen = mtd->oobsize; 188 ops.ooblen = mtd->oobsize;
186 ops.oobbuf = oob; 189 ops.oobbuf = oob;
187 ops.datbuf = buf; 190 ops.datbuf = buf;
188 ops.len = len; 191 ops.len = len;
189 192
190 res = mtd->write_oob(mtd, offs & ~(mtd->writesize - 1), &ops); 193 res = mtd->write_oob(mtd, offs & ~mask, &ops);
191 *retlen = ops.retlen; 194 *retlen = ops.retlen;
192 return res; 195 return res;
193} 196}
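Besides hoisting the page mask into a local, the nftl_write() hunk also fixes ops.ooboffs: it now carries only the offset within the page (offs & mask) instead of the full device offset, matching the two OOB helpers above. Worked example with a 2048-byte writesize: mask = 0x7ff, so for offs = 0x1234 the OOB data goes at ooboffs = 0x234 within the page addressed by offs & ~mask = 0x1000.
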
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index fb5df5c6203e..c97ab82ec743 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -1286,6 +1286,7 @@ static int cxgb_open(struct net_device *dev)
1286 if (!other_ports) 1286 if (!other_ports)
1287 schedule_chk_task(adapter); 1287 schedule_chk_task(adapter);
1288 1288
1289 cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_UP, pi->port_id);
1289 return 0; 1290 return 0;
1290} 1291}
1291 1292
@@ -1318,6 +1319,7 @@ static int cxgb_close(struct net_device *dev)
1318 if (!adapter->open_device_map) 1319 if (!adapter->open_device_map)
1319 cxgb_down(adapter); 1320 cxgb_down(adapter);
1320 1321
1322 cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id);
1321 return 0; 1323 return 0;
1322} 1324}
1323 1325
@@ -2717,7 +2719,7 @@ static int t3_adapter_error(struct adapter *adapter, int reset)
2717 2719
2718 if (is_offload(adapter) && 2720 if (is_offload(adapter) &&
2719 test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) { 2721 test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2720 cxgb3_err_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0); 2722 cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
2721 offload_close(&adapter->tdev); 2723 offload_close(&adapter->tdev);
2722 } 2724 }
2723 2725
@@ -2782,7 +2784,7 @@ static void t3_resume_ports(struct adapter *adapter)
2782 } 2784 }
2783 2785
2784 if (is_offload(adapter) && !ofld_disable) 2786 if (is_offload(adapter) && !ofld_disable)
2785 cxgb3_err_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0); 2787 cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
2786} 2788}
2787 2789
2788/* 2790/*
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index f9f54b57b28c..75064eea1d87 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -153,14 +153,14 @@ void cxgb3_remove_clients(struct t3cdev *tdev)
153 mutex_unlock(&cxgb3_db_lock); 153 mutex_unlock(&cxgb3_db_lock);
154} 154}
155 155
156void cxgb3_err_notify(struct t3cdev *tdev, u32 status, u32 error) 156void cxgb3_event_notify(struct t3cdev *tdev, u32 event, u32 port)
157{ 157{
158 struct cxgb3_client *client; 158 struct cxgb3_client *client;
159 159
160 mutex_lock(&cxgb3_db_lock); 160 mutex_lock(&cxgb3_db_lock);
161 list_for_each_entry(client, &client_list, client_list) { 161 list_for_each_entry(client, &client_list, client_list) {
162 if (client->err_handler) 162 if (client->event_handler)
163 client->err_handler(tdev, status, error); 163 client->event_handler(tdev, event, port);
164 } 164 }
165 mutex_unlock(&cxgb3_db_lock); 165 mutex_unlock(&cxgb3_db_lock);
166} 166}
diff --git a/drivers/net/cxgb3/cxgb3_offload.h b/drivers/net/cxgb3/cxgb3_offload.h
index 55945f422aec..670aa62042da 100644
--- a/drivers/net/cxgb3/cxgb3_offload.h
+++ b/drivers/net/cxgb3/cxgb3_offload.h
@@ -64,14 +64,16 @@ void cxgb3_register_client(struct cxgb3_client *client);
64void cxgb3_unregister_client(struct cxgb3_client *client); 64void cxgb3_unregister_client(struct cxgb3_client *client);
65void cxgb3_add_clients(struct t3cdev *tdev); 65void cxgb3_add_clients(struct t3cdev *tdev);
66void cxgb3_remove_clients(struct t3cdev *tdev); 66void cxgb3_remove_clients(struct t3cdev *tdev);
67void cxgb3_err_notify(struct t3cdev *tdev, u32 status, u32 error); 67void cxgb3_event_notify(struct t3cdev *tdev, u32 event, u32 port);
68 68
69typedef int (*cxgb3_cpl_handler_func)(struct t3cdev *dev, 69typedef int (*cxgb3_cpl_handler_func)(struct t3cdev *dev,
70 struct sk_buff *skb, void *ctx); 70 struct sk_buff *skb, void *ctx);
71 71
72enum { 72enum {
73 OFFLOAD_STATUS_UP, 73 OFFLOAD_STATUS_UP,
74 OFFLOAD_STATUS_DOWN 74 OFFLOAD_STATUS_DOWN,
75 OFFLOAD_PORT_DOWN,
76 OFFLOAD_PORT_UP
75}; 77};
76 78
77struct cxgb3_client { 79struct cxgb3_client {
@@ -82,7 +84,7 @@ struct cxgb3_client {
82 int (*redirect)(void *ctx, struct dst_entry *old, 84 int (*redirect)(void *ctx, struct dst_entry *old,
83 struct dst_entry *new, struct l2t_entry *l2t); 85 struct dst_entry *new, struct l2t_entry *l2t);
84 struct list_head client_list; 86 struct list_head client_list;
85 void (*err_handler)(struct t3cdev *tdev, u32 status, u32 error); 87 void (*event_handler)(struct t3cdev *tdev, u32 event, u32 port);
86}; 88};
87 89
88/* 90/*
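The rename turns the old error-only notifier into a general event channel: cxgb3_event_notify() now also reports per-port state, with cxgb_open()/cxgb_close() raising OFFLOAD_PORT_UP/OFFLOAD_PORT_DOWN, and every registered client with an event_handler gets the callback. A minimal sketch of a consumer (the handler and client here are illustrative, not part of this patch):

	static void example_event_handler(struct t3cdev *tdev, u32 event, u32 port)
	{
		switch (event) {
		case OFFLOAD_STATUS_UP:
		case OFFLOAD_PORT_UP:
			/* resume offload work for the adapter or port */
			break;
		case OFFLOAD_STATUS_DOWN:
		case OFFLOAD_PORT_DOWN:
			/* quiesce anything still using it */
			break;
		}
	}

	static struct cxgb3_client example_client = {
		.event_handler	= example_event_handler,
		/* .add, .remove, ... as before */
	};
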
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index e212f2c5448b..a00ec639c380 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -491,6 +491,7 @@ static int gfar_remove(struct of_device *ofdev)
491 491
492 dev_set_drvdata(&ofdev->dev, NULL); 492 dev_set_drvdata(&ofdev->dev, NULL);
493 493
494 unregister_netdev(priv->ndev);
494 iounmap(priv->regs); 495 iounmap(priv->regs);
495 free_netdev(priv->ndev); 496 free_netdev(priv->ndev);
496 497
diff --git a/drivers/net/mlx4/cq.c b/drivers/net/mlx4/cq.c
index ac57b6a42c6e..ccfe276943f0 100644
--- a/drivers/net/mlx4/cq.c
+++ b/drivers/net/mlx4/cq.c
@@ -34,7 +34,6 @@
34 * SOFTWARE. 34 * SOFTWARE.
35 */ 35 */
36 36
37#include <linux/init.h>
38#include <linux/hardirq.h> 37#include <linux/hardirq.h>
39 38
40#include <linux/mlx4/cmd.h> 39#include <linux/mlx4/cmd.h>
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index b9ceddde46c0..bffb7995cb70 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -31,7 +31,6 @@
31 * SOFTWARE. 31 * SOFTWARE.
32 */ 32 */
33 33
34#include <linux/init.h>
35#include <linux/interrupt.h> 34#include <linux/interrupt.h>
36#include <linux/mm.h> 35#include <linux/mm.h>
37#include <linux/dma-mapping.h> 36#include <linux/dma-mapping.h>
@@ -42,6 +41,10 @@
42#include "fw.h" 41#include "fw.h"
43 42
44enum { 43enum {
44 MLX4_IRQNAME_SIZE = 64
45};
46
47enum {
45 MLX4_NUM_ASYNC_EQE = 0x100, 48 MLX4_NUM_ASYNC_EQE = 0x100,
46 MLX4_NUM_SPARE_EQE = 0x80, 49 MLX4_NUM_SPARE_EQE = 0x80,
47 MLX4_EQ_ENTRY_SIZE = 0x20 50 MLX4_EQ_ENTRY_SIZE = 0x20
@@ -526,48 +529,6 @@ static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
526 iounmap(priv->clr_base); 529 iounmap(priv->clr_base);
527} 530}
528 531
529int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt)
530{
531 struct mlx4_priv *priv = mlx4_priv(dev);
532 int ret;
533
534 /*
535 * We assume that mapping one page is enough for the whole EQ
536 * context table. This is fine with all current HCAs, because
537 * we only use 32 EQs and each EQ uses 64 bytes of context
538 * memory, or 1 KB total.
539 */
540 priv->eq_table.icm_virt = icm_virt;
541 priv->eq_table.icm_page = alloc_page(GFP_HIGHUSER);
542 if (!priv->eq_table.icm_page)
543 return -ENOMEM;
544 priv->eq_table.icm_dma = pci_map_page(dev->pdev, priv->eq_table.icm_page, 0,
545 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
546 if (pci_dma_mapping_error(dev->pdev, priv->eq_table.icm_dma)) {
547 __free_page(priv->eq_table.icm_page);
548 return -ENOMEM;
549 }
550
551 ret = mlx4_MAP_ICM_page(dev, priv->eq_table.icm_dma, icm_virt);
552 if (ret) {
553 pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE,
554 PCI_DMA_BIDIRECTIONAL);
555 __free_page(priv->eq_table.icm_page);
556 }
557
558 return ret;
559}
560
561void mlx4_unmap_eq_icm(struct mlx4_dev *dev)
562{
563 struct mlx4_priv *priv = mlx4_priv(dev);
564
565 mlx4_UNMAP_ICM(dev, priv->eq_table.icm_virt, 1);
566 pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE,
567 PCI_DMA_BIDIRECTIONAL);
568 __free_page(priv->eq_table.icm_page);
569}
570
571int mlx4_alloc_eq_table(struct mlx4_dev *dev) 532int mlx4_alloc_eq_table(struct mlx4_dev *dev)
572{ 533{
573 struct mlx4_priv *priv = mlx4_priv(dev); 534 struct mlx4_priv *priv = mlx4_priv(dev);
@@ -615,7 +576,9 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
615 priv->eq_table.clr_int = priv->clr_base + 576 priv->eq_table.clr_int = priv->clr_base +
616 (priv->eq_table.inta_pin < 32 ? 4 : 0); 577 (priv->eq_table.inta_pin < 32 ? 4 : 0);
617 578
618 priv->eq_table.irq_names = kmalloc(16 * dev->caps.num_comp_vectors, GFP_KERNEL); 579 priv->eq_table.irq_names =
580 kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1),
581 GFP_KERNEL);
619 if (!priv->eq_table.irq_names) { 582 if (!priv->eq_table.irq_names) {
620 err = -ENOMEM; 583 err = -ENOMEM;
621 goto err_out_bitmap; 584 goto err_out_bitmap;
@@ -638,17 +601,25 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
638 goto err_out_comp; 601 goto err_out_comp;
639 602
640 if (dev->flags & MLX4_FLAG_MSI_X) { 603 if (dev->flags & MLX4_FLAG_MSI_X) {
641 static const char async_eq_name[] = "mlx4-async";
642 const char *eq_name; 604 const char *eq_name;
643 605
644 for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) { 606 for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
645 if (i < dev->caps.num_comp_vectors) { 607 if (i < dev->caps.num_comp_vectors) {
646 snprintf(priv->eq_table.irq_names + i * 16, 16, 608 snprintf(priv->eq_table.irq_names +
647 "mlx4-comp-%d", i); 609 i * MLX4_IRQNAME_SIZE,
648 eq_name = priv->eq_table.irq_names + i * 16; 610 MLX4_IRQNAME_SIZE,
649 } else 611 "mlx4-comp-%d@pci:%s", i,
650 eq_name = async_eq_name; 612 pci_name(dev->pdev));
613 } else {
614 snprintf(priv->eq_table.irq_names +
615 i * MLX4_IRQNAME_SIZE,
616 MLX4_IRQNAME_SIZE,
617 "mlx4-async@pci:%s",
618 pci_name(dev->pdev));
619 }
651 620
621 eq_name = priv->eq_table.irq_names +
622 i * MLX4_IRQNAME_SIZE;
652 err = request_irq(priv->eq_table.eq[i].irq, 623 err = request_irq(priv->eq_table.eq[i].irq,
653 mlx4_msi_x_interrupt, 0, eq_name, 624 mlx4_msi_x_interrupt, 0, eq_name,
654 priv->eq_table.eq + i); 625 priv->eq_table.eq + i);
@@ -658,8 +629,12 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
658 priv->eq_table.eq[i].have_irq = 1; 629 priv->eq_table.eq[i].have_irq = 1;
659 } 630 }
660 } else { 631 } else {
632 snprintf(priv->eq_table.irq_names,
633 MLX4_IRQNAME_SIZE,
634 DRV_NAME "@pci:%s",
635 pci_name(dev->pdev));
661 err = request_irq(dev->pdev->irq, mlx4_interrupt, 636 err = request_irq(dev->pdev->irq, mlx4_interrupt,
662 IRQF_SHARED, DRV_NAME, dev); 637 IRQF_SHARED, priv->eq_table.irq_names, dev);
663 if (err) 638 if (err)
664 goto err_out_async; 639 goto err_out_async;
665 640
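Previously only the completion-vector names lived in irq_names (16 bytes each; the async vector used a static "mlx4-async" string). The new names embed pci_name(dev->pdev), so a name like "mlx4-comp-3@pci:0000:0b:00.0" (28 characters, PCI address chosen for illustration) no longer fits in 16 bytes; the buffer therefore grows to MLX4_IRQNAME_SIZE (64) bytes per entry and gains a slot for the async name, hence num_comp_vectors + 1 entries. In the INTx fallback the same buffer holds the single "<DRV_NAME>@pci:..." string that is passed to request_irq().
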
diff --git a/drivers/net/mlx4/icm.c b/drivers/net/mlx4/icm.c
index baf4bf66062c..04b382fcb8c8 100644
--- a/drivers/net/mlx4/icm.c
+++ b/drivers/net/mlx4/icm.c
@@ -31,7 +31,6 @@
31 * SOFTWARE. 31 * SOFTWARE.
32 */ 32 */
33 33
34#include <linux/init.h>
35#include <linux/errno.h> 34#include <linux/errno.h>
36#include <linux/mm.h> 35#include <linux/mm.h>
37#include <linux/scatterlist.h> 36#include <linux/scatterlist.h>
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index dac621b1e9fc..3dd481e77f92 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -525,7 +525,10 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
525 goto err_unmap_aux; 525 goto err_unmap_aux;
526 } 526 }
527 527
528 err = mlx4_map_eq_icm(dev, init_hca->eqc_base); 528 err = mlx4_init_icm_table(dev, &priv->eq_table.table,
529 init_hca->eqc_base, dev_cap->eqc_entry_sz,
530 dev->caps.num_eqs, dev->caps.num_eqs,
531 0, 0);
529 if (err) { 532 if (err) {
530 mlx4_err(dev, "Failed to map EQ context memory, aborting.\n"); 533 mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
531 goto err_unmap_cmpt; 534 goto err_unmap_cmpt;
@@ -668,7 +671,7 @@ err_unmap_mtt:
668 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table); 671 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
669 672
670err_unmap_eq: 673err_unmap_eq:
671 mlx4_unmap_eq_icm(dev); 674 mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
672 675
673err_unmap_cmpt: 676err_unmap_cmpt:
674 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table); 677 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
@@ -698,11 +701,11 @@ static void mlx4_free_icms(struct mlx4_dev *dev)
698 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table); 701 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
699 mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table); 702 mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
700 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table); 703 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
704 mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
701 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table); 705 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
702 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); 706 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
703 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); 707 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
704 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); 708 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
705 mlx4_unmap_eq_icm(dev);
706 709
707 mlx4_UNMAP_ICM_AUX(dev); 710 mlx4_UNMAP_ICM_AUX(dev);
708 mlx4_free_icm(dev, priv->fw.aux_icm, 0); 711 mlx4_free_icm(dev, priv->fw.aux_icm, 0);
@@ -786,7 +789,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
786 return 0; 789 return 0;
787 790
788err_close: 791err_close:
789 mlx4_close_hca(dev); 792 mlx4_CLOSE_HCA(dev, 0);
790 793
791err_free_icm: 794err_free_icm:
792 mlx4_free_icms(dev); 795 mlx4_free_icms(dev);
@@ -1070,18 +1073,12 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
1070 goto err_disable_pdev; 1073 goto err_disable_pdev;
1071 } 1074 }
1072 1075
1073 err = pci_request_region(pdev, 0, DRV_NAME); 1076 err = pci_request_regions(pdev, DRV_NAME);
1074 if (err) { 1077 if (err) {
1075 dev_err(&pdev->dev, "Cannot request control region, aborting.\n"); 1078 dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
1076 goto err_disable_pdev; 1079 goto err_disable_pdev;
1077 } 1080 }
1078 1081
1079 err = pci_request_region(pdev, 2, DRV_NAME);
1080 if (err) {
1081 dev_err(&pdev->dev, "Cannot request UAR region, aborting.\n");
1082 goto err_release_bar0;
1083 }
1084
1085 pci_set_master(pdev); 1082 pci_set_master(pdev);
1086 1083
1087 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 1084 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
@@ -1090,7 +1087,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
1090 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 1087 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1091 if (err) { 1088 if (err) {
1092 dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n"); 1089 dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
1093 goto err_release_bar2; 1090 goto err_release_regions;
1094 } 1091 }
1095 } 1092 }
1096 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 1093 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
@@ -1101,7 +1098,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
1101 if (err) { 1098 if (err) {
1102 dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, " 1099 dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
1103 "aborting.\n"); 1100 "aborting.\n");
1104 goto err_release_bar2; 1101 goto err_release_regions;
1105 } 1102 }
1106 } 1103 }
1107 1104
@@ -1110,7 +1107,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
1110 dev_err(&pdev->dev, "Device struct alloc failed, " 1107 dev_err(&pdev->dev, "Device struct alloc failed, "
1111 "aborting.\n"); 1108 "aborting.\n");
1112 err = -ENOMEM; 1109 err = -ENOMEM;
1113 goto err_release_bar2; 1110 goto err_release_regions;
1114 } 1111 }
1115 1112
1116 dev = &priv->dev; 1113 dev = &priv->dev;
@@ -1205,11 +1202,8 @@ err_cmd:
1205err_free_dev: 1202err_free_dev:
1206 kfree(priv); 1203 kfree(priv);
1207 1204
1208err_release_bar2: 1205err_release_regions:
1209 pci_release_region(pdev, 2); 1206 pci_release_regions(pdev);
1210
1211err_release_bar0:
1212 pci_release_region(pdev, 0);
1213 1207
1214err_disable_pdev: 1208err_disable_pdev:
1215 pci_disable_device(pdev); 1209 pci_disable_device(pdev);
@@ -1265,8 +1259,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
1265 pci_disable_msix(pdev); 1259 pci_disable_msix(pdev);
1266 1260
1267 kfree(priv); 1261 kfree(priv);
1268 pci_release_region(pdev, 2); 1262 pci_release_regions(pdev);
1269 pci_release_region(pdev, 0);
1270 pci_disable_device(pdev); 1263 pci_disable_device(pdev);
1271 pci_set_drvdata(pdev, NULL); 1264 pci_set_drvdata(pdev, NULL);
1272 } 1265 }
diff --git a/drivers/net/mlx4/mcg.c b/drivers/net/mlx4/mcg.c
index 6053c357a470..5ccbce9866fe 100644
--- a/drivers/net/mlx4/mcg.c
+++ b/drivers/net/mlx4/mcg.c
@@ -31,7 +31,6 @@
31 * SOFTWARE. 31 * SOFTWARE.
32 */ 32 */
33 33
34#include <linux/init.h>
35#include <linux/string.h> 34#include <linux/string.h>
36#include <linux/slab.h> 35#include <linux/slab.h>
37 36
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index 5bd79c2b184f..bc72d6e4919b 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -205,9 +205,7 @@ struct mlx4_eq_table {
205 void __iomem **uar_map; 205 void __iomem **uar_map;
206 u32 clr_mask; 206 u32 clr_mask;
207 struct mlx4_eq *eq; 207 struct mlx4_eq *eq;
208 u64 icm_virt; 208 struct mlx4_icm_table table;
209 struct page *icm_page;
210 dma_addr_t icm_dma;
211 struct mlx4_icm_table cmpt_table; 209 struct mlx4_icm_table cmpt_table;
212 int have_irq; 210 int have_irq;
213 u8 inta_pin; 211 u8 inta_pin;
@@ -373,9 +371,6 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
373 struct mlx4_dev_cap *dev_cap, 371 struct mlx4_dev_cap *dev_cap,
374 struct mlx4_init_hca_param *init_hca); 372 struct mlx4_init_hca_param *init_hca);
375 373
376int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt);
377void mlx4_unmap_eq_icm(struct mlx4_dev *dev);
378
379int mlx4_cmd_init(struct mlx4_dev *dev); 374int mlx4_cmd_init(struct mlx4_dev *dev);
380void mlx4_cmd_cleanup(struct mlx4_dev *dev); 375void mlx4_cmd_cleanup(struct mlx4_dev *dev);
381void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param); 376void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param);
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c
index f96948be0a44..ca7ab8e7b4cc 100644
--- a/drivers/net/mlx4/mr.c
+++ b/drivers/net/mlx4/mr.c
@@ -32,7 +32,6 @@
32 * SOFTWARE. 32 * SOFTWARE.
33 */ 33 */
34 34
35#include <linux/init.h>
36#include <linux/errno.h> 35#include <linux/errno.h>
37 36
38#include <linux/mlx4/cmd.h> 37#include <linux/mlx4/cmd.h>
diff --git a/drivers/net/mlx4/pd.c b/drivers/net/mlx4/pd.c
index 26d1a7a9e375..c4988d6bd5b2 100644
--- a/drivers/net/mlx4/pd.c
+++ b/drivers/net/mlx4/pd.c
@@ -31,7 +31,6 @@
31 * SOFTWARE. 31 * SOFTWARE.
32 */ 32 */
33 33
34#include <linux/init.h>
35#include <linux/errno.h> 34#include <linux/errno.h>
36 35
37#include <asm/page.h> 36#include <asm/page.h>
diff --git a/drivers/net/mlx4/profile.c b/drivers/net/mlx4/profile.c
index bd22df95adf9..ca25b9dc8378 100644
--- a/drivers/net/mlx4/profile.c
+++ b/drivers/net/mlx4/profile.c
@@ -32,8 +32,6 @@
32 * SOFTWARE. 32 * SOFTWARE.
33 */ 33 */
34 34
35#include <linux/init.h>
36
37#include "mlx4.h" 35#include "mlx4.h"
38#include "fw.h" 36#include "fw.h"
39 37
diff --git a/drivers/net/mlx4/qp.c b/drivers/net/mlx4/qp.c
index 1c565ef8d179..42ab9fc01d3e 100644
--- a/drivers/net/mlx4/qp.c
+++ b/drivers/net/mlx4/qp.c
@@ -33,8 +33,6 @@
33 * SOFTWARE. 33 * SOFTWARE.
34 */ 34 */
35 35
36#include <linux/init.h>
37
38#include <linux/mlx4/cmd.h> 36#include <linux/mlx4/cmd.h>
39#include <linux/mlx4/qp.h> 37#include <linux/mlx4/qp.h>
40 38
diff --git a/drivers/net/mlx4/reset.c b/drivers/net/mlx4/reset.c
index 3951b884c0fb..e5741dab3825 100644
--- a/drivers/net/mlx4/reset.c
+++ b/drivers/net/mlx4/reset.c
@@ -31,7 +31,6 @@
31 * SOFTWARE. 31 * SOFTWARE.
32 */ 32 */
33 33
34#include <linux/init.h>
35#include <linux/errno.h> 34#include <linux/errno.h>
36#include <linux/pci.h> 35#include <linux/pci.h>
37#include <linux/delay.h> 36#include <linux/delay.h>
diff --git a/drivers/net/mlx4/srq.c b/drivers/net/mlx4/srq.c
index fe9f218691f5..1377d0dc8f1f 100644
--- a/drivers/net/mlx4/srq.c
+++ b/drivers/net/mlx4/srq.c
@@ -31,8 +31,6 @@
31 * SOFTWARE. 31 * SOFTWARE.
32 */ 32 */
33 33
34#include <linux/init.h>
35
36#include <linux/mlx4/cmd.h> 34#include <linux/mlx4/cmd.h>
37 35
38#include "mlx4.h" 36#include "mlx4.h"
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 42b6c6319bc2..87214a257d2a 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -130,17 +130,10 @@ static inline struct tun_sock *tun_sk(struct sock *sk)
130static int tun_attach(struct tun_struct *tun, struct file *file) 130static int tun_attach(struct tun_struct *tun, struct file *file)
131{ 131{
132 struct tun_file *tfile = file->private_data; 132 struct tun_file *tfile = file->private_data;
133 const struct cred *cred = current_cred();
134 int err; 133 int err;
135 134
136 ASSERT_RTNL(); 135 ASSERT_RTNL();
137 136
138 /* Check permissions */
139 if (((tun->owner != -1 && cred->euid != tun->owner) ||
140 (tun->group != -1 && !in_egroup_p(tun->group))) &&
141 !capable(CAP_NET_ADMIN))
142 return -EPERM;
143
144 netif_tx_lock_bh(tun->dev); 137 netif_tx_lock_bh(tun->dev);
145 138
146 err = -EINVAL; 139 err = -EINVAL;
@@ -926,6 +919,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
926 919
927 dev = __dev_get_by_name(net, ifr->ifr_name); 920 dev = __dev_get_by_name(net, ifr->ifr_name);
928 if (dev) { 921 if (dev) {
922 const struct cred *cred = current_cred();
923
929 if (ifr->ifr_flags & IFF_TUN_EXCL) 924 if (ifr->ifr_flags & IFF_TUN_EXCL)
930 return -EBUSY; 925 return -EBUSY;
931 if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops) 926 if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
@@ -935,6 +930,14 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
935 else 930 else
936 return -EINVAL; 931 return -EINVAL;
937 932
933 if (((tun->owner != -1 && cred->euid != tun->owner) ||
934 (tun->group != -1 && !in_egroup_p(tun->group))) &&
935 !capable(CAP_NET_ADMIN))
936 return -EPERM;
937 err = security_tun_dev_attach(tun->sk);
938 if (err < 0)
939 return err;
940
938 err = tun_attach(tun, file); 941 err = tun_attach(tun, file);
939 if (err < 0) 942 if (err < 0)
940 return err; 943 return err;
@@ -947,6 +950,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
947 950
948 if (!capable(CAP_NET_ADMIN)) 951 if (!capable(CAP_NET_ADMIN))
949 return -EPERM; 952 return -EPERM;
953 err = security_tun_dev_create();
954 if (err < 0)
955 return err;
950 956
951 /* Set dev type */ 957 /* Set dev type */
952 if (ifr->ifr_flags & IFF_TUN) { 958 if (ifr->ifr_flags & IFF_TUN) {
@@ -989,6 +995,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
989 tun->sk = sk; 995 tun->sk = sk;
990 container_of(sk, struct tun_sock, sk)->tun = tun; 996 container_of(sk, struct tun_sock, sk)->tun = tun;
991 997
998 security_tun_dev_post_create(sk);
999
992 tun_net_init(dev); 1000 tun_net_init(dev);
993 1001
994 if (strchr(dev->name, '%')) { 1002 if (strchr(dev->name, '%')) {
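The ownership check moves from tun_attach() up into tun_set_iff(), where the new LSM hooks slot in around it in a fixed order: for an existing interface, the owner/group/CAP_NET_ADMIN check runs first, then security_tun_dev_attach(tun->sk), and only then tun_attach(); for a new interface, security_tun_dev_create() is consulted (after the CAP_NET_ADMIN check) before anything is allocated, and security_tun_dev_post_create(sk) runs once the socket has been set up, giving the security module a chance to label it.
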
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 6dcac73b4d29..f593fbbb4e52 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -2874,45 +2874,27 @@ static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
2874 return 0; 2874 return 0;
2875} 2875}
2876 2876
2877static int ipw_fw_dma_add_buffer(struct ipw_priv *priv, 2877static int ipw_fw_dma_add_buffer(struct ipw_priv *priv, dma_addr_t *src_address,
2878 u32 src_phys, u32 dest_address, u32 length) 2878 int nr, u32 dest_address, u32 len)
2879{ 2879{
2880 u32 bytes_left = length; 2880 int ret, i;
2881 u32 src_offset = 0; 2881 u32 size;
2882 u32 dest_offset = 0; 2882
2883 int status = 0;
2884 IPW_DEBUG_FW(">> \n"); 2883 IPW_DEBUG_FW(">> \n");
2885 IPW_DEBUG_FW_INFO("src_phys=0x%x dest_address=0x%x length=0x%x\n", 2884 IPW_DEBUG_FW_INFO("nr=%d dest_address=0x%x len=0x%x\n",
2886 src_phys, dest_address, length); 2885 nr, dest_address, len);
2887 while (bytes_left > CB_MAX_LENGTH) { 2886
2888 status = ipw_fw_dma_add_command_block(priv, 2887 for (i = 0; i < nr; i++) {
2889 src_phys + src_offset, 2888 size = min_t(u32, len - i * CB_MAX_LENGTH, CB_MAX_LENGTH);
2890 dest_address + 2889 ret = ipw_fw_dma_add_command_block(priv, src_address[i],
2891 dest_offset, 2890 dest_address +
2892 CB_MAX_LENGTH, 0, 0); 2891 i * CB_MAX_LENGTH, size,
2893 if (status) { 2892 0, 0);
2893 if (ret) {
2894 IPW_DEBUG_FW_INFO(": Failed\n"); 2894 IPW_DEBUG_FW_INFO(": Failed\n");
2895 return -1; 2895 return -1;
2896 } else 2896 } else
2897 IPW_DEBUG_FW_INFO(": Added new cb\n"); 2897 IPW_DEBUG_FW_INFO(": Added new cb\n");
2898
2899 src_offset += CB_MAX_LENGTH;
2900 dest_offset += CB_MAX_LENGTH;
2901 bytes_left -= CB_MAX_LENGTH;
2902 }
2903
2904 /* add the buffer tail */
2905 if (bytes_left > 0) {
2906 status =
2907 ipw_fw_dma_add_command_block(priv, src_phys + src_offset,
2908 dest_address + dest_offset,
2909 bytes_left, 0, 0);
2910 if (status) {
2911 IPW_DEBUG_FW_INFO(": Failed on the buffer tail\n");
2912 return -1;
2913 } else
2914 IPW_DEBUG_FW_INFO
2915 (": Adding new cb - the buffer tail\n");
2916 } 2898 }
2917 2899
2918 IPW_DEBUG_FW("<< \n"); 2900 IPW_DEBUG_FW("<< \n");
@@ -3160,59 +3142,91 @@ static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
3160 3142
3161static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len) 3143static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
3162{ 3144{
3163 int rc = -1; 3145 int ret = -1;
3164 int offset = 0; 3146 int offset = 0;
3165 struct fw_chunk *chunk; 3147 struct fw_chunk *chunk;
3166 dma_addr_t shared_phys; 3148 int total_nr = 0;
3167 u8 *shared_virt; 3149 int i;
3150 struct pci_pool *pool;
3151 u32 *virts[CB_NUMBER_OF_ELEMENTS_SMALL];
3152 dma_addr_t phys[CB_NUMBER_OF_ELEMENTS_SMALL];
3168 3153
3169 IPW_DEBUG_TRACE("<< : \n"); 3154 IPW_DEBUG_TRACE("<< : \n");
3170 shared_virt = pci_alloc_consistent(priv->pci_dev, len, &shared_phys);
3171 3155
3172 if (!shared_virt) 3156 pool = pci_pool_create("ipw2200", priv->pci_dev, CB_MAX_LENGTH, 0, 0);
3157 if (!pool) {
3158 IPW_ERROR("pci_pool_create failed\n");
3173 return -ENOMEM; 3159 return -ENOMEM;
3174 3160 }
3175 memmove(shared_virt, data, len);
3176 3161
3177 /* Start the Dma */ 3162 /* Start the Dma */
3178 rc = ipw_fw_dma_enable(priv); 3163 ret = ipw_fw_dma_enable(priv);
3179 3164
3180 /* the DMA is already ready this would be a bug. */ 3165 /* the DMA is already ready this would be a bug. */
3181 BUG_ON(priv->sram_desc.last_cb_index > 0); 3166 BUG_ON(priv->sram_desc.last_cb_index > 0);
3182 3167
3183 do { 3168 do {
3169 u32 chunk_len;
3170 u8 *start;
3171 int size;
3172 int nr = 0;
3173
3184 chunk = (struct fw_chunk *)(data + offset); 3174 chunk = (struct fw_chunk *)(data + offset);
3185 offset += sizeof(struct fw_chunk); 3175 offset += sizeof(struct fw_chunk);
3176 chunk_len = le32_to_cpu(chunk->length);
3177 start = data + offset;
3178
3179 nr = (chunk_len + CB_MAX_LENGTH - 1) / CB_MAX_LENGTH;
3180 for (i = 0; i < nr; i++) {
3181 virts[total_nr] = pci_pool_alloc(pool, GFP_KERNEL,
3182 &phys[total_nr]);
3183 if (!virts[total_nr]) {
3184 ret = -ENOMEM;
3185 goto out;
3186 }
3187 size = min_t(u32, chunk_len - i * CB_MAX_LENGTH,
3188 CB_MAX_LENGTH);
3189 memcpy(virts[total_nr], start, size);
3190 start += size;
3191 total_nr++;
3192 /* We don't support fw chunk larger than 64*8K */
3193 BUG_ON(total_nr > CB_NUMBER_OF_ELEMENTS_SMALL);
3194 }
3195
3186 /* build DMA packet and queue up for sending */ 3196 /* build DMA packet and queue up for sending */
3187 /* dma to chunk->address, the chunk->length bytes from data + 3197 /* dma to chunk->address, the chunk->length bytes from data +
3188 * offeset*/ 3198 * offeset*/
3189 /* Dma loading */ 3199 /* Dma loading */
3190 rc = ipw_fw_dma_add_buffer(priv, shared_phys + offset, 3200 ret = ipw_fw_dma_add_buffer(priv, &phys[total_nr - nr],
3191 le32_to_cpu(chunk->address), 3201 nr, le32_to_cpu(chunk->address),
3192 le32_to_cpu(chunk->length)); 3202 chunk_len);
3193 if (rc) { 3203 if (ret) {
3194 IPW_DEBUG_INFO("dmaAddBuffer Failed\n"); 3204 IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
3195 goto out; 3205 goto out;
3196 } 3206 }
3197 3207
3198 offset += le32_to_cpu(chunk->length); 3208 offset += chunk_len;
3199 } while (offset < len); 3209 } while (offset < len);
3200 3210
3201 /* Run the DMA and wait for the answer */ 3211 /* Run the DMA and wait for the answer */
3202 rc = ipw_fw_dma_kick(priv); 3212 ret = ipw_fw_dma_kick(priv);
3203 if (rc) { 3213 if (ret) {
3204 IPW_ERROR("dmaKick Failed\n"); 3214 IPW_ERROR("dmaKick Failed\n");
3205 goto out; 3215 goto out;
3206 } 3216 }
3207 3217
3208 rc = ipw_fw_dma_wait(priv); 3218 ret = ipw_fw_dma_wait(priv);
3209 if (rc) { 3219 if (ret) {
3210 IPW_ERROR("dmaWaitSync Failed\n"); 3220 IPW_ERROR("dmaWaitSync Failed\n");
3211 goto out; 3221 goto out;
3212 } 3222 }
3213 out: 3223 out:
3214 pci_free_consistent(priv->pci_dev, len, shared_virt, shared_phys); 3224 for (i = 0; i < total_nr; i++)
3215 return rc; 3225 pci_pool_free(pool, virts[i], phys[i]);
3226
3227 pci_pool_destroy(pool);
3228
3229 return ret;
3216} 3230}
3217 3231
3218/* stop nic */ 3232/* stop nic */
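Rather than one pci_alloc_consistent() buffer the size of the whole firmware image (a large contiguous allocation that can fail), the loader now draws fixed CB_MAX_LENGTH blocks from a pci_pool and queues one DMA command block per piece: nr = (chunk_len + CB_MAX_LENGTH - 1) / CB_MAX_LENGTH rounds up, min_t() trims the final copy to the remainder, and the running total is capped at CB_NUMBER_OF_ELEMENTS_SMALL blocks (the "64*8K" comment suggests CB_MAX_LENGTH is 8 KiB). Illustration under that assumption: a 20 KiB chunk becomes nr = 3 blocks of 8 KiB, 8 KiB and 4 KiB. Every block is returned to the pool and the pool destroyed on the shared out: path, whether or not the load succeeded.
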
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index 242257b19441..a7aae24f2889 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -21,7 +21,6 @@
21 21
22#include <linux/sched.h> 22#include <linux/sched.h>
23#include <linux/oprofile.h> 23#include <linux/oprofile.h>
24#include <linux/vmalloc.h>
25#include <linux/errno.h> 24#include <linux/errno.h>
26 25
27#include "event_buffer.h" 26#include "event_buffer.h"
@@ -407,6 +406,21 @@ int oprofile_add_data(struct op_entry *entry, unsigned long val)
407 return op_cpu_buffer_add_data(entry, val); 406 return op_cpu_buffer_add_data(entry, val);
408} 407}
409 408
409int oprofile_add_data64(struct op_entry *entry, u64 val)
410{
411 if (!entry->event)
412 return 0;
413 if (op_cpu_buffer_get_size(entry) < 2)
414 /*
415 * the function returns 0 to indicate a too small
416 * buffer, even if there is some space left
417 */
418 return 0;
419 if (!op_cpu_buffer_add_data(entry, (u32)val))
420 return 0;
421 return op_cpu_buffer_add_data(entry, (u32)(val >> 32));
422}
423
410int oprofile_write_commit(struct op_entry *entry) 424int oprofile_write_commit(struct op_entry *entry)
411{ 425{
412 if (!entry->event) 426 if (!entry->event)
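oprofile_add_data64() stores a 64-bit value as two consecutive 32-bit entries, low word first, and bails out up front if fewer than two slots remain, so the two halves are added together or not at all. A consumer reassembles them in the same order; a minimal sketch (not from this patch, names are illustrative):

	static u64 example_unpack64(u32 lo, u32 hi)
	{
		/* oprofile_add_data64() added the low word first */
		return ((u64)hi << 32) | lo;
	}
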
diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
index 3cffce90f82a..dc8a0428260d 100644
--- a/drivers/oprofile/oprof.c
+++ b/drivers/oprofile/oprof.c
@@ -12,6 +12,8 @@
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/oprofile.h> 13#include <linux/oprofile.h>
14#include <linux/moduleparam.h> 14#include <linux/moduleparam.h>
15#include <linux/workqueue.h>
16#include <linux/time.h>
15#include <asm/mutex.h> 17#include <asm/mutex.h>
16 18
17#include "oprof.h" 19#include "oprof.h"
@@ -87,6 +89,69 @@ out:
87 return err; 89 return err;
88} 90}
89 91
92#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
93
94static void switch_worker(struct work_struct *work);
95static DECLARE_DELAYED_WORK(switch_work, switch_worker);
96
97static void start_switch_worker(void)
98{
99 if (oprofile_ops.switch_events)
100 schedule_delayed_work(&switch_work, oprofile_time_slice);
101}
102
103static void stop_switch_worker(void)
104{
105 cancel_delayed_work_sync(&switch_work);
106}
107
108static void switch_worker(struct work_struct *work)
109{
110 if (oprofile_ops.switch_events())
111 return;
112
113 atomic_inc(&oprofile_stats.multiplex_counter);
114 start_switch_worker();
115}
116
117/* User inputs in ms, converts to jiffies */
118int oprofile_set_timeout(unsigned long val_msec)
119{
120 int err = 0;
121 unsigned long time_slice;
122
123 mutex_lock(&start_mutex);
124
125 if (oprofile_started) {
126 err = -EBUSY;
127 goto out;
128 }
129
130 if (!oprofile_ops.switch_events) {
131 err = -EINVAL;
132 goto out;
133 }
134
135 time_slice = msecs_to_jiffies(val_msec);
136 if (time_slice == MAX_JIFFY_OFFSET) {
137 err = -EINVAL;
138 goto out;
139 }
140
141 oprofile_time_slice = time_slice;
142
143out:
144 mutex_unlock(&start_mutex);
145 return err;
146
147}
148
149#else
150
151static inline void start_switch_worker(void) { }
152static inline void stop_switch_worker(void) { }
153
154#endif
90 155
91/* Actually start profiling (echo 1>/dev/oprofile/enable) */ 156/* Actually start profiling (echo 1>/dev/oprofile/enable) */
92int oprofile_start(void) 157int oprofile_start(void)
@@ -108,6 +173,8 @@ int oprofile_start(void)
108 if ((err = oprofile_ops.start())) 173 if ((err = oprofile_ops.start()))
109 goto out; 174 goto out;
110 175
176 start_switch_worker();
177
111 oprofile_started = 1; 178 oprofile_started = 1;
112out: 179out:
113 mutex_unlock(&start_mutex); 180 mutex_unlock(&start_mutex);
@@ -123,6 +190,9 @@ void oprofile_stop(void)
123 goto out; 190 goto out;
124 oprofile_ops.stop(); 191 oprofile_ops.stop();
125 oprofile_started = 0; 192 oprofile_started = 0;
193
194 stop_switch_worker();
195
126 /* wake up the daemon to read what remains */ 196 /* wake up the daemon to read what remains */
127 wake_up_buffer_waiter(); 197 wake_up_buffer_waiter();
128out: 198out:
@@ -155,7 +225,6 @@ post_sync:
155 mutex_unlock(&start_mutex); 225 mutex_unlock(&start_mutex);
156} 226}
157 227
158
159int oprofile_set_backtrace(unsigned long val) 228int oprofile_set_backtrace(unsigned long val)
160{ 229{
161 int err = 0; 230 int err = 0;
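The multiplexing machinery is a self-rescheduling delayed work item: start_switch_worker() queues switch_work oprofile_time_slice jiffies out, and switch_worker() calls the architecture's switch_events(); on success (return 0) it bumps the multiplex_counter statistic and requeues itself, while a non-zero return ends the cycle, and oprofile_stop() cancels it synchronously. oprofile_set_timeout() takes milliseconds from userspace, refuses changes while profiling is running (-EBUSY) or when no switch_events handler exists (-EINVAL), and treats a msecs_to_jiffies() result of MAX_JIFFY_OFFSET as out of range.
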
diff --git a/drivers/oprofile/oprof.h b/drivers/oprofile/oprof.h
index c288d3c24b50..cb92f5c98c1a 100644
--- a/drivers/oprofile/oprof.h
+++ b/drivers/oprofile/oprof.h
@@ -24,6 +24,8 @@ struct oprofile_operations;
24extern unsigned long oprofile_buffer_size; 24extern unsigned long oprofile_buffer_size;
25extern unsigned long oprofile_cpu_buffer_size; 25extern unsigned long oprofile_cpu_buffer_size;
26extern unsigned long oprofile_buffer_watershed; 26extern unsigned long oprofile_buffer_watershed;
27extern unsigned long oprofile_time_slice;
28
27extern struct oprofile_operations oprofile_ops; 29extern struct oprofile_operations oprofile_ops;
28extern unsigned long oprofile_started; 30extern unsigned long oprofile_started;
29extern unsigned long oprofile_backtrace_depth; 31extern unsigned long oprofile_backtrace_depth;
@@ -35,5 +37,6 @@ void oprofile_create_files(struct super_block *sb, struct dentry *root);
35void oprofile_timer_init(struct oprofile_operations *ops); 37void oprofile_timer_init(struct oprofile_operations *ops);
36 38
37int oprofile_set_backtrace(unsigned long depth); 39int oprofile_set_backtrace(unsigned long depth);
40int oprofile_set_timeout(unsigned long time);
38 41
39#endif /* OPROF_H */ 42#endif /* OPROF_H */
diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
index 5d36ffc30dd5..bbd7516e0869 100644
--- a/drivers/oprofile/oprofile_files.c
+++ b/drivers/oprofile/oprofile_files.c
@@ -9,6 +9,7 @@
9 9
10#include <linux/fs.h> 10#include <linux/fs.h>
11#include <linux/oprofile.h> 11#include <linux/oprofile.h>
12#include <linux/jiffies.h>
12 13
13#include "event_buffer.h" 14#include "event_buffer.h"
14#include "oprofile_stats.h" 15#include "oprofile_stats.h"
@@ -17,10 +18,51 @@
17#define BUFFER_SIZE_DEFAULT 131072 18#define BUFFER_SIZE_DEFAULT 131072
18#define CPU_BUFFER_SIZE_DEFAULT 8192 19#define CPU_BUFFER_SIZE_DEFAULT 8192
19#define BUFFER_WATERSHED_DEFAULT 32768 /* FIXME: tune */ 20#define BUFFER_WATERSHED_DEFAULT 32768 /* FIXME: tune */
21#define TIME_SLICE_DEFAULT 1
20 22
21unsigned long oprofile_buffer_size; 23unsigned long oprofile_buffer_size;
22unsigned long oprofile_cpu_buffer_size; 24unsigned long oprofile_cpu_buffer_size;
23unsigned long oprofile_buffer_watershed; 25unsigned long oprofile_buffer_watershed;
26unsigned long oprofile_time_slice;
27
28#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
29
30static ssize_t timeout_read(struct file *file, char __user *buf,
31 size_t count, loff_t *offset)
32{
33 return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
34 buf, count, offset);
35}
36
37
38static ssize_t timeout_write(struct file *file, char const __user *buf,
39 size_t count, loff_t *offset)
40{
41 unsigned long val;
42 int retval;
43
44 if (*offset)
45 return -EINVAL;
46
47 retval = oprofilefs_ulong_from_user(&val, buf, count);
48 if (retval)
49 return retval;
50
51 retval = oprofile_set_timeout(val);
52
53 if (retval)
54 return retval;
55 return count;
56}
57
58
59static const struct file_operations timeout_fops = {
60 .read = timeout_read,
61 .write = timeout_write,
62};
63
64#endif
65
24 66
25static ssize_t depth_read(struct file *file, char __user *buf, size_t count, loff_t *offset) 67static ssize_t depth_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
26{ 68{
@@ -129,6 +171,7 @@ void oprofile_create_files(struct super_block *sb, struct dentry *root)
129 oprofile_buffer_size = BUFFER_SIZE_DEFAULT; 171 oprofile_buffer_size = BUFFER_SIZE_DEFAULT;
130 oprofile_cpu_buffer_size = CPU_BUFFER_SIZE_DEFAULT; 172 oprofile_cpu_buffer_size = CPU_BUFFER_SIZE_DEFAULT;
131 oprofile_buffer_watershed = BUFFER_WATERSHED_DEFAULT; 173 oprofile_buffer_watershed = BUFFER_WATERSHED_DEFAULT;
174 oprofile_time_slice = msecs_to_jiffies(TIME_SLICE_DEFAULT);
132 175
133 oprofilefs_create_file(sb, root, "enable", &enable_fops); 176 oprofilefs_create_file(sb, root, "enable", &enable_fops);
134 oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666); 177 oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666);
@@ -139,6 +182,9 @@ void oprofile_create_files(struct super_block *sb, struct dentry *root)
139 oprofilefs_create_file(sb, root, "cpu_type", &cpu_type_fops); 182 oprofilefs_create_file(sb, root, "cpu_type", &cpu_type_fops);
140 oprofilefs_create_file(sb, root, "backtrace_depth", &depth_fops); 183 oprofilefs_create_file(sb, root, "backtrace_depth", &depth_fops);
141 oprofilefs_create_file(sb, root, "pointer_size", &pointer_size_fops); 184 oprofilefs_create_file(sb, root, "pointer_size", &pointer_size_fops);
185#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
186 oprofilefs_create_file(sb, root, "time_slice", &timeout_fops);
187#endif
142 oprofile_create_stats_files(sb, root); 188 oprofile_create_stats_files(sb, root);
143 if (oprofile_ops.create_files) 189 if (oprofile_ops.create_files)
144 oprofile_ops.create_files(sb, root); 190 oprofile_ops.create_files(sb, root);
diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
index 3c2270a8300c..61689e814d46 100644
--- a/drivers/oprofile/oprofile_stats.c
+++ b/drivers/oprofile/oprofile_stats.c
@@ -34,6 +34,7 @@ void oprofile_reset_stats(void)
34 atomic_set(&oprofile_stats.sample_lost_no_mapping, 0); 34 atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
35 atomic_set(&oprofile_stats.event_lost_overflow, 0); 35 atomic_set(&oprofile_stats.event_lost_overflow, 0);
36 atomic_set(&oprofile_stats.bt_lost_no_mapping, 0); 36 atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
37 atomic_set(&oprofile_stats.multiplex_counter, 0);
37} 38}
38 39
39 40
@@ -76,4 +77,8 @@ void oprofile_create_stats_files(struct super_block *sb, struct dentry *root)
76 &oprofile_stats.event_lost_overflow); 77 &oprofile_stats.event_lost_overflow);
77 oprofilefs_create_ro_atomic(sb, dir, "bt_lost_no_mapping", 78 oprofilefs_create_ro_atomic(sb, dir, "bt_lost_no_mapping",
78 &oprofile_stats.bt_lost_no_mapping); 79 &oprofile_stats.bt_lost_no_mapping);
80#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
81 oprofilefs_create_ro_atomic(sb, dir, "multiplex_counter",
82 &oprofile_stats.multiplex_counter);
83#endif
79} 84}
diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
index 3da0d08dc1f9..0b54e46c3c14 100644
--- a/drivers/oprofile/oprofile_stats.h
+++ b/drivers/oprofile/oprofile_stats.h
@@ -17,6 +17,7 @@ struct oprofile_stat_struct {
17 atomic_t sample_lost_no_mapping; 17 atomic_t sample_lost_no_mapping;
18 atomic_t bt_lost_no_mapping; 18 atomic_t bt_lost_no_mapping;
19 atomic_t event_lost_overflow; 19 atomic_t event_lost_overflow;
20 atomic_t multiplex_counter;
20}; 21};
21 22
22extern struct oprofile_stat_struct oprofile_stats; 23extern struct oprofile_stat_struct oprofile_stats;
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index 4f5b8712931f..44803644ca05 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -55,15 +55,12 @@ static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
55 return desc->irq_2_iommu; 55 return desc->irq_2_iommu;
56} 56}
57 57
58static struct irq_2_iommu *irq_2_iommu_alloc_node(unsigned int irq, int node) 58static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
59{ 59{
60 struct irq_desc *desc; 60 struct irq_desc *desc;
61 struct irq_2_iommu *irq_iommu; 61 struct irq_2_iommu *irq_iommu;
62 62
63 /* 63 desc = irq_to_desc(irq);
64 * alloc irq desc if not allocated already.
65 */
66 desc = irq_to_desc_alloc_node(irq, node);
67 if (!desc) { 64 if (!desc) {
68 printk(KERN_INFO "can not get irq_desc for %d\n", irq); 65 printk(KERN_INFO "can not get irq_desc for %d\n", irq);
69 return NULL; 66 return NULL;
@@ -72,16 +69,11 @@ static struct irq_2_iommu *irq_2_iommu_alloc_node(unsigned int irq, int node)
72 irq_iommu = desc->irq_2_iommu; 69 irq_iommu = desc->irq_2_iommu;
73 70
74 if (!irq_iommu) 71 if (!irq_iommu)
75 desc->irq_2_iommu = get_one_free_irq_2_iommu(node); 72 desc->irq_2_iommu = get_one_free_irq_2_iommu(irq_node(irq));
76 73
77 return desc->irq_2_iommu; 74 return desc->irq_2_iommu;
78} 75}
79 76
80static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
81{
82 return irq_2_iommu_alloc_node(irq, cpu_to_node(boot_cpu_id));
83}
84
85#else /* !CONFIG_SPARSE_IRQ */ 77#else /* !CONFIG_SPARSE_IRQ */
86 78
87static struct irq_2_iommu irq_2_iommuX[NR_IRQS]; 79static struct irq_2_iommu irq_2_iommuX[NR_IRQS];
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index e3a87210e947..e03fe98f0619 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -598,6 +598,29 @@ int pci_iov_resource_bar(struct pci_dev *dev, int resno,
598} 598}
599 599
600/** 600/**
601 * pci_sriov_resource_alignment - get resource alignment for VF BAR
602 * @dev: the PCI device
603 * @resno: the resource number
604 *
605 * Returns the alignment of the VF BAR found in the SR-IOV capability.
606 * This is not the same as the resource size which is defined as
607 * the VF BAR size multiplied by the number of VFs. The alignment
608 * is just the VF BAR size.
609 */
610int pci_sriov_resource_alignment(struct pci_dev *dev, int resno)
611{
612 struct resource tmp;
613 enum pci_bar_type type;
614 int reg = pci_iov_resource_bar(dev, resno, &type);
615
616 if (!reg)
617 return 0;
618
619 __pci_read_base(dev, type, &tmp, reg);
620 return resource_alignment(&tmp);
621}
622
623/**
601 * pci_restore_iov_state - restore the state of the IOV capability 624 * pci_restore_iov_state - restore the state of the IOV capability
602 * @dev: the PCI device 625 * @dev: the PCI device
603 */ 626 */
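For an SR-IOV resource the size and the required alignment differ: the resource spans VF BAR size * number of VFs, but each VF BAR only needs to be aligned to its own size, which is what pci_sriov_resource_alignment() reads back from the capability. Illustrative numbers: a VF BAR of 8 KiB with 16 VFs occupies 128 KiB yet only needs 8 KiB alignment, whereas the generic resource_alignment() would have demanded the full 128 KiB. The pci_resource_alignment() wrapper added to pci.h below dispatches on the resource number so the setup code keeps a single call site.
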
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index f73bcbedf37c..5ff4d25bf0e9 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -243,6 +243,7 @@ extern int pci_iov_init(struct pci_dev *dev);
243extern void pci_iov_release(struct pci_dev *dev); 243extern void pci_iov_release(struct pci_dev *dev);
244extern int pci_iov_resource_bar(struct pci_dev *dev, int resno, 244extern int pci_iov_resource_bar(struct pci_dev *dev, int resno,
245 enum pci_bar_type *type); 245 enum pci_bar_type *type);
246extern int pci_sriov_resource_alignment(struct pci_dev *dev, int resno);
246extern void pci_restore_iov_state(struct pci_dev *dev); 247extern void pci_restore_iov_state(struct pci_dev *dev);
247extern int pci_iov_bus_range(struct pci_bus *bus); 248extern int pci_iov_bus_range(struct pci_bus *bus);
248 249
@@ -298,4 +299,16 @@ static inline int pci_ats_enabled(struct pci_dev *dev)
298} 299}
299#endif /* CONFIG_PCI_IOV */ 300#endif /* CONFIG_PCI_IOV */
300 301
302static inline int pci_resource_alignment(struct pci_dev *dev,
303 struct resource *res)
304{
305#ifdef CONFIG_PCI_IOV
306 int resno = res - dev->resource;
307
308 if (resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END)
309 return pci_sriov_resource_alignment(dev, resno);
310#endif
311 return resource_alignment(res);
312}
313
301#endif /* DRIVERS_PCI_H */ 314#endif /* DRIVERS_PCI_H */
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 06b965623962..85ce23997be4 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -992,7 +992,7 @@ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX,
992 992
993static void __devinit quirk_amd_ide_mode(struct pci_dev *pdev) 993static void __devinit quirk_amd_ide_mode(struct pci_dev *pdev)
994{ 994{
995 /* set sb600/sb700/sb800 sata to ahci mode */ 995 /* set SBX00 SATA in IDE mode to AHCI mode */
996 u8 tmp; 996 u8 tmp;
997 997
998 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &tmp); 998 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &tmp);
@@ -1011,6 +1011,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk
1011DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode); 1011DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode);
1012DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode); 1012DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode);
1013DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode); 1013DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode);
1014DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_SB900_SATA_IDE, quirk_amd_ide_mode);
1015DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_SB900_SATA_IDE, quirk_amd_ide_mode);
1014 1016
1015/* 1017/*
1016 * Serverworks CSB5 IDE does not fully support native mode 1018 * Serverworks CSB5 IDE does not fully support native mode
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index b636e245445d..7c443b4583ab 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -25,7 +25,7 @@
25#include <linux/ioport.h> 25#include <linux/ioport.h>
26#include <linux/cache.h> 26#include <linux/cache.h>
27#include <linux/slab.h> 27#include <linux/slab.h>
28 28#include "pci.h"
29 29
30static void pbus_assign_resources_sorted(const struct pci_bus *bus) 30static void pbus_assign_resources_sorted(const struct pci_bus *bus)
31{ 31{
@@ -384,7 +384,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long
384 continue; 384 continue;
385 r_size = resource_size(r); 385 r_size = resource_size(r);
386 /* For bridges size != alignment */ 386 /* For bridges size != alignment */
387 align = resource_alignment(r); 387 align = pci_resource_alignment(dev, r);
388 order = __ffs(align) - 20; 388 order = __ffs(align) - 20;
389 if (order > 11) { 389 if (order > 11) {
390 dev_warn(&dev->dev, "BAR %d bad alignment %llx: " 390 dev_warn(&dev->dev, "BAR %d bad alignment %llx: "
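
In the pbus_size_mem() hunk above, order = __ffs(align) - 20 expresses a power-of-two alignment relative to 1 MiB, and values above 11 (alignment of 4 GiB or more) trip the bad-alignment warning. A quick stand-alone check of that arithmetic, with __builtin_ctzll() standing in for the kernel's __ffs():

/* Stand-alone check of the order computation used above:
 * order = __ffs(align) - 20, i.e. alignment as a power of two above 1 MiB. */
#include <stdio.h>

int main(void)
{
	unsigned long long aligns[] = { 1ULL << 20, 1ULL << 24, 1ULL << 32 };
	int i;

	for (i = 0; i < 3; i++) {
		int order = __builtin_ctzll(aligns[i]) - 20;
		printf("align %#llx -> order %d%s\n", aligns[i], order,
		       order > 11 ? "  (would trip the bad-alignment warning)" : "");
	}
	return 0;
}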
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 1898c7b47907..88cdd1a937d6 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -144,7 +144,7 @@ static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev,
144 144
145 size = resource_size(res); 145 size = resource_size(res);
146 min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM; 146 min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM;
147 align = resource_alignment(res); 147 align = pci_resource_alignment(dev, res);
148 148
149 /* First, try exact prefetching match.. */ 149 /* First, try exact prefetching match.. */
150 ret = pci_bus_alloc_resource(bus, res, size, align, min, 150 ret = pci_bus_alloc_resource(bus, res, size, align, min,
@@ -178,7 +178,7 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
178 struct pci_bus *bus; 178 struct pci_bus *bus;
179 int ret; 179 int ret;
180 180
181 align = resource_alignment(res); 181 align = pci_resource_alignment(dev, res);
182 if (!align) { 182 if (!align) {
183 dev_info(&dev->dev, "BAR %d: can't allocate resource (bogus " 183 dev_info(&dev->dev, "BAR %d: can't allocate resource (bogus "
184 "alignment) %pR flags %#lx\n", 184 "alignment) %pR flags %#lx\n",
@@ -259,7 +259,7 @@ void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head)
259 if (!(r->flags) || r->parent) 259 if (!(r->flags) || r->parent)
260 continue; 260 continue;
261 261
262 r_align = resource_alignment(r); 262 r_align = pci_resource_alignment(dev, r);
263 if (!r_align) { 263 if (!r_align) {
264 dev_warn(&dev->dev, "BAR %d: bogus alignment " 264 dev_warn(&dev->dev, "BAR %d: bogus alignment "
265 "%pR flags %#lx\n", 265 "%pR flags %#lx\n",
@@ -271,7 +271,7 @@ void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head)
271 struct resource_list *ln = list->next; 271 struct resource_list *ln = list->next;
272 272
273 if (ln) 273 if (ln)
274 align = resource_alignment(ln->res); 274 align = pci_resource_alignment(ln->dev, ln->res);
275 275
276 if (r_align > align) { 276 if (r_align > align) {
277 tmp = kmalloc(sizeof(*tmp), GFP_KERNEL); 277 tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 3f62dd50bbbe..e109da4583a8 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -669,14 +669,14 @@ static void dasd_profile_end(struct dasd_block *block,
669 * memory and 2) dasd_smalloc_request uses the static ccw memory 669 * memory and 2) dasd_smalloc_request uses the static ccw memory
670 * that gets allocated for each device. 670 * that gets allocated for each device.
671 */ 671 */
672struct dasd_ccw_req *dasd_kmalloc_request(char *magic, int cplength, 672struct dasd_ccw_req *dasd_kmalloc_request(int magic, int cplength,
673 int datasize, 673 int datasize,
674 struct dasd_device *device) 674 struct dasd_device *device)
675{ 675{
676 struct dasd_ccw_req *cqr; 676 struct dasd_ccw_req *cqr;
677 677
678 /* Sanity checks */ 678 /* Sanity checks */
679 BUG_ON( magic == NULL || datasize > PAGE_SIZE || 679 BUG_ON(datasize > PAGE_SIZE ||
680 (cplength*sizeof(struct ccw1)) > PAGE_SIZE); 680 (cplength*sizeof(struct ccw1)) > PAGE_SIZE);
681 681
682 cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC); 682 cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC);
@@ -700,14 +700,13 @@ struct dasd_ccw_req *dasd_kmalloc_request(char *magic, int cplength,
700 return ERR_PTR(-ENOMEM); 700 return ERR_PTR(-ENOMEM);
701 } 701 }
702 } 702 }
703 strncpy((char *) &cqr->magic, magic, 4); 703 cqr->magic = magic;
704 ASCEBC((char *) &cqr->magic, 4);
705 set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 704 set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
706 dasd_get_device(device); 705 dasd_get_device(device);
707 return cqr; 706 return cqr;
708} 707}
709 708
710struct dasd_ccw_req *dasd_smalloc_request(char *magic, int cplength, 709struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength,
711 int datasize, 710 int datasize,
712 struct dasd_device *device) 711 struct dasd_device *device)
713{ 712{
@@ -717,7 +716,7 @@ struct dasd_ccw_req *dasd_smalloc_request(char *magic, int cplength,
717 int size; 716 int size;
718 717
719 /* Sanity checks */ 718 /* Sanity checks */
720 BUG_ON( magic == NULL || datasize > PAGE_SIZE || 719 BUG_ON(datasize > PAGE_SIZE ||
721 (cplength*sizeof(struct ccw1)) > PAGE_SIZE); 720 (cplength*sizeof(struct ccw1)) > PAGE_SIZE);
722 721
723 size = (sizeof(struct dasd_ccw_req) + 7L) & -8L; 722 size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
@@ -744,8 +743,7 @@ struct dasd_ccw_req *dasd_smalloc_request(char *magic, int cplength,
744 cqr->data = data; 743 cqr->data = data;
745 memset(cqr->data, 0, datasize); 744 memset(cqr->data, 0, datasize);
746 } 745 }
747 strncpy((char *) &cqr->magic, magic, 4); 746 cqr->magic = magic;
748 ASCEBC((char *) &cqr->magic, 4);
749 set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 747 set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
750 dasd_get_device(device); 748 dasd_get_device(device);
751 return cqr; 749 return cqr;
@@ -899,9 +897,6 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
899 switch (rc) { 897 switch (rc) {
900 case 0: 898 case 0:
901 cqr->status = DASD_CQR_IN_IO; 899 cqr->status = DASD_CQR_IN_IO;
902 DBF_DEV_EVENT(DBF_DEBUG, device,
903 "start_IO: request %p started successful",
904 cqr);
905 break; 900 break;
906 case -EBUSY: 901 case -EBUSY:
907 DBF_DEV_EVENT(DBF_DEBUG, device, "%s", 902 DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
@@ -1699,8 +1694,11 @@ static void __dasd_process_request_queue(struct dasd_block *block)
1699 * for that. State DASD_STATE_ONLINE is normal block device 1694 * for that. State DASD_STATE_ONLINE is normal block device
1700 * operation. 1695 * operation.
1701 */ 1696 */
1702 if (basedev->state < DASD_STATE_READY) 1697 if (basedev->state < DASD_STATE_READY) {
1698 while ((req = blk_fetch_request(block->request_queue)))
1699 __blk_end_request_all(req, -EIO);
1703 return; 1700 return;
1701 }
1704 /* Now we try to fetch requests from the request queue */ 1702 /* Now we try to fetch requests from the request queue */
1705 while (!blk_queue_plugged(queue) && (req = blk_peek_request(queue))) { 1703 while (!blk_queue_plugged(queue) && (req = blk_peek_request(queue))) {
1706 if (basedev->features & DASD_FEATURE_READONLY && 1704 if (basedev->features & DASD_FEATURE_READONLY &&
@@ -2530,7 +2528,7 @@ EXPORT_SYMBOL_GPL(dasd_generic_restore_device);
2530static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device, 2528static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
2531 void *rdc_buffer, 2529 void *rdc_buffer,
2532 int rdc_buffer_size, 2530 int rdc_buffer_size,
2533 char *magic) 2531 int magic)
2534{ 2532{
2535 struct dasd_ccw_req *cqr; 2533 struct dasd_ccw_req *cqr;
2536 struct ccw1 *ccw; 2534 struct ccw1 *ccw;
@@ -2561,7 +2559,7 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
2561} 2559}
2562 2560
2563 2561
2564int dasd_generic_read_dev_chars(struct dasd_device *device, char *magic, 2562int dasd_generic_read_dev_chars(struct dasd_device *device, int magic,
2565 void *rdc_buffer, int rdc_buffer_size) 2563 void *rdc_buffer, int rdc_buffer_size)
2566{ 2564{
2567 int ret; 2565 int ret;
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index 27991b692056..e8ff7b0c961d 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -7,7 +7,7 @@
7 * 7 *
8 */ 8 */
9 9
10#define KMSG_COMPONENT "dasd" 10#define KMSG_COMPONENT "dasd-eckd"
11 11
12#include <linux/timer.h> 12#include <linux/timer.h>
13#include <linux/slab.h> 13#include <linux/slab.h>
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 5b7bbc87593b..70a008c00522 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -5,7 +5,7 @@
5 * Author(s): Stefan Weinhuber <wein@de.ibm.com> 5 * Author(s): Stefan Weinhuber <wein@de.ibm.com>
6 */ 6 */
7 7
8#define KMSG_COMPONENT "dasd" 8#define KMSG_COMPONENT "dasd-eckd"
9 9
10#include <linux/list.h> 10#include <linux/list.h>
11#include <asm/ebcdic.h> 11#include <asm/ebcdic.h>
@@ -379,8 +379,7 @@ static int read_unit_address_configuration(struct dasd_device *device,
379 int rc; 379 int rc;
380 unsigned long flags; 380 unsigned long flags;
381 381
382 cqr = dasd_kmalloc_request("ECKD", 382 cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
383 1 /* PSF */ + 1 /* RSSD */ ,
384 (sizeof(struct dasd_psf_prssd_data)), 383 (sizeof(struct dasd_psf_prssd_data)),
385 device); 384 device);
386 if (IS_ERR(cqr)) 385 if (IS_ERR(cqr))
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 644086ba2ede..4e49b4a6c880 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -8,7 +8,7 @@
8 * 8 *
9 */ 9 */
10 10
11#define KMSG_COMPONENT "dasd" 11#define KMSG_COMPONENT "dasd-diag"
12 12
13#include <linux/stddef.h> 13#include <linux/stddef.h>
14#include <linux/kernel.h> 14#include <linux/kernel.h>
@@ -523,8 +523,7 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
523 /* Build the request */ 523 /* Build the request */
524 datasize = sizeof(struct dasd_diag_req) + 524 datasize = sizeof(struct dasd_diag_req) +
525 count*sizeof(struct dasd_diag_bio); 525 count*sizeof(struct dasd_diag_bio);
526 cqr = dasd_smalloc_request(dasd_diag_discipline.name, 0, 526 cqr = dasd_smalloc_request(DASD_DIAG_MAGIC, 0, datasize, memdev);
527 datasize, memdev);
528 if (IS_ERR(cqr)) 527 if (IS_ERR(cqr))
529 return cqr; 528 return cqr;
530 529
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index c11770f5b368..a1ce573648a2 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -10,7 +10,7 @@
10 * Author.........: Nigel Hislop <hislop_nigel@emc.com> 10 * Author.........: Nigel Hislop <hislop_nigel@emc.com>
11 */ 11 */
12 12
13#define KMSG_COMPONENT "dasd" 13#define KMSG_COMPONENT "dasd-eckd"
14 14
15#include <linux/stddef.h> 15#include <linux/stddef.h>
16#include <linux/kernel.h> 16#include <linux/kernel.h>
@@ -730,7 +730,8 @@ static struct dasd_ccw_req *dasd_eckd_build_rcd_lpm(struct dasd_device *device,
730 struct dasd_ccw_req *cqr; 730 struct dasd_ccw_req *cqr;
731 struct ccw1 *ccw; 731 struct ccw1 *ccw;
732 732
733 cqr = dasd_smalloc_request("ECKD", 1 /* RCD */, ciw->count, device); 733 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */, ciw->count,
734 device);
734 735
735 if (IS_ERR(cqr)) { 736 if (IS_ERR(cqr)) {
736 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 737 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
@@ -934,8 +935,7 @@ static int dasd_eckd_read_features(struct dasd_device *device)
934 struct dasd_eckd_private *private; 935 struct dasd_eckd_private *private;
935 936
936 private = (struct dasd_eckd_private *) device->private; 937 private = (struct dasd_eckd_private *) device->private;
937 cqr = dasd_smalloc_request(dasd_eckd_discipline.name, 938 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
938 1 /* PSF */ + 1 /* RSSD */ ,
939 (sizeof(struct dasd_psf_prssd_data) + 939 (sizeof(struct dasd_psf_prssd_data) +
940 sizeof(struct dasd_rssd_features)), 940 sizeof(struct dasd_rssd_features)),
941 device); 941 device);
@@ -998,7 +998,7 @@ static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
998 struct dasd_psf_ssc_data *psf_ssc_data; 998 struct dasd_psf_ssc_data *psf_ssc_data;
999 struct ccw1 *ccw; 999 struct ccw1 *ccw;
1000 1000
1001 cqr = dasd_smalloc_request("ECKD", 1 /* PSF */ , 1001 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
1002 sizeof(struct dasd_psf_ssc_data), 1002 sizeof(struct dasd_psf_ssc_data),
1003 device); 1003 device);
1004 1004
@@ -1149,8 +1149,8 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
1149 goto out_err3; 1149 goto out_err3;
1150 1150
1151 /* Read Device Characteristics */ 1151 /* Read Device Characteristics */
1152 rc = dasd_generic_read_dev_chars(device, "ECKD", &private->rdc_data, 1152 rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
1153 64); 1153 &private->rdc_data, 64);
1154 if (rc) { 1154 if (rc) {
1155 DBF_EVENT(DBF_WARNING, 1155 DBF_EVENT(DBF_WARNING,
1156 "Read device characteristics failed, rc=%d for " 1156 "Read device characteristics failed, rc=%d for "
@@ -1217,8 +1217,7 @@ dasd_eckd_analysis_ccw(struct dasd_device *device)
1217 1217
1218 cplength = 8; 1218 cplength = 8;
1219 datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data); 1219 datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data);
1220 cqr = dasd_smalloc_request(dasd_eckd_discipline.name, 1220 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device);
1221 cplength, datasize, device);
1222 if (IS_ERR(cqr)) 1221 if (IS_ERR(cqr))
1223 return cqr; 1222 return cqr;
1224 ccw = cqr->cpaddr; 1223 ccw = cqr->cpaddr;
@@ -1499,8 +1498,7 @@ dasd_eckd_format_device(struct dasd_device * device,
1499 return ERR_PTR(-EINVAL); 1498 return ERR_PTR(-EINVAL);
1500 } 1499 }
1501 /* Allocate the format ccw request. */ 1500 /* Allocate the format ccw request. */
1502 fcp = dasd_smalloc_request(dasd_eckd_discipline.name, 1501 fcp = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device);
1503 cplength, datasize, device);
1504 if (IS_ERR(fcp)) 1502 if (IS_ERR(fcp))
1505 return fcp; 1503 return fcp;
1506 1504
@@ -1783,8 +1781,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
1783 datasize += count*sizeof(struct LO_eckd_data); 1781 datasize += count*sizeof(struct LO_eckd_data);
1784 } 1782 }
1785 /* Allocate the ccw request. */ 1783 /* Allocate the ccw request. */
1786 cqr = dasd_smalloc_request(dasd_eckd_discipline.name, 1784 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
1787 cplength, datasize, startdev); 1785 startdev);
1788 if (IS_ERR(cqr)) 1786 if (IS_ERR(cqr))
1789 return cqr; 1787 return cqr;
1790 ccw = cqr->cpaddr; 1788 ccw = cqr->cpaddr;
@@ -1948,8 +1946,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
1948 cidaw * sizeof(unsigned long long); 1946 cidaw * sizeof(unsigned long long);
1949 1947
1950 /* Allocate the ccw request. */ 1948 /* Allocate the ccw request. */
1951 cqr = dasd_smalloc_request(dasd_eckd_discipline.name, 1949 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
1952 cplength, datasize, startdev); 1950 startdev);
1953 if (IS_ERR(cqr)) 1951 if (IS_ERR(cqr))
1954 return cqr; 1952 return cqr;
1955 ccw = cqr->cpaddr; 1953 ccw = cqr->cpaddr;
@@ -2249,8 +2247,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
2249 2247
2250 /* Allocate the ccw request. */ 2248 /* Allocate the ccw request. */
2251 itcw_size = itcw_calc_size(0, ctidaw, 0); 2249 itcw_size = itcw_calc_size(0, ctidaw, 0);
2252 cqr = dasd_smalloc_request(dasd_eckd_discipline.name, 2250 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev);
2253 0, itcw_size, startdev);
2254 if (IS_ERR(cqr)) 2251 if (IS_ERR(cqr))
2255 return cqr; 2252 return cqr;
2256 2253
@@ -2557,8 +2554,7 @@ dasd_eckd_release(struct dasd_device *device)
2557 if (!capable(CAP_SYS_ADMIN)) 2554 if (!capable(CAP_SYS_ADMIN))
2558 return -EACCES; 2555 return -EACCES;
2559 2556
2560 cqr = dasd_smalloc_request(dasd_eckd_discipline.name, 2557 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
2561 1, 32, device);
2562 if (IS_ERR(cqr)) { 2558 if (IS_ERR(cqr)) {
2563 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 2559 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
2564 "Could not allocate initialization request"); 2560 "Could not allocate initialization request");
@@ -2600,8 +2596,7 @@ dasd_eckd_reserve(struct dasd_device *device)
2600 if (!capable(CAP_SYS_ADMIN)) 2596 if (!capable(CAP_SYS_ADMIN))
2601 return -EACCES; 2597 return -EACCES;
2602 2598
2603 cqr = dasd_smalloc_request(dasd_eckd_discipline.name, 2599 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
2604 1, 32, device);
2605 if (IS_ERR(cqr)) { 2600 if (IS_ERR(cqr)) {
2606 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 2601 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
2607 "Could not allocate initialization request"); 2602 "Could not allocate initialization request");
@@ -2642,8 +2637,7 @@ dasd_eckd_steal_lock(struct dasd_device *device)
2642 if (!capable(CAP_SYS_ADMIN)) 2637 if (!capable(CAP_SYS_ADMIN))
2643 return -EACCES; 2638 return -EACCES;
2644 2639
2645 cqr = dasd_smalloc_request(dasd_eckd_discipline.name, 2640 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
2646 1, 32, device);
2647 if (IS_ERR(cqr)) { 2641 if (IS_ERR(cqr)) {
2648 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 2642 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
2649 "Could not allocate initialization request"); 2643 "Could not allocate initialization request");
@@ -2681,8 +2675,7 @@ dasd_eckd_performance(struct dasd_device *device, void __user *argp)
2681 struct ccw1 *ccw; 2675 struct ccw1 *ccw;
2682 int rc; 2676 int rc;
2683 2677
2684 cqr = dasd_smalloc_request(dasd_eckd_discipline.name, 2678 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
2685 1 /* PSF */ + 1 /* RSSD */ ,
2686 (sizeof(struct dasd_psf_prssd_data) + 2679 (sizeof(struct dasd_psf_prssd_data) +
2687 sizeof(struct dasd_rssd_perf_stats_t)), 2680 sizeof(struct dasd_rssd_perf_stats_t)),
2688 device); 2681 device);
@@ -2828,7 +2821,7 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp)
2828 } 2821 }
2829 2822
2830 /* setup CCWs for PSF + RSSD */ 2823 /* setup CCWs for PSF + RSSD */
2831 cqr = dasd_smalloc_request("ECKD", 2 , 0, device); 2824 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 , 0, device);
2832 if (IS_ERR(cqr)) { 2825 if (IS_ERR(cqr)) {
2833 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 2826 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
2834 "Could not allocate initialization request"); 2827 "Could not allocate initialization request");
@@ -3254,7 +3247,7 @@ int dasd_eckd_restore_device(struct dasd_device *device)
3254 3247
3255 /* Read Device Characteristics */ 3248 /* Read Device Characteristics */
3256 memset(&private->rdc_data, 0, sizeof(private->rdc_data)); 3249 memset(&private->rdc_data, 0, sizeof(private->rdc_data));
3257 rc = dasd_generic_read_dev_chars(device, "ECKD", 3250 rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
3258 &private->rdc_data, 64); 3251 &private->rdc_data, 64);
3259 if (rc) { 3252 if (rc) {
3260 DBF_EVENT(DBF_WARNING, 3253 DBF_EVENT(DBF_WARNING,
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index c24c8c30380d..d96039eae59b 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -6,7 +6,7 @@
6 * Author(s): Stefan Weinhuber <wein@de.ibm.com> 6 * Author(s): Stefan Weinhuber <wein@de.ibm.com>
7 */ 7 */
8 8
9#define KMSG_COMPONENT "dasd" 9#define KMSG_COMPONENT "dasd-eckd"
10 10
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/fs.h> 12#include <linux/fs.h>
@@ -464,7 +464,7 @@ int dasd_eer_enable(struct dasd_device *device)
464 if (!device->discipline || strcmp(device->discipline->name, "ECKD")) 464 if (!device->discipline || strcmp(device->discipline->name, "ECKD"))
465 return -EPERM; /* FIXME: -EMEDIUMTYPE ? */ 465 return -EPERM; /* FIXME: -EMEDIUMTYPE ? */
466 466
467 cqr = dasd_kmalloc_request("ECKD", 1 /* SNSS */, 467 cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* SNSS */,
468 SNSS_DATA_SIZE, device); 468 SNSS_DATA_SIZE, device);
469 if (IS_ERR(cqr)) 469 if (IS_ERR(cqr))
470 return -ENOMEM; 470 return -ENOMEM;
diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c
index cb8f9cef7429..7656384a811d 100644
--- a/drivers/s390/block/dasd_erp.c
+++ b/drivers/s390/block/dasd_erp.c
@@ -99,8 +99,8 @@ dasd_default_erp_action(struct dasd_ccw_req *cqr)
99 cqr->lpm = LPM_ANYPATH; 99 cqr->lpm = LPM_ANYPATH;
100 cqr->status = DASD_CQR_FILLED; 100 cqr->status = DASD_CQR_FILLED;
101 } else { 101 } else {
102 dev_err(&device->cdev->dev, 102 pr_err("%s: default ERP has run out of retries and failed\n",
103 "default ERP has run out of retries and failed\n"); 103 dev_name(&device->cdev->dev));
104 cqr->status = DASD_CQR_FAILED; 104 cqr->status = DASD_CQR_FAILED;
105 cqr->stopclk = get_clock(); 105 cqr->stopclk = get_clock();
106 } 106 }
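
The KMSG_COMPONENT renames in this series ("dasd" becomes "dasd-eckd", "dasd-fba" and "dasd-diag") matter because the component string typically ends up as the prefix of pr_* messages via pr_fmt(), which is also why the dev_err() above could become a pr_err() that carries the device name explicitly. A user-space illustration of that prefixing, assuming the usual pr_fmt() arrangement rather than quoting the kernel headers:

/* Illustration only: how a KMSG_COMPONENT string typically becomes the
 * message prefix through pr_fmt().  The pr_info() below is a local
 * stand-in for the kernel macro so the example runs in user space. */
#include <stdio.h>

#define KMSG_COMPONENT "dasd-eckd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#define pr_info(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	/* Prints "dasd-eckd: default ERP has run out of retries and failed" */
	pr_info("default ERP has run out of retries and failed\n");
	return 0;
}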
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index 31849ad5e59f..f245377e8e27 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -5,7 +5,7 @@
5 * Copyright IBM Corp. 1999, 2009 5 * Copyright IBM Corp. 1999, 2009
6 */ 6 */
7 7
8#define KMSG_COMPONENT "dasd" 8#define KMSG_COMPONENT "dasd-fba"
9 9
10#include <linux/stddef.h> 10#include <linux/stddef.h>
11#include <linux/kernel.h> 11#include <linux/kernel.h>
@@ -152,8 +152,8 @@ dasd_fba_check_characteristics(struct dasd_device *device)
152 block->base = device; 152 block->base = device;
153 153
154 /* Read Device Characteristics */ 154 /* Read Device Characteristics */
155 rc = dasd_generic_read_dev_chars(device, "FBA ", &private->rdc_data, 155 rc = dasd_generic_read_dev_chars(device, DASD_FBA_MAGIC,
156 32); 156 &private->rdc_data, 32);
157 if (rc) { 157 if (rc) {
158 DBF_EVENT(DBF_WARNING, "Read device characteristics returned " 158 DBF_EVENT(DBF_WARNING, "Read device characteristics returned "
159 "error %d for device: %s", 159 "error %d for device: %s",
@@ -305,8 +305,7 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
305 datasize += (count - 1)*sizeof(struct LO_fba_data); 305 datasize += (count - 1)*sizeof(struct LO_fba_data);
306 } 306 }
307 /* Allocate the ccw request. */ 307 /* Allocate the ccw request. */
308 cqr = dasd_smalloc_request(dasd_fba_discipline.name, 308 cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev);
309 cplength, datasize, memdev);
310 if (IS_ERR(cqr)) 309 if (IS_ERR(cqr))
311 return cqr; 310 return cqr;
312 ccw = cqr->cpaddr; 311 ccw = cqr->cpaddr;
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index b699ca356ac5..5e47a1ee52b9 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -59,6 +59,11 @@
59#include <asm/dasd.h> 59#include <asm/dasd.h>
60#include <asm/idals.h> 60#include <asm/idals.h>
61 61
62/* DASD discipline magic */
63#define DASD_ECKD_MAGIC 0xC5C3D2C4
64#define DASD_DIAG_MAGIC 0xC4C9C1C7
65#define DASD_FBA_MAGIC 0xC6C2C140
66
62/* 67/*
63 * SECTION: Type definitions 68 * SECTION: Type definitions
64 */ 69 */
@@ -540,9 +545,9 @@ extern struct block_device_operations dasd_device_operations;
540extern struct kmem_cache *dasd_page_cache; 545extern struct kmem_cache *dasd_page_cache;
541 546
542struct dasd_ccw_req * 547struct dasd_ccw_req *
543dasd_kmalloc_request(char *, int, int, struct dasd_device *); 548dasd_kmalloc_request(int , int, int, struct dasd_device *);
544struct dasd_ccw_req * 549struct dasd_ccw_req *
545dasd_smalloc_request(char *, int, int, struct dasd_device *); 550dasd_smalloc_request(int , int, int, struct dasd_device *);
546void dasd_kfree_request(struct dasd_ccw_req *, struct dasd_device *); 551void dasd_kfree_request(struct dasd_ccw_req *, struct dasd_device *);
547void dasd_sfree_request(struct dasd_ccw_req *, struct dasd_device *); 552void dasd_sfree_request(struct dasd_ccw_req *, struct dasd_device *);
548 553
@@ -587,7 +592,7 @@ void dasd_generic_handle_state_change(struct dasd_device *);
587int dasd_generic_pm_freeze(struct ccw_device *); 592int dasd_generic_pm_freeze(struct ccw_device *);
588int dasd_generic_restore_device(struct ccw_device *); 593int dasd_generic_restore_device(struct ccw_device *);
589 594
590int dasd_generic_read_dev_chars(struct dasd_device *, char *, void *, int); 595int dasd_generic_read_dev_chars(struct dasd_device *, int, void *, int);
591char *dasd_get_sense(struct irb *); 596char *dasd_get_sense(struct irb *);
592 597
593/* externals in dasd_devmap.c */ 598/* externals in dasd_devmap.c */
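
The new DASD_*_MAGIC values are simply the four-character discipline names ("ECKD", "DIAG", "FBA ") packed as EBCDIC bytes, which is what the removed strncpy()/ASCEBC() sequence used to compute at run time. A small user-space check of that correspondence, assuming the standard EBCDIC code points for uppercase letters and blank (illustration only, not kernel code):

/* Pack the discipline names as EBCDIC bytes and compare against the
 * new constants.  The EBCDIC values used here are the standard code
 * points (A=0xC1..I=0xC9, J=0xD1..R=0xD9, S=0xE2..Z=0xE9, blank=0x40). */
#include <stdio.h>

static unsigned int ebcdic(char c)
{
	if (c >= 'A' && c <= 'I')
		return 0xC1 + (c - 'A');
	if (c >= 'J' && c <= 'R')
		return 0xD1 + (c - 'J');
	if (c >= 'S' && c <= 'Z')
		return 0xE2 + (c - 'S');
	return 0x40;			/* blank */
}

static unsigned int pack(const char *name)
{
	unsigned int magic = 0;
	int i;

	for (i = 0; i < 4; i++)
		magic = (magic << 8) | ebcdic(name[i]);
	return magic;
}

int main(void)
{
	printf("ECKD   -> %#010x (expect 0xc5c3d2c4)\n", pack("ECKD"));
	printf("DIAG   -> %#010x (expect 0xc4c9c1c7)\n", pack("DIAG"));
	printf("\"FBA \" -> %#010x (expect 0xc6c2c140)\n", pack("FBA "));
	return 0;
}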
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index df918ef27965..f756a1b0c57a 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -98,8 +98,8 @@ static int dasd_ioctl_quiesce(struct dasd_block *block)
98 if (!capable (CAP_SYS_ADMIN)) 98 if (!capable (CAP_SYS_ADMIN))
99 return -EACCES; 99 return -EACCES;
100 100
101 dev_info(&base->cdev->dev, "The DASD has been put in the quiesce " 101 pr_info("%s: The DASD has been put in the quiesce "
102 "state\n"); 102 "state\n", dev_name(&base->cdev->dev));
103 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags); 103 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
104 base->stopped |= DASD_STOPPED_QUIESCE; 104 base->stopped |= DASD_STOPPED_QUIESCE;
105 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags); 105 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
@@ -119,8 +119,8 @@ static int dasd_ioctl_resume(struct dasd_block *block)
119 if (!capable (CAP_SYS_ADMIN)) 119 if (!capable (CAP_SYS_ADMIN))
120 return -EACCES; 120 return -EACCES;
121 121
122 dev_info(&base->cdev->dev, "I/O operations have been resumed " 122 pr_info("%s: I/O operations have been resumed "
123 "on the DASD\n"); 123 "on the DASD\n", dev_name(&base->cdev->dev));
124 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags); 124 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
125 base->stopped &= ~DASD_STOPPED_QUIESCE; 125 base->stopped &= ~DASD_STOPPED_QUIESCE;
126 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags); 126 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
@@ -146,8 +146,8 @@ static int dasd_format(struct dasd_block *block, struct format_data_t *fdata)
146 return -EPERM; 146 return -EPERM;
147 147
148 if (base->state != DASD_STATE_BASIC) { 148 if (base->state != DASD_STATE_BASIC) {
149 dev_warn(&base->cdev->dev, 149 pr_warning("%s: The DASD cannot be formatted while it is "
150 "The DASD cannot be formatted while it is enabled\n"); 150 "enabled\n", dev_name(&base->cdev->dev));
151 return -EBUSY; 151 return -EBUSY;
152 } 152 }
153 153
@@ -175,9 +175,9 @@ static int dasd_format(struct dasd_block *block, struct format_data_t *fdata)
175 dasd_sfree_request(cqr, cqr->memdev); 175 dasd_sfree_request(cqr, cqr->memdev);
176 if (rc) { 176 if (rc) {
177 if (rc != -ERESTARTSYS) 177 if (rc != -ERESTARTSYS)
178 dev_err(&base->cdev->dev, 178 pr_err("%s: Formatting unit %d failed with "
179 "Formatting unit %d failed with " 179 "rc=%d\n", dev_name(&base->cdev->dev),
180 "rc=%d\n", fdata->start_unit, rc); 180 fdata->start_unit, rc);
181 return rc; 181 return rc;
182 } 182 }
183 fdata->start_unit++; 183 fdata->start_unit++;
@@ -204,9 +204,9 @@ dasd_ioctl_format(struct block_device *bdev, void __user *argp)
204 if (copy_from_user(&fdata, argp, sizeof(struct format_data_t))) 204 if (copy_from_user(&fdata, argp, sizeof(struct format_data_t)))
205 return -EFAULT; 205 return -EFAULT;
206 if (bdev != bdev->bd_contains) { 206 if (bdev != bdev->bd_contains) {
207 dev_warn(&block->base->cdev->dev, 207 pr_warning("%s: The specified DASD is a partition and cannot "
208 "The specified DASD is a partition and cannot be " 208 "be formatted\n",
209 "formatted\n"); 209 dev_name(&block->base->cdev->dev));
210 return -EINVAL; 210 return -EINVAL;
211 } 211 }
212 return dasd_format(block, &fdata); 212 return dasd_format(block, &fdata);
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index db442cd6621e..ee604e92a5fa 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -42,7 +42,6 @@
42#include <linux/suspend.h> 42#include <linux/suspend.h>
43#include <linux/platform_device.h> 43#include <linux/platform_device.h>
44#include <asm/uaccess.h> 44#include <asm/uaccess.h>
45#include <asm/checksum.h>
46 45
47#define XPRAM_NAME "xpram" 46#define XPRAM_NAME "xpram"
48#define XPRAM_DEVS 1 /* one partition */ 47#define XPRAM_DEVS 1 /* one partition */
@@ -51,7 +50,6 @@
51typedef struct { 50typedef struct {
52 unsigned int size; /* size of xpram segment in pages */ 51 unsigned int size; /* size of xpram segment in pages */
53 unsigned int offset; /* start page of xpram segment */ 52 unsigned int offset; /* start page of xpram segment */
54 unsigned int csum; /* partition checksum for suspend */
55} xpram_device_t; 53} xpram_device_t;
56 54
57static xpram_device_t xpram_devices[XPRAM_MAX_DEVS]; 55static xpram_device_t xpram_devices[XPRAM_MAX_DEVS];
@@ -387,58 +385,6 @@ out:
387} 385}
388 386
389/* 387/*
390 * Save checksums for all partitions.
391 */
392static int xpram_save_checksums(void)
393{
394 unsigned long mem_page;
395 int rc, i;
396
397 rc = 0;
398 mem_page = (unsigned long) __get_free_page(GFP_KERNEL);
399 if (!mem_page)
400 return -ENOMEM;
401 for (i = 0; i < xpram_devs; i++) {
402 rc = xpram_page_in(mem_page, xpram_devices[i].offset);
403 if (rc)
404 goto fail;
405 xpram_devices[i].csum = csum_partial((const void *) mem_page,
406 PAGE_SIZE, 0);
407 }
408fail:
409 free_page(mem_page);
410 return rc ? -ENXIO : 0;
411}
412
413/*
414 * Verify checksums for all partitions.
415 */
416static int xpram_validate_checksums(void)
417{
418 unsigned long mem_page;
419 unsigned int csum;
420 int rc, i;
421
422 rc = 0;
423 mem_page = (unsigned long) __get_free_page(GFP_KERNEL);
424 if (!mem_page)
425 return -ENOMEM;
426 for (i = 0; i < xpram_devs; i++) {
427 rc = xpram_page_in(mem_page, xpram_devices[i].offset);
428 if (rc)
429 goto fail;
430 csum = csum_partial((const void *) mem_page, PAGE_SIZE, 0);
431 if (xpram_devices[i].csum != csum) {
432 rc = -EINVAL;
433 goto fail;
434 }
435 }
436fail:
437 free_page(mem_page);
438 return rc ? -ENXIO : 0;
439}
440
441/*
442 * Resume failed: Print error message and call panic. 388 * Resume failed: Print error message and call panic.
443 */ 389 */
444static void xpram_resume_error(const char *message) 390static void xpram_resume_error(const char *message)
@@ -458,21 +404,10 @@ static int xpram_restore(struct device *dev)
458 xpram_resume_error("xpram disappeared"); 404 xpram_resume_error("xpram disappeared");
459 if (xpram_pages != xpram_highest_page_index() + 1) 405 if (xpram_pages != xpram_highest_page_index() + 1)
460 xpram_resume_error("Size of xpram changed"); 406 xpram_resume_error("Size of xpram changed");
461 if (xpram_validate_checksums())
462 xpram_resume_error("Data of xpram changed");
463 return 0; 407 return 0;
464} 408}
465 409
466/*
467 * Save necessary state in suspend.
468 */
469static int xpram_freeze(struct device *dev)
470{
471 return xpram_save_checksums();
472}
473
474static struct dev_pm_ops xpram_pm_ops = { 410static struct dev_pm_ops xpram_pm_ops = {
475 .freeze = xpram_freeze,
476 .restore = xpram_restore, 411 .restore = xpram_restore,
477}; 412};
478 413
diff --git a/drivers/s390/char/Kconfig b/drivers/s390/char/Kconfig
index 0769ced52dbd..4e34d3686c23 100644
--- a/drivers/s390/char/Kconfig
+++ b/drivers/s390/char/Kconfig
@@ -82,6 +82,16 @@ config SCLP_CPI
82 You should only select this option if you know what you are doing, 82 You should only select this option if you know what you are doing,
83 need this feature and intend to run your kernel in LPAR. 83 need this feature and intend to run your kernel in LPAR.
84 84
85config SCLP_ASYNC
86 tristate "Support for Call Home via Asynchronous SCLP Records"
87 depends on S390
88 help
89 This option enables the call home function, which is able to inform
90 the service element and connected organisations about a kernel panic.
91 You should only select this option if you know what you are doing,
92 want for inform other people about your kernel panics,
93 need this feature and intend to run your kernel in LPAR.
94
85config S390_TAPE 95config S390_TAPE
86 tristate "S/390 tape device support" 96 tristate "S/390 tape device support"
87 depends on CCW 97 depends on CCW
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index 7e73e39a1741..efb500ab66c0 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_SCLP_TTY) += sclp_tty.o
16obj-$(CONFIG_SCLP_CONSOLE) += sclp_con.o 16obj-$(CONFIG_SCLP_CONSOLE) += sclp_con.o
17obj-$(CONFIG_SCLP_VT220_TTY) += sclp_vt220.o 17obj-$(CONFIG_SCLP_VT220_TTY) += sclp_vt220.o
18obj-$(CONFIG_SCLP_CPI) += sclp_cpi.o 18obj-$(CONFIG_SCLP_CPI) += sclp_cpi.o
19obj-$(CONFIG_SCLP_ASYNC) += sclp_async.o
19 20
20obj-$(CONFIG_ZVM_WATCHDOG) += vmwatchdog.o 21obj-$(CONFIG_ZVM_WATCHDOG) += vmwatchdog.o
21obj-$(CONFIG_VMLOGRDR) += vmlogrdr.o 22obj-$(CONFIG_VMLOGRDR) += vmlogrdr.o
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index 3234e90bd7f9..89ece1c235aa 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -581,7 +581,7 @@ static int __init mon_init(void)
581 monreader_device->release = (void (*)(struct device *))kfree; 581 monreader_device->release = (void (*)(struct device *))kfree;
582 rc = device_register(monreader_device); 582 rc = device_register(monreader_device);
583 if (rc) { 583 if (rc) {
584 kfree(monreader_device); 584 put_device(monreader_device);
585 goto out_driver; 585 goto out_driver;
586 } 586 }
587 587
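
The monreader fix above, like the vmlogrdr and chp_new() changes further down, applies the driver-core rule that a device already passed to device_register() must be released through put_device() even when registration fails, so that the release callback (kfree here) does the freeing. A minimal kernel-style sketch of the pattern, kept as a fragment for illustration rather than a stand-alone build; example_release is hypothetical:

/* Error-handling shape only: once device_register() has been called,
 * the object is owned by its reference count, so a failed registration
 * is dropped with put_device() and never freed directly. */
static int example_register(struct device *dev)
{
	int rc;

	dev->release = example_release;	/* hypothetical; frees the object */

	rc = device_register(dev);
	if (rc) {
		put_device(dev);	/* not kfree(dev) */
		return rc;
	}
	return 0;
}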
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index 60e7cb07095b..6bb5a6bdfab5 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -27,6 +27,7 @@
27#define EVTYP_VT220MSG 0x1A 27#define EVTYP_VT220MSG 0x1A
28#define EVTYP_CONFMGMDATA 0x04 28#define EVTYP_CONFMGMDATA 0x04
29#define EVTYP_SDIAS 0x1C 29#define EVTYP_SDIAS 0x1C
30#define EVTYP_ASYNC 0x0A
30 31
31#define EVTYP_OPCMD_MASK 0x80000000 32#define EVTYP_OPCMD_MASK 0x80000000
32#define EVTYP_MSG_MASK 0x40000000 33#define EVTYP_MSG_MASK 0x40000000
@@ -38,6 +39,7 @@
38#define EVTYP_VT220MSG_MASK 0x00000040 39#define EVTYP_VT220MSG_MASK 0x00000040
39#define EVTYP_CONFMGMDATA_MASK 0x10000000 40#define EVTYP_CONFMGMDATA_MASK 0x10000000
40#define EVTYP_SDIAS_MASK 0x00000010 41#define EVTYP_SDIAS_MASK 0x00000010
42#define EVTYP_ASYNC_MASK 0x00400000
41 43
42#define GNRLMSGFLGS_DOM 0x8000 44#define GNRLMSGFLGS_DOM 0x8000
43#define GNRLMSGFLGS_SNDALRM 0x4000 45#define GNRLMSGFLGS_SNDALRM 0x4000
@@ -85,12 +87,12 @@ struct sccb_header {
85} __attribute__((packed)); 87} __attribute__((packed));
86 88
87extern u64 sclp_facilities; 89extern u64 sclp_facilities;
88
89#define SCLP_HAS_CHP_INFO (sclp_facilities & 0x8000000000000000ULL) 90#define SCLP_HAS_CHP_INFO (sclp_facilities & 0x8000000000000000ULL)
90#define SCLP_HAS_CHP_RECONFIG (sclp_facilities & 0x2000000000000000ULL) 91#define SCLP_HAS_CHP_RECONFIG (sclp_facilities & 0x2000000000000000ULL)
91#define SCLP_HAS_CPU_INFO (sclp_facilities & 0x0800000000000000ULL) 92#define SCLP_HAS_CPU_INFO (sclp_facilities & 0x0800000000000000ULL)
92#define SCLP_HAS_CPU_RECONFIG (sclp_facilities & 0x0400000000000000ULL) 93#define SCLP_HAS_CPU_RECONFIG (sclp_facilities & 0x0400000000000000ULL)
93 94
95
94struct gds_subvector { 96struct gds_subvector {
95 u8 length; 97 u8 length;
96 u8 key; 98 u8 key;
diff --git a/drivers/s390/char/sclp_async.c b/drivers/s390/char/sclp_async.c
new file mode 100644
index 000000000000..daaec185ed36
--- /dev/null
+++ b/drivers/s390/char/sclp_async.c
@@ -0,0 +1,224 @@
1/*
2 * Enable Asynchronous Notification via SCLP.
3 *
4 * Copyright IBM Corp. 2009
5 * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com>
6 *
7 */
8
9#include <linux/init.h>
10#include <linux/module.h>
11#include <linux/device.h>
12#include <linux/stat.h>
13#include <linux/string.h>
14#include <linux/ctype.h>
15#include <linux/kmod.h>
16#include <linux/err.h>
17#include <linux/errno.h>
18#include <linux/proc_fs.h>
19#include <linux/sysctl.h>
20#include <linux/utsname.h>
21#include "sclp.h"
22
23static int callhome_enabled;
24static struct sclp_req *request;
25static struct sclp_async_sccb *sccb;
26static int sclp_async_send_wait(char *message);
27static struct ctl_table_header *callhome_sysctl_header;
28static DEFINE_SPINLOCK(sclp_async_lock);
29static char nodename[64];
30#define SCLP_NORMAL_WRITE 0x00
31
32struct async_evbuf {
33 struct evbuf_header header;
34 u64 reserved;
35 u8 rflags;
36 u8 empty;
37 u8 rtype;
38 u8 otype;
39 char comp_id[12];
40 char data[3000]; /* there is still some space left */
41} __attribute__((packed));
42
43struct sclp_async_sccb {
44 struct sccb_header header;
45 struct async_evbuf evbuf;
46} __attribute__((packed));
47
48static struct sclp_register sclp_async_register = {
49 .send_mask = EVTYP_ASYNC_MASK,
50};
51
52static int call_home_on_panic(struct notifier_block *self,
53 unsigned long event, void *data)
54{
55 strncat(data, nodename, strlen(nodename));
56 sclp_async_send_wait(data);
57 return NOTIFY_DONE;
58}
59
60static struct notifier_block call_home_panic_nb = {
61 .notifier_call = call_home_on_panic,
62 .priority = INT_MAX,
63};
64
65static int proc_handler_callhome(ctl_table *ctl, int write, struct file *filp,
66 void __user *buffer, size_t *count,
67 loff_t *ppos)
68{
69 unsigned long val;
70 int len, rc;
71 char buf[2];
72
 73	if (!*count || (*ppos && !write)) {
74 *count = 0;
75 return 0;
76 }
77 if (!write) {
78 len = sprintf(buf, "%d\n", callhome_enabled);
79 buf[len] = '\0';
80 rc = copy_to_user(buffer, buf, sizeof(buf));
81 if (rc != 0)
82 return -EFAULT;
83 } else {
84 len = *count;
85 rc = copy_from_user(buf, buffer, sizeof(buf));
86 if (rc != 0)
87 return -EFAULT;
88 if (strict_strtoul(buf, 0, &val) != 0)
89 return -EINVAL;
90 if (val != 0 && val != 1)
91 return -EINVAL;
92 callhome_enabled = val;
93 }
94 *count = len;
95 *ppos += len;
96 return 0;
97}
98
99static struct ctl_table callhome_table[] = {
100 {
101 .procname = "callhome",
102 .mode = 0644,
103 .proc_handler = &proc_handler_callhome,
104 },
105 { .ctl_name = 0 }
106};
107
108static struct ctl_table kern_dir_table[] = {
109 {
110 .ctl_name = CTL_KERN,
111 .procname = "kernel",
112 .maxlen = 0,
113 .mode = 0555,
114 .child = callhome_table,
115 },
116 { .ctl_name = 0 }
117};
118
119/*
 120 * Function used to transfer asynchronous notification
 121 * records; waits for send completion
122 */
123static int sclp_async_send_wait(char *message)
124{
125 struct async_evbuf *evb;
126 int rc;
127 unsigned long flags;
128
129 if (!callhome_enabled)
130 return 0;
131 sccb->evbuf.header.type = EVTYP_ASYNC;
132 sccb->evbuf.rtype = 0xA5;
133 sccb->evbuf.otype = 0x00;
134 evb = &sccb->evbuf;
135 request->command = SCLP_CMDW_WRITE_EVENT_DATA;
136 request->sccb = sccb;
137 request->status = SCLP_REQ_FILLED;
138 strncpy(sccb->evbuf.data, message, sizeof(sccb->evbuf.data));
139 /*
140 * Retain Queue
141 * e.g. 5639CC140 500 Red Hat RHEL5 Linux for zSeries (RHEL AS)
142 */
143 strncpy(sccb->evbuf.comp_id, "000000000", sizeof(sccb->evbuf.comp_id));
144 sccb->evbuf.header.length = sizeof(sccb->evbuf);
145 sccb->header.length = sizeof(sccb->evbuf) + sizeof(sccb->header);
146 sccb->header.function_code = SCLP_NORMAL_WRITE;
147 rc = sclp_add_request(request);
148 if (rc)
149 return rc;
150 spin_lock_irqsave(&sclp_async_lock, flags);
151 while (request->status != SCLP_REQ_DONE &&
152 request->status != SCLP_REQ_FAILED) {
153 sclp_sync_wait();
154 }
155 spin_unlock_irqrestore(&sclp_async_lock, flags);
156 if (request->status != SCLP_REQ_DONE)
157 return -EIO;
158 rc = ((struct sclp_async_sccb *)
159 request->sccb)->header.response_code;
160 if (rc != 0x0020)
161 return -EIO;
162 if (evb->header.flags != 0x80)
163 return -EIO;
164 return rc;
165}
166
167static int __init sclp_async_init(void)
168{
169 int rc;
170
171 rc = sclp_register(&sclp_async_register);
172 if (rc)
173 return rc;
174 callhome_sysctl_header = register_sysctl_table(kern_dir_table);
175 if (!callhome_sysctl_header) {
176 rc = -ENOMEM;
177 goto out_sclp;
178 }
179 if (!(sclp_async_register.sclp_receive_mask & EVTYP_ASYNC_MASK)) {
180 rc = -EOPNOTSUPP;
181 goto out_sclp;
182 }
183 rc = -ENOMEM;
184 request = kzalloc(sizeof(struct sclp_req), GFP_KERNEL);
185 if (!request)
186 goto out_sys;
187 sccb = (struct sclp_async_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
188 if (!sccb)
189 goto out_mem;
190 rc = atomic_notifier_chain_register(&panic_notifier_list,
191 &call_home_panic_nb);
192 if (rc)
193 goto out_mem;
194
195 strncpy(nodename, init_utsname()->nodename, 64);
196 return 0;
197
198out_mem:
199 kfree(request);
200 free_page((unsigned long) sccb);
201out_sys:
202 unregister_sysctl_table(callhome_sysctl_header);
203out_sclp:
204 sclp_unregister(&sclp_async_register);
205 return rc;
206
207}
208module_init(sclp_async_init);
209
210static void __exit sclp_async_exit(void)
211{
212 atomic_notifier_chain_unregister(&panic_notifier_list,
213 &call_home_panic_nb);
214 unregister_sysctl_table(callhome_sysctl_header);
215 sclp_unregister(&sclp_async_register);
216 free_page((unsigned long) sccb);
217 kfree(request);
218}
219module_exit(sclp_async_exit);
220
221MODULE_AUTHOR("Copyright IBM Corp. 2009");
222MODULE_AUTHOR("Hans-Joachim Picht <hans@linux.vnet.ibm.com>");
223MODULE_LICENSE("GPL");
224MODULE_DESCRIPTION("SCLP Asynchronous Notification Records");
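
With the ctl_table entries above, the call-home switch is exposed as /proc/sys/kernel/callhome once the module is loaded, and the handler accepts 0 or 1. A small user-space sketch that enables the feature and reads the setting back; the path is derived from the tables above, not quoted from separate documentation:

/* User-space illustration: toggle and read back the call-home switch
 * exposed by sclp_async (path derived from the ctl_table entries above). */
#include <stdio.h>

#define CALLHOME_PATH "/proc/sys/kernel/callhome"

int main(void)
{
	FILE *f;
	char buf[8];

	f = fopen(CALLHOME_PATH, "w");
	if (!f) {
		perror("enable call home");
		return 1;
	}
	fputs("1\n", f);
	fclose(f);

	f = fopen(CALLHOME_PATH, "r");
	if (!f) {
		perror("read call home state");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("callhome is %s", buf);
	fclose(f);
	return 0;
}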
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index 5a519fac37b7..2fe45ff77b75 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -8,7 +8,7 @@
8 * Martin Schwidefsky <schwidefsky@de.ibm.com> 8 * Martin Schwidefsky <schwidefsky@de.ibm.com>
9 */ 9 */
10 10
11#define KMSG_COMPONENT "tape" 11#define KMSG_COMPONENT "tape_34xx"
12 12
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/init.h> 14#include <linux/init.h>
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index 418f72dd39b4..e4cc3aae9162 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -8,7 +8,7 @@
8 * Martin Schwidefsky <schwidefsky@de.ibm.com> 8 * Martin Schwidefsky <schwidefsky@de.ibm.com>
9 */ 9 */
10 10
11#define KMSG_COMPONENT "tape" 11#define KMSG_COMPONENT "tape_3590"
12 12
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/init.h> 14#include <linux/init.h>
@@ -39,8 +39,6 @@ EXPORT_SYMBOL(TAPE_DBF_AREA);
39 * - Read Alternate: implemented 39 * - Read Alternate: implemented
40 *******************************************************************/ 40 *******************************************************************/
41 41
42#define KMSG_COMPONENT "tape"
43
44static const char *tape_3590_msg[TAPE_3590_MAX_MSG] = { 42static const char *tape_3590_msg[TAPE_3590_MAX_MSG] = {
45 [0x00] = "", 43 [0x00] = "",
46 [0x10] = "Lost Sense", 44 [0x10] = "Lost Sense",
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index 47ff695255ea..4cb9e70507ab 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -302,8 +302,6 @@ tapeblock_revalidate_disk(struct gendisk *disk)
302 if (!device->blk_data.medium_changed) 302 if (!device->blk_data.medium_changed)
303 return 0; 303 return 0;
304 304
305 dev_info(&device->cdev->dev, "Determining the size of the recorded "
306 "area...\n");
307 rc = tape_mtop(device, MTFSFM, 1); 305 rc = tape_mtop(device, MTFSFM, 1);
308 if (rc) 306 if (rc)
309 return rc; 307 return rc;
@@ -312,6 +310,8 @@ tapeblock_revalidate_disk(struct gendisk *disk)
312 if (rc < 0) 310 if (rc < 0)
313 return rc; 311 return rc;
314 312
313 pr_info("%s: Determining the size of the recorded area...\n",
314 dev_name(&device->cdev->dev));
315 DBF_LH(3, "Image file ends at %d\n", rc); 315 DBF_LH(3, "Image file ends at %d\n", rc);
316 nr_of_blks = rc; 316 nr_of_blks = rc;
317 317
@@ -330,8 +330,8 @@ tapeblock_revalidate_disk(struct gendisk *disk)
330 device->bof = rc; 330 device->bof = rc;
331 nr_of_blks -= rc; 331 nr_of_blks -= rc;
332 332
333 dev_info(&device->cdev->dev, "The size of the recorded area is %i " 333 pr_info("%s: The size of the recorded area is %i blocks\n",
334 "blocks\n", nr_of_blks); 334 dev_name(&device->cdev->dev), nr_of_blks);
335 set_capacity(device->blk_data.disk, 335 set_capacity(device->blk_data.disk,
336 nr_of_blks*(TAPEBLOCK_HSEC_SIZE/512)); 336 nr_of_blks*(TAPEBLOCK_HSEC_SIZE/512));
337 337
@@ -366,8 +366,8 @@ tapeblock_open(struct block_device *bdev, fmode_t mode)
366 366
367 if (device->required_tapemarks) { 367 if (device->required_tapemarks) {
368 DBF_EVENT(2, "TBLOCK: missing tapemarks\n"); 368 DBF_EVENT(2, "TBLOCK: missing tapemarks\n");
369 dev_warn(&device->cdev->dev, "Opening the tape failed because" 369 pr_warning("%s: Opening the tape failed because of missing "
370 " of missing end-of-file marks\n"); 370 "end-of-file marks\n", dev_name(&device->cdev->dev));
371 rc = -EPERM; 371 rc = -EPERM;
372 goto put_device; 372 goto put_device;
373 } 373 }
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index 1d420d947596..5cd31e071647 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -214,13 +214,15 @@ tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate)
214 switch(newstate){ 214 switch(newstate){
215 case MS_UNLOADED: 215 case MS_UNLOADED:
216 device->tape_generic_status |= GMT_DR_OPEN(~0); 216 device->tape_generic_status |= GMT_DR_OPEN(~0);
217 dev_info(&device->cdev->dev, "The tape cartridge has been " 217 if (device->medium_state == MS_LOADED)
218 "successfully unloaded\n"); 218 pr_info("%s: The tape cartridge has been successfully "
219 "unloaded\n", dev_name(&device->cdev->dev));
219 break; 220 break;
220 case MS_LOADED: 221 case MS_LOADED:
221 device->tape_generic_status &= ~GMT_DR_OPEN(~0); 222 device->tape_generic_status &= ~GMT_DR_OPEN(~0);
222 dev_info(&device->cdev->dev, "A tape cartridge has been " 223 if (device->medium_state == MS_UNLOADED)
223 "mounted\n"); 224 pr_info("%s: A tape cartridge has been mounted\n",
225 dev_name(&device->cdev->dev));
224 break; 226 break;
225 default: 227 default:
226 // print nothing 228 // print nothing
@@ -358,11 +360,11 @@ tape_generic_online(struct tape_device *device,
358 360
359out_char: 361out_char:
360 tapechar_cleanup_device(device); 362 tapechar_cleanup_device(device);
363out_minor:
364 tape_remove_minor(device);
361out_discipline: 365out_discipline:
362 device->discipline->cleanup_device(device); 366 device->discipline->cleanup_device(device);
363 device->discipline = NULL; 367 device->discipline = NULL;
364out_minor:
365 tape_remove_minor(device);
366out: 368out:
367 module_put(discipline->owner); 369 module_put(discipline->owner);
368 return rc; 370 return rc;
@@ -654,8 +656,8 @@ tape_generic_remove(struct ccw_device *cdev)
654 */ 656 */
655 DBF_EVENT(3, "(%08x): Drive in use vanished!\n", 657 DBF_EVENT(3, "(%08x): Drive in use vanished!\n",
656 device->cdev_id); 658 device->cdev_id);
657 dev_warn(&device->cdev->dev, "A tape unit was detached" 659 pr_warning("%s: A tape unit was detached while in "
658 " while in use\n"); 660 "use\n", dev_name(&device->cdev->dev));
659 tape_state_set(device, TS_NOT_OPER); 661 tape_state_set(device, TS_NOT_OPER);
660 __tape_discard_requests(device); 662 __tape_discard_requests(device);
661 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 663 spin_unlock_irq(get_ccwdev_lock(device->cdev));
diff --git a/drivers/s390/char/tape_std.c b/drivers/s390/char/tape_std.c
index 1a9420ba518d..750354ad16e5 100644
--- a/drivers/s390/char/tape_std.c
+++ b/drivers/s390/char/tape_std.c
@@ -68,7 +68,7 @@ tape_std_assign(struct tape_device *device)
68 * to another host (actually this shouldn't happen but it does). 68 * to another host (actually this shouldn't happen but it does).
69 * So we set up a timeout for this call. 69 * So we set up a timeout for this call.
70 */ 70 */
71 init_timer(&timeout); 71 init_timer_on_stack(&timeout);
72 timeout.function = tape_std_assign_timeout; 72 timeout.function = tape_std_assign_timeout;
73 timeout.data = (unsigned long) request; 73 timeout.data = (unsigned long) request;
74 timeout.expires = jiffies + 2 * HZ; 74 timeout.expires = jiffies + 2 * HZ;
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index c20a4fe6da51..d1a142fa3eb4 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -765,8 +765,10 @@ static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
765 } else 765 } else
766 return -ENOMEM; 766 return -ENOMEM;
767 ret = device_register(dev); 767 ret = device_register(dev);
768 if (ret) 768 if (ret) {
769 put_device(dev);
769 return ret; 770 return ret;
771 }
770 772
771 ret = sysfs_create_group(&dev->kobj, &vmlogrdr_attr_group); 773 ret = sysfs_create_group(&dev->kobj, &vmlogrdr_attr_group);
772 if (ret) { 774 if (ret) {
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index 31b902e94f7b..77571b68539a 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -1026,9 +1026,15 @@ static int __init ur_init(void)
1026 1026
1027 debug_set_level(vmur_dbf, 6); 1027 debug_set_level(vmur_dbf, 6);
1028 1028
1029 vmur_class = class_create(THIS_MODULE, "vmur");
1030 if (IS_ERR(vmur_class)) {
1031 rc = PTR_ERR(vmur_class);
1032 goto fail_free_dbf;
1033 }
1034
1029 rc = ccw_driver_register(&ur_driver); 1035 rc = ccw_driver_register(&ur_driver);
1030 if (rc) 1036 if (rc)
1031 goto fail_free_dbf; 1037 goto fail_class_destroy;
1032 1038
1033 rc = alloc_chrdev_region(&dev, 0, NUM_MINORS, "vmur"); 1039 rc = alloc_chrdev_region(&dev, 0, NUM_MINORS, "vmur");
1034 if (rc) { 1040 if (rc) {
@@ -1038,18 +1044,13 @@ static int __init ur_init(void)
1038 } 1044 }
1039 ur_first_dev_maj_min = MKDEV(MAJOR(dev), 0); 1045 ur_first_dev_maj_min = MKDEV(MAJOR(dev), 0);
1040 1046
1041 vmur_class = class_create(THIS_MODULE, "vmur");
1042 if (IS_ERR(vmur_class)) {
1043 rc = PTR_ERR(vmur_class);
1044 goto fail_unregister_region;
1045 }
1046 pr_info("%s loaded.\n", ur_banner); 1047 pr_info("%s loaded.\n", ur_banner);
1047 return 0; 1048 return 0;
1048 1049
1049fail_unregister_region:
1050 unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS);
1051fail_unregister_driver: 1050fail_unregister_driver:
1052 ccw_driver_unregister(&ur_driver); 1051 ccw_driver_unregister(&ur_driver);
1052fail_class_destroy:
1053 class_destroy(vmur_class);
1053fail_free_dbf: 1054fail_free_dbf:
1054 debug_unregister(vmur_dbf); 1055 debug_unregister(vmur_dbf);
1055 return rc; 1056 return rc;
@@ -1057,9 +1058,9 @@ fail_free_dbf:
1057 1058
1058static void __exit ur_exit(void) 1059static void __exit ur_exit(void)
1059{ 1060{
1060 class_destroy(vmur_class);
1061 unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS); 1061 unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS);
1062 ccw_driver_unregister(&ur_driver); 1062 ccw_driver_unregister(&ur_driver);
1063 class_destroy(vmur_class);
1063 debug_unregister(vmur_dbf); 1064 debug_unregister(vmur_dbf);
1064 pr_info("%s unloaded.\n", ur_banner); 1065 pr_info("%s unloaded.\n", ur_banner);
1065} 1066}
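
The vmur change above creates the device class before registering the ccw driver and unwinds the error and exit paths in the reverse order of setup. A self-contained sketch of that goto-unwind idiom with placeholder step names (not vmur code):

/* Generic goto-unwind sketch: resources come up in order A, B, C and
 * are released in reverse; each label undoes exactly the steps that
 * completed before the failure.  Step names are placeholders. */
#include <stdio.h>

static int setup_a(void) { puts("A up"); return 0; }
static int setup_b(void) { puts("B up"); return 0; }
static int setup_c(void) { puts("C up"); return -1; /* simulate failure */ }
static void teardown_a(void) { puts("A down"); }
static void teardown_b(void) { puts("B down"); }

static int example_init(void)
{
	int rc;

	rc = setup_a();
	if (rc)
		goto out;
	rc = setup_b();
	if (rc)
		goto out_a;
	rc = setup_c();
	if (rc)
		goto out_b;
	return 0;

out_b:
	teardown_b();
out_a:
	teardown_a();
out:
	return rc;
}

int main(void)
{
	return example_init() ? 1 : 0;
}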
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 1bbae433fbd8..c431198bdbc4 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -275,7 +275,7 @@ struct zcore_header {
275 u32 num_pages; 275 u32 num_pages;
276 u32 pad1; 276 u32 pad1;
277 u64 tod; 277 u64 tod;
278 cpuid_t cpu_id; 278 struct cpuid cpu_id;
279 u32 arch_id; 279 u32 arch_id;
280 u32 volnr; 280 u32 volnr;
281 u32 build_arch; 281 u32 build_arch;
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile
index adb3dd301528..fa4c9662f65e 100644
--- a/drivers/s390/cio/Makefile
+++ b/drivers/s390/cio/Makefile
@@ -2,7 +2,7 @@
2# Makefile for the S/390 common i/o drivers 2# Makefile for the S/390 common i/o drivers
3# 3#
4 4
5obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o scsw.o \ 5obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o \
6 fcx.o itcw.o crw.o 6 fcx.o itcw.o crw.o
7ccw_device-objs += device.o device_fsm.o device_ops.o 7ccw_device-objs += device.o device_fsm.o device_ops.o
8ccw_device-objs += device_id.o device_pgid.o device_status.o 8ccw_device-objs += device_id.o device_pgid.o device_status.o
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 3e5f304ad88f..40002830d48a 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -417,7 +417,8 @@ int chp_new(struct chp_id chpid)
417 if (ret) { 417 if (ret) {
418 CIO_MSG_EVENT(0, "Could not register chp%x.%02x: %d\n", 418 CIO_MSG_EVENT(0, "Could not register chp%x.%02x: %d\n",
419 chpid.cssid, chpid.id, ret); 419 chpid.cssid, chpid.id, ret);
420 goto out_free; 420 put_device(&chp->dev);
421 goto out;
421 } 422 }
422 ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group); 423 ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group);
423 if (ret) { 424 if (ret) {
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 425e8f89a6c5..37aa611d4ac5 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -37,29 +37,6 @@ struct channel_path_desc {
37 37
38struct channel_path; 38struct channel_path;
39 39
40struct css_general_char {
41 u64 : 12;
42 u32 dynio : 1; /* bit 12 */
43 u32 : 28;
44 u32 aif : 1; /* bit 41 */
45 u32 : 3;
46 u32 mcss : 1; /* bit 45 */
47 u32 fcs : 1; /* bit 46 */
48 u32 : 1;
49 u32 ext_mb : 1; /* bit 48 */
50 u32 : 7;
51 u32 aif_tdd : 1; /* bit 56 */
52 u32 : 1;
53 u32 qebsm : 1; /* bit 58 */
54 u32 : 8;
55 u32 aif_osa : 1; /* bit 67 */
56 u32 : 14;
57 u32 cib : 1; /* bit 82 */
58 u32 : 5;
59 u32 fcx : 1; /* bit 88 */
60 u32 : 7;
61}__attribute__((packed));
62
63struct css_chsc_char { 40struct css_chsc_char {
64 u64 res; 41 u64 res;
65 u64 : 20; 42 u64 : 20;
@@ -72,7 +49,6 @@ struct css_chsc_char {
72 u32 : 19; 49 u32 : 19;
73}__attribute__((packed)); 50}__attribute__((packed));
74 51
75extern struct css_general_char css_general_characteristics;
76extern struct css_chsc_char css_chsc_characteristics; 52extern struct css_chsc_char css_chsc_characteristics;
77 53
78struct chsc_ssd_info { 54struct chsc_ssd_info {
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 5ec7789bd9d8..138124fcfcad 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -139,12 +139,11 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */
139 __u8 lpm, /* logical path mask */ 139 __u8 lpm, /* logical path mask */
140 __u8 key) /* storage key */ 140 __u8 key) /* storage key */
141{ 141{
142 char dbf_txt[15];
143 int ccode; 142 int ccode;
144 union orb *orb; 143 union orb *orb;
145 144
146 CIO_TRACE_EVENT(4, "stIO"); 145 CIO_TRACE_EVENT(5, "stIO");
147 CIO_TRACE_EVENT(4, dev_name(&sch->dev)); 146 CIO_TRACE_EVENT(5, dev_name(&sch->dev));
148 147
149 orb = &to_io_private(sch)->orb; 148 orb = &to_io_private(sch)->orb;
150 memset(orb, 0, sizeof(union orb)); 149 memset(orb, 0, sizeof(union orb));
@@ -169,8 +168,7 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */
169 ccode = ssch(sch->schid, orb); 168 ccode = ssch(sch->schid, orb);
170 169
171 /* process condition code */ 170 /* process condition code */
172 sprintf(dbf_txt, "ccode:%d", ccode); 171 CIO_HEX_EVENT(5, &ccode, sizeof(ccode));
173 CIO_TRACE_EVENT(4, dbf_txt);
174 172
175 switch (ccode) { 173 switch (ccode) {
176 case 0: 174 case 0:
@@ -201,16 +199,14 @@ cio_start (struct subchannel *sch, struct ccw1 *cpa, __u8 lpm)
201int 199int
202cio_resume (struct subchannel *sch) 200cio_resume (struct subchannel *sch)
203{ 201{
204 char dbf_txt[15];
205 int ccode; 202 int ccode;
206 203
207 CIO_TRACE_EVENT (4, "resIO"); 204 CIO_TRACE_EVENT(4, "resIO");
208 CIO_TRACE_EVENT(4, dev_name(&sch->dev)); 205 CIO_TRACE_EVENT(4, dev_name(&sch->dev));
209 206
210 ccode = rsch (sch->schid); 207 ccode = rsch (sch->schid);
211 208
212 sprintf (dbf_txt, "ccode:%d", ccode); 209 CIO_HEX_EVENT(4, &ccode, sizeof(ccode));
213 CIO_TRACE_EVENT (4, dbf_txt);
214 210
215 switch (ccode) { 211 switch (ccode) {
216 case 0: 212 case 0:
@@ -235,13 +231,12 @@ cio_resume (struct subchannel *sch)
235int 231int
236cio_halt(struct subchannel *sch) 232cio_halt(struct subchannel *sch)
237{ 233{
238 char dbf_txt[15];
239 int ccode; 234 int ccode;
240 235
241 if (!sch) 236 if (!sch)
242 return -ENODEV; 237 return -ENODEV;
243 238
244 CIO_TRACE_EVENT (2, "haltIO"); 239 CIO_TRACE_EVENT(2, "haltIO");
245 CIO_TRACE_EVENT(2, dev_name(&sch->dev)); 240 CIO_TRACE_EVENT(2, dev_name(&sch->dev));
246 241
247 /* 242 /*
@@ -249,8 +244,7 @@ cio_halt(struct subchannel *sch)
249 */ 244 */
250 ccode = hsch (sch->schid); 245 ccode = hsch (sch->schid);
251 246
252 sprintf (dbf_txt, "ccode:%d", ccode); 247 CIO_HEX_EVENT(2, &ccode, sizeof(ccode));
253 CIO_TRACE_EVENT (2, dbf_txt);
254 248
255 switch (ccode) { 249 switch (ccode) {
256 case 0: 250 case 0:
@@ -270,13 +264,12 @@ cio_halt(struct subchannel *sch)
270int 264int
271cio_clear(struct subchannel *sch) 265cio_clear(struct subchannel *sch)
272{ 266{
273 char dbf_txt[15];
274 int ccode; 267 int ccode;
275 268
276 if (!sch) 269 if (!sch)
277 return -ENODEV; 270 return -ENODEV;
278 271
279 CIO_TRACE_EVENT (2, "clearIO"); 272 CIO_TRACE_EVENT(2, "clearIO");
280 CIO_TRACE_EVENT(2, dev_name(&sch->dev)); 273 CIO_TRACE_EVENT(2, dev_name(&sch->dev));
281 274
282 /* 275 /*
@@ -284,8 +277,7 @@ cio_clear(struct subchannel *sch)
284 */ 277 */
285 ccode = csch (sch->schid); 278 ccode = csch (sch->schid);
286 279
287 sprintf (dbf_txt, "ccode:%d", ccode); 280 CIO_HEX_EVENT(2, &ccode, sizeof(ccode));
288 CIO_TRACE_EVENT (2, dbf_txt);
289 281
290 switch (ccode) { 282 switch (ccode) {
291 case 0: 283 case 0:
@@ -306,19 +298,17 @@ cio_clear(struct subchannel *sch)
306int 298int
307cio_cancel (struct subchannel *sch) 299cio_cancel (struct subchannel *sch)
308{ 300{
309 char dbf_txt[15];
310 int ccode; 301 int ccode;
311 302
312 if (!sch) 303 if (!sch)
313 return -ENODEV; 304 return -ENODEV;
314 305
315 CIO_TRACE_EVENT (2, "cancelIO"); 306 CIO_TRACE_EVENT(2, "cancelIO");
316 CIO_TRACE_EVENT(2, dev_name(&sch->dev)); 307 CIO_TRACE_EVENT(2, dev_name(&sch->dev));
317 308
318 ccode = xsch (sch->schid); 309 ccode = xsch (sch->schid);
319 310
320 sprintf (dbf_txt, "ccode:%d", ccode); 311 CIO_HEX_EVENT(2, &ccode, sizeof(ccode));
321 CIO_TRACE_EVENT (2, dbf_txt);
322 312
323 switch (ccode) { 313 switch (ccode) {
324 case 0: /* success */ 314 case 0: /* success */
@@ -429,11 +419,10 @@ EXPORT_SYMBOL_GPL(cio_update_schib);
429 */ 419 */
430int cio_enable_subchannel(struct subchannel *sch, u32 intparm) 420int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
431{ 421{
432 char dbf_txt[15];
433 int retry; 422 int retry;
434 int ret; 423 int ret;
435 424
436 CIO_TRACE_EVENT (2, "ensch"); 425 CIO_TRACE_EVENT(2, "ensch");
437 CIO_TRACE_EVENT(2, dev_name(&sch->dev)); 426 CIO_TRACE_EVENT(2, dev_name(&sch->dev));
438 427
439 if (sch_is_pseudo_sch(sch)) 428 if (sch_is_pseudo_sch(sch))
@@ -460,8 +449,7 @@ int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
460 } else 449 } else
461 break; 450 break;
462 } 451 }
463 sprintf (dbf_txt, "ret:%d", ret); 452 CIO_HEX_EVENT(2, &ret, sizeof(ret));
464 CIO_TRACE_EVENT (2, dbf_txt);
465 return ret; 453 return ret;
466} 454}
467EXPORT_SYMBOL_GPL(cio_enable_subchannel); 455EXPORT_SYMBOL_GPL(cio_enable_subchannel);
@@ -472,11 +460,10 @@ EXPORT_SYMBOL_GPL(cio_enable_subchannel);
472 */ 460 */
473int cio_disable_subchannel(struct subchannel *sch) 461int cio_disable_subchannel(struct subchannel *sch)
474{ 462{
475 char dbf_txt[15];
476 int retry; 463 int retry;
477 int ret; 464 int ret;
478 465
479 CIO_TRACE_EVENT (2, "dissch"); 466 CIO_TRACE_EVENT(2, "dissch");
480 CIO_TRACE_EVENT(2, dev_name(&sch->dev)); 467 CIO_TRACE_EVENT(2, dev_name(&sch->dev));
481 468
482 if (sch_is_pseudo_sch(sch)) 469 if (sch_is_pseudo_sch(sch))
@@ -495,8 +482,7 @@ int cio_disable_subchannel(struct subchannel *sch)
495 } else 482 } else
496 break; 483 break;
497 } 484 }
498 sprintf (dbf_txt, "ret:%d", ret); 485 CIO_HEX_EVENT(2, &ret, sizeof(ret));
499 CIO_TRACE_EVENT (2, dbf_txt);
500 return ret; 486 return ret;
501} 487}
502EXPORT_SYMBOL_GPL(cio_disable_subchannel); 488EXPORT_SYMBOL_GPL(cio_disable_subchannel);
@@ -578,11 +564,6 @@ int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid)
578 goto out; 564 goto out;
579 } 565 }
580 mutex_init(&sch->reg_mutex); 566 mutex_init(&sch->reg_mutex);
581 /* Set a name for the subchannel */
582 if (cio_is_console(schid))
583 sch->dev.init_name = cio_get_console_sch_name(schid);
584 else
585 dev_set_name(&sch->dev, "0.%x.%04x", schid.ssid, schid.sch_no);
586 567
587 /* 568 /*
588 * The first subchannel that is not-operational (ccode==3) 569 * The first subchannel that is not-operational (ccode==3)
@@ -686,7 +667,6 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
686 667
687#ifdef CONFIG_CCW_CONSOLE 668#ifdef CONFIG_CCW_CONSOLE
688static struct subchannel console_subchannel; 669static struct subchannel console_subchannel;
689static char console_sch_name[10] = "0.x.xxxx";
690static struct io_subchannel_private console_priv; 670static struct io_subchannel_private console_priv;
691static int console_subchannel_in_use; 671static int console_subchannel_in_use;
692 672
@@ -873,12 +853,6 @@ cio_get_console_subchannel(void)
873 return &console_subchannel; 853 return &console_subchannel;
874} 854}
875 855
876const char *cio_get_console_sch_name(struct subchannel_id schid)
877{
878 snprintf(console_sch_name, 10, "0.%x.%04x", schid.ssid, schid.sch_no);
879 return (const char *)console_sch_name;
880}
881
882#endif 856#endif
883static int 857static int
884__disable_subchannel_easy(struct subchannel_id schid, struct schib *schib) 858__disable_subchannel_easy(struct subchannel_id schid, struct schib *schib)
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 5150fba742ac..2e43558c704b 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -133,15 +133,11 @@ extern int cio_is_console(struct subchannel_id);
133extern struct subchannel *cio_get_console_subchannel(void); 133extern struct subchannel *cio_get_console_subchannel(void);
134extern spinlock_t * cio_get_console_lock(void); 134extern spinlock_t * cio_get_console_lock(void);
135extern void *cio_get_console_priv(void); 135extern void *cio_get_console_priv(void);
136extern const char *cio_get_console_sch_name(struct subchannel_id schid);
137extern const char *cio_get_console_cdev_name(struct subchannel *sch);
138#else 136#else
139#define cio_is_console(schid) 0 137#define cio_is_console(schid) 0
140#define cio_get_console_subchannel() NULL 138#define cio_get_console_subchannel() NULL
141#define cio_get_console_lock() NULL 139#define cio_get_console_lock() NULL
142#define cio_get_console_priv() NULL 140#define cio_get_console_priv() NULL
143#define cio_get_console_sch_name(schid) NULL
144#define cio_get_console_cdev_name(sch) NULL
145#endif 141#endif
146 142
147#endif 143#endif
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 85d43c6bcb66..e995123fd805 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -152,24 +152,15 @@ css_alloc_subchannel(struct subchannel_id schid)
152} 152}
153 153
154static void 154static void
155css_free_subchannel(struct subchannel *sch)
156{
157 if (sch) {
158 /* Reset intparm to zeroes. */
159 sch->config.intparm = 0;
160 cio_commit_config(sch);
161 kfree(sch->lock);
162 kfree(sch);
163 }
164}
165
166static void
167css_subchannel_release(struct device *dev) 155css_subchannel_release(struct device *dev)
168{ 156{
169 struct subchannel *sch; 157 struct subchannel *sch;
170 158
171 sch = to_subchannel(dev); 159 sch = to_subchannel(dev);
172 if (!cio_is_console(sch->schid)) { 160 if (!cio_is_console(sch->schid)) {
161 /* Reset intparm to zeroes. */
162 sch->config.intparm = 0;
163 cio_commit_config(sch);
173 kfree(sch->lock); 164 kfree(sch->lock);
174 kfree(sch); 165 kfree(sch);
175 } 166 }
@@ -180,6 +171,8 @@ static int css_sch_device_register(struct subchannel *sch)
180 int ret; 171 int ret;
181 172
182 mutex_lock(&sch->reg_mutex); 173 mutex_lock(&sch->reg_mutex);
174 dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
175 sch->schid.sch_no);
183 ret = device_register(&sch->dev); 176 ret = device_register(&sch->dev);
184 mutex_unlock(&sch->reg_mutex); 177 mutex_unlock(&sch->reg_mutex);
185 return ret; 178 return ret;
@@ -327,7 +320,7 @@ int css_probe_device(struct subchannel_id schid)
327 return PTR_ERR(sch); 320 return PTR_ERR(sch);
328 ret = css_register_subchannel(sch); 321 ret = css_register_subchannel(sch);
329 if (ret) 322 if (ret)
330 css_free_subchannel(sch); 323 put_device(&sch->dev);
331 return ret; 324 return ret;
332} 325}
333 326
@@ -644,7 +637,10 @@ __init_channel_subsystem(struct subchannel_id schid, void *data)
644 * not working) so we do it now. This is true e.g. for the 637 * not working) so we do it now. This is true e.g. for the
645 * console subchannel. 638 * console subchannel.
646 */ 639 */
647 css_register_subchannel(sch); 640 if (css_register_subchannel(sch)) {
641 if (!cio_is_console(schid))
642 put_device(&sch->dev);
643 }
648 return 0; 644 return 0;
649} 645}
650 646
@@ -661,8 +657,8 @@ css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
661 css->global_pgid.pgid_high.cpu_addr = 0; 657 css->global_pgid.pgid_high.cpu_addr = 0;
662#endif 658#endif
663 } 659 }
664 css->global_pgid.cpu_id = ((cpuid_t *) __LC_CPUID)->ident; 660 css->global_pgid.cpu_id = S390_lowcore.cpu_id.ident;
665 css->global_pgid.cpu_model = ((cpuid_t *) __LC_CPUID)->machine; 661 css->global_pgid.cpu_model = S390_lowcore.cpu_id.machine;
666 css->global_pgid.tod_high = tod_high; 662 css->global_pgid.tod_high = tod_high;
667 663
668} 664}
@@ -920,8 +916,10 @@ init_channel_subsystem (void)
920 goto out_device; 916 goto out_device;
921 } 917 }
922 ret = device_register(&css->pseudo_subchannel->dev); 918 ret = device_register(&css->pseudo_subchannel->dev);
923 if (ret) 919 if (ret) {
920 put_device(&css->pseudo_subchannel->dev);
924 goto out_file; 921 goto out_file;
922 }
925 } 923 }
926 ret = register_reboot_notifier(&css_reboot_notifier); 924 ret = register_reboot_notifier(&css_reboot_notifier);
927 if (ret) 925 if (ret)
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index d593bc76afe3..0f95405c2c5e 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -307,8 +307,11 @@ int ccw_device_is_orphan(struct ccw_device *cdev)
307 307
308static void ccw_device_unregister(struct ccw_device *cdev) 308static void ccw_device_unregister(struct ccw_device *cdev)
309{ 309{
310 if (test_and_clear_bit(1, &cdev->private->registered)) 310 if (test_and_clear_bit(1, &cdev->private->registered)) {
311 device_del(&cdev->dev); 311 device_del(&cdev->dev);
312 /* Release reference from device_initialize(). */
313 put_device(&cdev->dev);
314 }
312} 315}
313 316
314static void ccw_device_remove_orphan_cb(struct work_struct *work) 317static void ccw_device_remove_orphan_cb(struct work_struct *work)
@@ -319,7 +322,6 @@ static void ccw_device_remove_orphan_cb(struct work_struct *work)
319 priv = container_of(work, struct ccw_device_private, kick_work); 322 priv = container_of(work, struct ccw_device_private, kick_work);
320 cdev = priv->cdev; 323 cdev = priv->cdev;
321 ccw_device_unregister(cdev); 324 ccw_device_unregister(cdev);
322 put_device(&cdev->dev);
323 /* Release cdev reference for workqueue processing. */ 325 /* Release cdev reference for workqueue processing. */
324 put_device(&cdev->dev); 326 put_device(&cdev->dev);
325} 327}
@@ -333,15 +335,15 @@ ccw_device_remove_disconnected(struct ccw_device *cdev)
333 * Forced offline in disconnected state means 335 * Forced offline in disconnected state means
334 * 'throw away device'. 336 * 'throw away device'.
335 */ 337 */
336 /* Get cdev reference for workqueue processing. */
337 if (!get_device(&cdev->dev))
338 return;
339 if (ccw_device_is_orphan(cdev)) { 338 if (ccw_device_is_orphan(cdev)) {
340 /* 339 /*
341 * Deregister ccw device. 340 * Deregister ccw device.
342 * Unfortunately, we cannot do this directly from the 341 * Unfortunately, we cannot do this directly from the
343 * attribute method. 342 * attribute method.
344 */ 343 */
344 /* Get cdev reference for workqueue processing. */
345 if (!get_device(&cdev->dev))
346 return;
345 spin_lock_irqsave(cdev->ccwlock, flags); 347 spin_lock_irqsave(cdev->ccwlock, flags);
346 cdev->private->state = DEV_STATE_NOT_OPER; 348 cdev->private->state = DEV_STATE_NOT_OPER;
347 spin_unlock_irqrestore(cdev->ccwlock, flags); 349 spin_unlock_irqrestore(cdev->ccwlock, flags);
@@ -380,30 +382,34 @@ int ccw_device_set_offline(struct ccw_device *cdev)
380 } 382 }
381 cdev->online = 0; 383 cdev->online = 0;
382 spin_lock_irq(cdev->ccwlock); 384 spin_lock_irq(cdev->ccwlock);
383 ret = ccw_device_offline(cdev); 385 /* Wait until a final state or DISCONNECTED is reached */
384 if (ret == -ENODEV) { 386 while (!dev_fsm_final_state(cdev) &&
385 if (cdev->private->state != DEV_STATE_NOT_OPER) { 387 cdev->private->state != DEV_STATE_DISCONNECTED) {
386 cdev->private->state = DEV_STATE_OFFLINE;
387 dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
388 }
389 spin_unlock_irq(cdev->ccwlock); 388 spin_unlock_irq(cdev->ccwlock);
390 /* Give up reference from ccw_device_set_online(). */ 389 wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
391 put_device(&cdev->dev); 390 cdev->private->state == DEV_STATE_DISCONNECTED));
392 return ret; 391 spin_lock_irq(cdev->ccwlock);
393 } 392 }
393 ret = ccw_device_offline(cdev);
394 if (ret)
395 goto error;
394 spin_unlock_irq(cdev->ccwlock); 396 spin_unlock_irq(cdev->ccwlock);
395 if (ret == 0) { 397 wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
396 wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev)); 398 cdev->private->state == DEV_STATE_DISCONNECTED));
397 /* Give up reference from ccw_device_set_online(). */ 399 /* Give up reference from ccw_device_set_online(). */
398 put_device(&cdev->dev); 400 put_device(&cdev->dev);
399 } else { 401 return 0;
400 CIO_MSG_EVENT(0, "ccw_device_offline returned %d, " 402
401 "device 0.%x.%04x\n", 403error:
402 ret, cdev->private->dev_id.ssid, 404 CIO_MSG_EVENT(0, "ccw_device_offline returned %d, device 0.%x.%04x\n",
403 cdev->private->dev_id.devno); 405 ret, cdev->private->dev_id.ssid,
404 cdev->online = 1; 406 cdev->private->dev_id.devno);
405 } 407 cdev->private->state = DEV_STATE_OFFLINE;
406 return ret; 408 dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
409 spin_unlock_irq(cdev->ccwlock);
410 /* Give up reference from ccw_device_set_online(). */
411 put_device(&cdev->dev);
412 return -ENODEV;
407} 413}
408 414
409/** 415/**
@@ -421,6 +427,7 @@ int ccw_device_set_offline(struct ccw_device *cdev)
421int ccw_device_set_online(struct ccw_device *cdev) 427int ccw_device_set_online(struct ccw_device *cdev)
422{ 428{
423 int ret; 429 int ret;
430 int ret2;
424 431
425 if (!cdev) 432 if (!cdev)
426 return -ENODEV; 433 return -ENODEV;
@@ -444,28 +451,53 @@ int ccw_device_set_online(struct ccw_device *cdev)
444 put_device(&cdev->dev); 451 put_device(&cdev->dev);
445 return ret; 452 return ret;
446 } 453 }
447 if (cdev->private->state != DEV_STATE_ONLINE) { 454 spin_lock_irq(cdev->ccwlock);
455 /* Check if online processing was successful */
456 if ((cdev->private->state != DEV_STATE_ONLINE) &&
457 (cdev->private->state != DEV_STATE_W4SENSE)) {
458 spin_unlock_irq(cdev->ccwlock);
448 /* Give up online reference since onlining failed. */ 459 /* Give up online reference since onlining failed. */
449 put_device(&cdev->dev); 460 put_device(&cdev->dev);
450 return -ENODEV; 461 return -ENODEV;
451 } 462 }
452 if (!cdev->drv->set_online || cdev->drv->set_online(cdev) == 0) { 463 spin_unlock_irq(cdev->ccwlock);
453 cdev->online = 1; 464 if (cdev->drv->set_online)
454 return 0; 465 ret = cdev->drv->set_online(cdev);
455 } 466 if (ret)
467 goto rollback;
468 cdev->online = 1;
469 return 0;
470
471rollback:
456 spin_lock_irq(cdev->ccwlock); 472 spin_lock_irq(cdev->ccwlock);
457 ret = ccw_device_offline(cdev); 473 /* Wait until a final state or DISCONNECTED is reached */
474 while (!dev_fsm_final_state(cdev) &&
475 cdev->private->state != DEV_STATE_DISCONNECTED) {
476 spin_unlock_irq(cdev->ccwlock);
477 wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
478 cdev->private->state == DEV_STATE_DISCONNECTED));
479 spin_lock_irq(cdev->ccwlock);
480 }
481 ret2 = ccw_device_offline(cdev);
482 if (ret2)
483 goto error;
458 spin_unlock_irq(cdev->ccwlock); 484 spin_unlock_irq(cdev->ccwlock);
459 if (ret == 0) 485 wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
460 wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev)); 486 cdev->private->state == DEV_STATE_DISCONNECTED));
461 else
462 CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
463 "device 0.%x.%04x\n",
464 ret, cdev->private->dev_id.ssid,
465 cdev->private->dev_id.devno);
466 /* Give up online reference since onlining failed. */ 487 /* Give up online reference since onlining failed. */
467 put_device(&cdev->dev); 488 put_device(&cdev->dev);
468 return (ret == 0) ? -ENODEV : ret; 489 return ret;
490
491error:
492 CIO_MSG_EVENT(0, "rollback ccw_device_offline returned %d, "
493 "device 0.%x.%04x\n",
494 ret2, cdev->private->dev_id.ssid,
495 cdev->private->dev_id.devno);
496 cdev->private->state = DEV_STATE_OFFLINE;
497 spin_unlock_irq(cdev->ccwlock);
498 /* Give up online reference since onlining failed. */
499 put_device(&cdev->dev);
500 return ret;
469} 501}
470 502
471static int online_store_handle_offline(struct ccw_device *cdev) 503static int online_store_handle_offline(struct ccw_device *cdev)
@@ -637,8 +669,12 @@ static int ccw_device_register(struct ccw_device *cdev)
637 int ret; 669 int ret;
638 670
639 dev->bus = &ccw_bus_type; 671 dev->bus = &ccw_bus_type;
640 672 ret = dev_set_name(&cdev->dev, "0.%x.%04x", cdev->private->dev_id.ssid,
641 if ((ret = device_add(dev))) 673 cdev->private->dev_id.devno);
674 if (ret)
675 return ret;
676 ret = device_add(dev);
677 if (ret)
642 return ret; 678 return ret;
643 679
644 set_bit(1, &cdev->private->registered); 680 set_bit(1, &cdev->private->registered);
@@ -1024,9 +1060,6 @@ static void ccw_device_call_sch_unregister(struct work_struct *work)
1024 return; 1060 return;
1025 sch = to_subchannel(cdev->dev.parent); 1061 sch = to_subchannel(cdev->dev.parent);
1026 css_sch_device_unregister(sch); 1062 css_sch_device_unregister(sch);
1027 /* Reset intparm to zeroes. */
1028 sch->config.intparm = 0;
1029 cio_commit_config(sch);
1030 /* Release cdev reference for workqueue processing.*/ 1063 /* Release cdev reference for workqueue processing.*/
1031 put_device(&cdev->dev); 1064 put_device(&cdev->dev);
1032 /* Release subchannel reference for local processing. */ 1065 /* Release subchannel reference for local processing. */
@@ -1035,6 +1068,9 @@ static void ccw_device_call_sch_unregister(struct work_struct *work)
1035 1068
1036void ccw_device_schedule_sch_unregister(struct ccw_device *cdev) 1069void ccw_device_schedule_sch_unregister(struct ccw_device *cdev)
1037{ 1070{
1071 /* Get cdev reference for workqueue processing. */
1072 if (!get_device(&cdev->dev))
1073 return;
1038 PREPARE_WORK(&cdev->private->kick_work, 1074 PREPARE_WORK(&cdev->private->kick_work,
1039 ccw_device_call_sch_unregister); 1075 ccw_device_call_sch_unregister);
1040 queue_work(slow_path_wq, &cdev->private->kick_work); 1076 queue_work(slow_path_wq, &cdev->private->kick_work);
@@ -1055,9 +1091,6 @@ io_subchannel_recog_done(struct ccw_device *cdev)
1055 /* Device did not respond in time. */ 1091 /* Device did not respond in time. */
1056 case DEV_STATE_NOT_OPER: 1092 case DEV_STATE_NOT_OPER:
1057 cdev->private->flags.recog_done = 1; 1093 cdev->private->flags.recog_done = 1;
1058 /* Remove device found not operational. */
1059 if (!get_device(&cdev->dev))
1060 break;
1061 ccw_device_schedule_sch_unregister(cdev); 1094 ccw_device_schedule_sch_unregister(cdev);
1062 if (atomic_dec_and_test(&ccw_device_init_count)) 1095 if (atomic_dec_and_test(&ccw_device_init_count))
1063 wake_up(&ccw_device_init_wq); 1096 wake_up(&ccw_device_init_wq);
@@ -1095,13 +1128,6 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
1095 init_waitqueue_head(&priv->wait_q); 1128 init_waitqueue_head(&priv->wait_q);
1096 init_timer(&priv->timer); 1129 init_timer(&priv->timer);
1097 1130
1098 /* Set an initial name for the device. */
1099 if (cio_is_console(sch->schid))
1100 cdev->dev.init_name = cio_get_console_cdev_name(sch);
1101 else
1102 dev_set_name(&cdev->dev, "0.%x.%04x",
1103 sch->schid.ssid, sch->schib.pmcw.dev);
1104
1105 /* Increase counter of devices currently in recognition. */ 1131 /* Increase counter of devices currently in recognition. */
1106 atomic_inc(&ccw_device_init_count); 1132 atomic_inc(&ccw_device_init_count);
1107 1133
@@ -1171,8 +1197,8 @@ static void io_subchannel_irq(struct subchannel *sch)
1171 1197
1172 cdev = sch_get_cdev(sch); 1198 cdev = sch_get_cdev(sch);
1173 1199
1174 CIO_TRACE_EVENT(3, "IRQ"); 1200 CIO_TRACE_EVENT(6, "IRQ");
1175 CIO_TRACE_EVENT(3, dev_name(&sch->dev)); 1201 CIO_TRACE_EVENT(6, dev_name(&sch->dev));
1176 if (cdev) 1202 if (cdev)
1177 dev_fsm_event(cdev, DEV_EVENT_INTERRUPT); 1203 dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
1178} 1204}
@@ -1210,9 +1236,6 @@ static void io_subchannel_do_unreg(struct work_struct *work)
1210 1236
1211 sch = container_of(work, struct subchannel, work); 1237 sch = container_of(work, struct subchannel, work);
1212 css_sch_device_unregister(sch); 1238 css_sch_device_unregister(sch);
1213 /* Reset intparm to zeroes. */
1214 sch->config.intparm = 0;
1215 cio_commit_config(sch);
1216 put_device(&sch->dev); 1239 put_device(&sch->dev);
1217} 1240}
1218 1241
@@ -1334,7 +1357,6 @@ io_subchannel_remove (struct subchannel *sch)
1334 cdev->private->state = DEV_STATE_NOT_OPER; 1357 cdev->private->state = DEV_STATE_NOT_OPER;
1335 spin_unlock_irqrestore(cdev->ccwlock, flags); 1358 spin_unlock_irqrestore(cdev->ccwlock, flags);
1336 ccw_device_unregister(cdev); 1359 ccw_device_unregister(cdev);
1337 put_device(&cdev->dev);
1338 kfree(sch->private); 1360 kfree(sch->private);
1339 sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group); 1361 sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
1340 return 0; 1362 return 0;
@@ -1571,8 +1593,6 @@ static int purge_fn(struct device *dev, void *data)
1571 spin_unlock_irq(cdev->ccwlock); 1593 spin_unlock_irq(cdev->ccwlock);
1572 if (!unreg) 1594 if (!unreg)
1573 goto out; 1595 goto out;
1574 if (!get_device(&cdev->dev))
1575 goto out;
1576 CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", priv->dev_id.ssid, 1596 CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", priv->dev_id.ssid,
1577 priv->dev_id.devno); 1597 priv->dev_id.devno);
1578 ccw_device_schedule_sch_unregister(cdev); 1598 ccw_device_schedule_sch_unregister(cdev);
@@ -1688,10 +1708,6 @@ static int io_subchannel_sch_event(struct subchannel *sch, int slow)
1688 spin_unlock_irqrestore(sch->lock, flags); 1708 spin_unlock_irqrestore(sch->lock, flags);
1689 css_sch_device_unregister(sch); 1709 css_sch_device_unregister(sch);
1690 spin_lock_irqsave(sch->lock, flags); 1710 spin_lock_irqsave(sch->lock, flags);
1691
1692 /* Reset intparm to zeroes. */
1693 sch->config.intparm = 0;
1694 cio_commit_config(sch);
1695 break; 1711 break;
1696 case REPROBE: 1712 case REPROBE:
1697 ccw_device_trigger_reprobe(cdev); 1713 ccw_device_trigger_reprobe(cdev);
@@ -1712,7 +1728,6 @@ static int io_subchannel_sch_event(struct subchannel *sch, int slow)
1712 1728
1713#ifdef CONFIG_CCW_CONSOLE 1729#ifdef CONFIG_CCW_CONSOLE
1714static struct ccw_device console_cdev; 1730static struct ccw_device console_cdev;
1715static char console_cdev_name[10] = "0.x.xxxx";
1716static struct ccw_device_private console_private; 1731static struct ccw_device_private console_private;
1717static int console_cdev_in_use; 1732static int console_cdev_in_use;
1718 1733
@@ -1796,13 +1811,6 @@ int ccw_device_force_console(void)
1796 return ccw_device_pm_restore(&console_cdev.dev); 1811 return ccw_device_pm_restore(&console_cdev.dev);
1797} 1812}
1798EXPORT_SYMBOL_GPL(ccw_device_force_console); 1813EXPORT_SYMBOL_GPL(ccw_device_force_console);
1799
1800const char *cio_get_console_cdev_name(struct subchannel *sch)
1801{
1802 snprintf(console_cdev_name, 10, "0.%x.%04x",
1803 sch->schid.ssid, sch->schib.pmcw.dev);
1804 return (const char *)console_cdev_name;
1805}
1806#endif 1814#endif
1807 1815
1808/* 1816/*
@@ -2020,7 +2028,9 @@ static void __ccw_device_pm_restore(struct ccw_device *cdev)
2020 spin_unlock_irq(sch->lock); 2028 spin_unlock_irq(sch->lock);
2021 if (ret) { 2029 if (ret) {
2022 CIO_MSG_EVENT(0, "Couldn't start recognition for device " 2030 CIO_MSG_EVENT(0, "Couldn't start recognition for device "
2023 "%s (ret=%d)\n", dev_name(&cdev->dev), ret); 2031 "0.%x.%04x (ret=%d)\n",
2032 cdev->private->dev_id.ssid,
2033 cdev->private->dev_id.devno, ret);
2024 spin_lock_irq(sch->lock); 2034 spin_lock_irq(sch->lock);
2025 cdev->private->state = DEV_STATE_DISCONNECTED; 2035 cdev->private->state = DEV_STATE_DISCONNECTED;
2026 spin_unlock_irq(sch->lock); 2036 spin_unlock_irq(sch->lock);
@@ -2083,8 +2093,9 @@ static int ccw_device_pm_restore(struct device *dev)
2083 } 2093 }
2084 /* check if the device id has changed */ 2094 /* check if the device id has changed */
2085 if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) { 2095 if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
2086 CIO_MSG_EVENT(0, "resume: sch %s: failed (devno changed from " 2096 CIO_MSG_EVENT(0, "resume: sch 0.%x.%04x: failed (devno "
2087 "%04x to %04x)\n", dev_name(&sch->dev), 2097 "changed from %04x to %04x)\n",
2098 sch->schid.ssid, sch->schid.sch_no,
2088 cdev->private->dev_id.devno, 2099 cdev->private->dev_id.devno,
2089 sch->schib.pmcw.dev); 2100 sch->schib.pmcw.dev);
2090 goto out_unreg_unlock; 2101 goto out_unreg_unlock;
@@ -2117,8 +2128,9 @@ static int ccw_device_pm_restore(struct device *dev)
2117 if (cm_enabled) { 2128 if (cm_enabled) {
2118 ret = ccw_set_cmf(cdev, 1); 2129 ret = ccw_set_cmf(cdev, 1);
2119 if (ret) { 2130 if (ret) {
2120 CIO_MSG_EVENT(2, "resume: cdev %s: cmf failed " 2131 CIO_MSG_EVENT(2, "resume: cdev 0.%x.%04x: cmf failed "
2121 "(rc=%d)\n", dev_name(&cdev->dev), ret); 2132 "(rc=%d)\n", cdev->private->dev_id.ssid,
2133 cdev->private->dev_id.devno, ret);
2122 ret = 0; 2134 ret = 0;
2123 } 2135 }
2124 } 2136 }
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 3db88c52d287..e728ce447f6e 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -394,6 +394,13 @@ ccw_device_done(struct ccw_device *cdev, int state)
394 ccw_device_schedule_sch_unregister(cdev); 394 ccw_device_schedule_sch_unregister(cdev);
395 cdev->private->flags.donotify = 0; 395 cdev->private->flags.donotify = 0;
396 } 396 }
397 if (state == DEV_STATE_NOT_OPER) {
398 CIO_MSG_EVENT(0, "Device %04x gone on subchannel %04x\n",
399 cdev->private->dev_id.devno, sch->schid.sch_no);
400 if (!ccw_device_notify(cdev, CIO_GONE))
401 ccw_device_schedule_sch_unregister(cdev);
402 cdev->private->flags.donotify = 0;
403 }
397 404
398 if (cdev->private->flags.donotify) { 405 if (cdev->private->flags.donotify) {
399 cdev->private->flags.donotify = 0; 406 cdev->private->flags.donotify = 0;
@@ -731,6 +738,17 @@ static void ccw_device_generic_notoper(struct ccw_device *cdev,
731} 738}
732 739
733/* 740/*
741 * Handle path verification event in offline state.
742 */
743static void ccw_device_offline_verify(struct ccw_device *cdev,
744 enum dev_event dev_event)
745{
746 struct subchannel *sch = to_subchannel(cdev->dev.parent);
747
748 css_schedule_eval(sch->schid);
749}
750
751/*
734 * Handle path verification event. 752 * Handle path verification event.
735 */ 753 */
736static void 754static void
@@ -887,6 +905,8 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
887 } 905 }
888call_handler: 906call_handler:
889 cdev->private->state = DEV_STATE_ONLINE; 907 cdev->private->state = DEV_STATE_ONLINE;
908 /* In case sensing interfered with setting the device online */
909 wake_up(&cdev->private->wait_q);
890 /* Call the handler. */ 910 /* Call the handler. */
891 if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify) 911 if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
892 /* Start delayed path verification. */ 912 /* Start delayed path verification. */
@@ -1149,7 +1169,7 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
1149 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper, 1169 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
1150 [DEV_EVENT_INTERRUPT] = ccw_device_offline_irq, 1170 [DEV_EVENT_INTERRUPT] = ccw_device_offline_irq,
1151 [DEV_EVENT_TIMEOUT] = ccw_device_nop, 1171 [DEV_EVENT_TIMEOUT] = ccw_device_nop,
1152 [DEV_EVENT_VERIFY] = ccw_device_nop, 1172 [DEV_EVENT_VERIFY] = ccw_device_offline_verify,
1153 }, 1173 },
1154 [DEV_STATE_VERIFY] = { 1174 [DEV_STATE_VERIFY] = {
1155 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper, 1175 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index b1241f8fae88..ff7748a9199d 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * linux/drivers/s390/cio/qdio.h 2 * linux/drivers/s390/cio/qdio.h
3 * 3 *
4 * Copyright 2000,2008 IBM Corp. 4 * Copyright 2000,2009 IBM Corp.
5 * Author(s): Utz Bacher <utz.bacher@de.ibm.com> 5 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
6 * Jan Glauber <jang@linux.vnet.ibm.com> 6 * Jan Glauber <jang@linux.vnet.ibm.com>
7 */ 7 */
@@ -246,6 +246,7 @@ struct qdio_q {
246 atomic_t nr_buf_used; 246 atomic_t nr_buf_used;
247 247
248 struct qdio_irq *irq_ptr; 248 struct qdio_irq *irq_ptr;
249 struct dentry *debugfs_q;
249 struct tasklet_struct tasklet; 250 struct tasklet_struct tasklet;
250 251
251 /* error condition during a data transfer */ 252 /* error condition during a data transfer */
@@ -267,6 +268,7 @@ struct qdio_irq {
267 struct qib qib; 268 struct qib qib;
268 u32 *dsci; /* address of device state change indicator */ 269 u32 *dsci; /* address of device state change indicator */
269 struct ccw_device *cdev; 270 struct ccw_device *cdev;
271 struct dentry *debugfs_dev;
270 272
271 unsigned long int_parm; 273 unsigned long int_parm;
272 struct subchannel_id schid; 274 struct subchannel_id schid;
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index b8626d4df116..1b78f639ead3 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -1,14 +1,12 @@
1/* 1/*
2 * drivers/s390/cio/qdio_debug.c 2 * drivers/s390/cio/qdio_debug.c
3 * 3 *
4 * Copyright IBM Corp. 2008 4 * Copyright IBM Corp. 2008,2009
5 * 5 *
6 * Author: Jan Glauber (jang@linux.vnet.ibm.com) 6 * Author: Jan Glauber (jang@linux.vnet.ibm.com)
7 */ 7 */
8#include <linux/proc_fs.h>
9#include <linux/seq_file.h> 8#include <linux/seq_file.h>
10#include <linux/debugfs.h> 9#include <linux/debugfs.h>
11#include <asm/qdio.h>
12#include <asm/debug.h> 10#include <asm/debug.h>
13#include "qdio_debug.h" 11#include "qdio_debug.h"
14#include "qdio.h" 12#include "qdio.h"
@@ -17,10 +15,7 @@ debug_info_t *qdio_dbf_setup;
17debug_info_t *qdio_dbf_error; 15debug_info_t *qdio_dbf_error;
18 16
19static struct dentry *debugfs_root; 17static struct dentry *debugfs_root;
20#define MAX_DEBUGFS_QUEUES 32 18#define QDIO_DEBUGFS_NAME_LEN 10
21static struct dentry *debugfs_queues[MAX_DEBUGFS_QUEUES] = { NULL };
22static DEFINE_MUTEX(debugfs_mutex);
23#define QDIO_DEBUGFS_NAME_LEN 40
24 19
25void qdio_allocate_dbf(struct qdio_initialize *init_data, 20void qdio_allocate_dbf(struct qdio_initialize *init_data,
26 struct qdio_irq *irq_ptr) 21 struct qdio_irq *irq_ptr)
@@ -130,20 +125,6 @@ static int qstat_seq_open(struct inode *inode, struct file *filp)
130 filp->f_path.dentry->d_inode->i_private); 125 filp->f_path.dentry->d_inode->i_private);
131} 126}
132 127
133static void remove_debugfs_entry(struct qdio_q *q)
134{
135 int i;
136
137 for (i = 0; i < MAX_DEBUGFS_QUEUES; i++) {
138 if (!debugfs_queues[i])
139 continue;
140 if (debugfs_queues[i]->d_inode->i_private == q) {
141 debugfs_remove(debugfs_queues[i]);
142 debugfs_queues[i] = NULL;
143 }
144 }
145}
146
147static struct file_operations debugfs_fops = { 128static struct file_operations debugfs_fops = {
148 .owner = THIS_MODULE, 129 .owner = THIS_MODULE,
149 .open = qstat_seq_open, 130 .open = qstat_seq_open,
@@ -155,22 +136,15 @@ static struct file_operations debugfs_fops = {
155 136
156static void setup_debugfs_entry(struct qdio_q *q, struct ccw_device *cdev) 137static void setup_debugfs_entry(struct qdio_q *q, struct ccw_device *cdev)
157{ 138{
158 int i = 0;
159 char name[QDIO_DEBUGFS_NAME_LEN]; 139 char name[QDIO_DEBUGFS_NAME_LEN];
160 140
161 while (debugfs_queues[i] != NULL) { 141 snprintf(name, QDIO_DEBUGFS_NAME_LEN, "%s_%d",
162 i++;
163 if (i >= MAX_DEBUGFS_QUEUES)
164 return;
165 }
166 snprintf(name, QDIO_DEBUGFS_NAME_LEN, "%s_%s_%d",
167 dev_name(&cdev->dev),
168 q->is_input_q ? "input" : "output", 142 q->is_input_q ? "input" : "output",
169 q->nr); 143 q->nr);
170 debugfs_queues[i] = debugfs_create_file(name, S_IFREG | S_IRUGO | S_IWUSR, 144 q->debugfs_q = debugfs_create_file(name, S_IFREG | S_IRUGO | S_IWUSR,
171 debugfs_root, q, &debugfs_fops); 145 q->irq_ptr->debugfs_dev, q, &debugfs_fops);
172 if (IS_ERR(debugfs_queues[i])) 146 if (IS_ERR(q->debugfs_q))
173 debugfs_queues[i] = NULL; 147 q->debugfs_q = NULL;
174} 148}
175 149
176void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev) 150void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev)
@@ -178,12 +152,14 @@ void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev)
178 struct qdio_q *q; 152 struct qdio_q *q;
179 int i; 153 int i;
180 154
181 mutex_lock(&debugfs_mutex); 155 irq_ptr->debugfs_dev = debugfs_create_dir(dev_name(&cdev->dev),
156 debugfs_root);
157 if (IS_ERR(irq_ptr->debugfs_dev))
158 irq_ptr->debugfs_dev = NULL;
182 for_each_input_queue(irq_ptr, q, i) 159 for_each_input_queue(irq_ptr, q, i)
183 setup_debugfs_entry(q, cdev); 160 setup_debugfs_entry(q, cdev);
184 for_each_output_queue(irq_ptr, q, i) 161 for_each_output_queue(irq_ptr, q, i)
185 setup_debugfs_entry(q, cdev); 162 setup_debugfs_entry(q, cdev);
186 mutex_unlock(&debugfs_mutex);
187} 163}
188 164
189void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev) 165void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev)
@@ -191,17 +167,16 @@ void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cd
191 struct qdio_q *q; 167 struct qdio_q *q;
192 int i; 168 int i;
193 169
194 mutex_lock(&debugfs_mutex);
195 for_each_input_queue(irq_ptr, q, i) 170 for_each_input_queue(irq_ptr, q, i)
196 remove_debugfs_entry(q); 171 debugfs_remove(q->debugfs_q);
197 for_each_output_queue(irq_ptr, q, i) 172 for_each_output_queue(irq_ptr, q, i)
198 remove_debugfs_entry(q); 173 debugfs_remove(q->debugfs_q);
199 mutex_unlock(&debugfs_mutex); 174 debugfs_remove(irq_ptr->debugfs_dev);
200} 175}
201 176
202int __init qdio_debug_init(void) 177int __init qdio_debug_init(void)
203{ 178{
204 debugfs_root = debugfs_create_dir("qdio_queues", NULL); 179 debugfs_root = debugfs_create_dir("qdio", NULL);
205 180
206 qdio_dbf_setup = debug_register("qdio_setup", 16, 1, 16); 181 qdio_dbf_setup = debug_register("qdio_setup", 16, 1, 16);
207 debug_register_view(qdio_dbf_setup, &debug_hex_ascii_view); 182 debug_register_view(qdio_dbf_setup, &debug_hex_ascii_view);
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 0038750ad945..9aef402a5f1b 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -798,8 +798,10 @@ static void __tiqdio_inbound_processing(struct qdio_q *q)
798 798
799 if (!qdio_inbound_q_done(q)) { 799 if (!qdio_inbound_q_done(q)) {
800 qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop); 800 qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop);
801 if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) 801 if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
802 tasklet_schedule(&q->tasklet); 802 tasklet_schedule(&q->tasklet);
803 return;
804 }
803 } 805 }
804 806
805 qdio_stop_polling(q); 807 qdio_stop_polling(q);
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index ed3dcdea7fe1..090b32a339c6 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -648,7 +648,9 @@ static int ap_bus_suspend(struct device *dev, pm_message_t state)
648 /* Poll on the device until all requests are finished. */ 648 /* Poll on the device until all requests are finished. */
649 do { 649 do {
650 flags = 0; 650 flags = 0;
651 spin_lock_bh(&ap_dev->lock);
651 __ap_poll_device(ap_dev, &flags); 652 __ap_poll_device(ap_dev, &flags);
653 spin_unlock_bh(&ap_dev->lock);
652 } while ((flags & 1) || (flags & 2)); 654 } while ((flags & 1) || (flags & 2));
653 655
654 ap_device_remove(dev); 656 ap_device_remove(dev);
@@ -1109,12 +1111,15 @@ static void ap_scan_bus(struct work_struct *unused)
1109 1111
1110 ap_dev->device.bus = &ap_bus_type; 1112 ap_dev->device.bus = &ap_bus_type;
1111 ap_dev->device.parent = ap_root_device; 1113 ap_dev->device.parent = ap_root_device;
1112 dev_set_name(&ap_dev->device, "card%02x", 1114 if (dev_set_name(&ap_dev->device, "card%02x",
1113 AP_QID_DEVICE(ap_dev->qid)); 1115 AP_QID_DEVICE(ap_dev->qid))) {
1116 kfree(ap_dev);
1117 continue;
1118 }
1114 ap_dev->device.release = ap_device_release; 1119 ap_dev->device.release = ap_device_release;
1115 rc = device_register(&ap_dev->device); 1120 rc = device_register(&ap_dev->device);
1116 if (rc) { 1121 if (rc) {
1117 kfree(ap_dev); 1122 put_device(&ap_dev->device);
1118 continue; 1123 continue;
1119 } 1124 }
1120 /* Add device attributes. */ 1125 /* Add device attributes. */
@@ -1407,14 +1412,12 @@ static void ap_reset(struct ap_device *ap_dev)
1407 1412
1408static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags) 1413static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags)
1409{ 1414{
1410 spin_lock(&ap_dev->lock);
1411 if (!ap_dev->unregistered) { 1415 if (!ap_dev->unregistered) {
1412 if (ap_poll_queue(ap_dev, flags)) 1416 if (ap_poll_queue(ap_dev, flags))
1413 ap_dev->unregistered = 1; 1417 ap_dev->unregistered = 1;
1414 if (ap_dev->reset == AP_RESET_DO) 1418 if (ap_dev->reset == AP_RESET_DO)
1415 ap_reset(ap_dev); 1419 ap_reset(ap_dev);
1416 } 1420 }
1417 spin_unlock(&ap_dev->lock);
1418 return 0; 1421 return 0;
1419} 1422}
1420 1423
@@ -1441,7 +1444,9 @@ static void ap_poll_all(unsigned long dummy)
1441 flags = 0; 1444 flags = 0;
1442 spin_lock(&ap_device_list_lock); 1445 spin_lock(&ap_device_list_lock);
1443 list_for_each_entry(ap_dev, &ap_device_list, list) { 1446 list_for_each_entry(ap_dev, &ap_device_list, list) {
1447 spin_lock(&ap_dev->lock);
1444 __ap_poll_device(ap_dev, &flags); 1448 __ap_poll_device(ap_dev, &flags);
1449 spin_unlock(&ap_dev->lock);
1445 } 1450 }
1446 spin_unlock(&ap_device_list_lock); 1451 spin_unlock(&ap_device_list_lock);
1447 } while (flags & 1); 1452 } while (flags & 1);
@@ -1487,7 +1492,9 @@ static int ap_poll_thread(void *data)
1487 flags = 0; 1492 flags = 0;
1488 spin_lock_bh(&ap_device_list_lock); 1493 spin_lock_bh(&ap_device_list_lock);
1489 list_for_each_entry(ap_dev, &ap_device_list, list) { 1494 list_for_each_entry(ap_dev, &ap_device_list, list) {
1495 spin_lock(&ap_dev->lock);
1490 __ap_poll_device(ap_dev, &flags); 1496 __ap_poll_device(ap_dev, &flags);
1497 spin_unlock(&ap_dev->lock);
1491 } 1498 }
1492 spin_unlock_bh(&ap_device_list_lock); 1499 spin_unlock_bh(&ap_device_list_lock);
1493 } 1500 }
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index e38e5d306faf..2930fc763ac5 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -403,10 +403,14 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
403 return len; 403 return len;
404} 404}
405 405
406void __init s390_virtio_console_init(void) 406static int __init s390_virtio_console_init(void)
407{ 407{
408 virtio_cons_early_init(early_put_chars); 408 if (!MACHINE_IS_KVM)
409 return -ENODEV;
410 return virtio_cons_early_init(early_put_chars);
409} 411}
412console_initcall(s390_virtio_console_init);
413
410 414
411/* 415/*
412 * We do this after core stuff, but before the drivers. 416 * We do this after core stuff, but before the drivers.
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 8c36eafcfbfe..87dff11061b0 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -1839,9 +1839,10 @@ static int netiucv_register_device(struct net_device *ndev)
1839 return -ENOMEM; 1839 return -ENOMEM;
1840 1840
1841 ret = device_register(dev); 1841 ret = device_register(dev);
1842 1842 if (ret) {
1843 if (ret) 1843 put_device(dev);
1844 return ret; 1844 return ret;
1845 }
1845 ret = netiucv_add_files(dev); 1846 ret = netiucv_add_files(dev);
1846 if (ret) 1847 if (ret)
1847 goto out_unreg; 1848 goto out_unreg;
@@ -2226,8 +2227,10 @@ static int __init netiucv_init(void)
2226 netiucv_dev->release = (void (*)(struct device *))kfree; 2227 netiucv_dev->release = (void (*)(struct device *))kfree;
2227 netiucv_dev->driver = &netiucv_driver; 2228 netiucv_dev->driver = &netiucv_driver;
2228 rc = device_register(netiucv_dev); 2229 rc = device_register(netiucv_dev);
2229 if (rc) 2230 if (rc) {
2231 put_device(netiucv_dev);
2230 goto out_driver; 2232 goto out_driver;
2233 }
2231 netiucv_banner(); 2234 netiucv_banner();
2232 return rc; 2235 return rc;
2233 2236
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c
index e76a320d373b..102000d1af6f 100644
--- a/drivers/s390/net/smsgiucv.c
+++ b/drivers/s390/net/smsgiucv.c
@@ -219,13 +219,13 @@ static int __init smsg_init(void)
219 smsg_dev->driver = &smsg_driver; 219 smsg_dev->driver = &smsg_driver;
220 rc = device_register(smsg_dev); 220 rc = device_register(smsg_dev);
221 if (rc) 221 if (rc)
222 goto out_free_dev; 222 goto out_put;
223 223
224 cpcmd("SET SMSG IUCV", NULL, 0, NULL); 224 cpcmd("SET SMSG IUCV", NULL, 0, NULL);
225 return 0; 225 return 0;
226 226
227out_free_dev: 227out_put:
228 kfree(smsg_dev); 228 put_device(smsg_dev);
229out_free_path: 229out_free_path:
230 iucv_path_free(smsg_path); 230 iucv_path_free(smsg_path);
231 smsg_path = NULL; 231 smsg_path = NULL;
diff --git a/drivers/scsi/cxgb3i/cxgb3i_init.c b/drivers/scsi/cxgb3i/cxgb3i_init.c
index 042d9bce9914..d0ab23a58355 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_init.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_init.c
@@ -26,7 +26,7 @@ MODULE_VERSION(DRV_MODULE_VERSION);
26 26
27static void open_s3_dev(struct t3cdev *); 27static void open_s3_dev(struct t3cdev *);
28static void close_s3_dev(struct t3cdev *); 28static void close_s3_dev(struct t3cdev *);
29static void s3_err_handler(struct t3cdev *tdev, u32 status, u32 error); 29static void s3_event_handler(struct t3cdev *tdev, u32 event, u32 port);
30 30
31static cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS]; 31static cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS];
32static struct cxgb3_client t3c_client = { 32static struct cxgb3_client t3c_client = {
@@ -34,7 +34,7 @@ static struct cxgb3_client t3c_client = {
34 .handlers = cxgb3i_cpl_handlers, 34 .handlers = cxgb3i_cpl_handlers,
35 .add = open_s3_dev, 35 .add = open_s3_dev,
36 .remove = close_s3_dev, 36 .remove = close_s3_dev,
37 .err_handler = s3_err_handler, 37 .event_handler = s3_event_handler,
38}; 38};
39 39
40/** 40/**
@@ -66,16 +66,16 @@ static void close_s3_dev(struct t3cdev *t3dev)
66 cxgb3i_ddp_cleanup(t3dev); 66 cxgb3i_ddp_cleanup(t3dev);
67} 67}
68 68
69static void s3_err_handler(struct t3cdev *tdev, u32 status, u32 error) 69static void s3_event_handler(struct t3cdev *tdev, u32 event, u32 port)
70{ 70{
71 struct cxgb3i_adapter *snic = cxgb3i_adapter_find_by_tdev(tdev); 71 struct cxgb3i_adapter *snic = cxgb3i_adapter_find_by_tdev(tdev);
72 72
73 cxgb3i_log_info("snic 0x%p, tdev 0x%p, status 0x%x, err 0x%x.\n", 73 cxgb3i_log_info("snic 0x%p, tdev 0x%p, event 0x%x, port 0x%x.\n",
74 snic, tdev, status, error); 74 snic, tdev, event, port);
75 if (!snic) 75 if (!snic)
76 return; 76 return;
77 77
78 switch (status) { 78 switch (event) {
79 case OFFLOAD_STATUS_DOWN: 79 case OFFLOAD_STATUS_DOWN:
80 snic->flags |= CXGB3I_ADAPTER_FLAG_RESET; 80 snic->flags |= CXGB3I_ADAPTER_FLAG_RESET;
81 break; 81 break;
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index 9d7c99394ec6..640f65c6ef84 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -1752,12 +1752,12 @@ static int comedi_open(struct inode *inode, struct file *file)
1752 mutex_lock(&dev->mutex); 1752 mutex_lock(&dev->mutex);
1753 if (dev->attached) 1753 if (dev->attached)
1754 goto ok; 1754 goto ok;
1755 if (!capable(CAP_SYS_MODULE) && dev->in_request_module) { 1755 if (!capable(CAP_NET_ADMIN) && dev->in_request_module) {
1756 DPRINTK("in request module\n"); 1756 DPRINTK("in request module\n");
1757 mutex_unlock(&dev->mutex); 1757 mutex_unlock(&dev->mutex);
1758 return -ENODEV; 1758 return -ENODEV;
1759 } 1759 }
1760 if (capable(CAP_SYS_MODULE) && dev->in_request_module) 1760 if (capable(CAP_NET_ADMIN) && dev->in_request_module)
1761 goto ok; 1761 goto ok;
1762 1762
1763 dev->in_request_module = 1; 1763 dev->in_request_module = 1;
@@ -1770,8 +1770,8 @@ static int comedi_open(struct inode *inode, struct file *file)
1770 1770
1771 dev->in_request_module = 0; 1771 dev->in_request_module = 0;
1772 1772
1773 if (!dev->attached && !capable(CAP_SYS_MODULE)) { 1773 if (!dev->attached && !capable(CAP_NET_ADMIN)) {
1774 DPRINTK("not attached and not CAP_SYS_MODULE\n"); 1774 DPRINTK("not attached and not CAP_NET_ADMIN\n");
1775 mutex_unlock(&dev->mutex); 1775 mutex_unlock(&dev->mutex);
1776 return -ENODEV; 1776 return -ENODEV;
1777 } 1777 }
diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
index 7b605795b770..e63c9bea6c54 100644
--- a/drivers/staging/pohmelfs/inode.c
+++ b/drivers/staging/pohmelfs/inode.c
@@ -1950,14 +1950,7 @@ static int pohmelfs_get_sb(struct file_system_type *fs_type,
1950 */ 1950 */
1951static void pohmelfs_kill_super(struct super_block *sb) 1951static void pohmelfs_kill_super(struct super_block *sb)
1952{ 1952{
1953 struct writeback_control wbc = { 1953 sync_inodes_sb(sb);
1954 .sync_mode = WB_SYNC_ALL,
1955 .range_start = 0,
1956 .range_end = LLONG_MAX,
1957 .nr_to_write = LONG_MAX,
1958 };
1959 generic_sync_sb_inodes(sb, &wbc);
1960
1961 kill_anon_super(sb); 1954 kill_anon_super(sb);
1962} 1955}
1963 1956
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index b7c1603cd4bd..7c1e65d54872 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -501,22 +501,22 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
501 } 501 }
502 } 502 }
503 503
504 /* 504 if (last_bss > elf_bss) {
505 * Now fill out the bss section. First pad the last page up 505 /*
506 * to the page boundary, and then perform a mmap to make sure 506 * Now fill out the bss section. First pad the last page up
507 * that there are zero-mapped pages up to and including the 507 * to the page boundary, and then perform a mmap to make sure
508 * last bss page. 508 * that there are zero-mapped pages up to and including the
509 */ 509 * last bss page.
510 if (padzero(elf_bss)) { 510 */
511 error = -EFAULT; 511 if (padzero(elf_bss)) {
512 goto out_close; 512 error = -EFAULT;
513 } 513 goto out_close;
514 }
514 515
515 /* What we have mapped so far */ 516 /* What we have mapped so far */
516 elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1); 517 elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
517 518
518 /* Map the last of the bss segment */ 519 /* Map the last of the bss segment */
519 if (last_bss > elf_bss) {
520 down_write(&current->mm->mmap_sem); 520 down_write(&current->mm->mmap_sem);
521 error = do_brk(elf_bss, last_bss - elf_bss); 521 error = do_brk(elf_bss, last_bss - elf_bss);
522 up_write(&current->mm->mmap_sem); 522 up_write(&current->mm->mmap_sem);
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index e83be2e4602c..15831d5c7367 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1352,6 +1352,7 @@ static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
1352{ 1352{
1353 int err; 1353 int err;
1354 1354
1355 bdi->name = "btrfs";
1355 bdi->capabilities = BDI_CAP_MAP_COPY; 1356 bdi->capabilities = BDI_CAP_MAP_COPY;
1356 err = bdi_init(bdi); 1357 err = bdi_init(bdi);
1357 if (err) 1358 if (err)
diff --git a/fs/buffer.c b/fs/buffer.c
index 28f320fac4d4..90a98865b0cc 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -281,7 +281,7 @@ static void free_more_memory(void)
281 struct zone *zone; 281 struct zone *zone;
282 int nid; 282 int nid;
283 283
284 wakeup_pdflush(1024); 284 wakeup_flusher_threads(1024);
285 yield(); 285 yield();
286 286
287 for_each_online_node(nid) { 287 for_each_online_node(nid) {
diff --git a/fs/char_dev.c b/fs/char_dev.c
index a173551e19d7..3cbc57f932d2 100644
--- a/fs/char_dev.c
+++ b/fs/char_dev.c
@@ -31,6 +31,7 @@
31 * - no readahead or I/O queue unplugging required 31 * - no readahead or I/O queue unplugging required
32 */ 32 */
33struct backing_dev_info directly_mappable_cdev_bdi = { 33struct backing_dev_info directly_mappable_cdev_bdi = {
34 .name = "char",
34 .capabilities = ( 35 .capabilities = (
35#ifdef CONFIG_MMU 36#ifdef CONFIG_MMU
36 /* permit private copies of the data to be taken */ 37 /* permit private copies of the data to be taken */
@@ -237,8 +238,10 @@ int alloc_chrdev_region(dev_t *dev, unsigned baseminor, unsigned count,
237} 238}
238 239
239/** 240/**
240 * register_chrdev() - Register a major number for character devices. 241 * __register_chrdev() - create and register a cdev occupying a range of minors
241 * @major: major device number or 0 for dynamic allocation 242 * @major: major device number or 0 for dynamic allocation
243 * @baseminor: first of the requested range of minor numbers
244 * @count: the number of minor numbers required
242 * @name: name of this range of devices 245 * @name: name of this range of devices
 243 * @fops: file operations associated with these devices 246 * @fops: file operations associated with these devices
244 * 247 *
@@ -254,19 +257,17 @@ int alloc_chrdev_region(dev_t *dev, unsigned baseminor, unsigned count,
254 * /dev. It only helps to keep track of the different owners of devices. If 257 * /dev. It only helps to keep track of the different owners of devices. If
255 * your module name has only one type of devices it's ok to use e.g. the name 258 * your module name has only one type of devices it's ok to use e.g. the name
256 * of the module here. 259 * of the module here.
257 *
258 * This function registers a range of 256 minor numbers. The first minor number
259 * is 0.
260 */ 260 */
261int register_chrdev(unsigned int major, const char *name, 261int __register_chrdev(unsigned int major, unsigned int baseminor,
262 const struct file_operations *fops) 262 unsigned int count, const char *name,
263 const struct file_operations *fops)
263{ 264{
264 struct char_device_struct *cd; 265 struct char_device_struct *cd;
265 struct cdev *cdev; 266 struct cdev *cdev;
266 char *s; 267 char *s;
267 int err = -ENOMEM; 268 int err = -ENOMEM;
268 269
269 cd = __register_chrdev_region(major, 0, 256, name); 270 cd = __register_chrdev_region(major, baseminor, count, name);
270 if (IS_ERR(cd)) 271 if (IS_ERR(cd))
271 return PTR_ERR(cd); 272 return PTR_ERR(cd);
272 273
@@ -280,7 +281,7 @@ int register_chrdev(unsigned int major, const char *name,
280 for (s = strchr(kobject_name(&cdev->kobj),'/'); s; s = strchr(s, '/')) 281 for (s = strchr(kobject_name(&cdev->kobj),'/'); s; s = strchr(s, '/'))
281 *s = '!'; 282 *s = '!';
282 283
283 err = cdev_add(cdev, MKDEV(cd->major, 0), 256); 284 err = cdev_add(cdev, MKDEV(cd->major, baseminor), count);
284 if (err) 285 if (err)
285 goto out; 286 goto out;
286 287
@@ -290,7 +291,7 @@ int register_chrdev(unsigned int major, const char *name,
290out: 291out:
291 kobject_put(&cdev->kobj); 292 kobject_put(&cdev->kobj);
292out2: 293out2:
293 kfree(__unregister_chrdev_region(cd->major, 0, 256)); 294 kfree(__unregister_chrdev_region(cd->major, baseminor, count));
294 return err; 295 return err;
295} 296}
296 297
@@ -316,10 +317,23 @@ void unregister_chrdev_region(dev_t from, unsigned count)
316 } 317 }
317} 318}
318 319
319void unregister_chrdev(unsigned int major, const char *name) 320/**
321 * __unregister_chrdev - unregister and destroy a cdev
322 * @major: major device number
323 * @baseminor: first of the range of minor numbers
324 * @count: the number of minor numbers this cdev is occupying
325 * @name: name of this range of devices
326 *
327 * Unregister and destroy the cdev occupying the region described by
328 * @major, @baseminor and @count. This function undoes what
329 * __register_chrdev() did.
330 */
331void __unregister_chrdev(unsigned int major, unsigned int baseminor,
332 unsigned int count, const char *name)
320{ 333{
321 struct char_device_struct *cd; 334 struct char_device_struct *cd;
322 cd = __unregister_chrdev_region(major, 0, 256); 335
336 cd = __unregister_chrdev_region(major, baseminor, count);
323 if (cd && cd->cdev) 337 if (cd && cd->cdev)
324 cdev_del(cd->cdev); 338 cdev_del(cd->cdev);
325 kfree(cd); 339 kfree(cd);
@@ -568,6 +582,6 @@ EXPORT_SYMBOL(cdev_alloc);
568EXPORT_SYMBOL(cdev_del); 582EXPORT_SYMBOL(cdev_del);
569EXPORT_SYMBOL(cdev_add); 583EXPORT_SYMBOL(cdev_add);
570EXPORT_SYMBOL(cdev_index); 584EXPORT_SYMBOL(cdev_index);
571EXPORT_SYMBOL(register_chrdev); 585EXPORT_SYMBOL(__register_chrdev);
572EXPORT_SYMBOL(unregister_chrdev); 586EXPORT_SYMBOL(__unregister_chrdev);
573EXPORT_SYMBOL(directly_mappable_cdev_bdi); 587EXPORT_SYMBOL(directly_mappable_cdev_bdi);
diff --git a/fs/compat.c b/fs/compat.c
index 94502dab972a..6d6f98fe64a0 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -1485,20 +1485,15 @@ int compat_do_execve(char * filename,
1485 if (!bprm) 1485 if (!bprm)
1486 goto out_files; 1486 goto out_files;
1487 1487
1488 retval = -ERESTARTNOINTR; 1488 retval = prepare_bprm_creds(bprm);
1489 if (mutex_lock_interruptible(&current->cred_guard_mutex)) 1489 if (retval)
1490 goto out_free; 1490 goto out_free;
1491 current->in_execve = 1;
1492
1493 retval = -ENOMEM;
1494 bprm->cred = prepare_exec_creds();
1495 if (!bprm->cred)
1496 goto out_unlock;
1497 1491
1498 retval = check_unsafe_exec(bprm); 1492 retval = check_unsafe_exec(bprm);
1499 if (retval < 0) 1493 if (retval < 0)
1500 goto out_unlock; 1494 goto out_free;
1501 clear_in_exec = retval; 1495 clear_in_exec = retval;
1496 current->in_execve = 1;
1502 1497
1503 file = open_exec(filename); 1498 file = open_exec(filename);
1504 retval = PTR_ERR(file); 1499 retval = PTR_ERR(file);
@@ -1547,7 +1542,6 @@ int compat_do_execve(char * filename,
1547 /* execve succeeded */ 1542 /* execve succeeded */
1548 current->fs->in_exec = 0; 1543 current->fs->in_exec = 0;
1549 current->in_execve = 0; 1544 current->in_execve = 0;
1550 mutex_unlock(&current->cred_guard_mutex);
1551 acct_update_integrals(current); 1545 acct_update_integrals(current);
1552 free_bprm(bprm); 1546 free_bprm(bprm);
1553 if (displaced) 1547 if (displaced)
@@ -1567,10 +1561,7 @@ out_file:
1567out_unmark: 1561out_unmark:
1568 if (clear_in_exec) 1562 if (clear_in_exec)
1569 current->fs->in_exec = 0; 1563 current->fs->in_exec = 0;
1570
1571out_unlock:
1572 current->in_execve = 0; 1564 current->in_execve = 0;
1573 mutex_unlock(&current->cred_guard_mutex);
1574 1565
1575out_free: 1566out_free:
1576 free_bprm(bprm); 1567 free_bprm(bprm);
diff --git a/fs/configfs/inode.c b/fs/configfs/inode.c
index 4921e7426d95..a2f746066c5d 100644
--- a/fs/configfs/inode.c
+++ b/fs/configfs/inode.c
@@ -51,6 +51,7 @@ static const struct address_space_operations configfs_aops = {
51}; 51};
52 52
53static struct backing_dev_info configfs_backing_dev_info = { 53static struct backing_dev_info configfs_backing_dev_info = {
54 .name = "configfs",
54 .ra_pages = 0, /* No readahead */ 55 .ra_pages = 0, /* No readahead */
55 .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK, 56 .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK,
56}; 57};
diff --git a/fs/dcache.c b/fs/dcache.c
index 9e5cd3c3a6ba..a100fa35a48f 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -32,6 +32,7 @@
32#include <linux/swap.h> 32#include <linux/swap.h>
33#include <linux/bootmem.h> 33#include <linux/bootmem.h>
34#include <linux/fs_struct.h> 34#include <linux/fs_struct.h>
35#include <linux/hardirq.h>
35#include "internal.h" 36#include "internal.h"
36 37
37int sysctl_vfs_cache_pressure __read_mostly = 100; 38int sysctl_vfs_cache_pressure __read_mostly = 100;
diff --git a/fs/exec.c b/fs/exec.c
index fb4f3cdda78c..172ceb6edde4 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1016,6 +1016,35 @@ out:
1016EXPORT_SYMBOL(flush_old_exec); 1016EXPORT_SYMBOL(flush_old_exec);
1017 1017
1018/* 1018/*
1019 * Prepare credentials and lock ->cred_guard_mutex.
1020 * install_exec_creds() commits the new creds and drops the lock.
 1021 * Or, if exec fails before, free_bprm() should release ->cred
 1022 * and unlock.
1023 */
1024int prepare_bprm_creds(struct linux_binprm *bprm)
1025{
1026 if (mutex_lock_interruptible(&current->cred_guard_mutex))
1027 return -ERESTARTNOINTR;
1028
1029 bprm->cred = prepare_exec_creds();
1030 if (likely(bprm->cred))
1031 return 0;
1032
1033 mutex_unlock(&current->cred_guard_mutex);
1034 return -ENOMEM;
1035}
1036
1037void free_bprm(struct linux_binprm *bprm)
1038{
1039 free_arg_pages(bprm);
1040 if (bprm->cred) {
1041 mutex_unlock(&current->cred_guard_mutex);
1042 abort_creds(bprm->cred);
1043 }
1044 kfree(bprm);
1045}
1046
1047/*
1019 * install the new credentials for this executable 1048 * install the new credentials for this executable
1020 */ 1049 */
1021void install_exec_creds(struct linux_binprm *bprm) 1050void install_exec_creds(struct linux_binprm *bprm)
@@ -1024,12 +1053,13 @@ void install_exec_creds(struct linux_binprm *bprm)
1024 1053
1025 commit_creds(bprm->cred); 1054 commit_creds(bprm->cred);
1026 bprm->cred = NULL; 1055 bprm->cred = NULL;
1027 1056 /*
1028 /* cred_guard_mutex must be held at least to this point to prevent 1057 * cred_guard_mutex must be held at least to this point to prevent
1029 * ptrace_attach() from altering our determination of the task's 1058 * ptrace_attach() from altering our determination of the task's
1030 * credentials; any time after this it may be unlocked */ 1059 * credentials; any time after this it may be unlocked.
1031 1060 */
1032 security_bprm_committed_creds(bprm); 1061 security_bprm_committed_creds(bprm);
1062 mutex_unlock(&current->cred_guard_mutex);
1033} 1063}
1034EXPORT_SYMBOL(install_exec_creds); 1064EXPORT_SYMBOL(install_exec_creds);
1035 1065
@@ -1246,14 +1276,6 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
1246 1276
1247EXPORT_SYMBOL(search_binary_handler); 1277EXPORT_SYMBOL(search_binary_handler);
1248 1278
1249void free_bprm(struct linux_binprm *bprm)
1250{
1251 free_arg_pages(bprm);
1252 if (bprm->cred)
1253 abort_creds(bprm->cred);
1254 kfree(bprm);
1255}
1256
1257/* 1279/*
1258 * sys_execve() executes a new program. 1280 * sys_execve() executes a new program.
1259 */ 1281 */
@@ -1277,20 +1299,15 @@ int do_execve(char * filename,
1277 if (!bprm) 1299 if (!bprm)
1278 goto out_files; 1300 goto out_files;
1279 1301
1280 retval = -ERESTARTNOINTR; 1302 retval = prepare_bprm_creds(bprm);
1281 if (mutex_lock_interruptible(&current->cred_guard_mutex)) 1303 if (retval)
1282 goto out_free; 1304 goto out_free;
1283 current->in_execve = 1;
1284
1285 retval = -ENOMEM;
1286 bprm->cred = prepare_exec_creds();
1287 if (!bprm->cred)
1288 goto out_unlock;
1289 1305
1290 retval = check_unsafe_exec(bprm); 1306 retval = check_unsafe_exec(bprm);
1291 if (retval < 0) 1307 if (retval < 0)
1292 goto out_unlock; 1308 goto out_free;
1293 clear_in_exec = retval; 1309 clear_in_exec = retval;
1310 current->in_execve = 1;
1294 1311
1295 file = open_exec(filename); 1312 file = open_exec(filename);
1296 retval = PTR_ERR(file); 1313 retval = PTR_ERR(file);
@@ -1340,7 +1357,6 @@ int do_execve(char * filename,
1340 /* execve succeeded */ 1357 /* execve succeeded */
1341 current->fs->in_exec = 0; 1358 current->fs->in_exec = 0;
1342 current->in_execve = 0; 1359 current->in_execve = 0;
1343 mutex_unlock(&current->cred_guard_mutex);
1344 acct_update_integrals(current); 1360 acct_update_integrals(current);
1345 free_bprm(bprm); 1361 free_bprm(bprm);
1346 if (displaced) 1362 if (displaced)
@@ -1360,10 +1376,7 @@ out_file:
1360out_unmark: 1376out_unmark:
1361 if (clear_in_exec) 1377 if (clear_in_exec)
1362 current->fs->in_exec = 0; 1378 current->fs->in_exec = 0;
1363
1364out_unlock:
1365 current->in_execve = 0; 1379 current->in_execve = 0;
1366 mutex_unlock(&current->cred_guard_mutex);
1367 1380
1368out_free: 1381out_free:
1369 free_bprm(bprm); 1382 free_bprm(bprm);
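
The hunks above change the ownership rules for cred_guard_mutex: prepare_bprm_creds() takes the mutex and allocates the creds as one step, install_exec_creds() drops it on the success path, and free_bprm() drops it on any error path where the creds were never committed. Below is a minimal userspace sketch of that discipline, with a pthread mutex standing in for the kernel mutex; all helper bodies and the malloc'd "cred" are invented for illustration and are not kernel code.

```c
/* Userspace sketch of the cred_guard_mutex ownership rules in the hunks above.
 * Illustrative only; pthreads and malloc stand in for kernel primitives. */
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t cred_guard_mutex = PTHREAD_MUTEX_INITIALIZER;

struct binprm { char *cred; };

/* Take the guard mutex and prepare creds; on failure leave the mutex unlocked. */
static int prepare_bprm_creds(struct binprm *bprm)
{
	if (pthread_mutex_lock(&cred_guard_mutex))
		return -1;
	bprm->cred = malloc(16);
	if (bprm->cred)
		return 0;
	pthread_mutex_unlock(&cred_guard_mutex);
	return -1;
}

/* Commit the creds: after this, bprm->cred is gone and the mutex is free. */
static void install_exec_creds(struct binprm *bprm)
{
	free(bprm->cred);		/* stands in for commit_creds() */
	bprm->cred = NULL;
	pthread_mutex_unlock(&cred_guard_mutex);
}

/* Error-path cleanup: still holding creds implies still holding the mutex. */
static void free_bprm(struct binprm *bprm)
{
	if (bprm->cred) {
		pthread_mutex_unlock(&cred_guard_mutex);
		free(bprm->cred);	/* stands in for abort_creds() */
	}
}

int main(void)
{
	struct binprm bprm = { 0 };

	if (prepare_bprm_creds(&bprm))
		return 1;
	/* ... open_exec() and search_binary_handler() would run here ... */
	install_exec_creds(&bprm);	/* success path */
	free_bprm(&bprm);		/* now a no-op for the mutex */
	return 0;
}
```

Because free_bprm() keys the unlock off bprm->cred, calling it after install_exec_creds() on the success path is harmless, which is what lets do_execve() and compat_do_execve() drop their separate out_unlock labels above.
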
diff --git a/fs/ext2/acl.c b/fs/ext2/acl.c
index d636e1297cad..a63d44256a70 100644
--- a/fs/ext2/acl.c
+++ b/fs/ext2/acl.c
@@ -230,7 +230,7 @@ ext2_set_acl(struct inode *inode, int type, struct posix_acl *acl)
230 return error; 230 return error;
231} 231}
232 232
233static int 233int
234ext2_check_acl(struct inode *inode, int mask) 234ext2_check_acl(struct inode *inode, int mask)
235{ 235{
236 struct posix_acl *acl = ext2_get_acl(inode, ACL_TYPE_ACCESS); 236 struct posix_acl *acl = ext2_get_acl(inode, ACL_TYPE_ACCESS);
@@ -246,12 +246,6 @@ ext2_check_acl(struct inode *inode, int mask)
246 return -EAGAIN; 246 return -EAGAIN;
247} 247}
248 248
249int
250ext2_permission(struct inode *inode, int mask)
251{
252 return generic_permission(inode, mask, ext2_check_acl);
253}
254
255/* 249/*
256 * Initialize the ACLs of a new inode. Called from ext2_new_inode. 250 * Initialize the ACLs of a new inode. Called from ext2_new_inode.
257 * 251 *
diff --git a/fs/ext2/acl.h b/fs/ext2/acl.h
index ecefe478898f..3ff6cbb9ac44 100644
--- a/fs/ext2/acl.h
+++ b/fs/ext2/acl.h
@@ -54,13 +54,13 @@ static inline int ext2_acl_count(size_t size)
54#ifdef CONFIG_EXT2_FS_POSIX_ACL 54#ifdef CONFIG_EXT2_FS_POSIX_ACL
55 55
56/* acl.c */ 56/* acl.c */
57extern int ext2_permission (struct inode *, int); 57extern int ext2_check_acl (struct inode *, int);
58extern int ext2_acl_chmod (struct inode *); 58extern int ext2_acl_chmod (struct inode *);
59extern int ext2_init_acl (struct inode *, struct inode *); 59extern int ext2_init_acl (struct inode *, struct inode *);
60 60
61#else 61#else
62#include <linux/sched.h> 62#include <linux/sched.h>
63#define ext2_permission NULL 63#define ext2_check_acl NULL
64#define ext2_get_acl NULL 64#define ext2_get_acl NULL
65#define ext2_set_acl NULL 65#define ext2_set_acl NULL
66 66
diff --git a/fs/ext2/file.c b/fs/ext2/file.c
index 2b9e47dc9222..a2f3afd1a1c1 100644
--- a/fs/ext2/file.c
+++ b/fs/ext2/file.c
@@ -85,6 +85,6 @@ const struct inode_operations ext2_file_inode_operations = {
85 .removexattr = generic_removexattr, 85 .removexattr = generic_removexattr,
86#endif 86#endif
87 .setattr = ext2_setattr, 87 .setattr = ext2_setattr,
88 .permission = ext2_permission, 88 .check_acl = ext2_check_acl,
89 .fiemap = ext2_fiemap, 89 .fiemap = ext2_fiemap,
90}; 90};
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index e1dedb0f7873..23701f289e98 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -362,6 +362,10 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry,
362 if (dir_de) { 362 if (dir_de) {
363 if (old_dir != new_dir) 363 if (old_dir != new_dir)
364 ext2_set_link(old_inode, dir_de, dir_page, new_dir, 0); 364 ext2_set_link(old_inode, dir_de, dir_page, new_dir, 0);
365 else {
366 kunmap(dir_page);
367 page_cache_release(dir_page);
368 }
365 inode_dec_link_count(old_dir); 369 inode_dec_link_count(old_dir);
366 } 370 }
367 return 0; 371 return 0;
@@ -396,7 +400,7 @@ const struct inode_operations ext2_dir_inode_operations = {
396 .removexattr = generic_removexattr, 400 .removexattr = generic_removexattr,
397#endif 401#endif
398 .setattr = ext2_setattr, 402 .setattr = ext2_setattr,
399 .permission = ext2_permission, 403 .check_acl = ext2_check_acl,
400}; 404};
401 405
402const struct inode_operations ext2_special_inode_operations = { 406const struct inode_operations ext2_special_inode_operations = {
@@ -407,5 +411,5 @@ const struct inode_operations ext2_special_inode_operations = {
407 .removexattr = generic_removexattr, 411 .removexattr = generic_removexattr,
408#endif 412#endif
409 .setattr = ext2_setattr, 413 .setattr = ext2_setattr,
410 .permission = ext2_permission, 414 .check_acl = ext2_check_acl,
411}; 415};
diff --git a/fs/ext3/acl.c b/fs/ext3/acl.c
index e167bae37ef0..c9b0df376b5f 100644
--- a/fs/ext3/acl.c
+++ b/fs/ext3/acl.c
@@ -238,7 +238,7 @@ ext3_set_acl(handle_t *handle, struct inode *inode, int type,
238 return error; 238 return error;
239} 239}
240 240
241static int 241int
242ext3_check_acl(struct inode *inode, int mask) 242ext3_check_acl(struct inode *inode, int mask)
243{ 243{
244 struct posix_acl *acl = ext3_get_acl(inode, ACL_TYPE_ACCESS); 244 struct posix_acl *acl = ext3_get_acl(inode, ACL_TYPE_ACCESS);
@@ -254,12 +254,6 @@ ext3_check_acl(struct inode *inode, int mask)
254 return -EAGAIN; 254 return -EAGAIN;
255} 255}
256 256
257int
258ext3_permission(struct inode *inode, int mask)
259{
260 return generic_permission(inode, mask, ext3_check_acl);
261}
262
263/* 257/*
264 * Initialize the ACLs of a new inode. Called from ext3_new_inode. 258 * Initialize the ACLs of a new inode. Called from ext3_new_inode.
265 * 259 *
diff --git a/fs/ext3/acl.h b/fs/ext3/acl.h
index 07d15a3a5969..597334626de9 100644
--- a/fs/ext3/acl.h
+++ b/fs/ext3/acl.h
@@ -54,13 +54,13 @@ static inline int ext3_acl_count(size_t size)
54#ifdef CONFIG_EXT3_FS_POSIX_ACL 54#ifdef CONFIG_EXT3_FS_POSIX_ACL
55 55
56/* acl.c */ 56/* acl.c */
57extern int ext3_permission (struct inode *, int); 57extern int ext3_check_acl (struct inode *, int);
58extern int ext3_acl_chmod (struct inode *); 58extern int ext3_acl_chmod (struct inode *);
59extern int ext3_init_acl (handle_t *, struct inode *, struct inode *); 59extern int ext3_init_acl (handle_t *, struct inode *, struct inode *);
60 60
61#else /* CONFIG_EXT3_FS_POSIX_ACL */ 61#else /* CONFIG_EXT3_FS_POSIX_ACL */
62#include <linux/sched.h> 62#include <linux/sched.h>
63#define ext3_permission NULL 63#define ext3_check_acl NULL
64 64
65static inline int 65static inline int
66ext3_acl_chmod(struct inode *inode) 66ext3_acl_chmod(struct inode *inode)
diff --git a/fs/ext3/file.c b/fs/ext3/file.c
index 5b49704b231b..299253214789 100644
--- a/fs/ext3/file.c
+++ b/fs/ext3/file.c
@@ -137,7 +137,7 @@ const struct inode_operations ext3_file_inode_operations = {
137 .listxattr = ext3_listxattr, 137 .listxattr = ext3_listxattr,
138 .removexattr = generic_removexattr, 138 .removexattr = generic_removexattr,
139#endif 139#endif
140 .permission = ext3_permission, 140 .check_acl = ext3_check_acl,
141 .fiemap = ext3_fiemap, 141 .fiemap = ext3_fiemap,
142}; 142};
143 143
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
index 6ff7b9730234..aad6400c9b77 100644
--- a/fs/ext3/namei.c
+++ b/fs/ext3/namei.c
@@ -2445,7 +2445,7 @@ const struct inode_operations ext3_dir_inode_operations = {
2445 .listxattr = ext3_listxattr, 2445 .listxattr = ext3_listxattr,
2446 .removexattr = generic_removexattr, 2446 .removexattr = generic_removexattr,
2447#endif 2447#endif
2448 .permission = ext3_permission, 2448 .check_acl = ext3_check_acl,
2449}; 2449};
2450 2450
2451const struct inode_operations ext3_special_inode_operations = { 2451const struct inode_operations ext3_special_inode_operations = {
@@ -2456,5 +2456,5 @@ const struct inode_operations ext3_special_inode_operations = {
2456 .listxattr = ext3_listxattr, 2456 .listxattr = ext3_listxattr,
2457 .removexattr = generic_removexattr, 2457 .removexattr = generic_removexattr,
2458#endif 2458#endif
2459 .permission = ext3_permission, 2459 .check_acl = ext3_check_acl,
2460}; 2460};
diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c
index f6d8967149ca..0df88b2a69b0 100644
--- a/fs/ext4/acl.c
+++ b/fs/ext4/acl.c
@@ -236,7 +236,7 @@ ext4_set_acl(handle_t *handle, struct inode *inode, int type,
236 return error; 236 return error;
237} 237}
238 238
239static int 239int
240ext4_check_acl(struct inode *inode, int mask) 240ext4_check_acl(struct inode *inode, int mask)
241{ 241{
242 struct posix_acl *acl = ext4_get_acl(inode, ACL_TYPE_ACCESS); 242 struct posix_acl *acl = ext4_get_acl(inode, ACL_TYPE_ACCESS);
@@ -252,12 +252,6 @@ ext4_check_acl(struct inode *inode, int mask)
252 return -EAGAIN; 252 return -EAGAIN;
253} 253}
254 254
255int
256ext4_permission(struct inode *inode, int mask)
257{
258 return generic_permission(inode, mask, ext4_check_acl);
259}
260
261/* 255/*
262 * Initialize the ACLs of a new inode. Called from ext4_new_inode. 256 * Initialize the ACLs of a new inode. Called from ext4_new_inode.
263 * 257 *
diff --git a/fs/ext4/acl.h b/fs/ext4/acl.h
index 949789d2bba6..9d843d5deac4 100644
--- a/fs/ext4/acl.h
+++ b/fs/ext4/acl.h
@@ -54,13 +54,13 @@ static inline int ext4_acl_count(size_t size)
54#ifdef CONFIG_EXT4_FS_POSIX_ACL 54#ifdef CONFIG_EXT4_FS_POSIX_ACL
55 55
56/* acl.c */ 56/* acl.c */
57extern int ext4_permission(struct inode *, int); 57extern int ext4_check_acl(struct inode *, int);
58extern int ext4_acl_chmod(struct inode *); 58extern int ext4_acl_chmod(struct inode *);
59extern int ext4_init_acl(handle_t *, struct inode *, struct inode *); 59extern int ext4_init_acl(handle_t *, struct inode *, struct inode *);
60 60
61#else /* CONFIG_EXT4_FS_POSIX_ACL */ 61#else /* CONFIG_EXT4_FS_POSIX_ACL */
62#include <linux/sched.h> 62#include <linux/sched.h>
63#define ext4_permission NULL 63#define ext4_check_acl NULL
64 64
65static inline int 65static inline int
66ext4_acl_chmod(struct inode *inode) 66ext4_acl_chmod(struct inode *inode)
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 3f1873fef1c6..27f3c5354c0e 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -207,7 +207,7 @@ const struct inode_operations ext4_file_inode_operations = {
207 .listxattr = ext4_listxattr, 207 .listxattr = ext4_listxattr,
208 .removexattr = generic_removexattr, 208 .removexattr = generic_removexattr,
209#endif 209#endif
210 .permission = ext4_permission, 210 .check_acl = ext4_check_acl,
211 .fallocate = ext4_fallocate, 211 .fallocate = ext4_fallocate,
212 .fiemap = ext4_fiemap, 212 .fiemap = ext4_fiemap,
213}; 213};
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index de04013d16ff..114abe5d2c1d 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -2536,7 +2536,7 @@ const struct inode_operations ext4_dir_inode_operations = {
2536 .listxattr = ext4_listxattr, 2536 .listxattr = ext4_listxattr,
2537 .removexattr = generic_removexattr, 2537 .removexattr = generic_removexattr,
2538#endif 2538#endif
2539 .permission = ext4_permission, 2539 .check_acl = ext4_check_acl,
2540 .fiemap = ext4_fiemap, 2540 .fiemap = ext4_fiemap,
2541}; 2541};
2542 2542
@@ -2548,5 +2548,5 @@ const struct inode_operations ext4_special_inode_operations = {
2548 .listxattr = ext4_listxattr, 2548 .listxattr = ext4_listxattr,
2549 .removexattr = generic_removexattr, 2549 .removexattr = generic_removexattr,
2550#endif 2550#endif
2551 .permission = ext4_permission, 2551 .check_acl = ext4_check_acl,
2552}; 2552};
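
The ext2/ext3/ext4 hunks above all make the same conversion: the per-filesystem ->permission wrappers that merely called generic_permission(inode, mask, extN_check_acl) are deleted, and the ACL check itself is exported through the new .check_acl slot in the inode operations. The VFS side that consumes this slot is not part of this excerpt, so the fallback below is only an assumed model of it; the struct layout and names are simplified for illustration.

```c
/* Userspace model of the ->permission -> ->check_acl conversion above.
 * The generic_permission() fallback is an assumption about how generic
 * code consumes the new hook; structures are heavily simplified. */
#include <errno.h>
#include <stdio.h>

struct inode;

struct inode_operations {
	/* new-style hook: just the ACL check, no per-fs wrapper needed */
	int (*check_acl)(struct inode *inode, int mask);
};

struct inode {
	unsigned int i_mode;
	const struct inode_operations *i_op;
};

/* What the removed extN_permission() wrappers used to arrange by hand:
 * try the mode bits, then fall back to the filesystem's ACL check. */
static int generic_permission(struct inode *inode, int mask)
{
	if ((inode->i_mode & mask) == (unsigned int)mask)
		return 0;
	if (inode->i_op->check_acl)
		return inode->i_op->check_acl(inode, mask);
	return -EACCES;
}

/* Per-filesystem ACL check, analogous to ext2_check_acl() and friends. */
static int demo_check_acl(struct inode *inode, int mask)
{
	(void)inode;
	return (mask & 2) ? -EACCES : 0;	/* toy policy: deny write */
}

static const struct inode_operations demo_iops = {
	.check_acl = demo_check_acl,		/* wired like .check_acl above */
};

int main(void)
{
	struct inode ino = { .i_mode = 4, .i_op = &demo_iops };

	printf("read:  %d\n", generic_permission(&ino, 4));	/* 0 */
	printf("write: %d\n", generic_permission(&ino, 2));	/* -EACCES */
	return 0;
}
```

The practical effect is one less trivial wrapper per filesystem, with generic code deciding when the ACL callback runs.
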
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index c54226be5294..da86ef58e427 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -19,171 +19,223 @@
19#include <linux/sched.h> 19#include <linux/sched.h>
20#include <linux/fs.h> 20#include <linux/fs.h>
21#include <linux/mm.h> 21#include <linux/mm.h>
22#include <linux/kthread.h>
23#include <linux/freezer.h>
22#include <linux/writeback.h> 24#include <linux/writeback.h>
23#include <linux/blkdev.h> 25#include <linux/blkdev.h>
24#include <linux/backing-dev.h> 26#include <linux/backing-dev.h>
25#include <linux/buffer_head.h> 27#include <linux/buffer_head.h>
26#include "internal.h" 28#include "internal.h"
27 29
30#define inode_to_bdi(inode) ((inode)->i_mapping->backing_dev_info)
28 31
29/** 32/*
30 * writeback_acquire - attempt to get exclusive writeback access to a device 33 * We don't actually have pdflush, but this one is exported though /proc...
31 * @bdi: the device's backing_dev_info structure
32 *
33 * It is a waste of resources to have more than one pdflush thread blocked on
34 * a single request queue. Exclusion at the request_queue level is obtained
35 * via a flag in the request_queue's backing_dev_info.state.
36 *
37 * Non-request_queue-backed address_spaces will share default_backing_dev_info,
38 * unless they implement their own. Which is somewhat inefficient, as this
39 * may prevent concurrent writeback against multiple devices.
40 */ 34 */
41static int writeback_acquire(struct backing_dev_info *bdi) 35int nr_pdflush_threads;
36
37/*
38 * Work items for the bdi_writeback threads
39 */
40struct bdi_work {
41 struct list_head list;
42 struct list_head wait_list;
43 struct rcu_head rcu_head;
44
45 unsigned long seen;
46 atomic_t pending;
47
48 struct super_block *sb;
49 unsigned long nr_pages;
50 enum writeback_sync_modes sync_mode;
51
52 unsigned long state;
53};
54
55enum {
56 WS_USED_B = 0,
57 WS_ONSTACK_B,
58};
59
60#define WS_USED (1 << WS_USED_B)
61#define WS_ONSTACK (1 << WS_ONSTACK_B)
62
63static inline bool bdi_work_on_stack(struct bdi_work *work)
42{ 64{
43 return !test_and_set_bit(BDI_pdflush, &bdi->state); 65 return test_bit(WS_ONSTACK_B, &work->state);
66}
67
68static inline void bdi_work_init(struct bdi_work *work,
69 struct writeback_control *wbc)
70{
71 INIT_RCU_HEAD(&work->rcu_head);
72 work->sb = wbc->sb;
73 work->nr_pages = wbc->nr_to_write;
74 work->sync_mode = wbc->sync_mode;
75 work->state = WS_USED;
76}
77
78static inline void bdi_work_init_on_stack(struct bdi_work *work,
79 struct writeback_control *wbc)
80{
81 bdi_work_init(work, wbc);
82 work->state |= WS_ONSTACK;
44} 83}
45 84
46/** 85/**
47 * writeback_in_progress - determine whether there is writeback in progress 86 * writeback_in_progress - determine whether there is writeback in progress
48 * @bdi: the device's backing_dev_info structure. 87 * @bdi: the device's backing_dev_info structure.
49 * 88 *
50 * Determine whether there is writeback in progress against a backing device. 89 * Determine whether there is writeback waiting to be handled against a
90 * backing device.
51 */ 91 */
52int writeback_in_progress(struct backing_dev_info *bdi) 92int writeback_in_progress(struct backing_dev_info *bdi)
53{ 93{
54 return test_bit(BDI_pdflush, &bdi->state); 94 return !list_empty(&bdi->work_list);
55} 95}
56 96
57/** 97static void bdi_work_clear(struct bdi_work *work)
58 * writeback_release - relinquish exclusive writeback access against a device.
59 * @bdi: the device's backing_dev_info structure
60 */
61static void writeback_release(struct backing_dev_info *bdi)
62{ 98{
63 BUG_ON(!writeback_in_progress(bdi)); 99 clear_bit(WS_USED_B, &work->state);
64 clear_bit(BDI_pdflush, &bdi->state); 100 smp_mb__after_clear_bit();
101 wake_up_bit(&work->state, WS_USED_B);
65} 102}
66 103
67static noinline void block_dump___mark_inode_dirty(struct inode *inode) 104static void bdi_work_free(struct rcu_head *head)
68{ 105{
69 if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) { 106 struct bdi_work *work = container_of(head, struct bdi_work, rcu_head);
70 struct dentry *dentry;
71 const char *name = "?";
72 107
73 dentry = d_find_alias(inode); 108 if (!bdi_work_on_stack(work))
74 if (dentry) { 109 kfree(work);
75 spin_lock(&dentry->d_lock); 110 else
76 name = (const char *) dentry->d_name.name; 111 bdi_work_clear(work);
77 }
78 printk(KERN_DEBUG
79 "%s(%d): dirtied inode %lu (%s) on %s\n",
80 current->comm, task_pid_nr(current), inode->i_ino,
81 name, inode->i_sb->s_id);
82 if (dentry) {
83 spin_unlock(&dentry->d_lock);
84 dput(dentry);
85 }
86 }
87} 112}
88 113
89/** 114static void wb_work_complete(struct bdi_work *work)
90 * __mark_inode_dirty - internal function
91 * @inode: inode to mark
92 * @flags: what kind of dirty (i.e. I_DIRTY_SYNC)
93 * Mark an inode as dirty. Callers should use mark_inode_dirty or
94 * mark_inode_dirty_sync.
95 *
96 * Put the inode on the super block's dirty list.
97 *
98 * CAREFUL! We mark it dirty unconditionally, but move it onto the
99 * dirty list only if it is hashed or if it refers to a blockdev.
100 * If it was not hashed, it will never be added to the dirty list
101 * even if it is later hashed, as it will have been marked dirty already.
102 *
103 * In short, make sure you hash any inodes _before_ you start marking
104 * them dirty.
105 *
106 * This function *must* be atomic for the I_DIRTY_PAGES case -
107 * set_page_dirty() is called under spinlock in several places.
108 *
109 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
110 * the block-special inode (/dev/hda1) itself. And the ->dirtied_when field of
111 * the kernel-internal blockdev inode represents the dirtying time of the
112 * blockdev's pages. This is why for I_DIRTY_PAGES we always use
113 * page->mapping->host, so the page-dirtying time is recorded in the internal
114 * blockdev inode.
115 */
116void __mark_inode_dirty(struct inode *inode, int flags)
117{ 115{
118 struct super_block *sb = inode->i_sb; 116 const enum writeback_sync_modes sync_mode = work->sync_mode;
119 117
120 /* 118 /*
121 * Don't do this for I_DIRTY_PAGES - that doesn't actually 119 * For allocated work, we can clear the done/seen bit right here.
122 * dirty the inode itself 120 * For on-stack work, we need to postpone both the clear and free
121 * to after the RCU grace period, since the stack could be invalidated
122 * as soon as bdi_work_clear() has done the wakeup.
123 */ 123 */
124 if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) { 124 if (!bdi_work_on_stack(work))
125 if (sb->s_op->dirty_inode) 125 bdi_work_clear(work);
126 sb->s_op->dirty_inode(inode); 126 if (sync_mode == WB_SYNC_NONE || bdi_work_on_stack(work))
127 } 127 call_rcu(&work->rcu_head, bdi_work_free);
128}
128 129
130static void wb_clear_pending(struct bdi_writeback *wb, struct bdi_work *work)
131{
129 /* 132 /*
130 * make sure that changes are seen by all cpus before we test i_state 133 * The caller has retrieved the work arguments from this work,
131 * -- mikulas 134 * drop our reference. If this is the last ref, delete and free it
132 */ 135 */
133 smp_mb(); 136 if (atomic_dec_and_test(&work->pending)) {
137 struct backing_dev_info *bdi = wb->bdi;
134 138
135 /* avoid the locking if we can */ 139 spin_lock(&bdi->wb_lock);
136 if ((inode->i_state & flags) == flags) 140 list_del_rcu(&work->list);
137 return; 141 spin_unlock(&bdi->wb_lock);
138 142
139 if (unlikely(block_dump)) 143 wb_work_complete(work);
140 block_dump___mark_inode_dirty(inode); 144 }
141 145}
142 spin_lock(&inode_lock);
143 if ((inode->i_state & flags) != flags) {
144 const int was_dirty = inode->i_state & I_DIRTY;
145 146
146 inode->i_state |= flags; 147static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work)
148{
149 if (work) {
150 work->seen = bdi->wb_mask;
151 BUG_ON(!work->seen);
152 atomic_set(&work->pending, bdi->wb_cnt);
153 BUG_ON(!bdi->wb_cnt);
147 154
148 /* 155 /*
149 * If the inode is being synced, just update its dirty state. 156 * Make sure stores are seen before it appears on the list
150 * The unlocker will place the inode on the appropriate
151 * superblock list, based upon its state.
152 */ 157 */
153 if (inode->i_state & I_SYNC) 158 smp_mb();
154 goto out;
155 159
156 /* 160 spin_lock(&bdi->wb_lock);
157 * Only add valid (hashed) inodes to the superblock's 161 list_add_tail_rcu(&work->list, &bdi->work_list);
158 * dirty list. Add blockdev inodes as well. 162 spin_unlock(&bdi->wb_lock);
159 */ 163 }
160 if (!S_ISBLK(inode->i_mode)) { 164
161 if (hlist_unhashed(&inode->i_hash)) 165 /*
162 goto out; 166 * If the default thread isn't there, make sure we add it. When
163 } 167 * it gets created and wakes up, we'll run this work.
164 if (inode->i_state & (I_FREEING|I_CLEAR)) 168 */
165 goto out; 169 if (unlikely(list_empty_careful(&bdi->wb_list)))
170 wake_up_process(default_backing_dev_info.wb.task);
171 else {
172 struct bdi_writeback *wb = &bdi->wb;
166 173
167 /* 174 /*
168 * If the inode was already on s_dirty/s_io/s_more_io, don't 175 * If we failed allocating the bdi work item, wake up the wb
169 * reposition it (that would break s_dirty time-ordering). 176 * thread always. As a safety precaution, it'll flush out
177 * everything
170 */ 178 */
171 if (!was_dirty) { 179 if (!wb_has_dirty_io(wb)) {
172 inode->dirtied_when = jiffies; 180 if (work)
173 list_move(&inode->i_list, &sb->s_dirty); 181 wb_clear_pending(wb, work);
174 } 182 } else if (wb->task)
183 wake_up_process(wb->task);
175 } 184 }
176out:
177 spin_unlock(&inode_lock);
178} 185}
179 186
180EXPORT_SYMBOL(__mark_inode_dirty); 187/*
188 * Used for on-stack allocated work items. The caller needs to wait until
189 * the wb threads have acked the work before it's safe to continue.
190 */
191static void bdi_wait_on_work_clear(struct bdi_work *work)
192{
193 wait_on_bit(&work->state, WS_USED_B, bdi_sched_wait,
194 TASK_UNINTERRUPTIBLE);
195}
181 196
182static int write_inode(struct inode *inode, int sync) 197static struct bdi_work *bdi_alloc_work(struct writeback_control *wbc)
183{ 198{
184 if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode)) 199 struct bdi_work *work;
185 return inode->i_sb->s_op->write_inode(inode, sync); 200
186 return 0; 201 work = kmalloc(sizeof(*work), GFP_ATOMIC);
202 if (work)
203 bdi_work_init(work, wbc);
204
205 return work;
206}
207
208void bdi_start_writeback(struct writeback_control *wbc)
209{
210 const bool must_wait = wbc->sync_mode == WB_SYNC_ALL;
211 struct bdi_work work_stack, *work = NULL;
212
213 if (!must_wait)
214 work = bdi_alloc_work(wbc);
215
216 if (!work) {
217 work = &work_stack;
218 bdi_work_init_on_stack(work, wbc);
219 }
220
221 bdi_queue_work(wbc->bdi, work);
222
223 /*
224 * If the sync mode is WB_SYNC_ALL, block waiting for the work to
225 * complete. If not, we only need to wait for the work to be started,
226 * if we allocated it on-stack. We use the same mechanism, if the
227 * wait bit is set in the bdi_work struct, then threads will not
228 * clear pending until after they are done.
229 *
230 * Note that work == &work_stack if must_wait is true, so we don't
231 * need to do call_rcu() here ever, since the completion path will
232 * have done that for us.
233 */
234 if (must_wait || work == &work_stack) {
235 bdi_wait_on_work_clear(work);
236 if (work != &work_stack)
237 call_rcu(&work->rcu_head, bdi_work_free);
238 }
187} 239}
188 240
189/* 241/*
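
One subtlety in the hunk above is the WS_ONSTACK case: when bdi_alloc_work() fails (or the writeback is WB_SYNC_ALL), bdi_start_writeback() queues a bdi_work that lives on its own stack, so it must wait for the flusher to clear WS_USED before the frame can go away. Below is a rough userspace model of that handshake, with a condition variable in place of wait_on_bit()/wake_up_bit(); everything here is illustrative, not kernel code.

```c
/* Userspace model of the on-stack bdi_work handshake sketched above:
 * the submitter may hand the worker a stack-allocated item, so it must
 * block until the worker acknowledges it. */
#include <pthread.h>
#include <stdio.h>

struct bdi_work {
	int used;			/* models the WS_USED bit */
	pthread_mutex_t lock;
	pthread_cond_t done;
};

static void bdi_work_init_on_stack(struct bdi_work *work)
{
	work->used = 1;
	pthread_mutex_init(&work->lock, NULL);
	pthread_cond_init(&work->done, NULL);
}

/* Worker side: models bdi_work_clear() followed by wake_up_bit(). */
static void bdi_work_clear(struct bdi_work *work)
{
	pthread_mutex_lock(&work->lock);
	work->used = 0;
	pthread_cond_signal(&work->done);
	pthread_mutex_unlock(&work->lock);
}

/* Submitter side: models bdi_wait_on_work_clear(). */
static void bdi_wait_on_work_clear(struct bdi_work *work)
{
	pthread_mutex_lock(&work->lock);
	while (work->used)
		pthread_cond_wait(&work->done, &work->lock);
	pthread_mutex_unlock(&work->lock);
}

static void *flusher_thread(void *arg)
{
	struct bdi_work *work = arg;
	/* ... the real thread would run the writeback here ... */
	bdi_work_clear(work);
	return NULL;
}

int main(void)
{
	struct bdi_work work;		/* on-stack, like the WS_ONSTACK case */
	pthread_t flusher;

	bdi_work_init_on_stack(&work);
	pthread_create(&flusher, NULL, flusher_thread, &work);
	bdi_wait_on_work_clear(&work);	/* safe to let 'work' go out of scope */
	pthread_join(flusher, NULL);
	puts("work acknowledged");
	return 0;
}
```

The same wait is reused for WB_SYNC_ALL work, where the submitter needs the writeback to have completed rather than merely started.
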
@@ -191,31 +243,32 @@ static int write_inode(struct inode *inode, int sync)
191 * furthest end of its superblock's dirty-inode list. 243 * furthest end of its superblock's dirty-inode list.
192 * 244 *
193 * Before stamping the inode's ->dirtied_when, we check to see whether it is 245 * Before stamping the inode's ->dirtied_when, we check to see whether it is
194 * already the most-recently-dirtied inode on the s_dirty list. If that is 246 * already the most-recently-dirtied inode on the b_dirty list. If that is
195 * the case then the inode must have been redirtied while it was being written 247 * the case then the inode must have been redirtied while it was being written
196 * out and we don't reset its dirtied_when. 248 * out and we don't reset its dirtied_when.
197 */ 249 */
198static void redirty_tail(struct inode *inode) 250static void redirty_tail(struct inode *inode)
199{ 251{
200 struct super_block *sb = inode->i_sb; 252 struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
201 253
202 if (!list_empty(&sb->s_dirty)) { 254 if (!list_empty(&wb->b_dirty)) {
203 struct inode *tail_inode; 255 struct inode *tail;
204 256
205 tail_inode = list_entry(sb->s_dirty.next, struct inode, i_list); 257 tail = list_entry(wb->b_dirty.next, struct inode, i_list);
206 if (time_before(inode->dirtied_when, 258 if (time_before(inode->dirtied_when, tail->dirtied_when))
207 tail_inode->dirtied_when))
208 inode->dirtied_when = jiffies; 259 inode->dirtied_when = jiffies;
209 } 260 }
210 list_move(&inode->i_list, &sb->s_dirty); 261 list_move(&inode->i_list, &wb->b_dirty);
211} 262}
212 263
213/* 264/*
214 * requeue inode for re-scanning after sb->s_io list is exhausted. 265 * requeue inode for re-scanning after bdi->b_io list is exhausted.
215 */ 266 */
216static void requeue_io(struct inode *inode) 267static void requeue_io(struct inode *inode)
217{ 268{
218 list_move(&inode->i_list, &inode->i_sb->s_more_io); 269 struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
270
271 list_move(&inode->i_list, &wb->b_more_io);
219} 272}
220 273
221static void inode_sync_complete(struct inode *inode) 274static void inode_sync_complete(struct inode *inode)
@@ -262,20 +315,18 @@ static void move_expired_inodes(struct list_head *delaying_queue,
262/* 315/*
263 * Queue all expired dirty inodes for io, eldest first. 316 * Queue all expired dirty inodes for io, eldest first.
264 */ 317 */
265static void queue_io(struct super_block *sb, 318static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this)
266 unsigned long *older_than_this)
267{ 319{
268 list_splice_init(&sb->s_more_io, sb->s_io.prev); 320 list_splice_init(&wb->b_more_io, wb->b_io.prev);
269 move_expired_inodes(&sb->s_dirty, &sb->s_io, older_than_this); 321 move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this);
270} 322}
271 323
272int sb_has_dirty_inodes(struct super_block *sb) 324static int write_inode(struct inode *inode, int sync)
273{ 325{
274 return !list_empty(&sb->s_dirty) || 326 if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
275 !list_empty(&sb->s_io) || 327 return inode->i_sb->s_op->write_inode(inode, sync);
276 !list_empty(&sb->s_more_io); 328 return 0;
277} 329}
278EXPORT_SYMBOL(sb_has_dirty_inodes);
279 330
280/* 331/*
281 * Wait for writeback on an inode to complete. 332 * Wait for writeback on an inode to complete.
@@ -322,11 +373,11 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
322 if (inode->i_state & I_SYNC) { 373 if (inode->i_state & I_SYNC) {
323 /* 374 /*
324 * If this inode is locked for writeback and we are not doing 375 * If this inode is locked for writeback and we are not doing
325 * writeback-for-data-integrity, move it to s_more_io so that 376 * writeback-for-data-integrity, move it to b_more_io so that
326 * writeback can proceed with the other inodes on s_io. 377 * writeback can proceed with the other inodes on s_io.
327 * 378 *
328 * We'll have another go at writing back this inode when we 379 * We'll have another go at writing back this inode when we
329 * completed a full scan of s_io. 380 * completed a full scan of b_io.
330 */ 381 */
331 if (!wait) { 382 if (!wait) {
332 requeue_io(inode); 383 requeue_io(inode);
@@ -371,11 +422,11 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
371 /* 422 /*
372 * We didn't write back all the pages. nfs_writepages() 423 * We didn't write back all the pages. nfs_writepages()
373 * sometimes bales out without doing anything. Redirty 424 * sometimes bales out without doing anything. Redirty
374 * the inode; Move it from s_io onto s_more_io/s_dirty. 425 * the inode; Move it from b_io onto b_more_io/b_dirty.
375 */ 426 */
376 /* 427 /*
377 * akpm: if the caller was the kupdate function we put 428 * akpm: if the caller was the kupdate function we put
378 * this inode at the head of s_dirty so it gets first 429 * this inode at the head of b_dirty so it gets first
379 * consideration. Otherwise, move it to the tail, for 430 * consideration. Otherwise, move it to the tail, for
380 * the reasons described there. I'm not really sure 431 * the reasons described there. I'm not really sure
381 * how much sense this makes. Presumably I had a good 432 * how much sense this makes. Presumably I had a good
@@ -385,7 +436,7 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
385 if (wbc->for_kupdate) { 436 if (wbc->for_kupdate) {
386 /* 437 /*
387 * For the kupdate function we move the inode 438 * For the kupdate function we move the inode
388 * to s_more_io so it will get more writeout as 439 * to b_more_io so it will get more writeout as
389 * soon as the queue becomes uncongested. 440 * soon as the queue becomes uncongested.
390 */ 441 */
391 inode->i_state |= I_DIRTY_PAGES; 442 inode->i_state |= I_DIRTY_PAGES;
@@ -434,50 +485,84 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
434} 485}
435 486
436/* 487/*
437 * Write out a superblock's list of dirty inodes. A wait will be performed 488 * For WB_SYNC_NONE writeback, the caller does not have the sb pinned
438 * upon no inodes, all inodes or the final one, depending upon sync_mode. 489 * before calling writeback. So make sure that we do pin it, so it doesn't
439 * 490 * go away while we are writing inodes from it.
440 * If older_than_this is non-NULL, then only write out inodes which
441 * had their first dirtying at a time earlier than *older_than_this.
442 *
443 * If we're a pdflush thread, then implement pdflush collision avoidance
444 * against the entire list.
445 * 491 *
446 * If `bdi' is non-zero then we're being asked to writeback a specific queue. 492 * Returns 0 if the super was successfully pinned (or pinning wasn't needed),
447 * This function assumes that the blockdev superblock's inodes are backed by 493 * 1 if we failed.
448 * a variety of queues, so all inodes are searched. For other superblocks,
449 * assume that all inodes are backed by the same queue.
450 *
451 * FIXME: this linear search could get expensive with many fileystems. But
452 * how to fix? We need to go from an address_space to all inodes which share
453 * a queue with that address_space. (Easy: have a global "dirty superblocks"
454 * list).
455 *
456 * The inodes to be written are parked on sb->s_io. They are moved back onto
457 * sb->s_dirty as they are selected for writing. This way, none can be missed
458 * on the writer throttling path, and we get decent balancing between many
459 * throttled threads: we don't want them all piling up on inode_sync_wait.
460 */ 494 */
461void generic_sync_sb_inodes(struct super_block *sb, 495static int pin_sb_for_writeback(struct writeback_control *wbc,
496 struct inode *inode)
497{
498 struct super_block *sb = inode->i_sb;
499
500 /*
501 * Caller must already hold the ref for this
502 */
503 if (wbc->sync_mode == WB_SYNC_ALL) {
504 WARN_ON(!rwsem_is_locked(&sb->s_umount));
505 return 0;
506 }
507
508 spin_lock(&sb_lock);
509 sb->s_count++;
510 if (down_read_trylock(&sb->s_umount)) {
511 if (sb->s_root) {
512 spin_unlock(&sb_lock);
513 return 0;
514 }
515 /*
516 * umounted, drop rwsem again and fall through to failure
517 */
518 up_read(&sb->s_umount);
519 }
520
521 sb->s_count--;
522 spin_unlock(&sb_lock);
523 return 1;
524}
525
526static void unpin_sb_for_writeback(struct writeback_control *wbc,
527 struct inode *inode)
528{
529 struct super_block *sb = inode->i_sb;
530
531 if (wbc->sync_mode == WB_SYNC_ALL)
532 return;
533
534 up_read(&sb->s_umount);
535 put_super(sb);
536}
537
538static void writeback_inodes_wb(struct bdi_writeback *wb,
462 struct writeback_control *wbc) 539 struct writeback_control *wbc)
463{ 540{
541 struct super_block *sb = wbc->sb;
542 const int is_blkdev_sb = sb_is_blkdev_sb(sb);
464 const unsigned long start = jiffies; /* livelock avoidance */ 543 const unsigned long start = jiffies; /* livelock avoidance */
465 int sync = wbc->sync_mode == WB_SYNC_ALL;
466 544
467 spin_lock(&inode_lock); 545 spin_lock(&inode_lock);
468 if (!wbc->for_kupdate || list_empty(&sb->s_io))
469 queue_io(sb, wbc->older_than_this);
470 546
471 while (!list_empty(&sb->s_io)) { 547 if (!wbc->for_kupdate || list_empty(&wb->b_io))
472 struct inode *inode = list_entry(sb->s_io.prev, 548 queue_io(wb, wbc->older_than_this);
549
550 while (!list_empty(&wb->b_io)) {
551 struct inode *inode = list_entry(wb->b_io.prev,
473 struct inode, i_list); 552 struct inode, i_list);
474 struct address_space *mapping = inode->i_mapping;
475 struct backing_dev_info *bdi = mapping->backing_dev_info;
476 long pages_skipped; 553 long pages_skipped;
477 554
478 if (!bdi_cap_writeback_dirty(bdi)) { 555 /*
556 * super block given and doesn't match, skip this inode
557 */
558 if (sb && sb != inode->i_sb) {
559 redirty_tail(inode);
560 continue;
561 }
562
563 if (!bdi_cap_writeback_dirty(wb->bdi)) {
479 redirty_tail(inode); 564 redirty_tail(inode);
480 if (sb_is_blkdev_sb(sb)) { 565 if (is_blkdev_sb) {
481 /* 566 /*
482 * Dirty memory-backed blockdev: the ramdisk 567 * Dirty memory-backed blockdev: the ramdisk
483 * driver does this. Skip just this inode 568 * driver does this. Skip just this inode
@@ -497,21 +582,14 @@ void generic_sync_sb_inodes(struct super_block *sb,
497 continue; 582 continue;
498 } 583 }
499 584
500 if (wbc->nonblocking && bdi_write_congested(bdi)) { 585 if (wbc->nonblocking && bdi_write_congested(wb->bdi)) {
501 wbc->encountered_congestion = 1; 586 wbc->encountered_congestion = 1;
502 if (!sb_is_blkdev_sb(sb)) 587 if (!is_blkdev_sb)
503 break; /* Skip a congested fs */ 588 break; /* Skip a congested fs */
504 requeue_io(inode); 589 requeue_io(inode);
505 continue; /* Skip a congested blockdev */ 590 continue; /* Skip a congested blockdev */
506 } 591 }
507 592
508 if (wbc->bdi && bdi != wbc->bdi) {
509 if (!sb_is_blkdev_sb(sb))
510 break; /* fs has the wrong queue */
511 requeue_io(inode);
512 continue; /* blockdev has wrong queue */
513 }
514
515 /* 593 /*
516 * Was this inode dirtied after sync_sb_inodes was called? 594 * Was this inode dirtied after sync_sb_inodes was called?
517 * This keeps sync from extra jobs and livelock. 595 * This keeps sync from extra jobs and livelock.
@@ -519,16 +597,16 @@ void generic_sync_sb_inodes(struct super_block *sb,
519 if (inode_dirtied_after(inode, start)) 597 if (inode_dirtied_after(inode, start))
520 break; 598 break;
521 599
522 /* Is another pdflush already flushing this queue? */ 600 if (pin_sb_for_writeback(wbc, inode)) {
523 if (current_is_pdflush() && !writeback_acquire(bdi)) 601 requeue_io(inode);
524 break; 602 continue;
603 }
525 604
526 BUG_ON(inode->i_state & (I_FREEING | I_CLEAR)); 605 BUG_ON(inode->i_state & (I_FREEING | I_CLEAR));
527 __iget(inode); 606 __iget(inode);
528 pages_skipped = wbc->pages_skipped; 607 pages_skipped = wbc->pages_skipped;
529 writeback_single_inode(inode, wbc); 608 writeback_single_inode(inode, wbc);
530 if (current_is_pdflush()) 609 unpin_sb_for_writeback(wbc, inode);
531 writeback_release(bdi);
532 if (wbc->pages_skipped != pages_skipped) { 610 if (wbc->pages_skipped != pages_skipped) {
533 /* 611 /*
534 * writeback is not making progress due to locked 612 * writeback is not making progress due to locked
@@ -544,144 +622,571 @@ void generic_sync_sb_inodes(struct super_block *sb,
544 wbc->more_io = 1; 622 wbc->more_io = 1;
545 break; 623 break;
546 } 624 }
547 if (!list_empty(&sb->s_more_io)) 625 if (!list_empty(&wb->b_more_io))
548 wbc->more_io = 1; 626 wbc->more_io = 1;
549 } 627 }
550 628
551 if (sync) { 629 spin_unlock(&inode_lock);
552 struct inode *inode, *old_inode = NULL; 630 /* Leave any unwritten inodes on b_io */
631}
632
633void writeback_inodes_wbc(struct writeback_control *wbc)
634{
635 struct backing_dev_info *bdi = wbc->bdi;
553 636
637 writeback_inodes_wb(&bdi->wb, wbc);
638}
639
640/*
641 * The maximum number of pages to writeout in a single bdi flush/kupdate
642 * operation. We do this so we don't hold I_SYNC against an inode for
643 * enormous amounts of time, which would block a userspace task which has
644 * been forced to throttle against that inode. Also, the code reevaluates
645 * the dirty each time it has written this many pages.
646 */
647#define MAX_WRITEBACK_PAGES 1024
648
649static inline bool over_bground_thresh(void)
650{
651 unsigned long background_thresh, dirty_thresh;
652
653 get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);
654
655 return (global_page_state(NR_FILE_DIRTY) +
656 global_page_state(NR_UNSTABLE_NFS) >= background_thresh);
657}
658
659/*
660 * Explicit flushing or periodic writeback of "old" data.
661 *
662 * Define "old": the first time one of an inode's pages is dirtied, we mark the
663 * dirtying-time in the inode's address_space. So this periodic writeback code
664 * just walks the superblock inode list, writing back any inodes which are
665 * older than a specific point in time.
666 *
667 * Try to run once per dirty_writeback_interval. But if a writeback event
668 * takes longer than a dirty_writeback_interval interval, then leave a
669 * one-second gap.
670 *
671 * older_than_this takes precedence over nr_to_write. So we'll only write back
672 * all dirty pages if they are all attached to "old" mappings.
673 */
674static long wb_writeback(struct bdi_writeback *wb, long nr_pages,
675 struct super_block *sb,
676 enum writeback_sync_modes sync_mode, int for_kupdate)
677{
678 struct writeback_control wbc = {
679 .bdi = wb->bdi,
680 .sb = sb,
681 .sync_mode = sync_mode,
682 .older_than_this = NULL,
683 .for_kupdate = for_kupdate,
684 .range_cyclic = 1,
685 };
686 unsigned long oldest_jif;
687 long wrote = 0;
688
689 if (wbc.for_kupdate) {
690 wbc.older_than_this = &oldest_jif;
691 oldest_jif = jiffies -
692 msecs_to_jiffies(dirty_expire_interval * 10);
693 }
694
695 for (;;) {
554 /* 696 /*
555 * Data integrity sync. Must wait for all pages under writeback, 697 * Don't flush anything for non-integrity writeback where
556 * because there may have been pages dirtied before our sync 698 * no nr_pages was given
557 * call, but which had writeout started before we write it out.
558 * In which case, the inode may not be on the dirty list, but
559 * we still have to wait for that writeout.
560 */ 699 */
561 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { 700 if (!for_kupdate && nr_pages <= 0 && sync_mode == WB_SYNC_NONE)
562 struct address_space *mapping; 701 break;
563 702
564 if (inode->i_state & 703 /*
565 (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW)) 704 * If no specific pages were given and this is just a
566 continue; 705 * periodic background writeout and we are below the
567 mapping = inode->i_mapping; 706 * background dirty threshold, don't do anything
568 if (mapping->nrpages == 0) 707 */
708 if (for_kupdate && nr_pages <= 0 && !over_bground_thresh())
709 break;
710
711 wbc.more_io = 0;
712 wbc.encountered_congestion = 0;
713 wbc.nr_to_write = MAX_WRITEBACK_PAGES;
714 wbc.pages_skipped = 0;
715 writeback_inodes_wb(wb, &wbc);
716 nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
717 wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write;
718
719 /*
720 * If we ran out of stuff to write, bail unless more_io got set
721 */
722 if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
723 if (wbc.more_io && !wbc.for_kupdate)
569 continue; 724 continue;
570 __iget(inode); 725 break;
571 spin_unlock(&inode_lock); 726 }
727 }
728
729 return wrote;
730}
731
732/*
733 * Return the next bdi_work struct that hasn't been processed by this
734 * wb thread yet
735 */
736static struct bdi_work *get_next_work_item(struct backing_dev_info *bdi,
737 struct bdi_writeback *wb)
738{
739 struct bdi_work *work, *ret = NULL;
740
741 rcu_read_lock();
742
743 list_for_each_entry_rcu(work, &bdi->work_list, list) {
744 if (!test_and_clear_bit(wb->nr, &work->seen))
745 continue;
746
747 ret = work;
748 break;
749 }
750
751 rcu_read_unlock();
752 return ret;
753}
754
755static long wb_check_old_data_flush(struct bdi_writeback *wb)
756{
757 unsigned long expired;
758 long nr_pages;
759
760 expired = wb->last_old_flush +
761 msecs_to_jiffies(dirty_writeback_interval * 10);
762 if (time_before(jiffies, expired))
763 return 0;
764
765 wb->last_old_flush = jiffies;
766 nr_pages = global_page_state(NR_FILE_DIRTY) +
767 global_page_state(NR_UNSTABLE_NFS) +
768 (inodes_stat.nr_inodes - inodes_stat.nr_unused);
769
770 if (nr_pages)
771 return wb_writeback(wb, nr_pages, NULL, WB_SYNC_NONE, 1);
772
773 return 0;
774}
775
776/*
777 * Retrieve work items and do the writeback they describe
778 */
779long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
780{
781 struct backing_dev_info *bdi = wb->bdi;
782 struct bdi_work *work;
783 long nr_pages, wrote = 0;
784
785 while ((work = get_next_work_item(bdi, wb)) != NULL) {
786 enum writeback_sync_modes sync_mode;
787
788 nr_pages = work->nr_pages;
789
790 /*
791 * Override sync mode, in case we must wait for completion
792 */
793 if (force_wait)
794 work->sync_mode = sync_mode = WB_SYNC_ALL;
795 else
796 sync_mode = work->sync_mode;
797
798 /*
799 * If this isn't a data integrity operation, just notify
800 * that we have seen this work and we are now starting it.
801 */
802 if (sync_mode == WB_SYNC_NONE)
803 wb_clear_pending(wb, work);
804
805 wrote += wb_writeback(wb, nr_pages, work->sb, sync_mode, 0);
806
807 /*
808 * This is a data integrity writeback, so only do the
809 * notification when we have completed the work.
810 */
811 if (sync_mode == WB_SYNC_ALL)
812 wb_clear_pending(wb, work);
813 }
814
815 /*
816 * Check for periodic writeback, kupdated() style
817 */
818 wrote += wb_check_old_data_flush(wb);
819
820 return wrote;
821}
822
823/*
824 * Handle writeback of dirty data for the device backed by this bdi. Also
825 * wakes up periodically and does kupdated style flushing.
826 */
827int bdi_writeback_task(struct bdi_writeback *wb)
828{
829 unsigned long last_active = jiffies;
830 unsigned long wait_jiffies = -1UL;
831 long pages_written;
832
833 while (!kthread_should_stop()) {
834 pages_written = wb_do_writeback(wb, 0);
835
836 if (pages_written)
837 last_active = jiffies;
838 else if (wait_jiffies != -1UL) {
839 unsigned long max_idle;
840
572 /* 841 /*
573 * We hold a reference to 'inode' so it couldn't have 842 * Longest period of inactivity that we tolerate. If we
574 * been removed from s_inodes list while we dropped the 843 * see dirty data again later, the task will get
575 * inode_lock. We cannot iput the inode now as we can 844 * recreated automatically.
576 * be holding the last reference and we cannot iput it
577 * under inode_lock. So we keep the reference and iput
578 * it later.
579 */ 845 */
580 iput(old_inode); 846 max_idle = max(5UL * 60 * HZ, wait_jiffies);
581 old_inode = inode; 847 if (time_after(jiffies, max_idle + last_active))
848 break;
849 }
582 850
583 filemap_fdatawait(mapping); 851 wait_jiffies = msecs_to_jiffies(dirty_writeback_interval * 10);
852 set_current_state(TASK_INTERRUPTIBLE);
853 schedule_timeout(wait_jiffies);
854 try_to_freeze();
855 }
584 856
585 cond_resched(); 857 return 0;
858}
859
860/*
861 * Schedule writeback for all backing devices. Expensive! If this is a data
862 * integrity operation, writeback will be complete when this returns. If
863 * we are simply called for WB_SYNC_NONE, then writeback will merely be
864 * scheduled to run.
865 */
866static void bdi_writeback_all(struct writeback_control *wbc)
867{
868 const bool must_wait = wbc->sync_mode == WB_SYNC_ALL;
869 struct backing_dev_info *bdi;
870 struct bdi_work *work;
871 LIST_HEAD(list);
872
873restart:
874 spin_lock(&bdi_lock);
875
876 list_for_each_entry(bdi, &bdi_list, bdi_list) {
877 struct bdi_work *work;
586 878
587 spin_lock(&inode_lock); 879 if (!bdi_has_dirty_io(bdi))
880 continue;
881
882 /*
883 * If work allocation fails, do the writes inline. We drop
884 * the lock and restart the list writeout. This should be OK,
885 * since this happens rarely and because the writeout should
886 * eventually make more free memory available.
887 */
888 work = bdi_alloc_work(wbc);
889 if (!work) {
890 struct writeback_control __wbc;
891
892 /*
893 * Not a data integrity writeout, just continue
894 */
895 if (!must_wait)
896 continue;
897
898 spin_unlock(&bdi_lock);
899 __wbc = *wbc;
900 __wbc.bdi = bdi;
901 writeback_inodes_wbc(&__wbc);
902 goto restart;
588 } 903 }
589 spin_unlock(&inode_lock); 904 if (must_wait)
590 iput(old_inode); 905 list_add_tail(&work->wait_list, &list);
591 } else 906
592 spin_unlock(&inode_lock); 907 bdi_queue_work(bdi, work);
908 }
909
910 spin_unlock(&bdi_lock);
593 911
594 return; /* Leave any unwritten inodes on s_io */ 912 /*
913 * If this is for WB_SYNC_ALL, wait for pending work to complete
914 * before returning.
915 */
916 while (!list_empty(&list)) {
917 work = list_entry(list.next, struct bdi_work, wait_list);
918 list_del(&work->wait_list);
919 bdi_wait_on_work_clear(work);
920 call_rcu(&work->rcu_head, bdi_work_free);
921 }
595} 922}
596EXPORT_SYMBOL_GPL(generic_sync_sb_inodes);
597 923
598static void sync_sb_inodes(struct super_block *sb, 924/*
599 struct writeback_control *wbc) 925 * Start writeback of `nr_pages' pages. If `nr_pages' is zero, write back
926 * the whole world.
927 */
928void wakeup_flusher_threads(long nr_pages)
600{ 929{
601 generic_sync_sb_inodes(sb, wbc); 930 struct writeback_control wbc = {
931 .sync_mode = WB_SYNC_NONE,
932 .older_than_this = NULL,
933 .range_cyclic = 1,
934 };
935
936 if (nr_pages == 0)
937 nr_pages = global_page_state(NR_FILE_DIRTY) +
938 global_page_state(NR_UNSTABLE_NFS);
939 wbc.nr_to_write = nr_pages;
940 bdi_writeback_all(&wbc);
602} 941}
603 942
604/* 943static noinline void block_dump___mark_inode_dirty(struct inode *inode)
605 * Start writeback of dirty pagecache data against all unlocked inodes. 944{
945 if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
946 struct dentry *dentry;
947 const char *name = "?";
948
949 dentry = d_find_alias(inode);
950 if (dentry) {
951 spin_lock(&dentry->d_lock);
952 name = (const char *) dentry->d_name.name;
953 }
954 printk(KERN_DEBUG
955 "%s(%d): dirtied inode %lu (%s) on %s\n",
956 current->comm, task_pid_nr(current), inode->i_ino,
957 name, inode->i_sb->s_id);
958 if (dentry) {
959 spin_unlock(&dentry->d_lock);
960 dput(dentry);
961 }
962 }
963}
964
965/**
966 * __mark_inode_dirty - internal function
967 * @inode: inode to mark
968 * @flags: what kind of dirty (i.e. I_DIRTY_SYNC)
969 * Mark an inode as dirty. Callers should use mark_inode_dirty or
970 * mark_inode_dirty_sync.
606 * 971 *
607 * Note: 972 * Put the inode on the super block's dirty list.
608 * We don't need to grab a reference to superblock here. If it has non-empty 973 *
609 * ->s_dirty it's hadn't been killed yet and kill_super() won't proceed 974 * CAREFUL! We mark it dirty unconditionally, but move it onto the
610 * past sync_inodes_sb() until the ->s_dirty/s_io/s_more_io lists are all 975 * dirty list only if it is hashed or if it refers to a blockdev.
611 * empty. Since __sync_single_inode() regains inode_lock before it finally moves 976 * If it was not hashed, it will never be added to the dirty list
612 * inode from superblock lists we are OK. 977 * even if it is later hashed, as it will have been marked dirty already.
613 * 978 *
614 * If `older_than_this' is non-zero then only flush inodes which have a 979 * In short, make sure you hash any inodes _before_ you start marking
615 * flushtime older than *older_than_this. 980 * them dirty.
616 * 981 *
617 * If `bdi' is non-zero then we will scan the first inode against each 982 * This function *must* be atomic for the I_DIRTY_PAGES case -
618 * superblock until we find the matching ones. One group will be the dirty 983 * set_page_dirty() is called under spinlock in several places.
619 * inodes against a filesystem. Then when we hit the dummy blockdev superblock, 984 *
620 * sync_sb_inodes will seekout the blockdev which matches `bdi'. Maybe not 985 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
621 * super-efficient but we're about to do a ton of I/O... 986 * the block-special inode (/dev/hda1) itself. And the ->dirtied_when field of
987 * the kernel-internal blockdev inode represents the dirtying time of the
988 * blockdev's pages. This is why for I_DIRTY_PAGES we always use
989 * page->mapping->host, so the page-dirtying time is recorded in the internal
990 * blockdev inode.
622 */ 991 */
623void 992void __mark_inode_dirty(struct inode *inode, int flags)
624writeback_inodes(struct writeback_control *wbc)
625{ 993{
626 struct super_block *sb; 994 struct super_block *sb = inode->i_sb;
627 995
628 might_sleep(); 996 /*
629 spin_lock(&sb_lock); 997 * Don't do this for I_DIRTY_PAGES - that doesn't actually
630restart: 998 * dirty the inode itself
631 list_for_each_entry_reverse(sb, &super_blocks, s_list) { 999 */
632 if (sb_has_dirty_inodes(sb)) { 1000 if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
633 /* we're making our own get_super here */ 1001 if (sb->s_op->dirty_inode)
634 sb->s_count++; 1002 sb->s_op->dirty_inode(inode);
635 spin_unlock(&sb_lock); 1003 }
636 /* 1004
637 * If we can't get the readlock, there's no sense in 1005 /*
638 * waiting around, most of the time the FS is going to 1006 * make sure that changes are seen by all cpus before we test i_state
639 * be unmounted by the time it is released. 1007 * -- mikulas
640 */ 1008 */
641 if (down_read_trylock(&sb->s_umount)) { 1009 smp_mb();
642 if (sb->s_root) 1010
643 sync_sb_inodes(sb, wbc); 1011 /* avoid the locking if we can */
644 up_read(&sb->s_umount); 1012 if ((inode->i_state & flags) == flags)
1013 return;
1014
1015 if (unlikely(block_dump))
1016 block_dump___mark_inode_dirty(inode);
1017
1018 spin_lock(&inode_lock);
1019 if ((inode->i_state & flags) != flags) {
1020 const int was_dirty = inode->i_state & I_DIRTY;
1021
1022 inode->i_state |= flags;
1023
1024 /*
1025 * If the inode is being synced, just update its dirty state.
1026 * The unlocker will place the inode on the appropriate
1027 * superblock list, based upon its state.
1028 */
1029 if (inode->i_state & I_SYNC)
1030 goto out;
1031
1032 /*
1033 * Only add valid (hashed) inodes to the superblock's
1034 * dirty list. Add blockdev inodes as well.
1035 */
1036 if (!S_ISBLK(inode->i_mode)) {
1037 if (hlist_unhashed(&inode->i_hash))
1038 goto out;
1039 }
1040 if (inode->i_state & (I_FREEING|I_CLEAR))
1041 goto out;
1042
1043 /*
1044 * If the inode was already on b_dirty/b_io/b_more_io, don't
1045 * reposition it (that would break b_dirty time-ordering).
1046 */
1047 if (!was_dirty) {
1048 struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
1049 struct backing_dev_info *bdi = wb->bdi;
1050
1051 if (bdi_cap_writeback_dirty(bdi) &&
1052 !test_bit(BDI_registered, &bdi->state)) {
1053 WARN_ON(1);
1054 printk(KERN_ERR "bdi-%s not registered\n",
1055 bdi->name);
645 } 1056 }
646 spin_lock(&sb_lock); 1057
647 if (__put_super_and_need_restart(sb)) 1058 inode->dirtied_when = jiffies;
648 goto restart; 1059 list_move(&inode->i_list, &wb->b_dirty);
649 } 1060 }
650 if (wbc->nr_to_write <= 0)
651 break;
652 } 1061 }
653 spin_unlock(&sb_lock); 1062out:
1063 spin_unlock(&inode_lock);
654} 1064}
1065EXPORT_SYMBOL(__mark_inode_dirty);
655 1066
656/* 1067/*
657 * writeback and wait upon the filesystem's dirty inodes. The caller will 1068 * Write out a superblock's list of dirty inodes. A wait will be performed
658 * do this in two passes - one to write, and one to wait. 1069 * upon no inodes, all inodes or the final one, depending upon sync_mode.
1070 *
1071 * If older_than_this is non-NULL, then only write out inodes which
1072 * had their first dirtying at a time earlier than *older_than_this.
659 * 1073 *
 660 * A finite limit is set on the number of pages which will be written. 1074 * If we're a pdflush thread, then implement pdflush collision avoidance
661 * To prevent infinite livelock of sys_sync(). 1075 * against the entire list.
662 * 1076 *
663 * We add in the number of potentially dirty inodes, because each inode write 1077 * If `bdi' is non-zero then we're being asked to writeback a specific queue.
664 * can dirty pagecache in the underlying blockdev. 1078 * This function assumes that the blockdev superblock's inodes are backed by
1079 * a variety of queues, so all inodes are searched. For other superblocks,
1080 * assume that all inodes are backed by the same queue.
1081 *
1082 * The inodes to be written are parked on bdi->b_io. They are moved back onto
1083 * bdi->b_dirty as they are selected for writing. This way, none can be missed
1084 * on the writer throttling path, and we get decent balancing between many
1085 * throttled threads: we don't want them all piling up on inode_sync_wait.
665 */ 1086 */
666void sync_inodes_sb(struct super_block *sb, int wait) 1087static void wait_sb_inodes(struct writeback_control *wbc)
1088{
1089 struct inode *inode, *old_inode = NULL;
1090
1091 /*
1092 * We need to be protected against the filesystem going from
1093 * r/o to r/w or vice versa.
1094 */
1095 WARN_ON(!rwsem_is_locked(&wbc->sb->s_umount));
1096
1097 spin_lock(&inode_lock);
1098
1099 /*
1100 * Data integrity sync. Must wait for all pages under writeback,
1101 * because there may have been pages dirtied before our sync
1102 * call, but which had writeout started before we write it out.
1103 * In which case, the inode may not be on the dirty list, but
1104 * we still have to wait for that writeout.
1105 */
1106 list_for_each_entry(inode, &wbc->sb->s_inodes, i_sb_list) {
1107 struct address_space *mapping;
1108
1109 if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW))
1110 continue;
1111 mapping = inode->i_mapping;
1112 if (mapping->nrpages == 0)
1113 continue;
1114 __iget(inode);
1115 spin_unlock(&inode_lock);
1116 /*
1117 * We hold a reference to 'inode' so it couldn't have
1118 * been removed from s_inodes list while we dropped the
1119 * inode_lock. We cannot iput the inode now as we can
1120 * be holding the last reference and we cannot iput it
1121 * under inode_lock. So we keep the reference and iput
1122 * it later.
1123 */
1124 iput(old_inode);
1125 old_inode = inode;
1126
1127 filemap_fdatawait(mapping);
1128
1129 cond_resched();
1130
1131 spin_lock(&inode_lock);
1132 }
1133 spin_unlock(&inode_lock);
1134 iput(old_inode);
1135}
1136
1137/**
1138 * writeback_inodes_sb - writeback dirty inodes from given super_block
1139 * @sb: the superblock
1140 *
1141 * Start writeback on some inodes on this super_block. No guarantees are made
1142 * on how many (if any) will be written, and this function does not wait
1143 * for IO completion of submitted IO. The number of pages submitted is
1144 * returned.
1145 */
1146long writeback_inodes_sb(struct super_block *sb)
667{ 1147{
668 struct writeback_control wbc = { 1148 struct writeback_control wbc = {
669 .sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE, 1149 .sb = sb,
1150 .sync_mode = WB_SYNC_NONE,
670 .range_start = 0, 1151 .range_start = 0,
671 .range_end = LLONG_MAX, 1152 .range_end = LLONG_MAX,
672 }; 1153 };
1154 unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
1155 unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
1156 long nr_to_write;
673 1157
674 if (!wait) { 1158 nr_to_write = nr_dirty + nr_unstable +
675 unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
676 unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
677
678 wbc.nr_to_write = nr_dirty + nr_unstable +
679 (inodes_stat.nr_inodes - inodes_stat.nr_unused); 1159 (inodes_stat.nr_inodes - inodes_stat.nr_unused);
680 } else
681 wbc.nr_to_write = LONG_MAX; /* doesn't actually matter */
682 1160
683 sync_sb_inodes(sb, &wbc); 1161 wbc.nr_to_write = nr_to_write;
1162 bdi_writeback_all(&wbc);
1163 return nr_to_write - wbc.nr_to_write;
1164}
1165EXPORT_SYMBOL(writeback_inodes_sb);
1166
1167/**
1168 * sync_inodes_sb - sync sb inode pages
1169 * @sb: the superblock
1170 *
1171 * This function writes and waits on any dirty inode belonging to this
1172 * super_block. The number of pages synced is returned.
1173 */
1174long sync_inodes_sb(struct super_block *sb)
1175{
1176 struct writeback_control wbc = {
1177 .sb = sb,
1178 .sync_mode = WB_SYNC_ALL,
1179 .range_start = 0,
1180 .range_end = LLONG_MAX,
1181 };
1182 long nr_to_write = LONG_MAX; /* doesn't actually matter */
1183
1184 wbc.nr_to_write = nr_to_write;
1185 bdi_writeback_all(&wbc);
1186 wait_sb_inodes(&wbc);
1187 return nr_to_write - wbc.nr_to_write;
684} 1188}
1189EXPORT_SYMBOL(sync_inodes_sb);
685 1190
686/** 1191/**
687 * write_inode_now - write an inode to disk 1192 * write_inode_now - write an inode to disk
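The fs-writeback.c hunk above replaces the old sync_inodes_sb(sb, wait) entry point with two per-superblock helpers: writeback_inodes_sb() (WB_SYNC_NONE, no waiting) and sync_inodes_sb() (WB_SYNC_ALL followed by wait_sb_inodes() for data integrity). A minimal sketch of how a caller might drive them; the wrapper function below is hypothetical, only the two exported helpers come from this patch:

    /* Hypothetical caller; writeback_inodes_sb()/sync_inodes_sb() are the
     * helpers introduced in the hunk above. */
    static long example_sync_sb(struct super_block *sb, int wait)
    {
            if (!wait)
                    /* Background flush: submit I/O, do not wait for completion. */
                    return writeback_inodes_sb(sb);

            /* Data-integrity sync: write everything, then wait on all pages. */
            return sync_inodes_sb(sb);
    }

Both helpers return the number of pages submitted, so callers that only care about kicking writeback can ignore the return value.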
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index f91ccc4a189d..4567db6f9430 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -801,6 +801,7 @@ static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb)
801{ 801{
802 int err; 802 int err;
803 803
804 fc->bdi.name = "fuse";
804 fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; 805 fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
805 fc->bdi.unplug_io_fn = default_unplug_io_fn; 806 fc->bdi.unplug_io_fn = default_unplug_io_fn;
806 /* fuse does its own writeback accounting */ 807 /* fuse does its own writeback accounting */
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index cb88dac8ccaa..a93b885311d8 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -44,6 +44,7 @@ static const struct inode_operations hugetlbfs_dir_inode_operations;
44static const struct inode_operations hugetlbfs_inode_operations; 44static const struct inode_operations hugetlbfs_inode_operations;
45 45
46static struct backing_dev_info hugetlbfs_backing_dev_info = { 46static struct backing_dev_info hugetlbfs_backing_dev_info = {
47 .name = "hugetlbfs",
47 .ra_pages = 0, /* No readahead */ 48 .ra_pages = 0, /* No readahead */
48 .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK, 49 .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK,
49}; 50};
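The fuse and hugetlbfs hunks above (and the nfs hunk further down) each give their backing_dev_info a name, which is what the new "bdi-%s not registered" warning in the fs-writeback.c hunk prints. A hedged sketch of the resulting pattern for a filesystem-private BDI; the struct and filesystem name here are illustrative, only the .name field requirement comes from this patch:

    static struct backing_dev_info examplefs_backing_dev_info = {
            .name           = "examplefs",  /* reported when the bdi is (un)registered */
            .ra_pages       = 0,            /* no readahead */
            .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK,
    };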
diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c
index 8fcb6239218e..7edb62e97419 100644
--- a/fs/jffs2/acl.c
+++ b/fs/jffs2/acl.c
@@ -258,7 +258,7 @@ static int jffs2_set_acl(struct inode *inode, int type, struct posix_acl *acl)
258 return rc; 258 return rc;
259} 259}
260 260
261static int jffs2_check_acl(struct inode *inode, int mask) 261int jffs2_check_acl(struct inode *inode, int mask)
262{ 262{
263 struct posix_acl *acl; 263 struct posix_acl *acl;
264 int rc; 264 int rc;
@@ -274,11 +274,6 @@ static int jffs2_check_acl(struct inode *inode, int mask)
274 return -EAGAIN; 274 return -EAGAIN;
275} 275}
276 276
277int jffs2_permission(struct inode *inode, int mask)
278{
279 return generic_permission(inode, mask, jffs2_check_acl);
280}
281
282int jffs2_init_acl_pre(struct inode *dir_i, struct inode *inode, int *i_mode) 277int jffs2_init_acl_pre(struct inode *dir_i, struct inode *inode, int *i_mode)
283{ 278{
284 struct posix_acl *acl, *clone; 279 struct posix_acl *acl, *clone;
diff --git a/fs/jffs2/acl.h b/fs/jffs2/acl.h
index fc929f2a14f6..f0ba63e3c36b 100644
--- a/fs/jffs2/acl.h
+++ b/fs/jffs2/acl.h
@@ -26,7 +26,7 @@ struct jffs2_acl_header {
26 26
27#ifdef CONFIG_JFFS2_FS_POSIX_ACL 27#ifdef CONFIG_JFFS2_FS_POSIX_ACL
28 28
29extern int jffs2_permission(struct inode *, int); 29extern int jffs2_check_acl(struct inode *, int);
30extern int jffs2_acl_chmod(struct inode *); 30extern int jffs2_acl_chmod(struct inode *);
31extern int jffs2_init_acl_pre(struct inode *, struct inode *, int *); 31extern int jffs2_init_acl_pre(struct inode *, struct inode *, int *);
32extern int jffs2_init_acl_post(struct inode *); 32extern int jffs2_init_acl_post(struct inode *);
@@ -36,7 +36,7 @@ extern struct xattr_handler jffs2_acl_default_xattr_handler;
36 36
37#else 37#else
38 38
39#define jffs2_permission (NULL) 39#define jffs2_check_acl (NULL)
40#define jffs2_acl_chmod(inode) (0) 40#define jffs2_acl_chmod(inode) (0)
41#define jffs2_init_acl_pre(dir_i,inode,mode) (0) 41#define jffs2_init_acl_pre(dir_i,inode,mode) (0)
42#define jffs2_init_acl_post(inode) (0) 42#define jffs2_init_acl_post(inode) (0)
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
index 6f60cc910f4c..7aa4417e085f 100644
--- a/fs/jffs2/dir.c
+++ b/fs/jffs2/dir.c
@@ -55,7 +55,7 @@ const struct inode_operations jffs2_dir_inode_operations =
55 .rmdir = jffs2_rmdir, 55 .rmdir = jffs2_rmdir,
56 .mknod = jffs2_mknod, 56 .mknod = jffs2_mknod,
57 .rename = jffs2_rename, 57 .rename = jffs2_rename,
58 .permission = jffs2_permission, 58 .check_acl = jffs2_check_acl,
59 .setattr = jffs2_setattr, 59 .setattr = jffs2_setattr,
60 .setxattr = jffs2_setxattr, 60 .setxattr = jffs2_setxattr,
61 .getxattr = jffs2_getxattr, 61 .getxattr = jffs2_getxattr,
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
index 23c947539864..b7b74e299142 100644
--- a/fs/jffs2/file.c
+++ b/fs/jffs2/file.c
@@ -56,7 +56,7 @@ const struct file_operations jffs2_file_operations =
56 56
57const struct inode_operations jffs2_file_inode_operations = 57const struct inode_operations jffs2_file_inode_operations =
58{ 58{
59 .permission = jffs2_permission, 59 .check_acl = jffs2_check_acl,
60 .setattr = jffs2_setattr, 60 .setattr = jffs2_setattr,
61 .setxattr = jffs2_setxattr, 61 .setxattr = jffs2_setxattr,
62 .getxattr = jffs2_getxattr, 62 .getxattr = jffs2_getxattr,
diff --git a/fs/jffs2/symlink.c b/fs/jffs2/symlink.c
index b7339c3b6ad9..4ec11e8bda8c 100644
--- a/fs/jffs2/symlink.c
+++ b/fs/jffs2/symlink.c
@@ -21,7 +21,7 @@ const struct inode_operations jffs2_symlink_inode_operations =
21{ 21{
22 .readlink = generic_readlink, 22 .readlink = generic_readlink,
23 .follow_link = jffs2_follow_link, 23 .follow_link = jffs2_follow_link,
24 .permission = jffs2_permission, 24 .check_acl = jffs2_check_acl,
25 .setattr = jffs2_setattr, 25 .setattr = jffs2_setattr,
26 .setxattr = jffs2_setxattr, 26 .setxattr = jffs2_setxattr,
27 .getxattr = jffs2_getxattr, 27 .getxattr = jffs2_getxattr,
diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
index d9a721e6db70..5ef7bac265e5 100644
--- a/fs/jffs2/wbuf.c
+++ b/fs/jffs2/wbuf.c
@@ -1268,10 +1268,20 @@ int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) {
1268 if (!c->wbuf) 1268 if (!c->wbuf)
1269 return -ENOMEM; 1269 return -ENOMEM;
1270 1270
1271#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1272 c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1273 if (!c->wbuf_verify) {
1274 kfree(c->wbuf);
1275 return -ENOMEM;
1276 }
1277#endif
1271 return 0; 1278 return 0;
1272} 1279}
1273 1280
1274void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c) { 1281void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c) {
1282#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1283 kfree(c->wbuf_verify);
1284#endif
1275 kfree(c->wbuf); 1285 kfree(c->wbuf);
1276} 1286}
1277 1287
diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c
index a29c7c3e3fb8..d66477c34306 100644
--- a/fs/jfs/acl.c
+++ b/fs/jfs/acl.c
@@ -114,7 +114,7 @@ out:
114 return rc; 114 return rc;
115} 115}
116 116
117static int jfs_check_acl(struct inode *inode, int mask) 117int jfs_check_acl(struct inode *inode, int mask)
118{ 118{
119 struct posix_acl *acl = jfs_get_acl(inode, ACL_TYPE_ACCESS); 119 struct posix_acl *acl = jfs_get_acl(inode, ACL_TYPE_ACCESS);
120 120
@@ -129,11 +129,6 @@ static int jfs_check_acl(struct inode *inode, int mask)
129 return -EAGAIN; 129 return -EAGAIN;
130} 130}
131 131
132int jfs_permission(struct inode *inode, int mask)
133{
134 return generic_permission(inode, mask, jfs_check_acl);
135}
136
137int jfs_init_acl(tid_t tid, struct inode *inode, struct inode *dir) 132int jfs_init_acl(tid_t tid, struct inode *inode, struct inode *dir)
138{ 133{
139 struct posix_acl *acl = NULL; 134 struct posix_acl *acl = NULL;
diff --git a/fs/jfs/file.c b/fs/jfs/file.c
index 7f6063acaa3b..2b70fa78e4a7 100644
--- a/fs/jfs/file.c
+++ b/fs/jfs/file.c
@@ -96,7 +96,7 @@ const struct inode_operations jfs_file_inode_operations = {
96 .removexattr = jfs_removexattr, 96 .removexattr = jfs_removexattr,
97#ifdef CONFIG_JFS_POSIX_ACL 97#ifdef CONFIG_JFS_POSIX_ACL
98 .setattr = jfs_setattr, 98 .setattr = jfs_setattr,
99 .permission = jfs_permission, 99 .check_acl = jfs_check_acl,
100#endif 100#endif
101}; 101};
102 102
diff --git a/fs/jfs/jfs_acl.h b/fs/jfs/jfs_acl.h
index 88475f10a389..b07bd417ef85 100644
--- a/fs/jfs/jfs_acl.h
+++ b/fs/jfs/jfs_acl.h
@@ -20,7 +20,7 @@
20 20
21#ifdef CONFIG_JFS_POSIX_ACL 21#ifdef CONFIG_JFS_POSIX_ACL
22 22
23int jfs_permission(struct inode *, int); 23int jfs_check_acl(struct inode *, int);
24int jfs_init_acl(tid_t, struct inode *, struct inode *); 24int jfs_init_acl(tid_t, struct inode *, struct inode *);
25int jfs_setattr(struct dentry *, struct iattr *); 25int jfs_setattr(struct dentry *, struct iattr *);
26 26
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index 514ee2edb92a..c79a4270f083 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -1543,7 +1543,7 @@ const struct inode_operations jfs_dir_inode_operations = {
1543 .removexattr = jfs_removexattr, 1543 .removexattr = jfs_removexattr,
1544#ifdef CONFIG_JFS_POSIX_ACL 1544#ifdef CONFIG_JFS_POSIX_ACL
1545 .setattr = jfs_setattr, 1545 .setattr = jfs_setattr,
1546 .permission = jfs_permission, 1546 .check_acl = jfs_check_acl,
1547#endif 1547#endif
1548}; 1548};
1549 1549
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index 99d737bd4325..7cb076ac6b45 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -87,18 +87,6 @@ static unsigned int nlm_hash_address(const struct sockaddr *sap)
87 return hash & (NLM_HOST_NRHASH - 1); 87 return hash & (NLM_HOST_NRHASH - 1);
88} 88}
89 89
90static void nlm_clear_port(struct sockaddr *sap)
91{
92 switch (sap->sa_family) {
93 case AF_INET:
94 ((struct sockaddr_in *)sap)->sin_port = 0;
95 break;
96 case AF_INET6:
97 ((struct sockaddr_in6 *)sap)->sin6_port = 0;
98 break;
99 }
100}
101
102/* 90/*
103 * Common host lookup routine for server & client 91 * Common host lookup routine for server & client
104 */ 92 */
@@ -177,7 +165,7 @@ static struct nlm_host *nlm_lookup_host(struct nlm_lookup_host_info *ni)
177 host->h_addrbuf = nsm->sm_addrbuf; 165 host->h_addrbuf = nsm->sm_addrbuf;
178 memcpy(nlm_addr(host), ni->sap, ni->salen); 166 memcpy(nlm_addr(host), ni->sap, ni->salen);
179 host->h_addrlen = ni->salen; 167 host->h_addrlen = ni->salen;
180 nlm_clear_port(nlm_addr(host)); 168 rpc_set_port(nlm_addr(host), 0);
181 memcpy(nlm_srcaddr(host), ni->src_sap, ni->src_len); 169 memcpy(nlm_srcaddr(host), ni->src_sap, ni->src_len);
182 host->h_version = ni->version; 170 host->h_version = ni->version;
183 host->h_proto = ni->protocol; 171 host->h_proto = ni->protocol;
diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
index 7fce1b525849..30c933188dd7 100644
--- a/fs/lockd/mon.c
+++ b/fs/lockd/mon.c
@@ -61,43 +61,6 @@ static inline struct sockaddr *nsm_addr(const struct nsm_handle *nsm)
61 return (struct sockaddr *)&nsm->sm_addr; 61 return (struct sockaddr *)&nsm->sm_addr;
62} 62}
63 63
64static void nsm_display_ipv4_address(const struct sockaddr *sap, char *buf,
65 const size_t len)
66{
67 const struct sockaddr_in *sin = (struct sockaddr_in *)sap;
68 snprintf(buf, len, "%pI4", &sin->sin_addr.s_addr);
69}
70
71static void nsm_display_ipv6_address(const struct sockaddr *sap, char *buf,
72 const size_t len)
73{
74 const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
75
76 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
77 snprintf(buf, len, "%pI4", &sin6->sin6_addr.s6_addr32[3]);
78 else if (sin6->sin6_scope_id != 0)
79 snprintf(buf, len, "%pI6%%%u", &sin6->sin6_addr,
80 sin6->sin6_scope_id);
81 else
82 snprintf(buf, len, "%pI6", &sin6->sin6_addr);
83}
84
85static void nsm_display_address(const struct sockaddr *sap,
86 char *buf, const size_t len)
87{
88 switch (sap->sa_family) {
89 case AF_INET:
90 nsm_display_ipv4_address(sap, buf, len);
91 break;
92 case AF_INET6:
93 nsm_display_ipv6_address(sap, buf, len);
94 break;
95 default:
96 snprintf(buf, len, "unsupported address family");
97 break;
98 }
99}
100
101static struct rpc_clnt *nsm_create(void) 64static struct rpc_clnt *nsm_create(void)
102{ 65{
103 struct sockaddr_in sin = { 66 struct sockaddr_in sin = {
@@ -307,8 +270,11 @@ static struct nsm_handle *nsm_create_handle(const struct sockaddr *sap,
307 memcpy(nsm_addr(new), sap, salen); 270 memcpy(nsm_addr(new), sap, salen);
308 new->sm_addrlen = salen; 271 new->sm_addrlen = salen;
309 nsm_init_private(new); 272 nsm_init_private(new);
310 nsm_display_address((const struct sockaddr *)&new->sm_addr, 273
311 new->sm_addrbuf, sizeof(new->sm_addrbuf)); 274 if (rpc_ntop(nsm_addr(new), new->sm_addrbuf,
275 sizeof(new->sm_addrbuf)) == 0)
276 (void)snprintf(new->sm_addrbuf, sizeof(new->sm_addrbuf),
277 "unsupported address family");
312 memcpy(new->sm_name, hostname, hostname_len); 278 memcpy(new->sm_name, hostname, hostname_len);
313 new->sm_name[hostname_len] = '\0'; 279 new->sm_name[hostname_len] = '\0';
314 280
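Both lockd hunks above drop private sockaddr helpers (nlm_clear_port, nsm_display_address) in favour of the shared sunrpc ones. A small sketch of the replacement calls in isolation; the wrapper function and buffer handling are illustrative, while rpc_set_port() and rpc_ntop() are the helpers this patch switches to:

    #include <linux/kernel.h>
    #include <linux/sunrpc/clnt.h>

    static void example_format_peer(struct sockaddr *sap, char *buf, size_t len)
    {
            /* Zero the port before hashing/comparison, as nlm_lookup_host() now does. */
            rpc_set_port(sap, 0);

            /* rpc_ntop() returns 0 when the address family cannot be formatted. */
            if (rpc_ntop(sap, buf, len) == 0)
                    snprintf(buf, len, "unsupported address family");
    }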
diff --git a/fs/locks.c b/fs/locks.c
index b6440f52178f..19ee18a6829b 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -768,7 +768,7 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
768 * give it the opportunity to lock the file. 768 * give it the opportunity to lock the file.
769 */ 769 */
770 if (found) 770 if (found)
771 cond_resched_bkl(); 771 cond_resched();
772 772
773find_conflict: 773find_conflict:
774 for_each_lock(inode, before) { 774 for_each_lock(inode, before) {
@@ -1591,7 +1591,7 @@ SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
1591 if (can_sleep) 1591 if (can_sleep)
1592 lock->fl_flags |= FL_SLEEP; 1592 lock->fl_flags |= FL_SLEEP;
1593 1593
1594 error = security_file_lock(filp, cmd); 1594 error = security_file_lock(filp, lock->fl_type);
1595 if (error) 1595 if (error)
1596 goto out_free; 1596 goto out_free;
1597 1597
diff --git a/fs/namei.c b/fs/namei.c
index f3c5b278895a..d11f404667e9 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -169,19 +169,10 @@ void putname(const char *name)
169EXPORT_SYMBOL(putname); 169EXPORT_SYMBOL(putname);
170#endif 170#endif
171 171
172 172/*
173/** 173 * This does basic POSIX ACL permission checking
174 * generic_permission - check for access rights on a Posix-like filesystem
175 * @inode: inode to check access rights for
176 * @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC)
177 * @check_acl: optional callback to check for Posix ACLs
178 *
179 * Used to check for read/write/execute permissions on a file.
180 * We use "fsuid" for this, letting us set arbitrary permissions
181 * for filesystem access without changing the "normal" uids which
182 * are used for other things..
183 */ 174 */
184int generic_permission(struct inode *inode, int mask, 175static int acl_permission_check(struct inode *inode, int mask,
185 int (*check_acl)(struct inode *inode, int mask)) 176 int (*check_acl)(struct inode *inode, int mask))
186{ 177{
187 umode_t mode = inode->i_mode; 178 umode_t mode = inode->i_mode;
@@ -193,9 +184,7 @@ int generic_permission(struct inode *inode, int mask,
193 else { 184 else {
194 if (IS_POSIXACL(inode) && (mode & S_IRWXG) && check_acl) { 185 if (IS_POSIXACL(inode) && (mode & S_IRWXG) && check_acl) {
195 int error = check_acl(inode, mask); 186 int error = check_acl(inode, mask);
196 if (error == -EACCES) 187 if (error != -EAGAIN)
197 goto check_capabilities;
198 else if (error != -EAGAIN)
199 return error; 188 return error;
200 } 189 }
201 190
@@ -208,8 +197,32 @@ int generic_permission(struct inode *inode, int mask,
208 */ 197 */
209 if ((mask & ~mode) == 0) 198 if ((mask & ~mode) == 0)
210 return 0; 199 return 0;
200 return -EACCES;
201}
202
203/**
204 * generic_permission - check for access rights on a Posix-like filesystem
205 * @inode: inode to check access rights for
206 * @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC)
207 * @check_acl: optional callback to check for Posix ACLs
208 *
209 * Used to check for read/write/execute permissions on a file.
210 * We use "fsuid" for this, letting us set arbitrary permissions
211 * for filesystem access without changing the "normal" uids which
212 * are used for other things..
213 */
214int generic_permission(struct inode *inode, int mask,
215 int (*check_acl)(struct inode *inode, int mask))
216{
217 int ret;
218
219 /*
220 * Do the basic POSIX ACL permission checks.
221 */
222 ret = acl_permission_check(inode, mask, check_acl);
223 if (ret != -EACCES)
224 return ret;
211 225
212 check_capabilities:
213 /* 226 /*
214 * Read/write DACs are always overridable. 227 * Read/write DACs are always overridable.
215 * Executable DACs are overridable if at least one exec bit is set. 228 * Executable DACs are overridable if at least one exec bit is set.
@@ -262,7 +275,7 @@ int inode_permission(struct inode *inode, int mask)
262 if (inode->i_op->permission) 275 if (inode->i_op->permission)
263 retval = inode->i_op->permission(inode, mask); 276 retval = inode->i_op->permission(inode, mask);
264 else 277 else
265 retval = generic_permission(inode, mask, NULL); 278 retval = generic_permission(inode, mask, inode->i_op->check_acl);
266 279
267 if (retval) 280 if (retval)
268 return retval; 281 return retval;
@@ -432,29 +445,22 @@ static struct dentry * cached_lookup(struct dentry * parent, struct qstr * name,
432 */ 445 */
433static int exec_permission_lite(struct inode *inode) 446static int exec_permission_lite(struct inode *inode)
434{ 447{
435 umode_t mode = inode->i_mode; 448 int ret;
436 449
437 if (inode->i_op->permission) 450 if (inode->i_op->permission) {
438 return -EAGAIN; 451 ret = inode->i_op->permission(inode, MAY_EXEC);
439 452 if (!ret)
440 if (current_fsuid() == inode->i_uid) 453 goto ok;
441 mode >>= 6; 454 return ret;
442 else if (in_group_p(inode->i_gid)) 455 }
443 mode >>= 3; 456 ret = acl_permission_check(inode, MAY_EXEC, inode->i_op->check_acl);
444 457 if (!ret)
445 if (mode & MAY_EXEC)
446 goto ok;
447
448 if ((inode->i_mode & S_IXUGO) && capable(CAP_DAC_OVERRIDE))
449 goto ok;
450
451 if (S_ISDIR(inode->i_mode) && capable(CAP_DAC_OVERRIDE))
452 goto ok; 458 goto ok;
453 459
454 if (S_ISDIR(inode->i_mode) && capable(CAP_DAC_READ_SEARCH)) 460 if (capable(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH))
455 goto ok; 461 goto ok;
456 462
457 return -EACCES; 463 return ret;
458ok: 464ok:
459 return security_inode_permission(inode, MAY_EXEC); 465 return security_inode_permission(inode, MAY_EXEC);
460} 466}
@@ -853,12 +859,6 @@ static int __link_path_walk(const char *name, struct nameidata *nd)
853 859
854 nd->flags |= LOOKUP_CONTINUE; 860 nd->flags |= LOOKUP_CONTINUE;
855 err = exec_permission_lite(inode); 861 err = exec_permission_lite(inode);
856 if (err == -EAGAIN)
857 err = inode_permission(nd->path.dentry->d_inode,
858 MAY_EXEC);
859 if (!err)
860 err = ima_path_check(&nd->path, MAY_EXEC,
861 IMA_COUNT_UPDATE);
862 if (err) 862 if (err)
863 break; 863 break;
864 864
@@ -1533,37 +1533,42 @@ int may_open(struct path *path, int acc_mode, int flag)
1533 if (error) 1533 if (error)
1534 return error; 1534 return error;
1535 1535
1536 error = ima_path_check(path, 1536 error = ima_path_check(path, acc_mode ?
1537 acc_mode & (MAY_READ | MAY_WRITE | MAY_EXEC), 1537 acc_mode & (MAY_READ | MAY_WRITE | MAY_EXEC) :
1538 ACC_MODE(flag) & (MAY_READ | MAY_WRITE),
1538 IMA_COUNT_UPDATE); 1539 IMA_COUNT_UPDATE);
1540
1539 if (error) 1541 if (error)
1540 return error; 1542 return error;
1541 /* 1543 /*
1542 * An append-only file must be opened in append mode for writing. 1544 * An append-only file must be opened in append mode for writing.
1543 */ 1545 */
1544 if (IS_APPEND(inode)) { 1546 if (IS_APPEND(inode)) {
1547 error = -EPERM;
1545 if ((flag & FMODE_WRITE) && !(flag & O_APPEND)) 1548 if ((flag & FMODE_WRITE) && !(flag & O_APPEND))
1546 return -EPERM; 1549 goto err_out;
1547 if (flag & O_TRUNC) 1550 if (flag & O_TRUNC)
1548 return -EPERM; 1551 goto err_out;
1549 } 1552 }
1550 1553
1551 /* O_NOATIME can only be set by the owner or superuser */ 1554 /* O_NOATIME can only be set by the owner or superuser */
1552 if (flag & O_NOATIME) 1555 if (flag & O_NOATIME)
1553 if (!is_owner_or_cap(inode)) 1556 if (!is_owner_or_cap(inode)) {
1554 return -EPERM; 1557 error = -EPERM;
1558 goto err_out;
1559 }
1555 1560
1556 /* 1561 /*
1557 * Ensure there are no outstanding leases on the file. 1562 * Ensure there are no outstanding leases on the file.
1558 */ 1563 */
1559 error = break_lease(inode, flag); 1564 error = break_lease(inode, flag);
1560 if (error) 1565 if (error)
1561 return error; 1566 goto err_out;
1562 1567
1563 if (flag & O_TRUNC) { 1568 if (flag & O_TRUNC) {
1564 error = get_write_access(inode); 1569 error = get_write_access(inode);
1565 if (error) 1570 if (error)
1566 return error; 1571 goto err_out;
1567 1572
1568 /* 1573 /*
1569 * Refuse to truncate files with mandatory locks held on them. 1574 * Refuse to truncate files with mandatory locks held on them.
@@ -1581,12 +1586,17 @@ int may_open(struct path *path, int acc_mode, int flag)
1581 } 1586 }
1582 put_write_access(inode); 1587 put_write_access(inode);
1583 if (error) 1588 if (error)
1584 return error; 1589 goto err_out;
1585 } else 1590 } else
1586 if (flag & FMODE_WRITE) 1591 if (flag & FMODE_WRITE)
1587 vfs_dq_init(inode); 1592 vfs_dq_init(inode);
1588 1593
1589 return 0; 1594 return 0;
1595err_out:
1596 ima_counts_put(path, acc_mode ?
1597 acc_mode & (MAY_READ | MAY_WRITE | MAY_EXEC) :
1598 ACC_MODE(flag) & (MAY_READ | MAY_WRITE));
1599 return error;
1590} 1600}
1591 1601
1592/* 1602/*
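With the namei.c refactor above, inode_permission() now passes inode->i_op->check_acl into generic_permission(), and exec_permission_lite() reuses the same acl_permission_check() helper instead of bailing out with -EAGAIN. That is why the jffs2 and jfs hunks earlier in this patch can drop their thin ->permission wrappers and simply expose a check_acl callback. A hedged sketch of the filesystem side of the new contract; the examplefs names and the get-ACL helper are hypothetical, the return-value convention (-EAGAIN means fall back to the mode bits) is taken from the converted filesystems:

    #include <linux/fs.h>
    #include <linux/posix_acl.h>

    /* Return 0 if the POSIX ACL grants @mask, a negative error if it denies
     * it, or -EAGAIN to fall back to the ordinary mode-bit checks. */
    int examplefs_check_acl(struct inode *inode, int mask)
    {
            struct posix_acl *acl = examplefs_get_acl(inode);   /* hypothetical helper */

            if (IS_ERR(acl))
                    return PTR_ERR(acl);
            if (acl) {
                    int error = posix_acl_permission(inode, acl, mask);
                    posix_acl_release(acl);
                    return error;
            }
            return -EAGAIN;
    }

    const struct inode_operations examplefs_file_inode_operations = {
            .check_acl      = examplefs_check_acl, /* consumed by inode_permission() */
            /* ... remaining operations unchanged ... */
    };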
diff --git a/fs/nfs/Makefile b/fs/nfs/Makefile
index 845159814de2..da7fda639eac 100644
--- a/fs/nfs/Makefile
+++ b/fs/nfs/Makefile
@@ -6,7 +6,8 @@ obj-$(CONFIG_NFS_FS) += nfs.o
6 6
7nfs-y := client.o dir.o file.o getroot.o inode.o super.o nfs2xdr.o \ 7nfs-y := client.o dir.o file.o getroot.o inode.o super.o nfs2xdr.o \
8 direct.o pagelist.o proc.o read.o symlink.o unlink.o \ 8 direct.o pagelist.o proc.o read.o symlink.o unlink.o \
9 write.o namespace.o mount_clnt.o 9 write.o namespace.o mount_clnt.o \
10 dns_resolve.o cache_lib.o
10nfs-$(CONFIG_ROOT_NFS) += nfsroot.o 11nfs-$(CONFIG_ROOT_NFS) += nfsroot.o
11nfs-$(CONFIG_NFS_V3) += nfs3proc.o nfs3xdr.o 12nfs-$(CONFIG_NFS_V3) += nfs3proc.o nfs3xdr.o
12nfs-$(CONFIG_NFS_V3_ACL) += nfs3acl.o 13nfs-$(CONFIG_NFS_V3_ACL) += nfs3acl.o
diff --git a/fs/nfs/cache_lib.c b/fs/nfs/cache_lib.c
new file mode 100644
index 000000000000..b4ffd0146ea6
--- /dev/null
+++ b/fs/nfs/cache_lib.c
@@ -0,0 +1,140 @@
1/*
2 * linux/fs/nfs/cache_lib.c
3 *
4 * Helper routines for the NFS client caches
5 *
6 * Copyright (c) 2009 Trond Myklebust <Trond.Myklebust@netapp.com>
7 */
8#include <linux/kmod.h>
9#include <linux/module.h>
10#include <linux/moduleparam.h>
11#include <linux/mount.h>
12#include <linux/namei.h>
13#include <linux/sunrpc/cache.h>
14#include <linux/sunrpc/rpc_pipe_fs.h>
15
16#include "cache_lib.h"
17
18#define NFS_CACHE_UPCALL_PATHLEN 256
19#define NFS_CACHE_UPCALL_TIMEOUT 15
20
21static char nfs_cache_getent_prog[NFS_CACHE_UPCALL_PATHLEN] =
22 "/sbin/nfs_cache_getent";
23static unsigned long nfs_cache_getent_timeout = NFS_CACHE_UPCALL_TIMEOUT;
24
25module_param_string(cache_getent, nfs_cache_getent_prog,
26 sizeof(nfs_cache_getent_prog), 0600);
27MODULE_PARM_DESC(cache_getent, "Path to the client cache upcall program");
28module_param_named(cache_getent_timeout, nfs_cache_getent_timeout, ulong, 0600);
29MODULE_PARM_DESC(cache_getent_timeout, "Timeout (in seconds) after which "
30 "the cache upcall is assumed to have failed");
31
32int nfs_cache_upcall(struct cache_detail *cd, char *entry_name)
33{
34 static char *envp[] = { "HOME=/",
35 "TERM=linux",
36 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
37 NULL
38 };
39 char *argv[] = {
40 nfs_cache_getent_prog,
41 cd->name,
42 entry_name,
43 NULL
44 };
45 int ret = -EACCES;
46
47 if (nfs_cache_getent_prog[0] == '\0')
48 goto out;
49 ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
50 /*
51 * Disable the upcall mechanism if we're getting an ENOENT or
52 * EACCES error. The admin can re-enable it on the fly by using
53 * sysfs to set the 'cache_getent' parameter once the problem
54 * has been fixed.
55 */
56 if (ret == -ENOENT || ret == -EACCES)
57 nfs_cache_getent_prog[0] = '\0';
58out:
59 return ret > 0 ? 0 : ret;
60}
61
62/*
63 * Deferred request handling
64 */
65void nfs_cache_defer_req_put(struct nfs_cache_defer_req *dreq)
66{
67 if (atomic_dec_and_test(&dreq->count))
68 kfree(dreq);
69}
70
71static void nfs_dns_cache_revisit(struct cache_deferred_req *d, int toomany)
72{
73 struct nfs_cache_defer_req *dreq;
74
75 dreq = container_of(d, struct nfs_cache_defer_req, deferred_req);
76
77 complete_all(&dreq->completion);
78 nfs_cache_defer_req_put(dreq);
79}
80
81static struct cache_deferred_req *nfs_dns_cache_defer(struct cache_req *req)
82{
83 struct nfs_cache_defer_req *dreq;
84
85 dreq = container_of(req, struct nfs_cache_defer_req, req);
86 dreq->deferred_req.revisit = nfs_dns_cache_revisit;
87 atomic_inc(&dreq->count);
88
89 return &dreq->deferred_req;
90}
91
92struct nfs_cache_defer_req *nfs_cache_defer_req_alloc(void)
93{
94 struct nfs_cache_defer_req *dreq;
95
96 dreq = kzalloc(sizeof(*dreq), GFP_KERNEL);
97 if (dreq) {
98 init_completion(&dreq->completion);
99 atomic_set(&dreq->count, 1);
100 dreq->req.defer = nfs_dns_cache_defer;
101 }
102 return dreq;
103}
104
105int nfs_cache_wait_for_upcall(struct nfs_cache_defer_req *dreq)
106{
107 if (wait_for_completion_timeout(&dreq->completion,
108 nfs_cache_getent_timeout * HZ) == 0)
109 return -ETIMEDOUT;
110 return 0;
111}
112
113int nfs_cache_register(struct cache_detail *cd)
114{
115 struct nameidata nd;
116 struct vfsmount *mnt;
117 int ret;
118
119 mnt = rpc_get_mount();
120 if (IS_ERR(mnt))
121 return PTR_ERR(mnt);
122 ret = vfs_path_lookup(mnt->mnt_root, mnt, "/cache", 0, &nd);
123 if (ret)
124 goto err;
125 ret = sunrpc_cache_register_pipefs(nd.path.dentry,
126 cd->name, 0600, cd);
127 path_put(&nd.path);
128 if (!ret)
129 return ret;
130err:
131 rpc_put_mount();
132 return ret;
133}
134
135void nfs_cache_unregister(struct cache_detail *cd)
136{
137 sunrpc_cache_unregister_pipefs(cd);
138 rpc_put_mount();
139}
140
diff --git a/fs/nfs/cache_lib.h b/fs/nfs/cache_lib.h
new file mode 100644
index 000000000000..76f856e284e4
--- /dev/null
+++ b/fs/nfs/cache_lib.h
@@ -0,0 +1,27 @@
1/*
2 * Helper routines for the NFS client caches
3 *
4 * Copyright (c) 2009 Trond Myklebust <Trond.Myklebust@netapp.com>
5 */
6
7#include <linux/completion.h>
8#include <linux/sunrpc/cache.h>
9#include <asm/atomic.h>
10
11/*
12 * Deferred request handling
13 */
14struct nfs_cache_defer_req {
15 struct cache_req req;
16 struct cache_deferred_req deferred_req;
17 struct completion completion;
18 atomic_t count;
19};
20
21extern int nfs_cache_upcall(struct cache_detail *cd, char *entry_name);
22extern struct nfs_cache_defer_req *nfs_cache_defer_req_alloc(void);
23extern void nfs_cache_defer_req_put(struct nfs_cache_defer_req *dreq);
24extern int nfs_cache_wait_for_upcall(struct nfs_cache_defer_req *dreq);
25
26extern int nfs_cache_register(struct cache_detail *cd);
27extern void nfs_cache_unregister(struct cache_detail *cd);
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index 7f604c7941fb..293fa0528a6e 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -43,21 +43,29 @@ static struct svc_program nfs4_callback_program;
43unsigned int nfs_callback_set_tcpport; 43unsigned int nfs_callback_set_tcpport;
44unsigned short nfs_callback_tcpport; 44unsigned short nfs_callback_tcpport;
45unsigned short nfs_callback_tcpport6; 45unsigned short nfs_callback_tcpport6;
46static const int nfs_set_port_min = 0; 46#define NFS_CALLBACK_MAXPORTNR (65535U)
47static const int nfs_set_port_max = 65535;
48 47
49static int param_set_port(const char *val, struct kernel_param *kp) 48static int param_set_portnr(const char *val, struct kernel_param *kp)
50{ 49{
51 char *endp; 50 unsigned long num;
52 int num = simple_strtol(val, &endp, 0); 51 int ret;
53 if (endp == val || *endp || num < nfs_set_port_min || num > nfs_set_port_max) 52
53 if (!val)
54 return -EINVAL;
55 ret = strict_strtoul(val, 0, &num);
56 if (ret == -EINVAL || num > NFS_CALLBACK_MAXPORTNR)
54 return -EINVAL; 57 return -EINVAL;
55 *((int *)kp->arg) = num; 58 *((unsigned int *)kp->arg) = num;
56 return 0; 59 return 0;
57} 60}
58 61
59module_param_call(callback_tcpport, param_set_port, param_get_int, 62static int param_get_portnr(char *buffer, struct kernel_param *kp)
60 &nfs_callback_set_tcpport, 0644); 63{
64 return param_get_uint(buffer, kp);
65}
66#define param_check_portnr(name, p) __param_check(name, p, unsigned int);
67
68module_param_named(callback_tcpport, nfs_callback_set_tcpport, portnr, 0644);
61 69
62/* 70/*
63 * This is the NFSv4 callback kernel thread. 71 * This is the NFSv4 callback kernel thread.
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 8d25ccb2d51d..e350bd6a2334 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -809,6 +809,9 @@ static int nfs_init_server(struct nfs_server *server,
809 /* Initialise the client representation from the mount data */ 809 /* Initialise the client representation from the mount data */
810 server->flags = data->flags; 810 server->flags = data->flags;
811 server->options = data->options; 811 server->options = data->options;
812 server->caps |= NFS_CAP_HARDLINKS|NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
813 NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|NFS_CAP_OWNER_GROUP|
814 NFS_CAP_ATIME|NFS_CAP_CTIME|NFS_CAP_MTIME;
812 815
813 if (data->rsize) 816 if (data->rsize)
814 server->rsize = nfs_block_size(data->rsize, NULL); 817 server->rsize = nfs_block_size(data->rsize, NULL);
@@ -879,6 +882,7 @@ static void nfs_server_set_fsinfo(struct nfs_server *server, struct nfs_fsinfo *
879 server->rsize = NFS_MAX_FILE_IO_SIZE; 882 server->rsize = NFS_MAX_FILE_IO_SIZE;
880 server->rpages = (server->rsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 883 server->rpages = (server->rsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
881 884
885 server->backing_dev_info.name = "nfs";
882 server->backing_dev_info.ra_pages = server->rpages * NFS_MAX_READAHEAD; 886 server->backing_dev_info.ra_pages = server->rpages * NFS_MAX_READAHEAD;
883 887
884 if (server->wsize > max_rpc_payload) 888 if (server->wsize > max_rpc_payload)
@@ -1074,10 +1078,6 @@ struct nfs_server *nfs_create_server(const struct nfs_parsed_mount_data *data,
1074 (unsigned long long) server->fsid.major, 1078 (unsigned long long) server->fsid.major,
1075 (unsigned long long) server->fsid.minor); 1079 (unsigned long long) server->fsid.minor);
1076 1080
1077 BUG_ON(!server->nfs_client);
1078 BUG_ON(!server->nfs_client->rpc_ops);
1079 BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops);
1080
1081 spin_lock(&nfs_client_lock); 1081 spin_lock(&nfs_client_lock);
1082 list_add_tail(&server->client_link, &server->nfs_client->cl_superblocks); 1082 list_add_tail(&server->client_link, &server->nfs_client->cl_superblocks);
1083 list_add_tail(&server->master_link, &nfs_volume_list); 1083 list_add_tail(&server->master_link, &nfs_volume_list);
@@ -1274,7 +1274,7 @@ static int nfs4_init_server(struct nfs_server *server,
1274 1274
1275 /* Initialise the client representation from the mount data */ 1275 /* Initialise the client representation from the mount data */
1276 server->flags = data->flags; 1276 server->flags = data->flags;
1277 server->caps |= NFS_CAP_ATOMIC_OPEN; 1277 server->caps |= NFS_CAP_ATOMIC_OPEN|NFS_CAP_CHANGE_ATTR;
1278 server->options = data->options; 1278 server->options = data->options;
1279 1279
1280 /* Get a client record */ 1280 /* Get a client record */
@@ -1359,10 +1359,6 @@ struct nfs_server *nfs4_create_server(const struct nfs_parsed_mount_data *data,
1359 if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN) 1359 if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN)
1360 server->namelen = NFS4_MAXNAMLEN; 1360 server->namelen = NFS4_MAXNAMLEN;
1361 1361
1362 BUG_ON(!server->nfs_client);
1363 BUG_ON(!server->nfs_client->rpc_ops);
1364 BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops);
1365
1366 spin_lock(&nfs_client_lock); 1362 spin_lock(&nfs_client_lock);
1367 list_add_tail(&server->client_link, &server->nfs_client->cl_superblocks); 1363 list_add_tail(&server->client_link, &server->nfs_client->cl_superblocks);
1368 list_add_tail(&server->master_link, &nfs_volume_list); 1364 list_add_tail(&server->master_link, &nfs_volume_list);
@@ -1400,7 +1396,7 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
1400 1396
1401 /* Initialise the client representation from the parent server */ 1397 /* Initialise the client representation from the parent server */
1402 nfs_server_copy_userdata(server, parent_server); 1398 nfs_server_copy_userdata(server, parent_server);
1403 server->caps |= NFS_CAP_ATOMIC_OPEN; 1399 server->caps |= NFS_CAP_ATOMIC_OPEN|NFS_CAP_CHANGE_ATTR;
1404 1400
1405 /* Get a client representation. 1401 /* Get a client representation.
1406 * Note: NFSv4 always uses TCP, */ 1402 * Note: NFSv4 always uses TCP, */
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index e4e089a8f294..6c3210099d51 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -934,9 +934,6 @@ out:
934 * back into its cache. We let the server do generic write 934 * back into its cache. We let the server do generic write
935 * parameter checking and report problems. 935 * parameter checking and report problems.
936 * 936 *
937 * We also avoid an unnecessary invocation of generic_osync_inode(),
938 * as it is fairly meaningless to sync the metadata of an NFS file.
939 *
940 * We eliminate local atime updates, see direct read above. 937 * We eliminate local atime updates, see direct read above.
941 * 938 *
942 * We avoid unnecessary page cache invalidations for normal cached 939 * We avoid unnecessary page cache invalidations for normal cached
diff --git a/fs/nfs/dns_resolve.c b/fs/nfs/dns_resolve.c
new file mode 100644
index 000000000000..f4d54ba97cc6
--- /dev/null
+++ b/fs/nfs/dns_resolve.c
@@ -0,0 +1,335 @@
1/*
2 * linux/fs/nfs/dns_resolve.c
3 *
4 * Copyright (c) 2009 Trond Myklebust <Trond.Myklebust@netapp.com>
5 *
6 * Resolves DNS hostnames into valid ip addresses
7 */
8
9#include <linux/hash.h>
10#include <linux/string.h>
11#include <linux/kmod.h>
12#include <linux/module.h>
13#include <linux/socket.h>
14#include <linux/seq_file.h>
15#include <linux/inet.h>
16#include <linux/sunrpc/clnt.h>
17#include <linux/sunrpc/cache.h>
18#include <linux/sunrpc/svcauth.h>
19
20#include "dns_resolve.h"
21#include "cache_lib.h"
22
23#define NFS_DNS_HASHBITS 4
24#define NFS_DNS_HASHTBL_SIZE (1 << NFS_DNS_HASHBITS)
25
26static struct cache_head *nfs_dns_table[NFS_DNS_HASHTBL_SIZE];
27
28struct nfs_dns_ent {
29 struct cache_head h;
30
31 char *hostname;
32 size_t namelen;
33
34 struct sockaddr_storage addr;
35 size_t addrlen;
36};
37
38
39static void nfs_dns_ent_init(struct cache_head *cnew,
40 struct cache_head *ckey)
41{
42 struct nfs_dns_ent *new;
43 struct nfs_dns_ent *key;
44
45 new = container_of(cnew, struct nfs_dns_ent, h);
46 key = container_of(ckey, struct nfs_dns_ent, h);
47
48 kfree(new->hostname);
49 new->hostname = kstrndup(key->hostname, key->namelen, GFP_KERNEL);
50 if (new->hostname) {
51 new->namelen = key->namelen;
52 memcpy(&new->addr, &key->addr, key->addrlen);
53 new->addrlen = key->addrlen;
54 } else {
55 new->namelen = 0;
56 new->addrlen = 0;
57 }
58}
59
60static void nfs_dns_ent_put(struct kref *ref)
61{
62 struct nfs_dns_ent *item;
63
64 item = container_of(ref, struct nfs_dns_ent, h.ref);
65 kfree(item->hostname);
66 kfree(item);
67}
68
69static struct cache_head *nfs_dns_ent_alloc(void)
70{
71 struct nfs_dns_ent *item = kmalloc(sizeof(*item), GFP_KERNEL);
72
73 if (item != NULL) {
74 item->hostname = NULL;
75 item->namelen = 0;
76 item->addrlen = 0;
77 return &item->h;
78 }
79 return NULL;
80};
81
82static unsigned int nfs_dns_hash(const struct nfs_dns_ent *key)
83{
84 return hash_str(key->hostname, NFS_DNS_HASHBITS);
85}
86
87static void nfs_dns_request(struct cache_detail *cd,
88 struct cache_head *ch,
89 char **bpp, int *blen)
90{
91 struct nfs_dns_ent *key = container_of(ch, struct nfs_dns_ent, h);
92
93 qword_add(bpp, blen, key->hostname);
94 (*bpp)[-1] = '\n';
95}
96
97static int nfs_dns_upcall(struct cache_detail *cd,
98 struct cache_head *ch)
99{
100 struct nfs_dns_ent *key = container_of(ch, struct nfs_dns_ent, h);
101 int ret;
102
103 ret = nfs_cache_upcall(cd, key->hostname);
104 if (ret)
105 ret = sunrpc_cache_pipe_upcall(cd, ch, nfs_dns_request);
106 return ret;
107}
108
109static int nfs_dns_match(struct cache_head *ca,
110 struct cache_head *cb)
111{
112 struct nfs_dns_ent *a;
113 struct nfs_dns_ent *b;
114
115 a = container_of(ca, struct nfs_dns_ent, h);
116 b = container_of(cb, struct nfs_dns_ent, h);
117
118 if (a->namelen == 0 || a->namelen != b->namelen)
119 return 0;
120 return memcmp(a->hostname, b->hostname, a->namelen) == 0;
121}
122
123static int nfs_dns_show(struct seq_file *m, struct cache_detail *cd,
124 struct cache_head *h)
125{
126 struct nfs_dns_ent *item;
127 long ttl;
128
129 if (h == NULL) {
130 seq_puts(m, "# ip address hostname ttl\n");
131 return 0;
132 }
133 item = container_of(h, struct nfs_dns_ent, h);
134 ttl = (long)item->h.expiry_time - (long)get_seconds();
135 if (ttl < 0)
136 ttl = 0;
137
138 if (!test_bit(CACHE_NEGATIVE, &h->flags)) {
139 char buf[INET6_ADDRSTRLEN+IPV6_SCOPE_ID_LEN+1];
140
141 rpc_ntop((struct sockaddr *)&item->addr, buf, sizeof(buf));
142 seq_printf(m, "%15s ", buf);
143 } else
144 seq_puts(m, "<none> ");
145 seq_printf(m, "%15s %ld\n", item->hostname, ttl);
146 return 0;
147}
148
149struct nfs_dns_ent *nfs_dns_lookup(struct cache_detail *cd,
150 struct nfs_dns_ent *key)
151{
152 struct cache_head *ch;
153
154 ch = sunrpc_cache_lookup(cd,
155 &key->h,
156 nfs_dns_hash(key));
157 if (!ch)
158 return NULL;
159 return container_of(ch, struct nfs_dns_ent, h);
160}
161
162struct nfs_dns_ent *nfs_dns_update(struct cache_detail *cd,
163 struct nfs_dns_ent *new,
164 struct nfs_dns_ent *key)
165{
166 struct cache_head *ch;
167
168 ch = sunrpc_cache_update(cd,
169 &new->h, &key->h,
170 nfs_dns_hash(key));
171 if (!ch)
172 return NULL;
173 return container_of(ch, struct nfs_dns_ent, h);
174}
175
176static int nfs_dns_parse(struct cache_detail *cd, char *buf, int buflen)
177{
178 char buf1[NFS_DNS_HOSTNAME_MAXLEN+1];
179 struct nfs_dns_ent key, *item;
180 unsigned long ttl;
181 ssize_t len;
182 int ret = -EINVAL;
183
184 if (buf[buflen-1] != '\n')
185 goto out;
186 buf[buflen-1] = '\0';
187
188 len = qword_get(&buf, buf1, sizeof(buf1));
189 if (len <= 0)
190 goto out;
191 key.addrlen = rpc_pton(buf1, len,
192 (struct sockaddr *)&key.addr,
193 sizeof(key.addr));
194
195 len = qword_get(&buf, buf1, sizeof(buf1));
196 if (len <= 0)
197 goto out;
198
199 key.hostname = buf1;
200 key.namelen = len;
201 memset(&key.h, 0, sizeof(key.h));
202
203 ttl = get_expiry(&buf);
204 if (ttl == 0)
205 goto out;
206 key.h.expiry_time = ttl + get_seconds();
207
208 ret = -ENOMEM;
209 item = nfs_dns_lookup(cd, &key);
210 if (item == NULL)
211 goto out;
212
213 if (key.addrlen == 0)
214 set_bit(CACHE_NEGATIVE, &key.h.flags);
215
216 item = nfs_dns_update(cd, &key, item);
217 if (item == NULL)
218 goto out;
219
220 ret = 0;
221 cache_put(&item->h, cd);
222out:
223 return ret;
224}
225
226static struct cache_detail nfs_dns_resolve = {
227 .owner = THIS_MODULE,
228 .hash_size = NFS_DNS_HASHTBL_SIZE,
229 .hash_table = nfs_dns_table,
230 .name = "dns_resolve",
231 .cache_put = nfs_dns_ent_put,
232 .cache_upcall = nfs_dns_upcall,
233 .cache_parse = nfs_dns_parse,
234 .cache_show = nfs_dns_show,
235 .match = nfs_dns_match,
236 .init = nfs_dns_ent_init,
237 .update = nfs_dns_ent_init,
238 .alloc = nfs_dns_ent_alloc,
239};
240
241static int do_cache_lookup(struct cache_detail *cd,
242 struct nfs_dns_ent *key,
243 struct nfs_dns_ent **item,
244 struct nfs_cache_defer_req *dreq)
245{
246 int ret = -ENOMEM;
247
248 *item = nfs_dns_lookup(cd, key);
249 if (*item) {
250 ret = cache_check(cd, &(*item)->h, &dreq->req);
251 if (ret)
252 *item = NULL;
253 }
254 return ret;
255}
256
257static int do_cache_lookup_nowait(struct cache_detail *cd,
258 struct nfs_dns_ent *key,
259 struct nfs_dns_ent **item)
260{
261 int ret = -ENOMEM;
262
263 *item = nfs_dns_lookup(cd, key);
264 if (!*item)
265 goto out_err;
266 ret = -ETIMEDOUT;
267 if (!test_bit(CACHE_VALID, &(*item)->h.flags)
268 || (*item)->h.expiry_time < get_seconds()
269 || cd->flush_time > (*item)->h.last_refresh)
270 goto out_put;
271 ret = -ENOENT;
272 if (test_bit(CACHE_NEGATIVE, &(*item)->h.flags))
273 goto out_put;
274 return 0;
275out_put:
276 cache_put(&(*item)->h, cd);
277out_err:
278 *item = NULL;
279 return ret;
280}
281
282static int do_cache_lookup_wait(struct cache_detail *cd,
283 struct nfs_dns_ent *key,
284 struct nfs_dns_ent **item)
285{
286 struct nfs_cache_defer_req *dreq;
287 int ret = -ENOMEM;
288
289 dreq = nfs_cache_defer_req_alloc();
290 if (!dreq)
291 goto out;
292 ret = do_cache_lookup(cd, key, item, dreq);
293 if (ret == -EAGAIN) {
294 ret = nfs_cache_wait_for_upcall(dreq);
295 if (!ret)
296 ret = do_cache_lookup_nowait(cd, key, item);
297 }
298 nfs_cache_defer_req_put(dreq);
299out:
300 return ret;
301}
302
303ssize_t nfs_dns_resolve_name(char *name, size_t namelen,
304 struct sockaddr *sa, size_t salen)
305{
306 struct nfs_dns_ent key = {
307 .hostname = name,
308 .namelen = namelen,
309 };
310 struct nfs_dns_ent *item = NULL;
311 ssize_t ret;
312
313 ret = do_cache_lookup_wait(&nfs_dns_resolve, &key, &item);
314 if (ret == 0) {
315 if (salen >= item->addrlen) {
316 memcpy(sa, &item->addr, item->addrlen);
317 ret = item->addrlen;
318 } else
319 ret = -EOVERFLOW;
320 cache_put(&item->h, &nfs_dns_resolve);
321 } else if (ret == -ENOENT)
322 ret = -ESRCH;
323 return ret;
324}
325
326int nfs_dns_resolver_init(void)
327{
328 return nfs_cache_register(&nfs_dns_resolve);
329}
330
331void nfs_dns_resolver_destroy(void)
332{
333 nfs_cache_unregister(&nfs_dns_resolve);
334}
335
diff --git a/fs/nfs/dns_resolve.h b/fs/nfs/dns_resolve.h
new file mode 100644
index 000000000000..a3f0938babf7
--- /dev/null
+++ b/fs/nfs/dns_resolve.h
@@ -0,0 +1,14 @@
1/*
2 * Resolve DNS hostnames into valid ip addresses
3 */
4#ifndef __LINUX_FS_NFS_DNS_RESOLVE_H
5#define __LINUX_FS_NFS_DNS_RESOLVE_H
6
7#define NFS_DNS_HOSTNAME_MAXLEN (128)
8
9extern int nfs_dns_resolver_init(void);
10extern void nfs_dns_resolver_destroy(void);
11extern ssize_t nfs_dns_resolve_name(char *name, size_t namelen,
12 struct sockaddr *sa, size_t salen);
13
14#endif
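A minimal sketch of a caller of the resolver interface declared above. The wrapper function and hostname handling are illustrative; nfs_dns_resolve_name() and its return convention (number of address bytes copied on success, -ESRCH for a negative cache entry, -ETIMEDOUT when the upcall does not answer, -EOVERFLOW when the buffer is too small) come from the dns_resolve.c hunk:

    #include <linux/string.h>
    #include <linux/socket.h>
    #include "dns_resolve.h"

    static int example_resolve(char *hostname, struct sockaddr_storage *ss)
    {
            ssize_t len;

            len = nfs_dns_resolve_name(hostname, strlen(hostname),
                                       (struct sockaddr *)ss, sizeof(*ss));
            if (len < 0)
                    return len;     /* -ESRCH, -ETIMEDOUT, -EOVERFLOW, ... */
            return 0;               /* *ss now holds len bytes of resolved address */
    }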
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 05062329b678..5021b75d2d1e 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -328,6 +328,42 @@ nfs_file_fsync(struct file *file, struct dentry *dentry, int datasync)
328} 328}
329 329
330/* 330/*
331 * Decide whether a read/modify/write cycle may be more efficient
332 * then a modify/write/read cycle when writing to a page in the
333 * page cache.
334 *
335 * The modify/write/read cycle may occur if a page is read before
336 * being completely filled by the writer. In this situation, the
337 * page must be completely written to stable storage on the server
338 * before it can be refilled by reading in the page from the server.
339 * This can lead to expensive, small, FILE_SYNC mode writes being
340 * done.
341 *
342 * It may be more efficient to read the page first if the file is
343 * open for reading in addition to writing, the page is not marked
344 * as Uptodate, it is not dirty or waiting to be committed,
345 * indicating that it was previously allocated and then modified,
346 * that there were valid bytes of data in that range of the file,
347 * and that the new data won't completely replace the old data in
348 * that range of the file.
349 */
350static int nfs_want_read_modify_write(struct file *file, struct page *page,
351 loff_t pos, unsigned len)
352{
353 unsigned int pglen = nfs_page_length(page);
354 unsigned int offset = pos & (PAGE_CACHE_SIZE - 1);
355 unsigned int end = offset + len;
356
357 if ((file->f_mode & FMODE_READ) && /* open for read? */
358 !PageUptodate(page) && /* Uptodate? */
359 !PagePrivate(page) && /* i/o request already? */
360 pglen && /* valid bytes of file? */
361 (end < pglen || offset)) /* replace all valid bytes? */
362 return 1;
363 return 0;
364}
365
366/*
331 * This does the "real" work of the write. We must allocate and lock the 367 * This does the "real" work of the write. We must allocate and lock the
332 * page to be sent back to the generic routine, which then copies the 368 * page to be sent back to the generic routine, which then copies the
333 * data from user space. 369 * data from user space.
@@ -340,15 +376,16 @@ static int nfs_write_begin(struct file *file, struct address_space *mapping,
340 struct page **pagep, void **fsdata) 376 struct page **pagep, void **fsdata)
341{ 377{
342 int ret; 378 int ret;
343 pgoff_t index; 379 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
344 struct page *page; 380 struct page *page;
345 index = pos >> PAGE_CACHE_SHIFT; 381 int once_thru = 0;
346 382
347 dfprintk(PAGECACHE, "NFS: write_begin(%s/%s(%ld), %u@%lld)\n", 383 dfprintk(PAGECACHE, "NFS: write_begin(%s/%s(%ld), %u@%lld)\n",
348 file->f_path.dentry->d_parent->d_name.name, 384 file->f_path.dentry->d_parent->d_name.name,
349 file->f_path.dentry->d_name.name, 385 file->f_path.dentry->d_name.name,
350 mapping->host->i_ino, len, (long long) pos); 386 mapping->host->i_ino, len, (long long) pos);
351 387
388start:
352 /* 389 /*
353 * Prevent starvation issues if someone is doing a consistency 390 * Prevent starvation issues if someone is doing a consistency
354 * sync-to-disk 391 * sync-to-disk
@@ -367,6 +404,13 @@ static int nfs_write_begin(struct file *file, struct address_space *mapping,
367 if (ret) { 404 if (ret) {
368 unlock_page(page); 405 unlock_page(page);
369 page_cache_release(page); 406 page_cache_release(page);
407 } else if (!once_thru &&
408 nfs_want_read_modify_write(file, page, pos, len)) {
409 once_thru = 1;
410 ret = nfs_readpage(file, page);
411 page_cache_release(page);
412 if (!ret)
413 goto start;
370 } 414 }
371 return ret; 415 return ret;
372} 416}
@@ -479,6 +523,7 @@ const struct address_space_operations nfs_file_aops = {
479 .invalidatepage = nfs_invalidate_page, 523 .invalidatepage = nfs_invalidate_page,
480 .releasepage = nfs_release_page, 524 .releasepage = nfs_release_page,
481 .direct_IO = nfs_direct_IO, 525 .direct_IO = nfs_direct_IO,
526 .migratepage = nfs_migrate_page,
482 .launder_page = nfs_launder_page, 527 .launder_page = nfs_launder_page,
483}; 528};
484 529
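To make the nfs_want_read_modify_write() heuristic above concrete, consider 4 KiB pages and a 100-byte write at file offset 5000 into a file opened O_RDWR. The numbers below are purely illustrative and mirror the computation in the function:

    loff_t pos = 5000;
    unsigned int len = 100;
    unsigned int offset = pos & (PAGE_CACHE_SIZE - 1);      /* 5000 % 4096 = 904 */
    unsigned int end    = offset + len;                     /* 1004 */

With, say, pglen = 1904 valid bytes in that page (a 6000-byte file), end < pglen, so the new data would not replace everything already valid in the page. If the page is also !Uptodate and has no I/O pending, nfs_want_read_modify_write() returns 1 and nfs_write_begin() reads the page in once (once_thru guards against looping) instead of later forcing the small FILE_SYNC write described in the comment.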
diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
index 86147b0ab2cf..21a84d45916f 100644
--- a/fs/nfs/idmap.c
+++ b/fs/nfs/idmap.c
@@ -101,7 +101,7 @@ static void idmap_pipe_destroy_msg(struct rpc_pipe_msg *);
101 101
102static unsigned int fnvhash32(const void *, size_t); 102static unsigned int fnvhash32(const void *, size_t);
103 103
104static struct rpc_pipe_ops idmap_upcall_ops = { 104static const struct rpc_pipe_ops idmap_upcall_ops = {
105 .upcall = idmap_pipe_upcall, 105 .upcall = idmap_pipe_upcall,
106 .downcall = idmap_pipe_downcall, 106 .downcall = idmap_pipe_downcall,
107 .destroy_msg = idmap_pipe_destroy_msg, 107 .destroy_msg = idmap_pipe_destroy_msg,
@@ -119,8 +119,8 @@ nfs_idmap_new(struct nfs_client *clp)
119 if (idmap == NULL) 119 if (idmap == NULL)
120 return -ENOMEM; 120 return -ENOMEM;
121 121
122 idmap->idmap_dentry = rpc_mkpipe(clp->cl_rpcclient->cl_dentry, "idmap", 122 idmap->idmap_dentry = rpc_mkpipe(clp->cl_rpcclient->cl_path.dentry,
123 idmap, &idmap_upcall_ops, 0); 123 "idmap", idmap, &idmap_upcall_ops, 0);
124 if (IS_ERR(idmap->idmap_dentry)) { 124 if (IS_ERR(idmap->idmap_dentry)) {
125 error = PTR_ERR(idmap->idmap_dentry); 125 error = PTR_ERR(idmap->idmap_dentry);
126 kfree(idmap); 126 kfree(idmap);
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index bd7938eda6a8..060022b4651c 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -46,6 +46,7 @@
46#include "iostat.h" 46#include "iostat.h"
47#include "internal.h" 47#include "internal.h"
48#include "fscache.h" 48#include "fscache.h"
49#include "dns_resolve.h"
49 50
50#define NFSDBG_FACILITY NFSDBG_VFS 51#define NFSDBG_FACILITY NFSDBG_VFS
51 52
@@ -286,6 +287,11 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
286 /* We can't support update_atime(), since the server will reset it */ 287 /* We can't support update_atime(), since the server will reset it */
287 inode->i_flags |= S_NOATIME|S_NOCMTIME; 288 inode->i_flags |= S_NOATIME|S_NOCMTIME;
288 inode->i_mode = fattr->mode; 289 inode->i_mode = fattr->mode;
290 if ((fattr->valid & NFS_ATTR_FATTR_MODE) == 0
291 && nfs_server_capable(inode, NFS_CAP_MODE))
292 nfsi->cache_validity |= NFS_INO_INVALID_ATTR
293 | NFS_INO_INVALID_ACCESS
294 | NFS_INO_INVALID_ACL;
289 /* Why so? Because we want revalidate for devices/FIFOs, and 295 /* Why so? Because we want revalidate for devices/FIFOs, and
290 * that's precisely what we have in nfs_file_inode_operations. 296 * that's precisely what we have in nfs_file_inode_operations.
291 */ 297 */
@@ -330,20 +336,46 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
330 nfsi->attr_gencount = fattr->gencount; 336 nfsi->attr_gencount = fattr->gencount;
331 if (fattr->valid & NFS_ATTR_FATTR_ATIME) 337 if (fattr->valid & NFS_ATTR_FATTR_ATIME)
332 inode->i_atime = fattr->atime; 338 inode->i_atime = fattr->atime;
339 else if (nfs_server_capable(inode, NFS_CAP_ATIME))
340 nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
333 if (fattr->valid & NFS_ATTR_FATTR_MTIME) 341 if (fattr->valid & NFS_ATTR_FATTR_MTIME)
334 inode->i_mtime = fattr->mtime; 342 inode->i_mtime = fattr->mtime;
343 else if (nfs_server_capable(inode, NFS_CAP_MTIME))
344 nfsi->cache_validity |= NFS_INO_INVALID_ATTR
345 | NFS_INO_INVALID_DATA;
335 if (fattr->valid & NFS_ATTR_FATTR_CTIME) 346 if (fattr->valid & NFS_ATTR_FATTR_CTIME)
336 inode->i_ctime = fattr->ctime; 347 inode->i_ctime = fattr->ctime;
348 else if (nfs_server_capable(inode, NFS_CAP_CTIME))
349 nfsi->cache_validity |= NFS_INO_INVALID_ATTR
350 | NFS_INO_INVALID_ACCESS
351 | NFS_INO_INVALID_ACL;
337 if (fattr->valid & NFS_ATTR_FATTR_CHANGE) 352 if (fattr->valid & NFS_ATTR_FATTR_CHANGE)
338 nfsi->change_attr = fattr->change_attr; 353 nfsi->change_attr = fattr->change_attr;
354 else if (nfs_server_capable(inode, NFS_CAP_CHANGE_ATTR))
355 nfsi->cache_validity |= NFS_INO_INVALID_ATTR
356 | NFS_INO_INVALID_DATA;
339 if (fattr->valid & NFS_ATTR_FATTR_SIZE) 357 if (fattr->valid & NFS_ATTR_FATTR_SIZE)
340 inode->i_size = nfs_size_to_loff_t(fattr->size); 358 inode->i_size = nfs_size_to_loff_t(fattr->size);
359 else
360 nfsi->cache_validity |= NFS_INO_INVALID_ATTR
361 | NFS_INO_INVALID_DATA
362 | NFS_INO_REVAL_PAGECACHE;
341 if (fattr->valid & NFS_ATTR_FATTR_NLINK) 363 if (fattr->valid & NFS_ATTR_FATTR_NLINK)
342 inode->i_nlink = fattr->nlink; 364 inode->i_nlink = fattr->nlink;
365 else if (nfs_server_capable(inode, NFS_CAP_NLINK))
366 nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
343 if (fattr->valid & NFS_ATTR_FATTR_OWNER) 367 if (fattr->valid & NFS_ATTR_FATTR_OWNER)
344 inode->i_uid = fattr->uid; 368 inode->i_uid = fattr->uid;
369 else if (nfs_server_capable(inode, NFS_CAP_OWNER))
370 nfsi->cache_validity |= NFS_INO_INVALID_ATTR
371 | NFS_INO_INVALID_ACCESS
372 | NFS_INO_INVALID_ACL;
345 if (fattr->valid & NFS_ATTR_FATTR_GROUP) 373 if (fattr->valid & NFS_ATTR_FATTR_GROUP)
346 inode->i_gid = fattr->gid; 374 inode->i_gid = fattr->gid;
375 else if (nfs_server_capable(inode, NFS_CAP_OWNER_GROUP))
376 nfsi->cache_validity |= NFS_INO_INVALID_ATTR
377 | NFS_INO_INVALID_ACCESS
378 | NFS_INO_INVALID_ACL;
347 if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED) 379 if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED)
348 inode->i_blocks = fattr->du.nfs2.blocks; 380 inode->i_blocks = fattr->du.nfs2.blocks;
349 if (fattr->valid & NFS_ATTR_FATTR_SPACE_USED) { 381 if (fattr->valid & NFS_ATTR_FATTR_SPACE_USED) {
@@ -1145,6 +1177,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
1145 loff_t cur_isize, new_isize; 1177 loff_t cur_isize, new_isize;
1146 unsigned long invalid = 0; 1178 unsigned long invalid = 0;
1147 unsigned long now = jiffies; 1179 unsigned long now = jiffies;
1180 unsigned long save_cache_validity;
1148 1181
1149 dfprintk(VFS, "NFS: %s(%s/%ld ct=%d info=0x%x)\n", 1182 dfprintk(VFS, "NFS: %s(%s/%ld ct=%d info=0x%x)\n",
1150 __func__, inode->i_sb->s_id, inode->i_ino, 1183 __func__, inode->i_sb->s_id, inode->i_ino,
@@ -1171,10 +1204,11 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
1171 */ 1204 */
1172 nfsi->read_cache_jiffies = fattr->time_start; 1205 nfsi->read_cache_jiffies = fattr->time_start;
1173 1206
1174 if ((fattr->valid & NFS_ATTR_FATTR_CHANGE) || (fattr->valid & (NFS_ATTR_FATTR_MTIME|NFS_ATTR_FATTR_CTIME))) 1207 save_cache_validity = nfsi->cache_validity;
1175 nfsi->cache_validity &= ~(NFS_INO_INVALID_ATTR 1208 nfsi->cache_validity &= ~(NFS_INO_INVALID_ATTR
1176 | NFS_INO_INVALID_ATIME 1209 | NFS_INO_INVALID_ATIME
1177 | NFS_INO_REVAL_PAGECACHE); 1210 | NFS_INO_REVAL_FORCED
1211 | NFS_INO_REVAL_PAGECACHE);
1178 1212
1179 /* Do atomic weak cache consistency updates */ 1213 /* Do atomic weak cache consistency updates */
1180 nfs_wcc_update_inode(inode, fattr); 1214 nfs_wcc_update_inode(inode, fattr);
@@ -1189,7 +1223,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
1189 nfs_force_lookup_revalidate(inode); 1223 nfs_force_lookup_revalidate(inode);
1190 nfsi->change_attr = fattr->change_attr; 1224 nfsi->change_attr = fattr->change_attr;
1191 } 1225 }
1192 } 1226 } else if (server->caps & NFS_CAP_CHANGE_ATTR)
1227 invalid |= save_cache_validity;
1193 1228
1194 if (fattr->valid & NFS_ATTR_FATTR_MTIME) { 1229 if (fattr->valid & NFS_ATTR_FATTR_MTIME) {
1195 /* NFSv2/v3: Check if the mtime agrees */ 1230 /* NFSv2/v3: Check if the mtime agrees */
@@ -1201,7 +1236,12 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
1201 nfs_force_lookup_revalidate(inode); 1236 nfs_force_lookup_revalidate(inode);
1202 memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime)); 1237 memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime));
1203 } 1238 }
1204 } 1239 } else if (server->caps & NFS_CAP_MTIME)
1240 invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
1241 | NFS_INO_INVALID_DATA
1242 | NFS_INO_REVAL_PAGECACHE
1243 | NFS_INO_REVAL_FORCED);
1244
1205 if (fattr->valid & NFS_ATTR_FATTR_CTIME) { 1245 if (fattr->valid & NFS_ATTR_FATTR_CTIME) {
1206 /* If ctime has changed we should definitely clear access+acl caches */ 1246 /* If ctime has changed we should definitely clear access+acl caches */
1207 if (!timespec_equal(&inode->i_ctime, &fattr->ctime)) { 1247 if (!timespec_equal(&inode->i_ctime, &fattr->ctime)) {
@@ -1215,7 +1255,11 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
1215 } 1255 }
1216 memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime)); 1256 memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime));
1217 } 1257 }
1218 } 1258 } else if (server->caps & NFS_CAP_CTIME)
1259 invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
1260 | NFS_INO_INVALID_ACCESS
1261 | NFS_INO_INVALID_ACL
1262 | NFS_INO_REVAL_FORCED);
1219 1263
1220 /* Check if our cached file size is stale */ 1264 /* Check if our cached file size is stale */
1221 if (fattr->valid & NFS_ATTR_FATTR_SIZE) { 1265 if (fattr->valid & NFS_ATTR_FATTR_SIZE) {
@@ -1231,30 +1275,50 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
1231 dprintk("NFS: isize change on server for file %s/%ld\n", 1275 dprintk("NFS: isize change on server for file %s/%ld\n",
1232 inode->i_sb->s_id, inode->i_ino); 1276 inode->i_sb->s_id, inode->i_ino);
1233 } 1277 }
1234 } 1278 } else
1279 invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
1280 | NFS_INO_REVAL_PAGECACHE
1281 | NFS_INO_REVAL_FORCED);
1235 1282
1236 1283
1237 if (fattr->valid & NFS_ATTR_FATTR_ATIME) 1284 if (fattr->valid & NFS_ATTR_FATTR_ATIME)
1238 memcpy(&inode->i_atime, &fattr->atime, sizeof(inode->i_atime)); 1285 memcpy(&inode->i_atime, &fattr->atime, sizeof(inode->i_atime));
1286 else if (server->caps & NFS_CAP_ATIME)
1287 invalid |= save_cache_validity & (NFS_INO_INVALID_ATIME
1288 | NFS_INO_REVAL_FORCED);
1239 1289
1240 if (fattr->valid & NFS_ATTR_FATTR_MODE) { 1290 if (fattr->valid & NFS_ATTR_FATTR_MODE) {
1241 if ((inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO)) { 1291 if ((inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO)) {
1242 invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL; 1292 invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
1243 inode->i_mode = fattr->mode; 1293 inode->i_mode = fattr->mode;
1244 } 1294 }
1245 } 1295 } else if (server->caps & NFS_CAP_MODE)
1296 invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
1297 | NFS_INO_INVALID_ACCESS
1298 | NFS_INO_INVALID_ACL
1299 | NFS_INO_REVAL_FORCED);
1300
1246 if (fattr->valid & NFS_ATTR_FATTR_OWNER) { 1301 if (fattr->valid & NFS_ATTR_FATTR_OWNER) {
1247 if (inode->i_uid != fattr->uid) { 1302 if (inode->i_uid != fattr->uid) {
1248 invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL; 1303 invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
1249 inode->i_uid = fattr->uid; 1304 inode->i_uid = fattr->uid;
1250 } 1305 }
1251 } 1306 } else if (server->caps & NFS_CAP_OWNER)
1307 invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
1308 | NFS_INO_INVALID_ACCESS
1309 | NFS_INO_INVALID_ACL
1310 | NFS_INO_REVAL_FORCED);
1311
1252 if (fattr->valid & NFS_ATTR_FATTR_GROUP) { 1312 if (fattr->valid & NFS_ATTR_FATTR_GROUP) {
1253 if (inode->i_gid != fattr->gid) { 1313 if (inode->i_gid != fattr->gid) {
1254 invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL; 1314 invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
1255 inode->i_gid = fattr->gid; 1315 inode->i_gid = fattr->gid;
1256 } 1316 }
1257 } 1317 } else if (server->caps & NFS_CAP_OWNER_GROUP)
1318 invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
1319 | NFS_INO_INVALID_ACCESS
1320 | NFS_INO_INVALID_ACL
1321 | NFS_INO_REVAL_FORCED);
1258 1322
1259 if (fattr->valid & NFS_ATTR_FATTR_NLINK) { 1323 if (fattr->valid & NFS_ATTR_FATTR_NLINK) {
1260 if (inode->i_nlink != fattr->nlink) { 1324 if (inode->i_nlink != fattr->nlink) {
@@ -1263,7 +1327,9 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
1263 invalid |= NFS_INO_INVALID_DATA; 1327 invalid |= NFS_INO_INVALID_DATA;
1264 inode->i_nlink = fattr->nlink; 1328 inode->i_nlink = fattr->nlink;
1265 } 1329 }
1266 } 1330 } else if (server->caps & NFS_CAP_NLINK)
1331 invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
1332 | NFS_INO_REVAL_FORCED);
1267 1333
1268 if (fattr->valid & NFS_ATTR_FATTR_SPACE_USED) { 1334 if (fattr->valid & NFS_ATTR_FATTR_SPACE_USED) {
1269 /* 1335 /*
@@ -1293,9 +1359,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
1293 || S_ISLNK(inode->i_mode))) 1359 || S_ISLNK(inode->i_mode)))
1294 invalid &= ~NFS_INO_INVALID_DATA; 1360 invalid &= ~NFS_INO_INVALID_DATA;
1295 if (!nfs_have_delegation(inode, FMODE_READ) || 1361 if (!nfs_have_delegation(inode, FMODE_READ) ||
1296 (nfsi->cache_validity & NFS_INO_REVAL_FORCED)) 1362 (save_cache_validity & NFS_INO_REVAL_FORCED))
1297 nfsi->cache_validity |= invalid; 1363 nfsi->cache_validity |= invalid;
1298 nfsi->cache_validity &= ~NFS_INO_REVAL_FORCED;
1299 1364
1300 return 0; 1365 return 0;
1301 out_changed: 1366 out_changed:
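Note on the nfs_update_inode() hunks above: for every attribute the server failed to return, the previously saved cache_validity bits are now carried forward into "invalid", but only when the server advertises the matching NFS_CAP_* capability, so a later revalidation is not silently lost. A minimal userspace sketch of that pattern, using stand-in flag values rather than the kernel's NFS_INO_*/NFS_CAP_* constants:

#include <stdio.h>

/* stand-in validity and capability bits (values are illustrative only) */
#define INO_INVALID_ATTR   0x01
#define INO_INVALID_ACCESS 0x02
#define INO_INVALID_ACL    0x04
#define INO_REVAL_FORCED   0x08

#define CAP_MODE           0x01
#define ATTR_MODE          0x01   /* "server returned the mode attribute" */

static unsigned long update_mode(unsigned long caps, unsigned long valid,
                                 unsigned long save_cache_validity)
{
    unsigned long invalid = 0;

    if (valid & ATTR_MODE) {
        /* attribute present: compare and update as before */
    } else if (caps & CAP_MODE) {
        /* attribute missing although the server supports it:
         * keep the flags we were about to clear, so a later
         * revalidation still happens. */
        invalid |= save_cache_validity & (INO_INVALID_ATTR
                                        | INO_INVALID_ACCESS
                                        | INO_INVALID_ACL
                                        | INO_REVAL_FORCED);
    }
    return invalid;
}

int main(void)
{
    /* server supports mode but did not return it; an ATTR reval was pending */
    printf("invalid=0x%lx\n",
           update_mode(CAP_MODE, 0, INO_INVALID_ATTR | INO_REVAL_FORCED));
    return 0;
}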
@@ -1442,6 +1507,10 @@ static int __init init_nfs_fs(void)
1442{ 1507{
1443 int err; 1508 int err;
1444 1509
1510 err = nfs_dns_resolver_init();
1511 if (err < 0)
1512 goto out8;
1513
1445 err = nfs_fscache_register(); 1514 err = nfs_fscache_register();
1446 if (err < 0) 1515 if (err < 0)
1447 goto out7; 1516 goto out7;
@@ -1500,6 +1569,8 @@ out5:
1500out6: 1569out6:
1501 nfs_fscache_unregister(); 1570 nfs_fscache_unregister();
1502out7: 1571out7:
1572 nfs_dns_resolver_destroy();
1573out8:
1503 return err; 1574 return err;
1504} 1575}
1505 1576
@@ -1511,6 +1582,7 @@ static void __exit exit_nfs_fs(void)
1511 nfs_destroy_inodecache(); 1582 nfs_destroy_inodecache();
1512 nfs_destroy_nfspagecache(); 1583 nfs_destroy_nfspagecache();
1513 nfs_fscache_unregister(); 1584 nfs_fscache_unregister();
1585 nfs_dns_resolver_destroy();
1514#ifdef CONFIG_PROC_FS 1586#ifdef CONFIG_PROC_FS
1515 rpc_proc_unregister("nfs"); 1587 rpc_proc_unregister("nfs");
1516#endif 1588#endif
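The init_nfs_fs()/exit_nfs_fs() hunks add nfs_dns_resolver_init() as the first setup step and nfs_dns_resolver_destroy() as the last teardown step, extending the existing goto-based unwind with a new out8 label. Sketched below as a generic staged-init idiom; the subsystem names are hypothetical stand-ins, not kernel functions:

#include <stdio.h>

static int  resolver_init(void)  { return 0; }
static void resolver_exit(void)  { }
static int  cache_init(void)     { return 0; }
static void cache_exit(void)     { }
static int  proc_init(void)      { return -1; }   /* pretend this step fails */

static int init_all(void)
{
    int err;

    err = resolver_init();
    if (err < 0)
        goto out_resolver;
    err = cache_init();
    if (err < 0)
        goto out_cache;
    err = proc_init();
    if (err < 0)
        goto out_proc;
    return 0;

    /* unwind in strict reverse order of initialisation */
out_proc:
    cache_exit();
out_cache:
    resolver_exit();
out_resolver:
    return err;
}

int main(void)
{
    printf("init_all() = %d\n", init_all());
    return 0;
}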
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 7dd90a6769d0..e21b1bb9972f 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -49,6 +49,11 @@ struct nfs_clone_mount {
49#define NFS_MAX_SECFLAVORS (12) 49#define NFS_MAX_SECFLAVORS (12)
50 50
51/* 51/*
52 * Value used if the user did not specify a port value.
53 */
54#define NFS_UNSPEC_PORT (-1)
55
56/*
52 * In-kernel mount arguments 57 * In-kernel mount arguments
53 */ 58 */
54struct nfs_parsed_mount_data { 59struct nfs_parsed_mount_data {
@@ -63,6 +68,7 @@ struct nfs_parsed_mount_data {
63 unsigned int auth_flavor_len; 68 unsigned int auth_flavor_len;
64 rpc_authflavor_t auth_flavors[1]; 69 rpc_authflavor_t auth_flavors[1];
65 char *client_address; 70 char *client_address;
71 unsigned int version;
66 unsigned int minorversion; 72 unsigned int minorversion;
67 char *fscache_uniq; 73 char *fscache_uniq;
68 74
@@ -71,7 +77,7 @@ struct nfs_parsed_mount_data {
71 size_t addrlen; 77 size_t addrlen;
72 char *hostname; 78 char *hostname;
73 u32 version; 79 u32 version;
74 unsigned short port; 80 int port;
75 unsigned short protocol; 81 unsigned short protocol;
76 } mount_server; 82 } mount_server;
77 83
@@ -80,7 +86,7 @@ struct nfs_parsed_mount_data {
80 size_t addrlen; 86 size_t addrlen;
81 char *hostname; 87 char *hostname;
82 char *export_path; 88 char *export_path;
83 unsigned short port; 89 int port;
84 unsigned short protocol; 90 unsigned short protocol;
85 } nfs_server; 91 } nfs_server;
86 92
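The port fields above widen from unsigned short to int so that NFS_UNSPEC_PORT (-1) can mark "no port given on the command line" as distinct from any real 16-bit value, letting the mount path fall back to a protocol default. A small sketch of that sentinel-plus-default idea; the default used here is only illustrative:

#include <stdio.h>

#define UNSPEC_PORT (-1)        /* mirrors the NFS_UNSPEC_PORT idea */
#define DEFAULT_NFS_PORT 2049   /* assumed default when nothing was specified */

static unsigned short resolve_port(int requested)
{
    if (requested == UNSPEC_PORT)
        return DEFAULT_NFS_PORT;      /* user gave no port at all */
    return (unsigned short)requested; /* honour what was asked for,
                                         including an explicit 0 */
}

int main(void)
{
    printf("%u %u\n", resolve_port(UNSPEC_PORT), resolve_port(0));
    return 0;
}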
@@ -102,6 +108,7 @@ struct nfs_mount_request {
102}; 108};
103 109
104extern int nfs_mount(struct nfs_mount_request *info); 110extern int nfs_mount(struct nfs_mount_request *info);
111extern void nfs_umount(const struct nfs_mount_request *info);
105 112
106/* client.c */ 113/* client.c */
107extern struct rpc_program nfs_program; 114extern struct rpc_program nfs_program;
@@ -213,7 +220,6 @@ void nfs_zap_acl_cache(struct inode *inode);
213extern int nfs_wait_bit_killable(void *word); 220extern int nfs_wait_bit_killable(void *word);

214 221
215/* super.c */ 222/* super.c */
216void nfs_parse_ip_address(char *, size_t, struct sockaddr *, size_t *);
217extern struct file_system_type nfs_xdev_fs_type; 223extern struct file_system_type nfs_xdev_fs_type;
218#ifdef CONFIG_NFS_V4 224#ifdef CONFIG_NFS_V4
219extern struct file_system_type nfs4_xdev_fs_type; 225extern struct file_system_type nfs4_xdev_fs_type;
@@ -248,6 +254,12 @@ extern void nfs_read_prepare(struct rpc_task *task, void *calldata);
248 254
249/* write.c */ 255/* write.c */
250extern void nfs_write_prepare(struct rpc_task *task, void *calldata); 256extern void nfs_write_prepare(struct rpc_task *task, void *calldata);
257#ifdef CONFIG_MIGRATION
258extern int nfs_migrate_page(struct address_space *,
259 struct page *, struct page *);
260#else
261#define nfs_migrate_page NULL
262#endif
251 263
252/* nfs4proc.c */ 264/* nfs4proc.c */
253extern int _nfs4_call_sync(struct nfs_server *server, 265extern int _nfs4_call_sync(struct nfs_server *server,
@@ -368,24 +380,3 @@ unsigned int nfs_page_array_len(unsigned int base, size_t len)
368 return ((unsigned long)len + (unsigned long)base + 380 return ((unsigned long)len + (unsigned long)base +
369 PAGE_SIZE - 1) >> PAGE_SHIFT; 381 PAGE_SIZE - 1) >> PAGE_SHIFT;
370} 382}
371
372#define IPV6_SCOPE_DELIMITER '%'
373
374/*
375 * Set the port number in an address. Be agnostic about the address
376 * family.
377 */
378static inline void nfs_set_port(struct sockaddr *sap, unsigned short port)
379{
380 struct sockaddr_in *ap = (struct sockaddr_in *)sap;
381 struct sockaddr_in6 *ap6 = (struct sockaddr_in6 *)sap;
382
383 switch (sap->sa_family) {
384 case AF_INET:
385 ap->sin_port = htons(port);
386 break;
387 case AF_INET6:
388 ap6->sin6_port = htons(port);
389 break;
390 }
391}
diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c
index 38ef9eaec407..0adefc40cc89 100644
--- a/fs/nfs/mount_clnt.c
+++ b/fs/nfs/mount_clnt.c
@@ -209,6 +209,71 @@ out_mnt_err:
209 goto out; 209 goto out;
210} 210}
211 211
212/**
213 * nfs_umount - Notify a server that we have unmounted this export
214 * @info: pointer to umount request arguments
215 *
216 * MOUNTPROC_UMNT is advisory, so we set a short timeout, and always
217 * use UDP.
218 */
219void nfs_umount(const struct nfs_mount_request *info)
220{
221 static const struct rpc_timeout nfs_umnt_timeout = {
222 .to_initval = 1 * HZ,
223 .to_maxval = 3 * HZ,
224 .to_retries = 2,
225 };
226 struct rpc_create_args args = {
227 .protocol = IPPROTO_UDP,
228 .address = info->sap,
229 .addrsize = info->salen,
230 .timeout = &nfs_umnt_timeout,
231 .servername = info->hostname,
232 .program = &mnt_program,
233 .version = info->version,
234 .authflavor = RPC_AUTH_UNIX,
235 .flags = RPC_CLNT_CREATE_NOPING,
236 };
237 struct mountres result;
238 struct rpc_message msg = {
239 .rpc_argp = info->dirpath,
240 .rpc_resp = &result,
241 };
242 struct rpc_clnt *clnt;
243 int status;
244
245 if (info->noresvport)
246 args.flags |= RPC_CLNT_CREATE_NONPRIVPORT;
247
248 clnt = rpc_create(&args);
249 if (unlikely(IS_ERR(clnt)))
250 goto out_clnt_err;
251
252 dprintk("NFS: sending UMNT request for %s:%s\n",
253 (info->hostname ? info->hostname : "server"), info->dirpath);
254
255 if (info->version == NFS_MNT3_VERSION)
256 msg.rpc_proc = &clnt->cl_procinfo[MOUNTPROC3_UMNT];
257 else
258 msg.rpc_proc = &clnt->cl_procinfo[MOUNTPROC_UMNT];
259
260 status = rpc_call_sync(clnt, &msg, 0);
261 rpc_shutdown_client(clnt);
262
263 if (unlikely(status < 0))
264 goto out_call_err;
265
266 return;
267
268out_clnt_err:
269 dprintk("NFS: failed to create UMNT RPC client, status=%ld\n",
270 PTR_ERR(clnt));
271 return;
272
273out_call_err:
274 dprintk("NFS: UMNT request failed, status=%d\n", status);
275}
276
212/* 277/*
213 * XDR encode/decode functions for MOUNT 278 * XDR encode/decode functions for MOUNT
214 */ 279 */
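nfs_umount() above is deliberately best-effort: UDP, a short timeout with a couple of retries, and every failure only produces a dprintk. A stripped-down userspace sketch of that fire-and-forget shape; send_umnt_once() is a stub standing in for the sunrpc call, not a real API:

#include <stdio.h>

/* stub for a one-shot RPC over UDP with a short timeout */
static int send_umnt_once(const char *host, const char *dirpath)
{
    (void)host; (void)dirpath;
    return -1;   /* pretend the server did not answer in time */
}

/* advisory notification: bounded retries, errors are logged and dropped */
static void umount_notify(const char *host, const char *dirpath)
{
    int tries;

    for (tries = 0; tries < 3; tries++)
        if (send_umnt_once(host, dirpath) == 0)
            return;

    fprintf(stderr, "UMNT to %s for %s failed; ignoring\n", host, dirpath);
}

int main(void)
{
    umount_notify("server.example", "/export/home");
    return 0;
}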
@@ -258,7 +323,7 @@ static int decode_status(struct xdr_stream *xdr, struct mountres *res)
258 return -EIO; 323 return -EIO;
259 status = ntohl(*p); 324 status = ntohl(*p);
260 325
261 for (i = 0; i <= ARRAY_SIZE(mnt_errtbl); i++) { 326 for (i = 0; i < ARRAY_SIZE(mnt_errtbl); i++) {
262 if (mnt_errtbl[i].status == status) { 327 if (mnt_errtbl[i].status == status) {
263 res->errno = mnt_errtbl[i].errno; 328 res->errno = mnt_errtbl[i].errno;
264 return 0; 329 return 0;
@@ -309,7 +374,7 @@ static int decode_fhs_status(struct xdr_stream *xdr, struct mountres *res)
309 return -EIO; 374 return -EIO;
310 status = ntohl(*p); 375 status = ntohl(*p);
311 376
312 for (i = 0; i <= ARRAY_SIZE(mnt3_errtbl); i++) { 377 for (i = 0; i < ARRAY_SIZE(mnt3_errtbl); i++) {
313 if (mnt3_errtbl[i].status == status) { 378 if (mnt3_errtbl[i].status == status) {
314 res->errno = mnt3_errtbl[i].errno; 379 res->errno = mnt3_errtbl[i].errno;
315 return 0; 380 return 0;
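The two decode hunks above fix a classic off-by-one: with "i <= ARRAY_SIZE(tbl)" the final iteration indexes one element past the end of the error table. A standalone illustration of why "<" is the correct bound (table contents are made up):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct errmap { unsigned int status; int err; };

static const struct errmap tbl[] = {
    { 1, -5 },
    { 2, -13 },
};

static int map_status(unsigned int status)
{
    size_t i;

    /* ARRAY_SIZE(tbl) == 2, so the valid indices are 0 and 1;
     * "i <= ARRAY_SIZE(tbl)" would also read tbl[2], out of bounds. */
    for (i = 0; i < ARRAY_SIZE(tbl); i++)
        if (tbl[i].status == status)
            return tbl[i].err;
    return -1;
}

int main(void)
{
    printf("%d %d\n", map_status(2), map_status(99));
    return 0;
}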
@@ -407,6 +472,13 @@ static struct rpc_procinfo mnt_procedures[] = {
407 .p_statidx = MOUNTPROC_MNT, 472 .p_statidx = MOUNTPROC_MNT,
408 .p_name = "MOUNT", 473 .p_name = "MOUNT",
409 }, 474 },
475 [MOUNTPROC_UMNT] = {
476 .p_proc = MOUNTPROC_UMNT,
477 .p_encode = (kxdrproc_t)mnt_enc_dirpath,
478 .p_arglen = MNT_enc_dirpath_sz,
479 .p_statidx = MOUNTPROC_UMNT,
480 .p_name = "UMOUNT",
481 },
410}; 482};
411 483
412static struct rpc_procinfo mnt3_procedures[] = { 484static struct rpc_procinfo mnt3_procedures[] = {
@@ -419,6 +491,13 @@ static struct rpc_procinfo mnt3_procedures[] = {
419 .p_statidx = MOUNTPROC3_MNT, 491 .p_statidx = MOUNTPROC3_MNT,
420 .p_name = "MOUNT", 492 .p_name = "MOUNT",
421 }, 493 },
494 [MOUNTPROC3_UMNT] = {
495 .p_proc = MOUNTPROC3_UMNT,
496 .p_encode = (kxdrproc_t)mnt_enc_dirpath,
497 .p_arglen = MNT_enc_dirpath_sz,
498 .p_statidx = MOUNTPROC3_UMNT,
499 .p_name = "UMOUNT",
500 },
422}; 501};
423 502
424 503
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index d0cc5ce0edfe..ee6a13f05443 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -299,7 +299,6 @@ static void nfs3_free_createdata(struct nfs3_createdata *data)
299 299
300/* 300/*
301 * Create a regular file. 301 * Create a regular file.
302 * For now, we don't implement O_EXCL.
303 */ 302 */
304static int 303static int
305nfs3_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, 304nfs3_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
diff --git a/fs/nfs/nfs4namespace.c b/fs/nfs/nfs4namespace.c
index 2a2a0a7143ad..2636c26d56fa 100644
--- a/fs/nfs/nfs4namespace.c
+++ b/fs/nfs/nfs4namespace.c
@@ -17,6 +17,7 @@
17#include <linux/inet.h> 17#include <linux/inet.h>
18#include "internal.h" 18#include "internal.h"
19#include "nfs4_fs.h" 19#include "nfs4_fs.h"
20#include "dns_resolve.h"
20 21
21#define NFSDBG_FACILITY NFSDBG_VFS 22#define NFSDBG_FACILITY NFSDBG_VFS
22 23
@@ -95,6 +96,20 @@ static int nfs4_validate_fspath(const struct vfsmount *mnt_parent,
95 return 0; 96 return 0;
96} 97}
97 98
99static size_t nfs_parse_server_name(char *string, size_t len,
100 struct sockaddr *sa, size_t salen)
101{
102 ssize_t ret;
103
104 ret = rpc_pton(string, len, sa, salen);
105 if (ret == 0) {
106 ret = nfs_dns_resolve_name(string, len, sa, salen);
107 if (ret < 0)
108 ret = 0;
109 }
110 return ret;
111}
112
98static struct vfsmount *try_location(struct nfs_clone_mount *mountdata, 113static struct vfsmount *try_location(struct nfs_clone_mount *mountdata,
99 char *page, char *page2, 114 char *page, char *page2,
100 const struct nfs4_fs_location *location) 115 const struct nfs4_fs_location *location)
@@ -121,11 +136,12 @@ static struct vfsmount *try_location(struct nfs_clone_mount *mountdata,
121 136
122 if (memchr(buf->data, IPV6_SCOPE_DELIMITER, buf->len)) 137 if (memchr(buf->data, IPV6_SCOPE_DELIMITER, buf->len))
123 continue; 138 continue;
124 nfs_parse_ip_address(buf->data, buf->len, 139 mountdata->addrlen = nfs_parse_server_name(buf->data,
125 mountdata->addr, &mountdata->addrlen); 140 buf->len,
126 if (mountdata->addr->sa_family == AF_UNSPEC) 141 mountdata->addr, mountdata->addrlen);
142 if (mountdata->addrlen == 0)
127 continue; 143 continue;
128 nfs_set_port(mountdata->addr, NFS_PORT); 144 rpc_set_port(mountdata->addr, NFS_PORT);
129 145
130 memcpy(page2, buf->data, buf->len); 146 memcpy(page2, buf->data, buf->len);
131 page2[buf->len] = '\0'; 147 page2[buf->len] = '\0';
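nfs_parse_server_name() above first tries to read the referral string as a presentation-format IP address and only falls back to the DNS resolver when that fails; a zero return means the location is skipped. A userspace sketch of the same two-step fallback, using inet_pton() with a stubbed resolver (the kernel path uses rpc_pton and its own resolver, and handles IPv6 as well):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>

/* stub for a name-service lookup */
static ssize_t resolve_name(const char *name, struct sockaddr_in *sin)
{
    (void)name; (void)sin;
    return -1;   /* pretend the lookup failed */
}

/* returns the address length on success, 0 if the name is unusable */
static size_t parse_server_name(const char *name, struct sockaddr_in *sin)
{
    memset(sin, 0, sizeof(*sin));
    sin->sin_family = AF_INET;

    if (inet_pton(AF_INET, name, &sin->sin_addr) == 1)
        return sizeof(*sin);              /* literal IPv4 address */

    if (resolve_name(name, sin) > 0)      /* fall back to the resolver */
        return sizeof(*sin);

    return 0;                             /* caller skips this location */
}

int main(void)
{
    struct sockaddr_in sin;

    printf("%zu %zu\n",
           parse_server_name("192.0.2.1", &sin),
           parse_server_name("fs.example.net", &sin));
    return 0;
}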
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 6917311f201c..be6544aef41f 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -61,6 +61,8 @@
61#define NFS4_POLL_RETRY_MIN (HZ/10) 61#define NFS4_POLL_RETRY_MIN (HZ/10)
62#define NFS4_POLL_RETRY_MAX (15*HZ) 62#define NFS4_POLL_RETRY_MAX (15*HZ)
63 63
64#define NFS4_MAX_LOOP_ON_RECOVER (10)
65
64struct nfs4_opendata; 66struct nfs4_opendata;
65static int _nfs4_proc_open(struct nfs4_opendata *data); 67static int _nfs4_proc_open(struct nfs4_opendata *data);
66static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *); 68static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
@@ -426,17 +428,19 @@ out:
426static int nfs4_recover_session(struct nfs4_session *session) 428static int nfs4_recover_session(struct nfs4_session *session)
427{ 429{
428 struct nfs_client *clp = session->clp; 430 struct nfs_client *clp = session->clp;
431 unsigned int loop;
429 int ret; 432 int ret;
430 433
431 for (;;) { 434 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
432 ret = nfs4_wait_clnt_recover(clp); 435 ret = nfs4_wait_clnt_recover(clp);
433 if (ret != 0) 436 if (ret != 0)
434 return ret; 437 break;
435 if (!test_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state)) 438 if (!test_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state))
436 break; 439 break;
437 nfs4_schedule_state_manager(clp); 440 nfs4_schedule_state_manager(clp);
441 ret = -EIO;
438 } 442 }
439 return 0; 443 return ret;
440} 444}
441 445
442static int nfs41_setup_sequence(struct nfs4_session *session, 446static int nfs41_setup_sequence(struct nfs4_session *session,
@@ -1444,18 +1448,20 @@ static int _nfs4_proc_open(struct nfs4_opendata *data)
1444static int nfs4_recover_expired_lease(struct nfs_server *server) 1448static int nfs4_recover_expired_lease(struct nfs_server *server)
1445{ 1449{
1446 struct nfs_client *clp = server->nfs_client; 1450 struct nfs_client *clp = server->nfs_client;
1451 unsigned int loop;
1447 int ret; 1452 int ret;
1448 1453
1449 for (;;) { 1454 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
1450 ret = nfs4_wait_clnt_recover(clp); 1455 ret = nfs4_wait_clnt_recover(clp);
1451 if (ret != 0) 1456 if (ret != 0)
1452 return ret; 1457 break;
1453 if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) && 1458 if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) &&
1454 !test_bit(NFS4CLNT_CHECK_LEASE,&clp->cl_state)) 1459 !test_bit(NFS4CLNT_CHECK_LEASE,&clp->cl_state))
1455 break; 1460 break;
1456 nfs4_schedule_state_recovery(clp); 1461 nfs4_schedule_state_recovery(clp);
1462 ret = -EIO;
1457 } 1463 }
1458 return 0; 1464 return ret;
1459} 1465}
1460 1466
1461/* 1467/*
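Both recovery helpers above trade an unbounded for(;;) for a loop capped at NFS4_MAX_LOOP_ON_RECOVER iterations, returning -EIO if the client state never settles. A sketch of that bounded wait-and-retry shape, with stub predicates in place of the clp->cl_state bit tests:

#include <errno.h>
#include <stdio.h>

#define MAX_LOOP_ON_RECOVER 10   /* mirrors NFS4_MAX_LOOP_ON_RECOVER */

static int wait_for_recovery(void)   { return 0; }  /* stub: wait succeeded */
static int still_recovering(void)    { return 1; }  /* stub: never settles */
static void kick_state_manager(void) { }

static int recover_expired_lease(void)
{
    unsigned int loop;
    int ret = -EIO;     /* value if the loop exhausts its budget */

    for (loop = MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
        ret = wait_for_recovery();
        if (ret != 0)
            break;                      /* interrupted: report that error */
        if (!still_recovering()) {
            ret = 0;                    /* state is clean, we are done */
            break;
        }
        kick_state_manager();
        ret = -EIO;                     /* returned if the budget runs out */
    }
    return ret;
}

int main(void)
{
    printf("recover_expired_lease() = %d\n", recover_expired_lease());
    return 0;
}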
@@ -1997,12 +2003,34 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f
1997 status = nfs4_call_sync(server, &msg, &args, &res, 0); 2003 status = nfs4_call_sync(server, &msg, &args, &res, 0);
1998 if (status == 0) { 2004 if (status == 0) {
1999 memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask)); 2005 memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
2006 server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS|
2007 NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
2008 NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|
2009 NFS_CAP_OWNER_GROUP|NFS_CAP_ATIME|
2010 NFS_CAP_CTIME|NFS_CAP_MTIME);
2000 if (res.attr_bitmask[0] & FATTR4_WORD0_ACL) 2011 if (res.attr_bitmask[0] & FATTR4_WORD0_ACL)
2001 server->caps |= NFS_CAP_ACLS; 2012 server->caps |= NFS_CAP_ACLS;
2002 if (res.has_links != 0) 2013 if (res.has_links != 0)
2003 server->caps |= NFS_CAP_HARDLINKS; 2014 server->caps |= NFS_CAP_HARDLINKS;
2004 if (res.has_symlinks != 0) 2015 if (res.has_symlinks != 0)
2005 server->caps |= NFS_CAP_SYMLINKS; 2016 server->caps |= NFS_CAP_SYMLINKS;
2017 if (res.attr_bitmask[0] & FATTR4_WORD0_FILEID)
2018 server->caps |= NFS_CAP_FILEID;
2019 if (res.attr_bitmask[1] & FATTR4_WORD1_MODE)
2020 server->caps |= NFS_CAP_MODE;
2021 if (res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS)
2022 server->caps |= NFS_CAP_NLINK;
2023 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER)
2024 server->caps |= NFS_CAP_OWNER;
2025 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP)
2026 server->caps |= NFS_CAP_OWNER_GROUP;
2027 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS)
2028 server->caps |= NFS_CAP_ATIME;
2029 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA)
2030 server->caps |= NFS_CAP_CTIME;
2031 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY)
2032 server->caps |= NFS_CAP_MTIME;
2033
2006 memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask)); 2034 memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask));
2007 server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE; 2035 server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
2008 server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY; 2036 server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
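The hunk above makes _nfs4_server_capabilities() recompute the optional-attribute capabilities from scratch: clear the whole group, then set each NFS_CAP_* bit only when the server's attribute bitmask advertises the matching FATTR4 word. A reduced sketch of that clear-then-rebuild pattern with illustrative bit values, not the kernel's:

#include <stdio.h>

/* illustrative capability bits */
#define CAP_MODE   0x01
#define CAP_OWNER  0x02
#define CAP_MTIME  0x04

/* illustrative attribute-word bits */
#define WORD1_MODE        0x01
#define WORD1_OWNER       0x02
#define WORD1_TIME_MODIFY 0x04

static unsigned long rebuild_caps(unsigned long caps, unsigned int word1)
{
    /* start from a clean slate for every optional attribute ... */
    caps &= ~(CAP_MODE | CAP_OWNER | CAP_MTIME);

    /* ... and re-add only what the server says it supports */
    if (word1 & WORD1_MODE)
        caps |= CAP_MODE;
    if (word1 & WORD1_OWNER)
        caps |= CAP_OWNER;
    if (word1 & WORD1_TIME_MODIFY)
        caps |= CAP_MTIME;
    return caps;
}

int main(void)
{
    printf("caps=0x%lx\n",
           rebuild_caps(CAP_MODE | CAP_OWNER | CAP_MTIME,
                        WORD1_MODE | WORD1_TIME_MODIFY));
    return 0;
}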
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 617273e7d47f..cfc30d362f94 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -702,29 +702,12 @@ struct compound_hdr {
702 u32 minorversion; 702 u32 minorversion;
703}; 703};
704 704
705/* 705static __be32 *reserve_space(struct xdr_stream *xdr, size_t nbytes)
706 * START OF "GENERIC" ENCODE ROUTINES. 706{
707 * These may look a little ugly since they are imported from a "generic" 707 __be32 *p = xdr_reserve_space(xdr, nbytes);
708 * set of XDR encode/decode routines which are intended to be shared by 708 BUG_ON(!p);
709 * all of our NFSv4 implementations (OpenBSD, MacOS X...). 709 return p;
710 * 710}
711 * If the pain of reading these is too great, it should be a straightforward
712 * task to translate them into Linux-specific versions which are more
713 * consistent with the style used in NFSv2/v3...
714 */
715#define WRITE32(n) *p++ = htonl(n)
716#define WRITE64(n) do { \
717 *p++ = htonl((uint32_t)((n) >> 32)); \
718 *p++ = htonl((uint32_t)(n)); \
719} while (0)
720#define WRITEMEM(ptr,nbytes) do { \
721 p = xdr_encode_opaque_fixed(p, ptr, nbytes); \
722} while (0)
723
724#define RESERVE_SPACE(nbytes) do { \
725 p = xdr_reserve_space(xdr, nbytes); \
726 BUG_ON(!p); \
727} while (0)
728 711
729static void encode_string(struct xdr_stream *xdr, unsigned int len, const char *str) 712static void encode_string(struct xdr_stream *xdr, unsigned int len, const char *str)
730{ 713{
@@ -749,12 +732,11 @@ static void encode_compound_hdr(struct xdr_stream *xdr,
749 732
750 dprintk("encode_compound: tag=%.*s\n", (int)hdr->taglen, hdr->tag); 733 dprintk("encode_compound: tag=%.*s\n", (int)hdr->taglen, hdr->tag);
751 BUG_ON(hdr->taglen > NFS4_MAXTAGLEN); 734 BUG_ON(hdr->taglen > NFS4_MAXTAGLEN);
752 RESERVE_SPACE(12+(XDR_QUADLEN(hdr->taglen)<<2)); 735 p = reserve_space(xdr, 4 + hdr->taglen + 8);
753 WRITE32(hdr->taglen); 736 p = xdr_encode_opaque(p, hdr->tag, hdr->taglen);
754 WRITEMEM(hdr->tag, hdr->taglen); 737 *p++ = cpu_to_be32(hdr->minorversion);
755 WRITE32(hdr->minorversion);
756 hdr->nops_p = p; 738 hdr->nops_p = p;
757 WRITE32(hdr->nops); 739 *p = cpu_to_be32(hdr->nops);
758} 740}
759 741
760static void encode_nops(struct compound_hdr *hdr) 742static void encode_nops(struct compound_hdr *hdr)
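The macro block removed above (RESERVE_SPACE/WRITE32/WRITE64/WRITEMEM) gives way to an explicit pointer returned by reserve_space(), with callers storing cpu_to_be32() values or calling the xdr_encode_* helpers directly. A rough userspace approximation of the converted style against a plain buffer; htonl() stands in for cpu_to_be32(), and the op number and buffer sizes are illustrative:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

struct stream {
    uint32_t buf[16];
    unsigned int used;   /* in 32-bit words; no overflow check in this sketch */
};

/* analogue of reserve_space(): hand back room for nbytes, advance the cursor */
static uint32_t *reserve_space(struct stream *s, size_t nbytes)
{
    uint32_t *p = s->buf + s->used;

    s->used += (nbytes + 3) / 4;
    return p;
}

/* open-coded encode, in the style the patch converts to */
static void encode_commit(struct stream *s, uint64_t offset, uint32_t count)
{
    uint32_t *p = reserve_space(s, 16);

    *p++ = htonl(22);                        /* op number (illustrative) */
    *p++ = htonl((uint32_t)(offset >> 32));  /* 64-bit value, high word */
    *p++ = htonl((uint32_t)offset);          /* low word */
    *p   = htonl(count);
}

int main(void)
{
    struct stream s = { .used = 0 };

    encode_commit(&s, 0x100000000ULL, 4096);
    printf("words used: %u\n", s.used);
    return 0;
}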
@@ -829,55 +811,53 @@ static void encode_attrs(struct xdr_stream *xdr, const struct iattr *iap, const
829 len += 16; 811 len += 16;
830 else if (iap->ia_valid & ATTR_MTIME) 812 else if (iap->ia_valid & ATTR_MTIME)
831 len += 4; 813 len += 4;
832 RESERVE_SPACE(len); 814 p = reserve_space(xdr, len);
833 815
834 /* 816 /*
835 * We write the bitmap length now, but leave the bitmap and the attribute 817 * We write the bitmap length now, but leave the bitmap and the attribute
836 * buffer length to be backfilled at the end of this routine. 818 * buffer length to be backfilled at the end of this routine.
837 */ 819 */
838 WRITE32(2); 820 *p++ = cpu_to_be32(2);
839 q = p; 821 q = p;
840 p += 3; 822 p += 3;
841 823
842 if (iap->ia_valid & ATTR_SIZE) { 824 if (iap->ia_valid & ATTR_SIZE) {
843 bmval0 |= FATTR4_WORD0_SIZE; 825 bmval0 |= FATTR4_WORD0_SIZE;
844 WRITE64(iap->ia_size); 826 p = xdr_encode_hyper(p, iap->ia_size);
845 } 827 }
846 if (iap->ia_valid & ATTR_MODE) { 828 if (iap->ia_valid & ATTR_MODE) {
847 bmval1 |= FATTR4_WORD1_MODE; 829 bmval1 |= FATTR4_WORD1_MODE;
848 WRITE32(iap->ia_mode & S_IALLUGO); 830 *p++ = cpu_to_be32(iap->ia_mode & S_IALLUGO);
849 } 831 }
850 if (iap->ia_valid & ATTR_UID) { 832 if (iap->ia_valid & ATTR_UID) {
851 bmval1 |= FATTR4_WORD1_OWNER; 833 bmval1 |= FATTR4_WORD1_OWNER;
852 WRITE32(owner_namelen); 834 p = xdr_encode_opaque(p, owner_name, owner_namelen);
853 WRITEMEM(owner_name, owner_namelen);
854 } 835 }
855 if (iap->ia_valid & ATTR_GID) { 836 if (iap->ia_valid & ATTR_GID) {
856 bmval1 |= FATTR4_WORD1_OWNER_GROUP; 837 bmval1 |= FATTR4_WORD1_OWNER_GROUP;
857 WRITE32(owner_grouplen); 838 p = xdr_encode_opaque(p, owner_group, owner_grouplen);
858 WRITEMEM(owner_group, owner_grouplen);
859 } 839 }
860 if (iap->ia_valid & ATTR_ATIME_SET) { 840 if (iap->ia_valid & ATTR_ATIME_SET) {
861 bmval1 |= FATTR4_WORD1_TIME_ACCESS_SET; 841 bmval1 |= FATTR4_WORD1_TIME_ACCESS_SET;
862 WRITE32(NFS4_SET_TO_CLIENT_TIME); 842 *p++ = cpu_to_be32(NFS4_SET_TO_CLIENT_TIME);
863 WRITE32(0); 843 *p++ = cpu_to_be32(0);
864 WRITE32(iap->ia_mtime.tv_sec); 844 *p++ = cpu_to_be32(iap->ia_mtime.tv_sec);
865 WRITE32(iap->ia_mtime.tv_nsec); 845 *p++ = cpu_to_be32(iap->ia_mtime.tv_nsec);
866 } 846 }
867 else if (iap->ia_valid & ATTR_ATIME) { 847 else if (iap->ia_valid & ATTR_ATIME) {
868 bmval1 |= FATTR4_WORD1_TIME_ACCESS_SET; 848 bmval1 |= FATTR4_WORD1_TIME_ACCESS_SET;
869 WRITE32(NFS4_SET_TO_SERVER_TIME); 849 *p++ = cpu_to_be32(NFS4_SET_TO_SERVER_TIME);
870 } 850 }
871 if (iap->ia_valid & ATTR_MTIME_SET) { 851 if (iap->ia_valid & ATTR_MTIME_SET) {
872 bmval1 |= FATTR4_WORD1_TIME_MODIFY_SET; 852 bmval1 |= FATTR4_WORD1_TIME_MODIFY_SET;
873 WRITE32(NFS4_SET_TO_CLIENT_TIME); 853 *p++ = cpu_to_be32(NFS4_SET_TO_CLIENT_TIME);
874 WRITE32(0); 854 *p++ = cpu_to_be32(0);
875 WRITE32(iap->ia_mtime.tv_sec); 855 *p++ = cpu_to_be32(iap->ia_mtime.tv_sec);
876 WRITE32(iap->ia_mtime.tv_nsec); 856 *p++ = cpu_to_be32(iap->ia_mtime.tv_nsec);
877 } 857 }
878 else if (iap->ia_valid & ATTR_MTIME) { 858 else if (iap->ia_valid & ATTR_MTIME) {
879 bmval1 |= FATTR4_WORD1_TIME_MODIFY_SET; 859 bmval1 |= FATTR4_WORD1_TIME_MODIFY_SET;
880 WRITE32(NFS4_SET_TO_SERVER_TIME); 860 *p++ = cpu_to_be32(NFS4_SET_TO_SERVER_TIME);
881 } 861 }
882 862
883 /* 863 /*
@@ -891,7 +871,7 @@ static void encode_attrs(struct xdr_stream *xdr, const struct iattr *iap, const
891 len = (char *)p - (char *)q - 12; 871 len = (char *)p - (char *)q - 12;
892 *q++ = htonl(bmval0); 872 *q++ = htonl(bmval0);
893 *q++ = htonl(bmval1); 873 *q++ = htonl(bmval1);
894 *q++ = htonl(len); 874 *q = htonl(len);
895 875
896/* out: */ 876/* out: */
897} 877}
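encode_attrs() keeps its original trick: it reserves room for the bitmap words and the attribute-buffer length up front (q = p; p += 3), writes the attribute data, and only then backfills the bitmap and the computed length; the hunk merely drops the stray post-increment on the last store. A tiny sketch of the reserve-then-backfill pattern, with made-up bitmap contents:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t buf[8];
    uint32_t *p = buf, *q;

    *p++ = htonl(2);        /* bitmap word count, known up front */
    q = p;                  /* remember where bitmap + length go */
    p += 3;                 /* skip bitmap[0], bitmap[1], attr length */

    *p++ = htonl(0644);     /* pretend this is the encoded attribute data */

    /* backfill now that we know what was written and how long it is */
    *q++ = htonl(0);                                       /* bitmap[0] */
    *q++ = htonl(0x2);                                     /* bitmap[1] */
    *q   = htonl((uint32_t)(((char *)p - (char *)q) - 4)); /* attr byte length */

    printf("encoded %zu words\n", (size_t)(p - buf));
    return 0;
}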
@@ -900,9 +880,9 @@ static void encode_access(struct xdr_stream *xdr, u32 access, struct compound_hd
900{ 880{
901 __be32 *p; 881 __be32 *p;
902 882
903 RESERVE_SPACE(8); 883 p = reserve_space(xdr, 8);
904 WRITE32(OP_ACCESS); 884 *p++ = cpu_to_be32(OP_ACCESS);
905 WRITE32(access); 885 *p = cpu_to_be32(access);
906 hdr->nops++; 886 hdr->nops++;
907 hdr->replen += decode_access_maxsz; 887 hdr->replen += decode_access_maxsz;
908} 888}
@@ -911,10 +891,10 @@ static void encode_close(struct xdr_stream *xdr, const struct nfs_closeargs *arg
911{ 891{
912 __be32 *p; 892 __be32 *p;
913 893
914 RESERVE_SPACE(8+NFS4_STATEID_SIZE); 894 p = reserve_space(xdr, 8+NFS4_STATEID_SIZE);
915 WRITE32(OP_CLOSE); 895 *p++ = cpu_to_be32(OP_CLOSE);
916 WRITE32(arg->seqid->sequence->counter); 896 *p++ = cpu_to_be32(arg->seqid->sequence->counter);
917 WRITEMEM(arg->stateid->data, NFS4_STATEID_SIZE); 897 xdr_encode_opaque_fixed(p, arg->stateid->data, NFS4_STATEID_SIZE);
918 hdr->nops++; 898 hdr->nops++;
919 hdr->replen += decode_close_maxsz; 899 hdr->replen += decode_close_maxsz;
920} 900}
@@ -923,10 +903,10 @@ static void encode_commit(struct xdr_stream *xdr, const struct nfs_writeargs *ar
923{ 903{
924 __be32 *p; 904 __be32 *p;
925 905
926 RESERVE_SPACE(16); 906 p = reserve_space(xdr, 16);
927 WRITE32(OP_COMMIT); 907 *p++ = cpu_to_be32(OP_COMMIT);
928 WRITE64(args->offset); 908 p = xdr_encode_hyper(p, args->offset);
929 WRITE32(args->count); 909 *p = cpu_to_be32(args->count);
930 hdr->nops++; 910 hdr->nops++;
931 hdr->replen += decode_commit_maxsz; 911 hdr->replen += decode_commit_maxsz;
932} 912}
@@ -935,30 +915,28 @@ static void encode_create(struct xdr_stream *xdr, const struct nfs4_create_arg *
935{ 915{
936 __be32 *p; 916 __be32 *p;
937 917
938 RESERVE_SPACE(8); 918 p = reserve_space(xdr, 8);
939 WRITE32(OP_CREATE); 919 *p++ = cpu_to_be32(OP_CREATE);
940 WRITE32(create->ftype); 920 *p = cpu_to_be32(create->ftype);
941 921
942 switch (create->ftype) { 922 switch (create->ftype) {
943 case NF4LNK: 923 case NF4LNK:
944 RESERVE_SPACE(4); 924 p = reserve_space(xdr, 4);
945 WRITE32(create->u.symlink.len); 925 *p = cpu_to_be32(create->u.symlink.len);
946 xdr_write_pages(xdr, create->u.symlink.pages, 0, create->u.symlink.len); 926 xdr_write_pages(xdr, create->u.symlink.pages, 0, create->u.symlink.len);
947 break; 927 break;
948 928
949 case NF4BLK: case NF4CHR: 929 case NF4BLK: case NF4CHR:
950 RESERVE_SPACE(8); 930 p = reserve_space(xdr, 8);
951 WRITE32(create->u.device.specdata1); 931 *p++ = cpu_to_be32(create->u.device.specdata1);
952 WRITE32(create->u.device.specdata2); 932 *p = cpu_to_be32(create->u.device.specdata2);
953 break; 933 break;
954 934
955 default: 935 default:
956 break; 936 break;
957 } 937 }
958 938
959 RESERVE_SPACE(4 + create->name->len); 939 encode_string(xdr, create->name->len, create->name->name);
960 WRITE32(create->name->len);
961 WRITEMEM(create->name->name, create->name->len);
962 hdr->nops++; 940 hdr->nops++;
963 hdr->replen += decode_create_maxsz; 941 hdr->replen += decode_create_maxsz;
964 942
@@ -969,10 +947,10 @@ static void encode_getattr_one(struct xdr_stream *xdr, uint32_t bitmap, struct c
969{ 947{
970 __be32 *p; 948 __be32 *p;
971 949
972 RESERVE_SPACE(12); 950 p = reserve_space(xdr, 12);
973 WRITE32(OP_GETATTR); 951 *p++ = cpu_to_be32(OP_GETATTR);
974 WRITE32(1); 952 *p++ = cpu_to_be32(1);
975 WRITE32(bitmap); 953 *p = cpu_to_be32(bitmap);
976 hdr->nops++; 954 hdr->nops++;
977 hdr->replen += decode_getattr_maxsz; 955 hdr->replen += decode_getattr_maxsz;
978} 956}
@@ -981,11 +959,11 @@ static void encode_getattr_two(struct xdr_stream *xdr, uint32_t bm0, uint32_t bm
981{ 959{
982 __be32 *p; 960 __be32 *p;
983 961
984 RESERVE_SPACE(16); 962 p = reserve_space(xdr, 16);
985 WRITE32(OP_GETATTR); 963 *p++ = cpu_to_be32(OP_GETATTR);
986 WRITE32(2); 964 *p++ = cpu_to_be32(2);
987 WRITE32(bm0); 965 *p++ = cpu_to_be32(bm0);
988 WRITE32(bm1); 966 *p = cpu_to_be32(bm1);
989 hdr->nops++; 967 hdr->nops++;
990 hdr->replen += decode_getattr_maxsz; 968 hdr->replen += decode_getattr_maxsz;
991} 969}
@@ -1012,8 +990,8 @@ static void encode_getfh(struct xdr_stream *xdr, struct compound_hdr *hdr)
1012{ 990{
1013 __be32 *p; 991 __be32 *p;
1014 992
1015 RESERVE_SPACE(4); 993 p = reserve_space(xdr, 4);
1016 WRITE32(OP_GETFH); 994 *p = cpu_to_be32(OP_GETFH);
1017 hdr->nops++; 995 hdr->nops++;
1018 hdr->replen += decode_getfh_maxsz; 996 hdr->replen += decode_getfh_maxsz;
1019} 997}
@@ -1022,10 +1000,9 @@ static void encode_link(struct xdr_stream *xdr, const struct qstr *name, struct
1022{ 1000{
1023 __be32 *p; 1001 __be32 *p;
1024 1002
1025 RESERVE_SPACE(8 + name->len); 1003 p = reserve_space(xdr, 8 + name->len);
1026 WRITE32(OP_LINK); 1004 *p++ = cpu_to_be32(OP_LINK);
1027 WRITE32(name->len); 1005 xdr_encode_opaque(p, name->name, name->len);
1028 WRITEMEM(name->name, name->len);
1029 hdr->nops++; 1006 hdr->nops++;
1030 hdr->replen += decode_link_maxsz; 1007 hdr->replen += decode_link_maxsz;
1031} 1008}
@@ -1052,27 +1029,27 @@ static void encode_lock(struct xdr_stream *xdr, const struct nfs_lock_args *args
1052{ 1029{
1053 __be32 *p; 1030 __be32 *p;
1054 1031
1055 RESERVE_SPACE(32); 1032 p = reserve_space(xdr, 32);
1056 WRITE32(OP_LOCK); 1033 *p++ = cpu_to_be32(OP_LOCK);
1057 WRITE32(nfs4_lock_type(args->fl, args->block)); 1034 *p++ = cpu_to_be32(nfs4_lock_type(args->fl, args->block));
1058 WRITE32(args->reclaim); 1035 *p++ = cpu_to_be32(args->reclaim);
1059 WRITE64(args->fl->fl_start); 1036 p = xdr_encode_hyper(p, args->fl->fl_start);
1060 WRITE64(nfs4_lock_length(args->fl)); 1037 p = xdr_encode_hyper(p, nfs4_lock_length(args->fl));
1061 WRITE32(args->new_lock_owner); 1038 *p = cpu_to_be32(args->new_lock_owner);
1062 if (args->new_lock_owner){ 1039 if (args->new_lock_owner){
1063 RESERVE_SPACE(4+NFS4_STATEID_SIZE+32); 1040 p = reserve_space(xdr, 4+NFS4_STATEID_SIZE+32);
1064 WRITE32(args->open_seqid->sequence->counter); 1041 *p++ = cpu_to_be32(args->open_seqid->sequence->counter);
1065 WRITEMEM(args->open_stateid->data, NFS4_STATEID_SIZE); 1042 p = xdr_encode_opaque_fixed(p, args->open_stateid->data, NFS4_STATEID_SIZE);
1066 WRITE32(args->lock_seqid->sequence->counter); 1043 *p++ = cpu_to_be32(args->lock_seqid->sequence->counter);
1067 WRITE64(args->lock_owner.clientid); 1044 p = xdr_encode_hyper(p, args->lock_owner.clientid);
1068 WRITE32(16); 1045 *p++ = cpu_to_be32(16);
1069 WRITEMEM("lock id:", 8); 1046 p = xdr_encode_opaque_fixed(p, "lock id:", 8);
1070 WRITE64(args->lock_owner.id); 1047 xdr_encode_hyper(p, args->lock_owner.id);
1071 } 1048 }
1072 else { 1049 else {
1073 RESERVE_SPACE(NFS4_STATEID_SIZE+4); 1050 p = reserve_space(xdr, NFS4_STATEID_SIZE+4);
1074 WRITEMEM(args->lock_stateid->data, NFS4_STATEID_SIZE); 1051 p = xdr_encode_opaque_fixed(p, args->lock_stateid->data, NFS4_STATEID_SIZE);
1075 WRITE32(args->lock_seqid->sequence->counter); 1052 *p = cpu_to_be32(args->lock_seqid->sequence->counter);
1076 } 1053 }
1077 hdr->nops++; 1054 hdr->nops++;
1078 hdr->replen += decode_lock_maxsz; 1055 hdr->replen += decode_lock_maxsz;
@@ -1082,15 +1059,15 @@ static void encode_lockt(struct xdr_stream *xdr, const struct nfs_lockt_args *ar
1082{ 1059{
1083 __be32 *p; 1060 __be32 *p;
1084 1061
1085 RESERVE_SPACE(52); 1062 p = reserve_space(xdr, 52);
1086 WRITE32(OP_LOCKT); 1063 *p++ = cpu_to_be32(OP_LOCKT);
1087 WRITE32(nfs4_lock_type(args->fl, 0)); 1064 *p++ = cpu_to_be32(nfs4_lock_type(args->fl, 0));
1088 WRITE64(args->fl->fl_start); 1065 p = xdr_encode_hyper(p, args->fl->fl_start);
1089 WRITE64(nfs4_lock_length(args->fl)); 1066 p = xdr_encode_hyper(p, nfs4_lock_length(args->fl));
1090 WRITE64(args->lock_owner.clientid); 1067 p = xdr_encode_hyper(p, args->lock_owner.clientid);
1091 WRITE32(16); 1068 *p++ = cpu_to_be32(16);
1092 WRITEMEM("lock id:", 8); 1069 p = xdr_encode_opaque_fixed(p, "lock id:", 8);
1093 WRITE64(args->lock_owner.id); 1070 xdr_encode_hyper(p, args->lock_owner.id);
1094 hdr->nops++; 1071 hdr->nops++;
1095 hdr->replen += decode_lockt_maxsz; 1072 hdr->replen += decode_lockt_maxsz;
1096} 1073}
@@ -1099,13 +1076,13 @@ static void encode_locku(struct xdr_stream *xdr, const struct nfs_locku_args *ar
1099{ 1076{
1100 __be32 *p; 1077 __be32 *p;
1101 1078
1102 RESERVE_SPACE(12+NFS4_STATEID_SIZE+16); 1079 p = reserve_space(xdr, 12+NFS4_STATEID_SIZE+16);
1103 WRITE32(OP_LOCKU); 1080 *p++ = cpu_to_be32(OP_LOCKU);
1104 WRITE32(nfs4_lock_type(args->fl, 0)); 1081 *p++ = cpu_to_be32(nfs4_lock_type(args->fl, 0));
1105 WRITE32(args->seqid->sequence->counter); 1082 *p++ = cpu_to_be32(args->seqid->sequence->counter);
1106 WRITEMEM(args->stateid->data, NFS4_STATEID_SIZE); 1083 p = xdr_encode_opaque_fixed(p, args->stateid->data, NFS4_STATEID_SIZE);
1107 WRITE64(args->fl->fl_start); 1084 p = xdr_encode_hyper(p, args->fl->fl_start);
1108 WRITE64(nfs4_lock_length(args->fl)); 1085 xdr_encode_hyper(p, nfs4_lock_length(args->fl));
1109 hdr->nops++; 1086 hdr->nops++;
1110 hdr->replen += decode_locku_maxsz; 1087 hdr->replen += decode_locku_maxsz;
1111} 1088}
@@ -1115,10 +1092,9 @@ static void encode_lookup(struct xdr_stream *xdr, const struct qstr *name, struc
1115 int len = name->len; 1092 int len = name->len;
1116 __be32 *p; 1093 __be32 *p;
1117 1094
1118 RESERVE_SPACE(8 + len); 1095 p = reserve_space(xdr, 8 + len);
1119 WRITE32(OP_LOOKUP); 1096 *p++ = cpu_to_be32(OP_LOOKUP);
1120 WRITE32(len); 1097 xdr_encode_opaque(p, name->name, len);
1121 WRITEMEM(name->name, len);
1122 hdr->nops++; 1098 hdr->nops++;
1123 hdr->replen += decode_lookup_maxsz; 1099 hdr->replen += decode_lookup_maxsz;
1124} 1100}
@@ -1127,21 +1103,21 @@ static void encode_share_access(struct xdr_stream *xdr, fmode_t fmode)
1127{ 1103{
1128 __be32 *p; 1104 __be32 *p;
1129 1105
1130 RESERVE_SPACE(8); 1106 p = reserve_space(xdr, 8);
1131 switch (fmode & (FMODE_READ|FMODE_WRITE)) { 1107 switch (fmode & (FMODE_READ|FMODE_WRITE)) {
1132 case FMODE_READ: 1108 case FMODE_READ:
1133 WRITE32(NFS4_SHARE_ACCESS_READ); 1109 *p++ = cpu_to_be32(NFS4_SHARE_ACCESS_READ);
1134 break; 1110 break;
1135 case FMODE_WRITE: 1111 case FMODE_WRITE:
1136 WRITE32(NFS4_SHARE_ACCESS_WRITE); 1112 *p++ = cpu_to_be32(NFS4_SHARE_ACCESS_WRITE);
1137 break; 1113 break;
1138 case FMODE_READ|FMODE_WRITE: 1114 case FMODE_READ|FMODE_WRITE:
1139 WRITE32(NFS4_SHARE_ACCESS_BOTH); 1115 *p++ = cpu_to_be32(NFS4_SHARE_ACCESS_BOTH);
1140 break; 1116 break;
1141 default: 1117 default:
1142 WRITE32(0); 1118 *p++ = cpu_to_be32(0);
1143 } 1119 }
1144 WRITE32(0); /* for linux, share_deny = 0 always */ 1120 *p = cpu_to_be32(0); /* for linux, share_deny = 0 always */
1145} 1121}
1146 1122
1147static inline void encode_openhdr(struct xdr_stream *xdr, const struct nfs_openargs *arg) 1123static inline void encode_openhdr(struct xdr_stream *xdr, const struct nfs_openargs *arg)
@@ -1151,29 +1127,29 @@ static inline void encode_openhdr(struct xdr_stream *xdr, const struct nfs_opena
1151 * opcode 4, seqid 4, share_access 4, share_deny 4, clientid 8, ownerlen 4, 1127 * opcode 4, seqid 4, share_access 4, share_deny 4, clientid 8, ownerlen 4,
1152 * owner 4 = 32 1128 * owner 4 = 32
1153 */ 1129 */
1154 RESERVE_SPACE(8); 1130 p = reserve_space(xdr, 8);
1155 WRITE32(OP_OPEN); 1131 *p++ = cpu_to_be32(OP_OPEN);
1156 WRITE32(arg->seqid->sequence->counter); 1132 *p = cpu_to_be32(arg->seqid->sequence->counter);
1157 encode_share_access(xdr, arg->fmode); 1133 encode_share_access(xdr, arg->fmode);
1158 RESERVE_SPACE(28); 1134 p = reserve_space(xdr, 28);
1159 WRITE64(arg->clientid); 1135 p = xdr_encode_hyper(p, arg->clientid);
1160 WRITE32(16); 1136 *p++ = cpu_to_be32(16);
1161 WRITEMEM("open id:", 8); 1137 p = xdr_encode_opaque_fixed(p, "open id:", 8);
1162 WRITE64(arg->id); 1138 xdr_encode_hyper(p, arg->id);
1163} 1139}
1164 1140
1165static inline void encode_createmode(struct xdr_stream *xdr, const struct nfs_openargs *arg) 1141static inline void encode_createmode(struct xdr_stream *xdr, const struct nfs_openargs *arg)
1166{ 1142{
1167 __be32 *p; 1143 __be32 *p;
1168 1144
1169 RESERVE_SPACE(4); 1145 p = reserve_space(xdr, 4);
1170 switch(arg->open_flags & O_EXCL) { 1146 switch(arg->open_flags & O_EXCL) {
1171 case 0: 1147 case 0:
1172 WRITE32(NFS4_CREATE_UNCHECKED); 1148 *p = cpu_to_be32(NFS4_CREATE_UNCHECKED);
1173 encode_attrs(xdr, arg->u.attrs, arg->server); 1149 encode_attrs(xdr, arg->u.attrs, arg->server);
1174 break; 1150 break;
1175 default: 1151 default:
1176 WRITE32(NFS4_CREATE_EXCLUSIVE); 1152 *p = cpu_to_be32(NFS4_CREATE_EXCLUSIVE);
1177 encode_nfs4_verifier(xdr, &arg->u.verifier); 1153 encode_nfs4_verifier(xdr, &arg->u.verifier);
1178 } 1154 }
1179} 1155}
@@ -1182,14 +1158,14 @@ static void encode_opentype(struct xdr_stream *xdr, const struct nfs_openargs *a
1182{ 1158{
1183 __be32 *p; 1159 __be32 *p;
1184 1160
1185 RESERVE_SPACE(4); 1161 p = reserve_space(xdr, 4);
1186 switch (arg->open_flags & O_CREAT) { 1162 switch (arg->open_flags & O_CREAT) {
1187 case 0: 1163 case 0:
1188 WRITE32(NFS4_OPEN_NOCREATE); 1164 *p = cpu_to_be32(NFS4_OPEN_NOCREATE);
1189 break; 1165 break;
1190 default: 1166 default:
1191 BUG_ON(arg->claim != NFS4_OPEN_CLAIM_NULL); 1167 BUG_ON(arg->claim != NFS4_OPEN_CLAIM_NULL);
1192 WRITE32(NFS4_OPEN_CREATE); 1168 *p = cpu_to_be32(NFS4_OPEN_CREATE);
1193 encode_createmode(xdr, arg); 1169 encode_createmode(xdr, arg);
1194 } 1170 }
1195} 1171}
@@ -1198,16 +1174,16 @@ static inline void encode_delegation_type(struct xdr_stream *xdr, fmode_t delega
1198{ 1174{
1199 __be32 *p; 1175 __be32 *p;
1200 1176
1201 RESERVE_SPACE(4); 1177 p = reserve_space(xdr, 4);
1202 switch (delegation_type) { 1178 switch (delegation_type) {
1203 case 0: 1179 case 0:
1204 WRITE32(NFS4_OPEN_DELEGATE_NONE); 1180 *p = cpu_to_be32(NFS4_OPEN_DELEGATE_NONE);
1205 break; 1181 break;
1206 case FMODE_READ: 1182 case FMODE_READ:
1207 WRITE32(NFS4_OPEN_DELEGATE_READ); 1183 *p = cpu_to_be32(NFS4_OPEN_DELEGATE_READ);
1208 break; 1184 break;
1209 case FMODE_WRITE|FMODE_READ: 1185 case FMODE_WRITE|FMODE_READ:
1210 WRITE32(NFS4_OPEN_DELEGATE_WRITE); 1186 *p = cpu_to_be32(NFS4_OPEN_DELEGATE_WRITE);
1211 break; 1187 break;
1212 default: 1188 default:
1213 BUG(); 1189 BUG();
@@ -1218,8 +1194,8 @@ static inline void encode_claim_null(struct xdr_stream *xdr, const struct qstr *
1218{ 1194{
1219 __be32 *p; 1195 __be32 *p;
1220 1196
1221 RESERVE_SPACE(4); 1197 p = reserve_space(xdr, 4);
1222 WRITE32(NFS4_OPEN_CLAIM_NULL); 1198 *p = cpu_to_be32(NFS4_OPEN_CLAIM_NULL);
1223 encode_string(xdr, name->len, name->name); 1199 encode_string(xdr, name->len, name->name);
1224} 1200}
1225 1201
@@ -1227,8 +1203,8 @@ static inline void encode_claim_previous(struct xdr_stream *xdr, fmode_t type)
1227{ 1203{
1228 __be32 *p; 1204 __be32 *p;
1229 1205
1230 RESERVE_SPACE(4); 1206 p = reserve_space(xdr, 4);
1231 WRITE32(NFS4_OPEN_CLAIM_PREVIOUS); 1207 *p = cpu_to_be32(NFS4_OPEN_CLAIM_PREVIOUS);
1232 encode_delegation_type(xdr, type); 1208 encode_delegation_type(xdr, type);
1233} 1209}
1234 1210
@@ -1236,9 +1212,9 @@ static inline void encode_claim_delegate_cur(struct xdr_stream *xdr, const struc
1236{ 1212{
1237 __be32 *p; 1213 __be32 *p;
1238 1214
1239 RESERVE_SPACE(4+NFS4_STATEID_SIZE); 1215 p = reserve_space(xdr, 4+NFS4_STATEID_SIZE);
1240 WRITE32(NFS4_OPEN_CLAIM_DELEGATE_CUR); 1216 *p++ = cpu_to_be32(NFS4_OPEN_CLAIM_DELEGATE_CUR);
1241 WRITEMEM(stateid->data, NFS4_STATEID_SIZE); 1217 xdr_encode_opaque_fixed(p, stateid->data, NFS4_STATEID_SIZE);
1242 encode_string(xdr, name->len, name->name); 1218 encode_string(xdr, name->len, name->name);
1243} 1219}
1244 1220
@@ -1267,10 +1243,10 @@ static void encode_open_confirm(struct xdr_stream *xdr, const struct nfs_open_co
1267{ 1243{
1268 __be32 *p; 1244 __be32 *p;
1269 1245
1270 RESERVE_SPACE(4+NFS4_STATEID_SIZE+4); 1246 p = reserve_space(xdr, 4+NFS4_STATEID_SIZE+4);
1271 WRITE32(OP_OPEN_CONFIRM); 1247 *p++ = cpu_to_be32(OP_OPEN_CONFIRM);
1272 WRITEMEM(arg->stateid->data, NFS4_STATEID_SIZE); 1248 p = xdr_encode_opaque_fixed(p, arg->stateid->data, NFS4_STATEID_SIZE);
1273 WRITE32(arg->seqid->sequence->counter); 1249 *p = cpu_to_be32(arg->seqid->sequence->counter);
1274 hdr->nops++; 1250 hdr->nops++;
1275 hdr->replen += decode_open_confirm_maxsz; 1251 hdr->replen += decode_open_confirm_maxsz;
1276} 1252}
@@ -1279,10 +1255,10 @@ static void encode_open_downgrade(struct xdr_stream *xdr, const struct nfs_close
1279{ 1255{
1280 __be32 *p; 1256 __be32 *p;
1281 1257
1282 RESERVE_SPACE(4+NFS4_STATEID_SIZE+4); 1258 p = reserve_space(xdr, 4+NFS4_STATEID_SIZE+4);
1283 WRITE32(OP_OPEN_DOWNGRADE); 1259 *p++ = cpu_to_be32(OP_OPEN_DOWNGRADE);
1284 WRITEMEM(arg->stateid->data, NFS4_STATEID_SIZE); 1260 p = xdr_encode_opaque_fixed(p, arg->stateid->data, NFS4_STATEID_SIZE);
1285 WRITE32(arg->seqid->sequence->counter); 1261 *p = cpu_to_be32(arg->seqid->sequence->counter);
1286 encode_share_access(xdr, arg->fmode); 1262 encode_share_access(xdr, arg->fmode);
1287 hdr->nops++; 1263 hdr->nops++;
1288 hdr->replen += decode_open_downgrade_maxsz; 1264 hdr->replen += decode_open_downgrade_maxsz;
@@ -1294,10 +1270,9 @@ encode_putfh(struct xdr_stream *xdr, const struct nfs_fh *fh, struct compound_hd
1294 int len = fh->size; 1270 int len = fh->size;
1295 __be32 *p; 1271 __be32 *p;
1296 1272
1297 RESERVE_SPACE(8 + len); 1273 p = reserve_space(xdr, 8 + len);
1298 WRITE32(OP_PUTFH); 1274 *p++ = cpu_to_be32(OP_PUTFH);
1299 WRITE32(len); 1275 xdr_encode_opaque(p, fh->data, len);
1300 WRITEMEM(fh->data, len);
1301 hdr->nops++; 1276 hdr->nops++;
1302 hdr->replen += decode_putfh_maxsz; 1277 hdr->replen += decode_putfh_maxsz;
1303} 1278}
@@ -1306,8 +1281,8 @@ static void encode_putrootfh(struct xdr_stream *xdr, struct compound_hdr *hdr)
1306{ 1281{
1307 __be32 *p; 1282 __be32 *p;
1308 1283
1309 RESERVE_SPACE(4); 1284 p = reserve_space(xdr, 4);
1310 WRITE32(OP_PUTROOTFH); 1285 *p = cpu_to_be32(OP_PUTROOTFH);
1311 hdr->nops++; 1286 hdr->nops++;
1312 hdr->replen += decode_putrootfh_maxsz; 1287 hdr->replen += decode_putrootfh_maxsz;
1313} 1288}
@@ -1317,26 +1292,26 @@ static void encode_stateid(struct xdr_stream *xdr, const struct nfs_open_context
1317 nfs4_stateid stateid; 1292 nfs4_stateid stateid;
1318 __be32 *p; 1293 __be32 *p;
1319 1294
1320 RESERVE_SPACE(NFS4_STATEID_SIZE); 1295 p = reserve_space(xdr, NFS4_STATEID_SIZE);
1321 if (ctx->state != NULL) { 1296 if (ctx->state != NULL) {
1322 nfs4_copy_stateid(&stateid, ctx->state, ctx->lockowner); 1297 nfs4_copy_stateid(&stateid, ctx->state, ctx->lockowner);
1323 WRITEMEM(stateid.data, NFS4_STATEID_SIZE); 1298 xdr_encode_opaque_fixed(p, stateid.data, NFS4_STATEID_SIZE);
1324 } else 1299 } else
1325 WRITEMEM(zero_stateid.data, NFS4_STATEID_SIZE); 1300 xdr_encode_opaque_fixed(p, zero_stateid.data, NFS4_STATEID_SIZE);
1326} 1301}
1327 1302
1328static void encode_read(struct xdr_stream *xdr, const struct nfs_readargs *args, struct compound_hdr *hdr) 1303static void encode_read(struct xdr_stream *xdr, const struct nfs_readargs *args, struct compound_hdr *hdr)
1329{ 1304{
1330 __be32 *p; 1305 __be32 *p;
1331 1306
1332 RESERVE_SPACE(4); 1307 p = reserve_space(xdr, 4);
1333 WRITE32(OP_READ); 1308 *p = cpu_to_be32(OP_READ);
1334 1309
1335 encode_stateid(xdr, args->context); 1310 encode_stateid(xdr, args->context);
1336 1311
1337 RESERVE_SPACE(12); 1312 p = reserve_space(xdr, 12);
1338 WRITE64(args->offset); 1313 p = xdr_encode_hyper(p, args->offset);
1339 WRITE32(args->count); 1314 *p = cpu_to_be32(args->count);
1340 hdr->nops++; 1315 hdr->nops++;
1341 hdr->replen += decode_read_maxsz; 1316 hdr->replen += decode_read_maxsz;
1342} 1317}
@@ -1349,20 +1324,20 @@ static void encode_readdir(struct xdr_stream *xdr, const struct nfs4_readdir_arg
1349 }; 1324 };
1350 __be32 *p; 1325 __be32 *p;
1351 1326
1352 RESERVE_SPACE(12+NFS4_VERIFIER_SIZE+20); 1327 p = reserve_space(xdr, 12+NFS4_VERIFIER_SIZE+20);
1353 WRITE32(OP_READDIR); 1328 *p++ = cpu_to_be32(OP_READDIR);
1354 WRITE64(readdir->cookie); 1329 p = xdr_encode_hyper(p, readdir->cookie);
1355 WRITEMEM(readdir->verifier.data, NFS4_VERIFIER_SIZE); 1330 p = xdr_encode_opaque_fixed(p, readdir->verifier.data, NFS4_VERIFIER_SIZE);
1356 WRITE32(readdir->count >> 1); /* We're not doing readdirplus */ 1331 *p++ = cpu_to_be32(readdir->count >> 1); /* We're not doing readdirplus */
1357 WRITE32(readdir->count); 1332 *p++ = cpu_to_be32(readdir->count);
1358 WRITE32(2); 1333 *p++ = cpu_to_be32(2);
1359 /* Switch to mounted_on_fileid if the server supports it */ 1334 /* Switch to mounted_on_fileid if the server supports it */
1360 if (readdir->bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID) 1335 if (readdir->bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
1361 attrs[0] &= ~FATTR4_WORD0_FILEID; 1336 attrs[0] &= ~FATTR4_WORD0_FILEID;
1362 else 1337 else
1363 attrs[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID; 1338 attrs[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID;
1364 WRITE32(attrs[0] & readdir->bitmask[0]); 1339 *p++ = cpu_to_be32(attrs[0] & readdir->bitmask[0]);
1365 WRITE32(attrs[1] & readdir->bitmask[1]); 1340 *p = cpu_to_be32(attrs[1] & readdir->bitmask[1]);
1366 hdr->nops++; 1341 hdr->nops++;
1367 hdr->replen += decode_readdir_maxsz; 1342 hdr->replen += decode_readdir_maxsz;
1368 dprintk("%s: cookie = %Lu, verifier = %08x:%08x, bitmap = %08x:%08x\n", 1343 dprintk("%s: cookie = %Lu, verifier = %08x:%08x, bitmap = %08x:%08x\n",
@@ -1378,8 +1353,8 @@ static void encode_readlink(struct xdr_stream *xdr, const struct nfs4_readlink *
1378{ 1353{
1379 __be32 *p; 1354 __be32 *p;
1380 1355
1381 RESERVE_SPACE(4); 1356 p = reserve_space(xdr, 4);
1382 WRITE32(OP_READLINK); 1357 *p = cpu_to_be32(OP_READLINK);
1383 hdr->nops++; 1358 hdr->nops++;
1384 hdr->replen += decode_readlink_maxsz; 1359 hdr->replen += decode_readlink_maxsz;
1385} 1360}
@@ -1388,10 +1363,9 @@ static void encode_remove(struct xdr_stream *xdr, const struct qstr *name, struc
1388{ 1363{
1389 __be32 *p; 1364 __be32 *p;
1390 1365
1391 RESERVE_SPACE(8 + name->len); 1366 p = reserve_space(xdr, 8 + name->len);
1392 WRITE32(OP_REMOVE); 1367 *p++ = cpu_to_be32(OP_REMOVE);
1393 WRITE32(name->len); 1368 xdr_encode_opaque(p, name->name, name->len);
1394 WRITEMEM(name->name, name->len);
1395 hdr->nops++; 1369 hdr->nops++;
1396 hdr->replen += decode_remove_maxsz; 1370 hdr->replen += decode_remove_maxsz;
1397} 1371}
@@ -1400,14 +1374,10 @@ static void encode_rename(struct xdr_stream *xdr, const struct qstr *oldname, co
1400{ 1374{
1401 __be32 *p; 1375 __be32 *p;
1402 1376
1403 RESERVE_SPACE(8 + oldname->len); 1377 p = reserve_space(xdr, 4);
1404 WRITE32(OP_RENAME); 1378 *p = cpu_to_be32(OP_RENAME);
1405 WRITE32(oldname->len); 1379 encode_string(xdr, oldname->len, oldname->name);
1406 WRITEMEM(oldname->name, oldname->len); 1380 encode_string(xdr, newname->len, newname->name);
1407
1408 RESERVE_SPACE(4 + newname->len);
1409 WRITE32(newname->len);
1410 WRITEMEM(newname->name, newname->len);
1411 hdr->nops++; 1381 hdr->nops++;
1412 hdr->replen += decode_rename_maxsz; 1382 hdr->replen += decode_rename_maxsz;
1413} 1383}
@@ -1416,9 +1386,9 @@ static void encode_renew(struct xdr_stream *xdr, const struct nfs_client *client
1416{ 1386{
1417 __be32 *p; 1387 __be32 *p;
1418 1388
1419 RESERVE_SPACE(12); 1389 p = reserve_space(xdr, 12);
1420 WRITE32(OP_RENEW); 1390 *p++ = cpu_to_be32(OP_RENEW);
1421 WRITE64(client_stateid->cl_clientid); 1391 xdr_encode_hyper(p, client_stateid->cl_clientid);
1422 hdr->nops++; 1392 hdr->nops++;
1423 hdr->replen += decode_renew_maxsz; 1393 hdr->replen += decode_renew_maxsz;
1424} 1394}
@@ -1428,8 +1398,8 @@ encode_restorefh(struct xdr_stream *xdr, struct compound_hdr *hdr)
1428{ 1398{
1429 __be32 *p; 1399 __be32 *p;
1430 1400
1431 RESERVE_SPACE(4); 1401 p = reserve_space(xdr, 4);
1432 WRITE32(OP_RESTOREFH); 1402 *p = cpu_to_be32(OP_RESTOREFH);
1433 hdr->nops++; 1403 hdr->nops++;
1434 hdr->replen += decode_restorefh_maxsz; 1404 hdr->replen += decode_restorefh_maxsz;
1435} 1405}
@@ -1439,16 +1409,16 @@ encode_setacl(struct xdr_stream *xdr, struct nfs_setaclargs *arg, struct compoun
1439{ 1409{
1440 __be32 *p; 1410 __be32 *p;
1441 1411
1442 RESERVE_SPACE(4+NFS4_STATEID_SIZE); 1412 p = reserve_space(xdr, 4+NFS4_STATEID_SIZE);
1443 WRITE32(OP_SETATTR); 1413 *p++ = cpu_to_be32(OP_SETATTR);
1444 WRITEMEM(zero_stateid.data, NFS4_STATEID_SIZE); 1414 xdr_encode_opaque_fixed(p, zero_stateid.data, NFS4_STATEID_SIZE);
1445 RESERVE_SPACE(2*4); 1415 p = reserve_space(xdr, 2*4);
1446 WRITE32(1); 1416 *p++ = cpu_to_be32(1);
1447 WRITE32(FATTR4_WORD0_ACL); 1417 *p = cpu_to_be32(FATTR4_WORD0_ACL);
1448 if (arg->acl_len % 4) 1418 if (arg->acl_len % 4)
1449 return -EINVAL; 1419 return -EINVAL;
1450 RESERVE_SPACE(4); 1420 p = reserve_space(xdr, 4);
1451 WRITE32(arg->acl_len); 1421 *p = cpu_to_be32(arg->acl_len);
1452 xdr_write_pages(xdr, arg->acl_pages, arg->acl_pgbase, arg->acl_len); 1422 xdr_write_pages(xdr, arg->acl_pages, arg->acl_pgbase, arg->acl_len);
1453 hdr->nops++; 1423 hdr->nops++;
1454 hdr->replen += decode_setacl_maxsz; 1424 hdr->replen += decode_setacl_maxsz;
@@ -1460,8 +1430,8 @@ encode_savefh(struct xdr_stream *xdr, struct compound_hdr *hdr)
1460{ 1430{
1461 __be32 *p; 1431 __be32 *p;
1462 1432
1463 RESERVE_SPACE(4); 1433 p = reserve_space(xdr, 4);
1464 WRITE32(OP_SAVEFH); 1434 *p = cpu_to_be32(OP_SAVEFH);
1465 hdr->nops++; 1435 hdr->nops++;
1466 hdr->replen += decode_savefh_maxsz; 1436 hdr->replen += decode_savefh_maxsz;
1467} 1437}
@@ -1470,9 +1440,9 @@ static void encode_setattr(struct xdr_stream *xdr, const struct nfs_setattrargs
1470{ 1440{
1471 __be32 *p; 1441 __be32 *p;
1472 1442
1473 RESERVE_SPACE(4+NFS4_STATEID_SIZE); 1443 p = reserve_space(xdr, 4+NFS4_STATEID_SIZE);
1474 WRITE32(OP_SETATTR); 1444 *p++ = cpu_to_be32(OP_SETATTR);
1475 WRITEMEM(arg->stateid.data, NFS4_STATEID_SIZE); 1445 xdr_encode_opaque_fixed(p, arg->stateid.data, NFS4_STATEID_SIZE);
1476 hdr->nops++; 1446 hdr->nops++;
1477 hdr->replen += decode_setattr_maxsz; 1447 hdr->replen += decode_setattr_maxsz;
1478 encode_attrs(xdr, arg->iap, server); 1448 encode_attrs(xdr, arg->iap, server);
@@ -1482,17 +1452,17 @@ static void encode_setclientid(struct xdr_stream *xdr, const struct nfs4_setclie
1482{ 1452{
1483 __be32 *p; 1453 __be32 *p;
1484 1454
1485 RESERVE_SPACE(4 + NFS4_VERIFIER_SIZE); 1455 p = reserve_space(xdr, 4 + NFS4_VERIFIER_SIZE);
1486 WRITE32(OP_SETCLIENTID); 1456 *p++ = cpu_to_be32(OP_SETCLIENTID);
1487 WRITEMEM(setclientid->sc_verifier->data, NFS4_VERIFIER_SIZE); 1457 xdr_encode_opaque_fixed(p, setclientid->sc_verifier->data, NFS4_VERIFIER_SIZE);
1488 1458
1489 encode_string(xdr, setclientid->sc_name_len, setclientid->sc_name); 1459 encode_string(xdr, setclientid->sc_name_len, setclientid->sc_name);
1490 RESERVE_SPACE(4); 1460 p = reserve_space(xdr, 4);
1491 WRITE32(setclientid->sc_prog); 1461 *p = cpu_to_be32(setclientid->sc_prog);
1492 encode_string(xdr, setclientid->sc_netid_len, setclientid->sc_netid); 1462 encode_string(xdr, setclientid->sc_netid_len, setclientid->sc_netid);
1493 encode_string(xdr, setclientid->sc_uaddr_len, setclientid->sc_uaddr); 1463 encode_string(xdr, setclientid->sc_uaddr_len, setclientid->sc_uaddr);
1494 RESERVE_SPACE(4); 1464 p = reserve_space(xdr, 4);
1495 WRITE32(setclientid->sc_cb_ident); 1465 *p = cpu_to_be32(setclientid->sc_cb_ident);
1496 hdr->nops++; 1466 hdr->nops++;
1497 hdr->replen += decode_setclientid_maxsz; 1467 hdr->replen += decode_setclientid_maxsz;
1498} 1468}
@@ -1501,10 +1471,10 @@ static void encode_setclientid_confirm(struct xdr_stream *xdr, const struct nfs_
1501{ 1471{
1502 __be32 *p; 1472 __be32 *p;
1503 1473
1504 RESERVE_SPACE(12 + NFS4_VERIFIER_SIZE); 1474 p = reserve_space(xdr, 12 + NFS4_VERIFIER_SIZE);
1505 WRITE32(OP_SETCLIENTID_CONFIRM); 1475 *p++ = cpu_to_be32(OP_SETCLIENTID_CONFIRM);
1506 WRITE64(client_state->cl_clientid); 1476 p = xdr_encode_hyper(p, client_state->cl_clientid);
1507 WRITEMEM(client_state->cl_confirm.data, NFS4_VERIFIER_SIZE); 1477 xdr_encode_opaque_fixed(p, client_state->cl_confirm.data, NFS4_VERIFIER_SIZE);
1508 hdr->nops++; 1478 hdr->nops++;
1509 hdr->replen += decode_setclientid_confirm_maxsz; 1479 hdr->replen += decode_setclientid_confirm_maxsz;
1510} 1480}
@@ -1513,15 +1483,15 @@ static void encode_write(struct xdr_stream *xdr, const struct nfs_writeargs *arg
1513{ 1483{
1514 __be32 *p; 1484 __be32 *p;
1515 1485
1516 RESERVE_SPACE(4); 1486 p = reserve_space(xdr, 4);
1517 WRITE32(OP_WRITE); 1487 *p = cpu_to_be32(OP_WRITE);
1518 1488
1519 encode_stateid(xdr, args->context); 1489 encode_stateid(xdr, args->context);
1520 1490
1521 RESERVE_SPACE(16); 1491 p = reserve_space(xdr, 16);
1522 WRITE64(args->offset); 1492 p = xdr_encode_hyper(p, args->offset);
1523 WRITE32(args->stable); 1493 *p++ = cpu_to_be32(args->stable);
1524 WRITE32(args->count); 1494 *p = cpu_to_be32(args->count);
1525 1495
1526 xdr_write_pages(xdr, args->pages, args->pgbase, args->count); 1496 xdr_write_pages(xdr, args->pages, args->pgbase, args->count);
1527 hdr->nops++; 1497 hdr->nops++;
@@ -1532,10 +1502,10 @@ static void encode_delegreturn(struct xdr_stream *xdr, const nfs4_stateid *state
1532{ 1502{
1533 __be32 *p; 1503 __be32 *p;
1534 1504
1535 RESERVE_SPACE(4+NFS4_STATEID_SIZE); 1505 p = reserve_space(xdr, 4+NFS4_STATEID_SIZE);
1536 1506
1537 WRITE32(OP_DELEGRETURN); 1507 *p++ = cpu_to_be32(OP_DELEGRETURN);
1538 WRITEMEM(stateid->data, NFS4_STATEID_SIZE); 1508 xdr_encode_opaque_fixed(p, stateid->data, NFS4_STATEID_SIZE);
1539 hdr->nops++; 1509 hdr->nops++;
1540 hdr->replen += decode_delegreturn_maxsz; 1510 hdr->replen += decode_delegreturn_maxsz;
1541} 1511}
@@ -1548,16 +1518,16 @@ static void encode_exchange_id(struct xdr_stream *xdr,
1548{ 1518{
1549 __be32 *p; 1519 __be32 *p;
1550 1520
1551 RESERVE_SPACE(4 + sizeof(args->verifier->data)); 1521 p = reserve_space(xdr, 4 + sizeof(args->verifier->data));
1552 WRITE32(OP_EXCHANGE_ID); 1522 *p++ = cpu_to_be32(OP_EXCHANGE_ID);
1553 WRITEMEM(args->verifier->data, sizeof(args->verifier->data)); 1523 xdr_encode_opaque_fixed(p, args->verifier->data, sizeof(args->verifier->data));
1554 1524
1555 encode_string(xdr, args->id_len, args->id); 1525 encode_string(xdr, args->id_len, args->id);
1556 1526
1557 RESERVE_SPACE(12); 1527 p = reserve_space(xdr, 12);
1558 WRITE32(args->flags); 1528 *p++ = cpu_to_be32(args->flags);
1559 WRITE32(0); /* zero length state_protect4_a */ 1529 *p++ = cpu_to_be32(0); /* zero length state_protect4_a */
1560 WRITE32(0); /* zero length implementation id array */ 1530 *p = cpu_to_be32(0); /* zero length implementation id array */
1561 hdr->nops++; 1531 hdr->nops++;
1562 hdr->replen += decode_exchange_id_maxsz; 1532 hdr->replen += decode_exchange_id_maxsz;
1563} 1533}
@@ -1571,55 +1541,43 @@ static void encode_create_session(struct xdr_stream *xdr,
1571 uint32_t len; 1541 uint32_t len;
1572 struct nfs_client *clp = args->client; 1542 struct nfs_client *clp = args->client;
1573 1543
1574 RESERVE_SPACE(4); 1544 len = scnprintf(machine_name, sizeof(machine_name), "%s",
1575 WRITE32(OP_CREATE_SESSION); 1545 clp->cl_ipaddr);
1576
1577 RESERVE_SPACE(8);
1578 WRITE64(clp->cl_ex_clid);
1579 1546
1580 RESERVE_SPACE(8); 1547 p = reserve_space(xdr, 20 + 2*28 + 20 + len + 12);
1581 WRITE32(clp->cl_seqid); /*Sequence id */ 1548 *p++ = cpu_to_be32(OP_CREATE_SESSION);
1582 WRITE32(args->flags); /*flags */ 1549 p = xdr_encode_hyper(p, clp->cl_ex_clid);
1550 *p++ = cpu_to_be32(clp->cl_seqid); /*Sequence id */
1551 *p++ = cpu_to_be32(args->flags); /*flags */
1583 1552
1584 RESERVE_SPACE(2*28); /* 2 channel_attrs */
1585 /* Fore Channel */ 1553 /* Fore Channel */
1586 WRITE32(args->fc_attrs.headerpadsz); /* header padding size */ 1554 *p++ = cpu_to_be32(args->fc_attrs.headerpadsz); /* header padding size */
1587 WRITE32(args->fc_attrs.max_rqst_sz); /* max req size */ 1555 *p++ = cpu_to_be32(args->fc_attrs.max_rqst_sz); /* max req size */
1588 WRITE32(args->fc_attrs.max_resp_sz); /* max resp size */ 1556 *p++ = cpu_to_be32(args->fc_attrs.max_resp_sz); /* max resp size */
1589 WRITE32(args->fc_attrs.max_resp_sz_cached); /* Max resp sz cached */ 1557 *p++ = cpu_to_be32(args->fc_attrs.max_resp_sz_cached); /* Max resp sz cached */
1590 WRITE32(args->fc_attrs.max_ops); /* max operations */ 1558 *p++ = cpu_to_be32(args->fc_attrs.max_ops); /* max operations */
1591 WRITE32(args->fc_attrs.max_reqs); /* max requests */ 1559 *p++ = cpu_to_be32(args->fc_attrs.max_reqs); /* max requests */
1592 WRITE32(0); /* rdmachannel_attrs */ 1560 *p++ = cpu_to_be32(0); /* rdmachannel_attrs */
1593 1561
1594 /* Back Channel */ 1562 /* Back Channel */
1595 WRITE32(args->fc_attrs.headerpadsz); /* header padding size */ 1563 *p++ = cpu_to_be32(args->fc_attrs.headerpadsz); /* header padding size */
1596 WRITE32(args->bc_attrs.max_rqst_sz); /* max req size */ 1564 *p++ = cpu_to_be32(args->bc_attrs.max_rqst_sz); /* max req size */
1597 WRITE32(args->bc_attrs.max_resp_sz); /* max resp size */ 1565 *p++ = cpu_to_be32(args->bc_attrs.max_resp_sz); /* max resp size */
1598 WRITE32(args->bc_attrs.max_resp_sz_cached); /* Max resp sz cached */ 1566 *p++ = cpu_to_be32(args->bc_attrs.max_resp_sz_cached); /* Max resp sz cached */
1599 WRITE32(args->bc_attrs.max_ops); /* max operations */ 1567 *p++ = cpu_to_be32(args->bc_attrs.max_ops); /* max operations */
1600 WRITE32(args->bc_attrs.max_reqs); /* max requests */ 1568 *p++ = cpu_to_be32(args->bc_attrs.max_reqs); /* max requests */
1601 WRITE32(0); /* rdmachannel_attrs */ 1569 *p++ = cpu_to_be32(0); /* rdmachannel_attrs */
1602 1570
1603 RESERVE_SPACE(4); 1571 *p++ = cpu_to_be32(args->cb_program); /* cb_program */
1604 WRITE32(args->cb_program); /* cb_program */ 1572 *p++ = cpu_to_be32(1);
1605 1573 *p++ = cpu_to_be32(RPC_AUTH_UNIX); /* auth_sys */
1606 RESERVE_SPACE(4); /* # of security flavors */
1607 WRITE32(1);
1608
1609 RESERVE_SPACE(4);
1610 WRITE32(RPC_AUTH_UNIX); /* auth_sys */
1611 1574
1612 /* authsys_parms rfc1831 */ 1575 /* authsys_parms rfc1831 */
1613 RESERVE_SPACE(4); 1576 *p++ = cpu_to_be32((u32)clp->cl_boot_time.tv_nsec); /* stamp */
1614 WRITE32((u32)clp->cl_boot_time.tv_nsec); /* stamp */ 1577 p = xdr_encode_opaque(p, machine_name, len);
1615 len = scnprintf(machine_name, sizeof(machine_name), "%s", 1578 *p++ = cpu_to_be32(0); /* UID */
1616 clp->cl_ipaddr); 1579 *p++ = cpu_to_be32(0); /* GID */
1617 RESERVE_SPACE(16 + len); 1580 *p = cpu_to_be32(0); /* No more gids */
1618 WRITE32(len);
1619 WRITEMEM(machine_name, len);
1620 WRITE32(0); /* UID */
1621 WRITE32(0); /* GID */
1622 WRITE32(0); /* No more gids */
1623 hdr->nops++; 1581 hdr->nops++;
1624 hdr->replen += decode_create_session_maxsz; 1582 hdr->replen += decode_create_session_maxsz;
1625} 1583}
@@ -1629,9 +1587,9 @@ static void encode_destroy_session(struct xdr_stream *xdr,
1629 struct compound_hdr *hdr) 1587 struct compound_hdr *hdr)
1630{ 1588{
1631 __be32 *p; 1589 __be32 *p;
1632 RESERVE_SPACE(4 + NFS4_MAX_SESSIONID_LEN); 1590 p = reserve_space(xdr, 4 + NFS4_MAX_SESSIONID_LEN);
1633 WRITE32(OP_DESTROY_SESSION); 1591 *p++ = cpu_to_be32(OP_DESTROY_SESSION);
1634 WRITEMEM(session->sess_id.data, NFS4_MAX_SESSIONID_LEN); 1592 xdr_encode_opaque_fixed(p, session->sess_id.data, NFS4_MAX_SESSIONID_LEN);
1635 hdr->nops++; 1593 hdr->nops++;
1636 hdr->replen += decode_destroy_session_maxsz; 1594 hdr->replen += decode_destroy_session_maxsz;
1637} 1595}
@@ -1655,8 +1613,8 @@ static void encode_sequence(struct xdr_stream *xdr,
1655 WARN_ON(args->sa_slotid == NFS4_MAX_SLOT_TABLE); 1613 WARN_ON(args->sa_slotid == NFS4_MAX_SLOT_TABLE);
1656 slot = tp->slots + args->sa_slotid; 1614 slot = tp->slots + args->sa_slotid;
1657 1615
1658 RESERVE_SPACE(4); 1616 p = reserve_space(xdr, 4 + NFS4_MAX_SESSIONID_LEN + 16);
1659 WRITE32(OP_SEQUENCE); 1617 *p++ = cpu_to_be32(OP_SEQUENCE);
1660 1618
1661 /* 1619 /*
1662 * Sessionid + seqid + slotid + max slotid + cache_this 1620 * Sessionid + seqid + slotid + max slotid + cache_this
@@ -1670,12 +1628,11 @@ static void encode_sequence(struct xdr_stream *xdr,
1670 ((u32 *)session->sess_id.data)[3], 1628 ((u32 *)session->sess_id.data)[3],
1671 slot->seq_nr, args->sa_slotid, 1629 slot->seq_nr, args->sa_slotid,
1672 tp->highest_used_slotid, args->sa_cache_this); 1630 tp->highest_used_slotid, args->sa_cache_this);
1673 RESERVE_SPACE(NFS4_MAX_SESSIONID_LEN + 16); 1631 p = xdr_encode_opaque_fixed(p, session->sess_id.data, NFS4_MAX_SESSIONID_LEN);
1674 WRITEMEM(session->sess_id.data, NFS4_MAX_SESSIONID_LEN); 1632 *p++ = cpu_to_be32(slot->seq_nr);
1675 WRITE32(slot->seq_nr); 1633 *p++ = cpu_to_be32(args->sa_slotid);
1676 WRITE32(args->sa_slotid); 1634 *p++ = cpu_to_be32(tp->highest_used_slotid);
1677 WRITE32(tp->highest_used_slotid); 1635 *p = cpu_to_be32(args->sa_cache_this);
1678 WRITE32(args->sa_cache_this);
1679 hdr->nops++; 1636 hdr->nops++;
1680 hdr->replen += decode_sequence_maxsz; 1637 hdr->replen += decode_sequence_maxsz;
1681#endif /* CONFIG_NFS_V4_1 */ 1638#endif /* CONFIG_NFS_V4_1 */
@@ -2466,68 +2423,53 @@ static int nfs4_xdr_enc_get_lease_time(struct rpc_rqst *req, uint32_t *p,
2466} 2423}
2467#endif /* CONFIG_NFS_V4_1 */ 2424#endif /* CONFIG_NFS_V4_1 */
2468 2425
2469/* 2426static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
2470 * START OF "GENERIC" DECODE ROUTINES. 2427{
2471 * These may look a little ugly since they are imported from a "generic" 2428 dprintk("nfs: %s: prematurely hit end of receive buffer. "
2472 * set of XDR encode/decode routines which are intended to be shared by 2429 "Remaining buffer length is %tu words.\n",
2473 * all of our NFSv4 implementations (OpenBSD, MacOS X...). 2430 func, xdr->end - xdr->p);
2474 * 2431}
2475 * If the pain of reading these is too great, it should be a straightforward
2476 * task to translate them into Linux-specific versions which are more
2477 * consistent with the style used in NFSv2/v3...
2478 */
2479#define READ32(x) (x) = ntohl(*p++)
2480#define READ64(x) do { \
2481 (x) = (u64)ntohl(*p++) << 32; \
2482 (x) |= ntohl(*p++); \
2483} while (0)
2484#define READTIME(x) do { \
2485 p++; \
2486 (x.tv_sec) = ntohl(*p++); \
2487 (x.tv_nsec) = ntohl(*p++); \
2488} while (0)
2489#define COPYMEM(x,nbytes) do { \
2490 memcpy((x), p, nbytes); \
2491 p += XDR_QUADLEN(nbytes); \
2492} while (0)
2493
2494#define READ_BUF(nbytes) do { \
2495 p = xdr_inline_decode(xdr, nbytes); \
2496 if (unlikely(!p)) { \
2497 dprintk("nfs: %s: prematurely hit end of receive" \
2498 " buffer\n", __func__); \
2499 dprintk("nfs: %s: xdr->p=%p, bytes=%u, xdr->end=%p\n", \
2500 __func__, xdr->p, nbytes, xdr->end); \
2501 return -EIO; \
2502 } \
2503} while (0)
2504 2432
2505static int decode_opaque_inline(struct xdr_stream *xdr, unsigned int *len, char **string) 2433static int decode_opaque_inline(struct xdr_stream *xdr, unsigned int *len, char **string)
2506{ 2434{
2507 __be32 *p; 2435 __be32 *p;
2508 2436
2509 READ_BUF(4); 2437 p = xdr_inline_decode(xdr, 4);
2510 READ32(*len); 2438 if (unlikely(!p))
2511 READ_BUF(*len); 2439 goto out_overflow;
2440 *len = be32_to_cpup(p);
2441 p = xdr_inline_decode(xdr, *len);
2442 if (unlikely(!p))
2443 goto out_overflow;
2512 *string = (char *)p; 2444 *string = (char *)p;
2513 return 0; 2445 return 0;
2446out_overflow:
2447 print_overflow_msg(__func__, xdr);
2448 return -EIO;
2514} 2449}
2515 2450
2516static int decode_compound_hdr(struct xdr_stream *xdr, struct compound_hdr *hdr) 2451static int decode_compound_hdr(struct xdr_stream *xdr, struct compound_hdr *hdr)
2517{ 2452{
2518 __be32 *p; 2453 __be32 *p;
2519 2454
2520 READ_BUF(8); 2455 p = xdr_inline_decode(xdr, 8);
2521 READ32(hdr->status); 2456 if (unlikely(!p))
2522 READ32(hdr->taglen); 2457 goto out_overflow;
2458 hdr->status = be32_to_cpup(p++);
2459 hdr->taglen = be32_to_cpup(p);
2523 2460
2524 READ_BUF(hdr->taglen + 4); 2461 p = xdr_inline_decode(xdr, hdr->taglen + 4);
2462 if (unlikely(!p))
2463 goto out_overflow;
2525 hdr->tag = (char *)p; 2464 hdr->tag = (char *)p;
2526 p += XDR_QUADLEN(hdr->taglen); 2465 p += XDR_QUADLEN(hdr->taglen);
2527 READ32(hdr->nops); 2466 hdr->nops = be32_to_cpup(p);
2528 if (unlikely(hdr->nops < 1)) 2467 if (unlikely(hdr->nops < 1))
2529 return nfs4_stat_to_errno(hdr->status); 2468 return nfs4_stat_to_errno(hdr->status);
2530 return 0; 2469 return 0;
2470out_overflow:
2471 print_overflow_msg(__func__, xdr);
2472 return -EIO;
2531} 2473}
2532 2474
2533static int decode_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected) 2475static int decode_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected)
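On the decode side, the READ_BUF/READ32/READ64 macros (READ_BUF returned -EIO from inside the caller on short buffers) become explicit xdr_inline_decode() calls funnelled through an out_overflow label. A minimal sketch of the converted shape (decode_foo and its arguments are hypothetical; xdr_inline_decode() returns NULL when the receive buffer is exhausted, and print_overflow_msg() is the helper introduced at the start of this decode section):

static int decode_foo(struct xdr_stream *xdr, uint32_t *count, uint64_t *offset)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, 12);          /* formerly READ_BUF(12) */
	if (unlikely(!p))
		goto out_overflow;
	*count = be32_to_cpup(p++);              /* formerly READ32(*count) */
	xdr_decode_hyper(p, offset);             /* formerly READ64(*offset) */
	return 0;
out_overflow:
	print_overflow_msg(__func__, xdr);
	return -EIO;
}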
@@ -2536,18 +2478,23 @@ static int decode_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected)
2536 uint32_t opnum; 2478 uint32_t opnum;
2537 int32_t nfserr; 2479 int32_t nfserr;
2538 2480
2539 READ_BUF(8); 2481 p = xdr_inline_decode(xdr, 8);
2540 READ32(opnum); 2482 if (unlikely(!p))
2483 goto out_overflow;
2484 opnum = be32_to_cpup(p++);
2541 if (opnum != expected) { 2485 if (opnum != expected) {
2542 dprintk("nfs: Server returned operation" 2486 dprintk("nfs: Server returned operation"
2543 " %d but we issued a request for %d\n", 2487 " %d but we issued a request for %d\n",
2544 opnum, expected); 2488 opnum, expected);
2545 return -EIO; 2489 return -EIO;
2546 } 2490 }
2547 READ32(nfserr); 2491 nfserr = be32_to_cpup(p);
2548 if (nfserr != NFS_OK) 2492 if (nfserr != NFS_OK)
2549 return nfs4_stat_to_errno(nfserr); 2493 return nfs4_stat_to_errno(nfserr);
2550 return 0; 2494 return 0;
2495out_overflow:
2496 print_overflow_msg(__func__, xdr);
2497 return -EIO;
2551} 2498}
2552 2499
2553/* Dummy routine */ 2500/* Dummy routine */
@@ -2557,8 +2504,11 @@ static int decode_ace(struct xdr_stream *xdr, void *ace, struct nfs_client *clp)
2557 unsigned int strlen; 2504 unsigned int strlen;
2558 char *str; 2505 char *str;
2559 2506
2560 READ_BUF(12); 2507 p = xdr_inline_decode(xdr, 12);
2561 return decode_opaque_inline(xdr, &strlen, &str); 2508 if (likely(p))
2509 return decode_opaque_inline(xdr, &strlen, &str);
2510 print_overflow_msg(__func__, xdr);
2511 return -EIO;
2562} 2512}
2563 2513
2564static int decode_attr_bitmap(struct xdr_stream *xdr, uint32_t *bitmap) 2514static int decode_attr_bitmap(struct xdr_stream *xdr, uint32_t *bitmap)
@@ -2566,27 +2516,39 @@ static int decode_attr_bitmap(struct xdr_stream *xdr, uint32_t *bitmap)
2566 uint32_t bmlen; 2516 uint32_t bmlen;
2567 __be32 *p; 2517 __be32 *p;
2568 2518
2569 READ_BUF(4); 2519 p = xdr_inline_decode(xdr, 4);
2570 READ32(bmlen); 2520 if (unlikely(!p))
2521 goto out_overflow;
2522 bmlen = be32_to_cpup(p);
2571 2523
2572 bitmap[0] = bitmap[1] = 0; 2524 bitmap[0] = bitmap[1] = 0;
2573 READ_BUF((bmlen << 2)); 2525 p = xdr_inline_decode(xdr, (bmlen << 2));
2526 if (unlikely(!p))
2527 goto out_overflow;
2574 if (bmlen > 0) { 2528 if (bmlen > 0) {
2575 READ32(bitmap[0]); 2529 bitmap[0] = be32_to_cpup(p++);
2576 if (bmlen > 1) 2530 if (bmlen > 1)
2577 READ32(bitmap[1]); 2531 bitmap[1] = be32_to_cpup(p);
2578 } 2532 }
2579 return 0; 2533 return 0;
2534out_overflow:
2535 print_overflow_msg(__func__, xdr);
2536 return -EIO;
2580} 2537}
2581 2538
2582static inline int decode_attr_length(struct xdr_stream *xdr, uint32_t *attrlen, __be32 **savep) 2539static inline int decode_attr_length(struct xdr_stream *xdr, uint32_t *attrlen, __be32 **savep)
2583{ 2540{
2584 __be32 *p; 2541 __be32 *p;
2585 2542
2586 READ_BUF(4); 2543 p = xdr_inline_decode(xdr, 4);
2587 READ32(*attrlen); 2544 if (unlikely(!p))
2545 goto out_overflow;
2546 *attrlen = be32_to_cpup(p);
2588 *savep = xdr->p; 2547 *savep = xdr->p;
2589 return 0; 2548 return 0;
2549out_overflow:
2550 print_overflow_msg(__func__, xdr);
2551 return -EIO;
2590} 2552}
2591 2553
2592static int decode_attr_supported(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *bitmask) 2554static int decode_attr_supported(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *bitmask)
@@ -2609,8 +2571,10 @@ static int decode_attr_type(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *
2609 if (unlikely(bitmap[0] & (FATTR4_WORD0_TYPE - 1U))) 2571 if (unlikely(bitmap[0] & (FATTR4_WORD0_TYPE - 1U)))
2610 return -EIO; 2572 return -EIO;
2611 if (likely(bitmap[0] & FATTR4_WORD0_TYPE)) { 2573 if (likely(bitmap[0] & FATTR4_WORD0_TYPE)) {
2612 READ_BUF(4); 2574 p = xdr_inline_decode(xdr, 4);
2613 READ32(*type); 2575 if (unlikely(!p))
2576 goto out_overflow;
2577 *type = be32_to_cpup(p);
2614 if (*type < NF4REG || *type > NF4NAMEDATTR) { 2578 if (*type < NF4REG || *type > NF4NAMEDATTR) {
2615 dprintk("%s: bad type %d\n", __func__, *type); 2579 dprintk("%s: bad type %d\n", __func__, *type);
2616 return -EIO; 2580 return -EIO;
@@ -2620,6 +2584,9 @@ static int decode_attr_type(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *
2620 } 2584 }
2621 dprintk("%s: type=0%o\n", __func__, nfs_type2fmt[*type]); 2585 dprintk("%s: type=0%o\n", __func__, nfs_type2fmt[*type]);
2622 return ret; 2586 return ret;
2587out_overflow:
2588 print_overflow_msg(__func__, xdr);
2589 return -EIO;
2623} 2590}
2624 2591
2625static int decode_attr_change(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *change) 2592static int decode_attr_change(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *change)
@@ -2631,14 +2598,19 @@ static int decode_attr_change(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t
2631 if (unlikely(bitmap[0] & (FATTR4_WORD0_CHANGE - 1U))) 2598 if (unlikely(bitmap[0] & (FATTR4_WORD0_CHANGE - 1U)))
2632 return -EIO; 2599 return -EIO;
2633 if (likely(bitmap[0] & FATTR4_WORD0_CHANGE)) { 2600 if (likely(bitmap[0] & FATTR4_WORD0_CHANGE)) {
2634 READ_BUF(8); 2601 p = xdr_inline_decode(xdr, 8);
2635 READ64(*change); 2602 if (unlikely(!p))
2603 goto out_overflow;
2604 xdr_decode_hyper(p, change);
2636 bitmap[0] &= ~FATTR4_WORD0_CHANGE; 2605 bitmap[0] &= ~FATTR4_WORD0_CHANGE;
2637 ret = NFS_ATTR_FATTR_CHANGE; 2606 ret = NFS_ATTR_FATTR_CHANGE;
2638 } 2607 }
2639 dprintk("%s: change attribute=%Lu\n", __func__, 2608 dprintk("%s: change attribute=%Lu\n", __func__,
2640 (unsigned long long)*change); 2609 (unsigned long long)*change);
2641 return ret; 2610 return ret;
2611out_overflow:
2612 print_overflow_msg(__func__, xdr);
2613 return -EIO;
2642} 2614}
2643 2615
2644static int decode_attr_size(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *size) 2616static int decode_attr_size(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *size)
@@ -2650,13 +2622,18 @@ static int decode_attr_size(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *
2650 if (unlikely(bitmap[0] & (FATTR4_WORD0_SIZE - 1U))) 2622 if (unlikely(bitmap[0] & (FATTR4_WORD0_SIZE - 1U)))
2651 return -EIO; 2623 return -EIO;
2652 if (likely(bitmap[0] & FATTR4_WORD0_SIZE)) { 2624 if (likely(bitmap[0] & FATTR4_WORD0_SIZE)) {
2653 READ_BUF(8); 2625 p = xdr_inline_decode(xdr, 8);
2654 READ64(*size); 2626 if (unlikely(!p))
2627 goto out_overflow;
2628 xdr_decode_hyper(p, size);
2655 bitmap[0] &= ~FATTR4_WORD0_SIZE; 2629 bitmap[0] &= ~FATTR4_WORD0_SIZE;
2656 ret = NFS_ATTR_FATTR_SIZE; 2630 ret = NFS_ATTR_FATTR_SIZE;
2657 } 2631 }
2658 dprintk("%s: file size=%Lu\n", __func__, (unsigned long long)*size); 2632 dprintk("%s: file size=%Lu\n", __func__, (unsigned long long)*size);
2659 return ret; 2633 return ret;
2634out_overflow:
2635 print_overflow_msg(__func__, xdr);
2636 return -EIO;
2660} 2637}
2661 2638
2662static int decode_attr_link_support(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res) 2639static int decode_attr_link_support(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res)
@@ -2667,12 +2644,17 @@ static int decode_attr_link_support(struct xdr_stream *xdr, uint32_t *bitmap, ui
2667 if (unlikely(bitmap[0] & (FATTR4_WORD0_LINK_SUPPORT - 1U))) 2644 if (unlikely(bitmap[0] & (FATTR4_WORD0_LINK_SUPPORT - 1U)))
2668 return -EIO; 2645 return -EIO;
2669 if (likely(bitmap[0] & FATTR4_WORD0_LINK_SUPPORT)) { 2646 if (likely(bitmap[0] & FATTR4_WORD0_LINK_SUPPORT)) {
2670 READ_BUF(4); 2647 p = xdr_inline_decode(xdr, 4);
2671 READ32(*res); 2648 if (unlikely(!p))
2649 goto out_overflow;
2650 *res = be32_to_cpup(p);
2672 bitmap[0] &= ~FATTR4_WORD0_LINK_SUPPORT; 2651 bitmap[0] &= ~FATTR4_WORD0_LINK_SUPPORT;
2673 } 2652 }
2674 dprintk("%s: link support=%s\n", __func__, *res == 0 ? "false" : "true"); 2653 dprintk("%s: link support=%s\n", __func__, *res == 0 ? "false" : "true");
2675 return 0; 2654 return 0;
2655out_overflow:
2656 print_overflow_msg(__func__, xdr);
2657 return -EIO;
2676} 2658}
2677 2659
2678static int decode_attr_symlink_support(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res) 2660static int decode_attr_symlink_support(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res)
@@ -2683,12 +2665,17 @@ static int decode_attr_symlink_support(struct xdr_stream *xdr, uint32_t *bitmap,
2683 if (unlikely(bitmap[0] & (FATTR4_WORD0_SYMLINK_SUPPORT - 1U))) 2665 if (unlikely(bitmap[0] & (FATTR4_WORD0_SYMLINK_SUPPORT - 1U)))
2684 return -EIO; 2666 return -EIO;
2685 if (likely(bitmap[0] & FATTR4_WORD0_SYMLINK_SUPPORT)) { 2667 if (likely(bitmap[0] & FATTR4_WORD0_SYMLINK_SUPPORT)) {
2686 READ_BUF(4); 2668 p = xdr_inline_decode(xdr, 4);
2687 READ32(*res); 2669 if (unlikely(!p))
2670 goto out_overflow;
2671 *res = be32_to_cpup(p);
2688 bitmap[0] &= ~FATTR4_WORD0_SYMLINK_SUPPORT; 2672 bitmap[0] &= ~FATTR4_WORD0_SYMLINK_SUPPORT;
2689 } 2673 }
2690 dprintk("%s: symlink support=%s\n", __func__, *res == 0 ? "false" : "true"); 2674 dprintk("%s: symlink support=%s\n", __func__, *res == 0 ? "false" : "true");
2691 return 0; 2675 return 0;
2676out_overflow:
2677 print_overflow_msg(__func__, xdr);
2678 return -EIO;
2692} 2679}
2693 2680
2694static int decode_attr_fsid(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs_fsid *fsid) 2681static int decode_attr_fsid(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs_fsid *fsid)
@@ -2701,9 +2688,11 @@ static int decode_attr_fsid(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs
2701 if (unlikely(bitmap[0] & (FATTR4_WORD0_FSID - 1U))) 2688 if (unlikely(bitmap[0] & (FATTR4_WORD0_FSID - 1U)))
2702 return -EIO; 2689 return -EIO;
2703 if (likely(bitmap[0] & FATTR4_WORD0_FSID)) { 2690 if (likely(bitmap[0] & FATTR4_WORD0_FSID)) {
2704 READ_BUF(16); 2691 p = xdr_inline_decode(xdr, 16);
2705 READ64(fsid->major); 2692 if (unlikely(!p))
2706 READ64(fsid->minor); 2693 goto out_overflow;
2694 p = xdr_decode_hyper(p, &fsid->major);
2695 xdr_decode_hyper(p, &fsid->minor);
2707 bitmap[0] &= ~FATTR4_WORD0_FSID; 2696 bitmap[0] &= ~FATTR4_WORD0_FSID;
2708 ret = NFS_ATTR_FATTR_FSID; 2697 ret = NFS_ATTR_FATTR_FSID;
2709 } 2698 }
@@ -2711,6 +2700,9 @@ static int decode_attr_fsid(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs
2711 (unsigned long long)fsid->major, 2700 (unsigned long long)fsid->major,
2712 (unsigned long long)fsid->minor); 2701 (unsigned long long)fsid->minor);
2713 return ret; 2702 return ret;
2703out_overflow:
2704 print_overflow_msg(__func__, xdr);
2705 return -EIO;
2714} 2706}
2715 2707
2716static int decode_attr_lease_time(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res) 2708static int decode_attr_lease_time(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res)
@@ -2721,12 +2713,17 @@ static int decode_attr_lease_time(struct xdr_stream *xdr, uint32_t *bitmap, uint
2721 if (unlikely(bitmap[0] & (FATTR4_WORD0_LEASE_TIME - 1U))) 2713 if (unlikely(bitmap[0] & (FATTR4_WORD0_LEASE_TIME - 1U)))
2722 return -EIO; 2714 return -EIO;
2723 if (likely(bitmap[0] & FATTR4_WORD0_LEASE_TIME)) { 2715 if (likely(bitmap[0] & FATTR4_WORD0_LEASE_TIME)) {
2724 READ_BUF(4); 2716 p = xdr_inline_decode(xdr, 4);
2725 READ32(*res); 2717 if (unlikely(!p))
2718 goto out_overflow;
2719 *res = be32_to_cpup(p);
2726 bitmap[0] &= ~FATTR4_WORD0_LEASE_TIME; 2720 bitmap[0] &= ~FATTR4_WORD0_LEASE_TIME;
2727 } 2721 }
2728 dprintk("%s: file size=%u\n", __func__, (unsigned int)*res); 2722 dprintk("%s: file size=%u\n", __func__, (unsigned int)*res);
2729 return 0; 2723 return 0;
2724out_overflow:
2725 print_overflow_msg(__func__, xdr);
2726 return -EIO;
2730} 2727}
2731 2728
2732static int decode_attr_aclsupport(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res) 2729static int decode_attr_aclsupport(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res)
@@ -2737,12 +2734,17 @@ static int decode_attr_aclsupport(struct xdr_stream *xdr, uint32_t *bitmap, uint
2737 if (unlikely(bitmap[0] & (FATTR4_WORD0_ACLSUPPORT - 1U))) 2734 if (unlikely(bitmap[0] & (FATTR4_WORD0_ACLSUPPORT - 1U)))
2738 return -EIO; 2735 return -EIO;
2739 if (likely(bitmap[0] & FATTR4_WORD0_ACLSUPPORT)) { 2736 if (likely(bitmap[0] & FATTR4_WORD0_ACLSUPPORT)) {
2740 READ_BUF(4); 2737 p = xdr_inline_decode(xdr, 4);
2741 READ32(*res); 2738 if (unlikely(!p))
2739 goto out_overflow;
2740 *res = be32_to_cpup(p);
2742 bitmap[0] &= ~FATTR4_WORD0_ACLSUPPORT; 2741 bitmap[0] &= ~FATTR4_WORD0_ACLSUPPORT;
2743 } 2742 }
2744 dprintk("%s: ACLs supported=%u\n", __func__, (unsigned int)*res); 2743 dprintk("%s: ACLs supported=%u\n", __func__, (unsigned int)*res);
2745 return 0; 2744 return 0;
2745out_overflow:
2746 print_overflow_msg(__func__, xdr);
2747 return -EIO;
2746} 2748}
2747 2749
2748static int decode_attr_fileid(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *fileid) 2750static int decode_attr_fileid(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *fileid)
@@ -2754,13 +2756,18 @@ static int decode_attr_fileid(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t
2754 if (unlikely(bitmap[0] & (FATTR4_WORD0_FILEID - 1U))) 2756 if (unlikely(bitmap[0] & (FATTR4_WORD0_FILEID - 1U)))
2755 return -EIO; 2757 return -EIO;
2756 if (likely(bitmap[0] & FATTR4_WORD0_FILEID)) { 2758 if (likely(bitmap[0] & FATTR4_WORD0_FILEID)) {
2757 READ_BUF(8); 2759 p = xdr_inline_decode(xdr, 8);
2758 READ64(*fileid); 2760 if (unlikely(!p))
2761 goto out_overflow;
2762 xdr_decode_hyper(p, fileid);
2759 bitmap[0] &= ~FATTR4_WORD0_FILEID; 2763 bitmap[0] &= ~FATTR4_WORD0_FILEID;
2760 ret = NFS_ATTR_FATTR_FILEID; 2764 ret = NFS_ATTR_FATTR_FILEID;
2761 } 2765 }
2762 dprintk("%s: fileid=%Lu\n", __func__, (unsigned long long)*fileid); 2766 dprintk("%s: fileid=%Lu\n", __func__, (unsigned long long)*fileid);
2763 return ret; 2767 return ret;
2768out_overflow:
2769 print_overflow_msg(__func__, xdr);
2770 return -EIO;
2764} 2771}
2765 2772
2766static int decode_attr_mounted_on_fileid(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *fileid) 2773static int decode_attr_mounted_on_fileid(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *fileid)
@@ -2772,13 +2779,18 @@ static int decode_attr_mounted_on_fileid(struct xdr_stream *xdr, uint32_t *bitma
2772 if (unlikely(bitmap[1] & (FATTR4_WORD1_MOUNTED_ON_FILEID - 1U))) 2779 if (unlikely(bitmap[1] & (FATTR4_WORD1_MOUNTED_ON_FILEID - 1U)))
2773 return -EIO; 2780 return -EIO;
2774 if (likely(bitmap[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)) { 2781 if (likely(bitmap[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)) {
2775 READ_BUF(8); 2782 p = xdr_inline_decode(xdr, 8);
2776 READ64(*fileid); 2783 if (unlikely(!p))
2784 goto out_overflow;
2785 xdr_decode_hyper(p, fileid);
2777 bitmap[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID; 2786 bitmap[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID;
2778 ret = NFS_ATTR_FATTR_FILEID; 2787 ret = NFS_ATTR_FATTR_FILEID;
2779 } 2788 }
2780 dprintk("%s: fileid=%Lu\n", __func__, (unsigned long long)*fileid); 2789 dprintk("%s: fileid=%Lu\n", __func__, (unsigned long long)*fileid);
2781 return ret; 2790 return ret;
2791out_overflow:
2792 print_overflow_msg(__func__, xdr);
2793 return -EIO;
2782} 2794}
2783 2795
2784static int decode_attr_files_avail(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res) 2796static int decode_attr_files_avail(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res)
@@ -2790,12 +2802,17 @@ static int decode_attr_files_avail(struct xdr_stream *xdr, uint32_t *bitmap, uin
2790 if (unlikely(bitmap[0] & (FATTR4_WORD0_FILES_AVAIL - 1U))) 2802 if (unlikely(bitmap[0] & (FATTR4_WORD0_FILES_AVAIL - 1U)))
2791 return -EIO; 2803 return -EIO;
2792 if (likely(bitmap[0] & FATTR4_WORD0_FILES_AVAIL)) { 2804 if (likely(bitmap[0] & FATTR4_WORD0_FILES_AVAIL)) {
2793 READ_BUF(8); 2805 p = xdr_inline_decode(xdr, 8);
2794 READ64(*res); 2806 if (unlikely(!p))
2807 goto out_overflow;
2808 xdr_decode_hyper(p, res);
2795 bitmap[0] &= ~FATTR4_WORD0_FILES_AVAIL; 2809 bitmap[0] &= ~FATTR4_WORD0_FILES_AVAIL;
2796 } 2810 }
2797 dprintk("%s: files avail=%Lu\n", __func__, (unsigned long long)*res); 2811 dprintk("%s: files avail=%Lu\n", __func__, (unsigned long long)*res);
2798 return status; 2812 return status;
2813out_overflow:
2814 print_overflow_msg(__func__, xdr);
2815 return -EIO;
2799} 2816}
2800 2817
2801static int decode_attr_files_free(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res) 2818static int decode_attr_files_free(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res)
@@ -2807,12 +2824,17 @@ static int decode_attr_files_free(struct xdr_stream *xdr, uint32_t *bitmap, uint
2807 if (unlikely(bitmap[0] & (FATTR4_WORD0_FILES_FREE - 1U))) 2824 if (unlikely(bitmap[0] & (FATTR4_WORD0_FILES_FREE - 1U)))
2808 return -EIO; 2825 return -EIO;
2809 if (likely(bitmap[0] & FATTR4_WORD0_FILES_FREE)) { 2826 if (likely(bitmap[0] & FATTR4_WORD0_FILES_FREE)) {
2810 READ_BUF(8); 2827 p = xdr_inline_decode(xdr, 8);
2811 READ64(*res); 2828 if (unlikely(!p))
2829 goto out_overflow;
2830 xdr_decode_hyper(p, res);
2812 bitmap[0] &= ~FATTR4_WORD0_FILES_FREE; 2831 bitmap[0] &= ~FATTR4_WORD0_FILES_FREE;
2813 } 2832 }
2814 dprintk("%s: files free=%Lu\n", __func__, (unsigned long long)*res); 2833 dprintk("%s: files free=%Lu\n", __func__, (unsigned long long)*res);
2815 return status; 2834 return status;
2835out_overflow:
2836 print_overflow_msg(__func__, xdr);
2837 return -EIO;
2816} 2838}
2817 2839
2818static int decode_attr_files_total(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res) 2840static int decode_attr_files_total(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res)
@@ -2824,12 +2846,17 @@ static int decode_attr_files_total(struct xdr_stream *xdr, uint32_t *bitmap, uin
2824 if (unlikely(bitmap[0] & (FATTR4_WORD0_FILES_TOTAL - 1U))) 2846 if (unlikely(bitmap[0] & (FATTR4_WORD0_FILES_TOTAL - 1U)))
2825 return -EIO; 2847 return -EIO;
2826 if (likely(bitmap[0] & FATTR4_WORD0_FILES_TOTAL)) { 2848 if (likely(bitmap[0] & FATTR4_WORD0_FILES_TOTAL)) {
2827 READ_BUF(8); 2849 p = xdr_inline_decode(xdr, 8);
2828 READ64(*res); 2850 if (unlikely(!p))
2851 goto out_overflow;
2852 xdr_decode_hyper(p, res);
2829 bitmap[0] &= ~FATTR4_WORD0_FILES_TOTAL; 2853 bitmap[0] &= ~FATTR4_WORD0_FILES_TOTAL;
2830 } 2854 }
2831 dprintk("%s: files total=%Lu\n", __func__, (unsigned long long)*res); 2855 dprintk("%s: files total=%Lu\n", __func__, (unsigned long long)*res);
2832 return status; 2856 return status;
2857out_overflow:
2858 print_overflow_msg(__func__, xdr);
2859 return -EIO;
2833} 2860}
2834 2861
2835static int decode_pathname(struct xdr_stream *xdr, struct nfs4_pathname *path) 2862static int decode_pathname(struct xdr_stream *xdr, struct nfs4_pathname *path)
@@ -2838,8 +2865,10 @@ static int decode_pathname(struct xdr_stream *xdr, struct nfs4_pathname *path)
2838 __be32 *p; 2865 __be32 *p;
2839 int status = 0; 2866 int status = 0;
2840 2867
2841 READ_BUF(4); 2868 p = xdr_inline_decode(xdr, 4);
2842 READ32(n); 2869 if (unlikely(!p))
2870 goto out_overflow;
2871 n = be32_to_cpup(p);
2843 if (n == 0) 2872 if (n == 0)
2844 goto root_path; 2873 goto root_path;
2845 dprintk("path "); 2874 dprintk("path ");
@@ -2873,6 +2902,9 @@ out_eio:
2873 dprintk(" status %d", status); 2902 dprintk(" status %d", status);
2874 status = -EIO; 2903 status = -EIO;
2875 goto out; 2904 goto out;
2905out_overflow:
2906 print_overflow_msg(__func__, xdr);
2907 return -EIO;
2876} 2908}
2877 2909
2878static int decode_attr_fs_locations(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs4_fs_locations *res) 2910static int decode_attr_fs_locations(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs4_fs_locations *res)
@@ -2890,8 +2922,10 @@ static int decode_attr_fs_locations(struct xdr_stream *xdr, uint32_t *bitmap, st
2890 status = decode_pathname(xdr, &res->fs_path); 2922 status = decode_pathname(xdr, &res->fs_path);
2891 if (unlikely(status != 0)) 2923 if (unlikely(status != 0))
2892 goto out; 2924 goto out;
2893 READ_BUF(4); 2925 p = xdr_inline_decode(xdr, 4);
2894 READ32(n); 2926 if (unlikely(!p))
2927 goto out_overflow;
2928 n = be32_to_cpup(p);
2895 if (n <= 0) 2929 if (n <= 0)
2896 goto out_eio; 2930 goto out_eio;
2897 res->nlocations = 0; 2931 res->nlocations = 0;
@@ -2899,8 +2933,10 @@ static int decode_attr_fs_locations(struct xdr_stream *xdr, uint32_t *bitmap, st
2899 u32 m; 2933 u32 m;
2900 struct nfs4_fs_location *loc = &res->locations[res->nlocations]; 2934 struct nfs4_fs_location *loc = &res->locations[res->nlocations];
2901 2935
2902 READ_BUF(4); 2936 p = xdr_inline_decode(xdr, 4);
2903 READ32(m); 2937 if (unlikely(!p))
2938 goto out_overflow;
2939 m = be32_to_cpup(p);
2904 2940
2905 loc->nservers = 0; 2941 loc->nservers = 0;
2906 dprintk("%s: servers ", __func__); 2942 dprintk("%s: servers ", __func__);
@@ -2939,6 +2975,8 @@ static int decode_attr_fs_locations(struct xdr_stream *xdr, uint32_t *bitmap, st
2939out: 2975out:
2940 dprintk("%s: fs_locations done, error = %d\n", __func__, status); 2976 dprintk("%s: fs_locations done, error = %d\n", __func__, status);
2941 return status; 2977 return status;
2978out_overflow:
2979 print_overflow_msg(__func__, xdr);
2942out_eio: 2980out_eio:
2943 status = -EIO; 2981 status = -EIO;
2944 goto out; 2982 goto out;
@@ -2953,12 +2991,17 @@ static int decode_attr_maxfilesize(struct xdr_stream *xdr, uint32_t *bitmap, uin
2953 if (unlikely(bitmap[0] & (FATTR4_WORD0_MAXFILESIZE - 1U))) 2991 if (unlikely(bitmap[0] & (FATTR4_WORD0_MAXFILESIZE - 1U)))
2954 return -EIO; 2992 return -EIO;
2955 if (likely(bitmap[0] & FATTR4_WORD0_MAXFILESIZE)) { 2993 if (likely(bitmap[0] & FATTR4_WORD0_MAXFILESIZE)) {
2956 READ_BUF(8); 2994 p = xdr_inline_decode(xdr, 8);
2957 READ64(*res); 2995 if (unlikely(!p))
2996 goto out_overflow;
2997 xdr_decode_hyper(p, res);
2958 bitmap[0] &= ~FATTR4_WORD0_MAXFILESIZE; 2998 bitmap[0] &= ~FATTR4_WORD0_MAXFILESIZE;
2959 } 2999 }
2960 dprintk("%s: maxfilesize=%Lu\n", __func__, (unsigned long long)*res); 3000 dprintk("%s: maxfilesize=%Lu\n", __func__, (unsigned long long)*res);
2961 return status; 3001 return status;
3002out_overflow:
3003 print_overflow_msg(__func__, xdr);
3004 return -EIO;
2962} 3005}
2963 3006
2964static int decode_attr_maxlink(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *maxlink) 3007static int decode_attr_maxlink(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *maxlink)
@@ -2970,12 +3013,17 @@ static int decode_attr_maxlink(struct xdr_stream *xdr, uint32_t *bitmap, uint32_
2970 if (unlikely(bitmap[0] & (FATTR4_WORD0_MAXLINK - 1U))) 3013 if (unlikely(bitmap[0] & (FATTR4_WORD0_MAXLINK - 1U)))
2971 return -EIO; 3014 return -EIO;
2972 if (likely(bitmap[0] & FATTR4_WORD0_MAXLINK)) { 3015 if (likely(bitmap[0] & FATTR4_WORD0_MAXLINK)) {
2973 READ_BUF(4); 3016 p = xdr_inline_decode(xdr, 4);
2974 READ32(*maxlink); 3017 if (unlikely(!p))
3018 goto out_overflow;
3019 *maxlink = be32_to_cpup(p);
2975 bitmap[0] &= ~FATTR4_WORD0_MAXLINK; 3020 bitmap[0] &= ~FATTR4_WORD0_MAXLINK;
2976 } 3021 }
2977 dprintk("%s: maxlink=%u\n", __func__, *maxlink); 3022 dprintk("%s: maxlink=%u\n", __func__, *maxlink);
2978 return status; 3023 return status;
3024out_overflow:
3025 print_overflow_msg(__func__, xdr);
3026 return -EIO;
2979} 3027}
2980 3028
2981static int decode_attr_maxname(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *maxname) 3029static int decode_attr_maxname(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *maxname)
@@ -2987,12 +3035,17 @@ static int decode_attr_maxname(struct xdr_stream *xdr, uint32_t *bitmap, uint32_
2987 if (unlikely(bitmap[0] & (FATTR4_WORD0_MAXNAME - 1U))) 3035 if (unlikely(bitmap[0] & (FATTR4_WORD0_MAXNAME - 1U)))
2988 return -EIO; 3036 return -EIO;
2989 if (likely(bitmap[0] & FATTR4_WORD0_MAXNAME)) { 3037 if (likely(bitmap[0] & FATTR4_WORD0_MAXNAME)) {
2990 READ_BUF(4); 3038 p = xdr_inline_decode(xdr, 4);
2991 READ32(*maxname); 3039 if (unlikely(!p))
3040 goto out_overflow;
3041 *maxname = be32_to_cpup(p);
2992 bitmap[0] &= ~FATTR4_WORD0_MAXNAME; 3042 bitmap[0] &= ~FATTR4_WORD0_MAXNAME;
2993 } 3043 }
2994 dprintk("%s: maxname=%u\n", __func__, *maxname); 3044 dprintk("%s: maxname=%u\n", __func__, *maxname);
2995 return status; 3045 return status;
3046out_overflow:
3047 print_overflow_msg(__func__, xdr);
3048 return -EIO;
2996} 3049}
2997 3050
2998static int decode_attr_maxread(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res) 3051static int decode_attr_maxread(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res)
@@ -3005,8 +3058,10 @@ static int decode_attr_maxread(struct xdr_stream *xdr, uint32_t *bitmap, uint32_
3005 return -EIO; 3058 return -EIO;
3006 if (likely(bitmap[0] & FATTR4_WORD0_MAXREAD)) { 3059 if (likely(bitmap[0] & FATTR4_WORD0_MAXREAD)) {
3007 uint64_t maxread; 3060 uint64_t maxread;
3008 READ_BUF(8); 3061 p = xdr_inline_decode(xdr, 8);
3009 READ64(maxread); 3062 if (unlikely(!p))
3063 goto out_overflow;
3064 xdr_decode_hyper(p, &maxread);
3010 if (maxread > 0x7FFFFFFF) 3065 if (maxread > 0x7FFFFFFF)
3011 maxread = 0x7FFFFFFF; 3066 maxread = 0x7FFFFFFF;
3012 *res = (uint32_t)maxread; 3067 *res = (uint32_t)maxread;
@@ -3014,6 +3069,9 @@ static int decode_attr_maxread(struct xdr_stream *xdr, uint32_t *bitmap, uint32_
3014 } 3069 }
3015 dprintk("%s: maxread=%lu\n", __func__, (unsigned long)*res); 3070 dprintk("%s: maxread=%lu\n", __func__, (unsigned long)*res);
3016 return status; 3071 return status;
3072out_overflow:
3073 print_overflow_msg(__func__, xdr);
3074 return -EIO;
3017} 3075}
3018 3076
3019static int decode_attr_maxwrite(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res) 3077static int decode_attr_maxwrite(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res)
@@ -3026,8 +3084,10 @@ static int decode_attr_maxwrite(struct xdr_stream *xdr, uint32_t *bitmap, uint32
3026 return -EIO; 3084 return -EIO;
3027 if (likely(bitmap[0] & FATTR4_WORD0_MAXWRITE)) { 3085 if (likely(bitmap[0] & FATTR4_WORD0_MAXWRITE)) {
3028 uint64_t maxwrite; 3086 uint64_t maxwrite;
3029 READ_BUF(8); 3087 p = xdr_inline_decode(xdr, 8);
3030 READ64(maxwrite); 3088 if (unlikely(!p))
3089 goto out_overflow;
3090 xdr_decode_hyper(p, &maxwrite);
3031 if (maxwrite > 0x7FFFFFFF) 3091 if (maxwrite > 0x7FFFFFFF)
3032 maxwrite = 0x7FFFFFFF; 3092 maxwrite = 0x7FFFFFFF;
3033 *res = (uint32_t)maxwrite; 3093 *res = (uint32_t)maxwrite;
@@ -3035,6 +3095,9 @@ static int decode_attr_maxwrite(struct xdr_stream *xdr, uint32_t *bitmap, uint32
3035 } 3095 }
3036 dprintk("%s: maxwrite=%lu\n", __func__, (unsigned long)*res); 3096 dprintk("%s: maxwrite=%lu\n", __func__, (unsigned long)*res);
3037 return status; 3097 return status;
3098out_overflow:
3099 print_overflow_msg(__func__, xdr);
3100 return -EIO;
3038} 3101}
3039 3102
3040static int decode_attr_mode(struct xdr_stream *xdr, uint32_t *bitmap, umode_t *mode) 3103static int decode_attr_mode(struct xdr_stream *xdr, uint32_t *bitmap, umode_t *mode)
@@ -3047,14 +3110,19 @@ static int decode_attr_mode(struct xdr_stream *xdr, uint32_t *bitmap, umode_t *m
3047 if (unlikely(bitmap[1] & (FATTR4_WORD1_MODE - 1U))) 3110 if (unlikely(bitmap[1] & (FATTR4_WORD1_MODE - 1U)))
3048 return -EIO; 3111 return -EIO;
3049 if (likely(bitmap[1] & FATTR4_WORD1_MODE)) { 3112 if (likely(bitmap[1] & FATTR4_WORD1_MODE)) {
3050 READ_BUF(4); 3113 p = xdr_inline_decode(xdr, 4);
3051 READ32(tmp); 3114 if (unlikely(!p))
3115 goto out_overflow;
3116 tmp = be32_to_cpup(p);
3052 *mode = tmp & ~S_IFMT; 3117 *mode = tmp & ~S_IFMT;
3053 bitmap[1] &= ~FATTR4_WORD1_MODE; 3118 bitmap[1] &= ~FATTR4_WORD1_MODE;
3054 ret = NFS_ATTR_FATTR_MODE; 3119 ret = NFS_ATTR_FATTR_MODE;
3055 } 3120 }
3056 dprintk("%s: file mode=0%o\n", __func__, (unsigned int)*mode); 3121 dprintk("%s: file mode=0%o\n", __func__, (unsigned int)*mode);
3057 return ret; 3122 return ret;
3123out_overflow:
3124 print_overflow_msg(__func__, xdr);
3125 return -EIO;
3058} 3126}
3059 3127
3060static int decode_attr_nlink(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *nlink) 3128static int decode_attr_nlink(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *nlink)
@@ -3066,16 +3134,22 @@ static int decode_attr_nlink(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t
3066 if (unlikely(bitmap[1] & (FATTR4_WORD1_NUMLINKS - 1U))) 3134 if (unlikely(bitmap[1] & (FATTR4_WORD1_NUMLINKS - 1U)))
3067 return -EIO; 3135 return -EIO;
3068 if (likely(bitmap[1] & FATTR4_WORD1_NUMLINKS)) { 3136 if (likely(bitmap[1] & FATTR4_WORD1_NUMLINKS)) {
3069 READ_BUF(4); 3137 p = xdr_inline_decode(xdr, 4);
3070 READ32(*nlink); 3138 if (unlikely(!p))
3139 goto out_overflow;
3140 *nlink = be32_to_cpup(p);
3071 bitmap[1] &= ~FATTR4_WORD1_NUMLINKS; 3141 bitmap[1] &= ~FATTR4_WORD1_NUMLINKS;
3072 ret = NFS_ATTR_FATTR_NLINK; 3142 ret = NFS_ATTR_FATTR_NLINK;
3073 } 3143 }
3074 dprintk("%s: nlink=%u\n", __func__, (unsigned int)*nlink); 3144 dprintk("%s: nlink=%u\n", __func__, (unsigned int)*nlink);
3075 return ret; 3145 return ret;
3146out_overflow:
3147 print_overflow_msg(__func__, xdr);
3148 return -EIO;
3076} 3149}
3077 3150
3078static int decode_attr_owner(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs_client *clp, uint32_t *uid) 3151static int decode_attr_owner(struct xdr_stream *xdr, uint32_t *bitmap,
3152 struct nfs_client *clp, uint32_t *uid, int may_sleep)
3079{ 3153{
3080 uint32_t len; 3154 uint32_t len;
3081 __be32 *p; 3155 __be32 *p;
@@ -3085,10 +3159,16 @@ static int decode_attr_owner(struct xdr_stream *xdr, uint32_t *bitmap, struct nf
3085 if (unlikely(bitmap[1] & (FATTR4_WORD1_OWNER - 1U))) 3159 if (unlikely(bitmap[1] & (FATTR4_WORD1_OWNER - 1U)))
3086 return -EIO; 3160 return -EIO;
3087 if (likely(bitmap[1] & FATTR4_WORD1_OWNER)) { 3161 if (likely(bitmap[1] & FATTR4_WORD1_OWNER)) {
3088 READ_BUF(4); 3162 p = xdr_inline_decode(xdr, 4);
3089 READ32(len); 3163 if (unlikely(!p))
3090 READ_BUF(len); 3164 goto out_overflow;
3091 if (len < XDR_MAX_NETOBJ) { 3165 len = be32_to_cpup(p);
3166 p = xdr_inline_decode(xdr, len);
3167 if (unlikely(!p))
3168 goto out_overflow;
3169 if (!may_sleep) {
3170 /* do nothing */
3171 } else if (len < XDR_MAX_NETOBJ) {
3092 if (nfs_map_name_to_uid(clp, (char *)p, len, uid) == 0) 3172 if (nfs_map_name_to_uid(clp, (char *)p, len, uid) == 0)
3093 ret = NFS_ATTR_FATTR_OWNER; 3173 ret = NFS_ATTR_FATTR_OWNER;
3094 else 3174 else
@@ -3101,9 +3181,13 @@ static int decode_attr_owner(struct xdr_stream *xdr, uint32_t *bitmap, struct nf
3101 } 3181 }
3102 dprintk("%s: uid=%d\n", __func__, (int)*uid); 3182 dprintk("%s: uid=%d\n", __func__, (int)*uid);
3103 return ret; 3183 return ret;
3184out_overflow:
3185 print_overflow_msg(__func__, xdr);
3186 return -EIO;
3104} 3187}
3105 3188
3106static int decode_attr_group(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs_client *clp, uint32_t *gid) 3189static int decode_attr_group(struct xdr_stream *xdr, uint32_t *bitmap,
3190 struct nfs_client *clp, uint32_t *gid, int may_sleep)
3107{ 3191{
3108 uint32_t len; 3192 uint32_t len;
3109 __be32 *p; 3193 __be32 *p;
@@ -3113,10 +3197,16 @@ static int decode_attr_group(struct xdr_stream *xdr, uint32_t *bitmap, struct nf
3113 if (unlikely(bitmap[1] & (FATTR4_WORD1_OWNER_GROUP - 1U))) 3197 if (unlikely(bitmap[1] & (FATTR4_WORD1_OWNER_GROUP - 1U)))
3114 return -EIO; 3198 return -EIO;
3115 if (likely(bitmap[1] & FATTR4_WORD1_OWNER_GROUP)) { 3199 if (likely(bitmap[1] & FATTR4_WORD1_OWNER_GROUP)) {
3116 READ_BUF(4); 3200 p = xdr_inline_decode(xdr, 4);
3117 READ32(len); 3201 if (unlikely(!p))
3118 READ_BUF(len); 3202 goto out_overflow;
3119 if (len < XDR_MAX_NETOBJ) { 3203 len = be32_to_cpup(p);
3204 p = xdr_inline_decode(xdr, len);
3205 if (unlikely(!p))
3206 goto out_overflow;
3207 if (!may_sleep) {
3208 /* do nothing */
3209 } else if (len < XDR_MAX_NETOBJ) {
3120 if (nfs_map_group_to_gid(clp, (char *)p, len, gid) == 0) 3210 if (nfs_map_group_to_gid(clp, (char *)p, len, gid) == 0)
3121 ret = NFS_ATTR_FATTR_GROUP; 3211 ret = NFS_ATTR_FATTR_GROUP;
3122 else 3212 else
@@ -3129,6 +3219,9 @@ static int decode_attr_group(struct xdr_stream *xdr, uint32_t *bitmap, struct nf
3129 } 3219 }
3130 dprintk("%s: gid=%d\n", __func__, (int)*gid); 3220 dprintk("%s: gid=%d\n", __func__, (int)*gid);
3131 return ret; 3221 return ret;
3222out_overflow:
3223 print_overflow_msg(__func__, xdr);
3224 return -EIO;
3132} 3225}
3133 3226
3134static int decode_attr_rdev(struct xdr_stream *xdr, uint32_t *bitmap, dev_t *rdev) 3227static int decode_attr_rdev(struct xdr_stream *xdr, uint32_t *bitmap, dev_t *rdev)
@@ -3143,9 +3236,11 @@ static int decode_attr_rdev(struct xdr_stream *xdr, uint32_t *bitmap, dev_t *rde
3143 if (likely(bitmap[1] & FATTR4_WORD1_RAWDEV)) { 3236 if (likely(bitmap[1] & FATTR4_WORD1_RAWDEV)) {
3144 dev_t tmp; 3237 dev_t tmp;
3145 3238
3146 READ_BUF(8); 3239 p = xdr_inline_decode(xdr, 8);
3147 READ32(major); 3240 if (unlikely(!p))
3148 READ32(minor); 3241 goto out_overflow;
3242 major = be32_to_cpup(p++);
3243 minor = be32_to_cpup(p);
3149 tmp = MKDEV(major, minor); 3244 tmp = MKDEV(major, minor);
3150 if (MAJOR(tmp) == major && MINOR(tmp) == minor) 3245 if (MAJOR(tmp) == major && MINOR(tmp) == minor)
3151 *rdev = tmp; 3246 *rdev = tmp;
@@ -3154,6 +3249,9 @@ static int decode_attr_rdev(struct xdr_stream *xdr, uint32_t *bitmap, dev_t *rde
3154 } 3249 }
3155 dprintk("%s: rdev=(0x%x:0x%x)\n", __func__, major, minor); 3250 dprintk("%s: rdev=(0x%x:0x%x)\n", __func__, major, minor);
3156 return ret; 3251 return ret;
3252out_overflow:
3253 print_overflow_msg(__func__, xdr);
3254 return -EIO;
3157} 3255}
3158 3256
3159static int decode_attr_space_avail(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res) 3257static int decode_attr_space_avail(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res)
@@ -3165,12 +3263,17 @@ static int decode_attr_space_avail(struct xdr_stream *xdr, uint32_t *bitmap, uin
3165 if (unlikely(bitmap[1] & (FATTR4_WORD1_SPACE_AVAIL - 1U))) 3263 if (unlikely(bitmap[1] & (FATTR4_WORD1_SPACE_AVAIL - 1U)))
3166 return -EIO; 3264 return -EIO;
3167 if (likely(bitmap[1] & FATTR4_WORD1_SPACE_AVAIL)) { 3265 if (likely(bitmap[1] & FATTR4_WORD1_SPACE_AVAIL)) {
3168 READ_BUF(8); 3266 p = xdr_inline_decode(xdr, 8);
3169 READ64(*res); 3267 if (unlikely(!p))
3268 goto out_overflow;
3269 xdr_decode_hyper(p, res);
3170 bitmap[1] &= ~FATTR4_WORD1_SPACE_AVAIL; 3270 bitmap[1] &= ~FATTR4_WORD1_SPACE_AVAIL;
3171 } 3271 }
3172 dprintk("%s: space avail=%Lu\n", __func__, (unsigned long long)*res); 3272 dprintk("%s: space avail=%Lu\n", __func__, (unsigned long long)*res);
3173 return status; 3273 return status;
3274out_overflow:
3275 print_overflow_msg(__func__, xdr);
3276 return -EIO;
3174} 3277}
3175 3278
3176static int decode_attr_space_free(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res) 3279static int decode_attr_space_free(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res)
@@ -3182,12 +3285,17 @@ static int decode_attr_space_free(struct xdr_stream *xdr, uint32_t *bitmap, uint
3182 if (unlikely(bitmap[1] & (FATTR4_WORD1_SPACE_FREE - 1U))) 3285 if (unlikely(bitmap[1] & (FATTR4_WORD1_SPACE_FREE - 1U)))
3183 return -EIO; 3286 return -EIO;
3184 if (likely(bitmap[1] & FATTR4_WORD1_SPACE_FREE)) { 3287 if (likely(bitmap[1] & FATTR4_WORD1_SPACE_FREE)) {
3185 READ_BUF(8); 3288 p = xdr_inline_decode(xdr, 8);
3186 READ64(*res); 3289 if (unlikely(!p))
3290 goto out_overflow;
3291 xdr_decode_hyper(p, res);
3187 bitmap[1] &= ~FATTR4_WORD1_SPACE_FREE; 3292 bitmap[1] &= ~FATTR4_WORD1_SPACE_FREE;
3188 } 3293 }
3189 dprintk("%s: space free=%Lu\n", __func__, (unsigned long long)*res); 3294 dprintk("%s: space free=%Lu\n", __func__, (unsigned long long)*res);
3190 return status; 3295 return status;
3296out_overflow:
3297 print_overflow_msg(__func__, xdr);
3298 return -EIO;
3191} 3299}
3192 3300
3193static int decode_attr_space_total(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res) 3301static int decode_attr_space_total(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res)
@@ -3199,12 +3307,17 @@ static int decode_attr_space_total(struct xdr_stream *xdr, uint32_t *bitmap, uin
3199 if (unlikely(bitmap[1] & (FATTR4_WORD1_SPACE_TOTAL - 1U))) 3307 if (unlikely(bitmap[1] & (FATTR4_WORD1_SPACE_TOTAL - 1U)))
3200 return -EIO; 3308 return -EIO;
3201 if (likely(bitmap[1] & FATTR4_WORD1_SPACE_TOTAL)) { 3309 if (likely(bitmap[1] & FATTR4_WORD1_SPACE_TOTAL)) {
3202 READ_BUF(8); 3310 p = xdr_inline_decode(xdr, 8);
3203 READ64(*res); 3311 if (unlikely(!p))
3312 goto out_overflow;
3313 xdr_decode_hyper(p, res);
3204 bitmap[1] &= ~FATTR4_WORD1_SPACE_TOTAL; 3314 bitmap[1] &= ~FATTR4_WORD1_SPACE_TOTAL;
3205 } 3315 }
3206 dprintk("%s: space total=%Lu\n", __func__, (unsigned long long)*res); 3316 dprintk("%s: space total=%Lu\n", __func__, (unsigned long long)*res);
3207 return status; 3317 return status;
3318out_overflow:
3319 print_overflow_msg(__func__, xdr);
3320 return -EIO;
3208} 3321}
3209 3322
3210static int decode_attr_space_used(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *used) 3323static int decode_attr_space_used(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *used)
@@ -3216,14 +3329,19 @@ static int decode_attr_space_used(struct xdr_stream *xdr, uint32_t *bitmap, uint
3216 if (unlikely(bitmap[1] & (FATTR4_WORD1_SPACE_USED - 1U))) 3329 if (unlikely(bitmap[1] & (FATTR4_WORD1_SPACE_USED - 1U)))
3217 return -EIO; 3330 return -EIO;
3218 if (likely(bitmap[1] & FATTR4_WORD1_SPACE_USED)) { 3331 if (likely(bitmap[1] & FATTR4_WORD1_SPACE_USED)) {
3219 READ_BUF(8); 3332 p = xdr_inline_decode(xdr, 8);
3220 READ64(*used); 3333 if (unlikely(!p))
3334 goto out_overflow;
3335 xdr_decode_hyper(p, used);
3221 bitmap[1] &= ~FATTR4_WORD1_SPACE_USED; 3336 bitmap[1] &= ~FATTR4_WORD1_SPACE_USED;
3222 ret = NFS_ATTR_FATTR_SPACE_USED; 3337 ret = NFS_ATTR_FATTR_SPACE_USED;
3223 } 3338 }
3224 dprintk("%s: space used=%Lu\n", __func__, 3339 dprintk("%s: space used=%Lu\n", __func__,
3225 (unsigned long long)*used); 3340 (unsigned long long)*used);
3226 return ret; 3341 return ret;
3342out_overflow:
3343 print_overflow_msg(__func__, xdr);
3344 return -EIO;
3227} 3345}
3228 3346
3229static int decode_attr_time(struct xdr_stream *xdr, struct timespec *time) 3347static int decode_attr_time(struct xdr_stream *xdr, struct timespec *time)
@@ -3232,12 +3350,17 @@ static int decode_attr_time(struct xdr_stream *xdr, struct timespec *time)
3232 uint64_t sec; 3350 uint64_t sec;
3233 uint32_t nsec; 3351 uint32_t nsec;
3234 3352
3235 READ_BUF(12); 3353 p = xdr_inline_decode(xdr, 12);
3236 READ64(sec); 3354 if (unlikely(!p))
3237 READ32(nsec); 3355 goto out_overflow;
3356 p = xdr_decode_hyper(p, &sec);
3357 nsec = be32_to_cpup(p);
3238 time->tv_sec = (time_t)sec; 3358 time->tv_sec = (time_t)sec;
3239 time->tv_nsec = (long)nsec; 3359 time->tv_nsec = (long)nsec;
3240 return 0; 3360 return 0;
3361out_overflow:
3362 print_overflow_msg(__func__, xdr);
3363 return -EIO;
3241} 3364}
3242 3365
3243static int decode_attr_time_access(struct xdr_stream *xdr, uint32_t *bitmap, struct timespec *time) 3366static int decode_attr_time_access(struct xdr_stream *xdr, uint32_t *bitmap, struct timespec *time)
@@ -3315,11 +3438,16 @@ static int decode_change_info(struct xdr_stream *xdr, struct nfs4_change_info *c
3315{ 3438{
3316 __be32 *p; 3439 __be32 *p;
3317 3440
3318 READ_BUF(20); 3441 p = xdr_inline_decode(xdr, 20);
3319 READ32(cinfo->atomic); 3442 if (unlikely(!p))
3320 READ64(cinfo->before); 3443 goto out_overflow;
3321 READ64(cinfo->after); 3444 cinfo->atomic = be32_to_cpup(p++);
3445 p = xdr_decode_hyper(p, &cinfo->before);
3446 xdr_decode_hyper(p, &cinfo->after);
3322 return 0; 3447 return 0;
3448out_overflow:
3449 print_overflow_msg(__func__, xdr);
3450 return -EIO;
3323} 3451}
3324 3452
3325static int decode_access(struct xdr_stream *xdr, struct nfs4_accessres *access) 3453static int decode_access(struct xdr_stream *xdr, struct nfs4_accessres *access)
@@ -3331,40 +3459,62 @@ static int decode_access(struct xdr_stream *xdr, struct nfs4_accessres *access)
3331 status = decode_op_hdr(xdr, OP_ACCESS); 3459 status = decode_op_hdr(xdr, OP_ACCESS);
3332 if (status) 3460 if (status)
3333 return status; 3461 return status;
3334 READ_BUF(8); 3462 p = xdr_inline_decode(xdr, 8);
3335 READ32(supp); 3463 if (unlikely(!p))
3336 READ32(acc); 3464 goto out_overflow;
3465 supp = be32_to_cpup(p++);
3466 acc = be32_to_cpup(p);
3337 access->supported = supp; 3467 access->supported = supp;
3338 access->access = acc; 3468 access->access = acc;
3339 return 0; 3469 return 0;
3470out_overflow:
3471 print_overflow_msg(__func__, xdr);
3472 return -EIO;
3340} 3473}
3341 3474
3342static int decode_close(struct xdr_stream *xdr, struct nfs_closeres *res) 3475static int decode_opaque_fixed(struct xdr_stream *xdr, void *buf, size_t len)
3343{ 3476{
3344 __be32 *p; 3477 __be32 *p;
3478
3479 p = xdr_inline_decode(xdr, len);
3480 if (likely(p)) {
3481 memcpy(buf, p, len);
3482 return 0;
3483 }
3484 print_overflow_msg(__func__, xdr);
3485 return -EIO;
3486}
3487
3488static int decode_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
3489{
3490 return decode_opaque_fixed(xdr, stateid->data, NFS4_STATEID_SIZE);
3491}
3492
3493static int decode_close(struct xdr_stream *xdr, struct nfs_closeres *res)
3494{
3345 int status; 3495 int status;
3346 3496
3347 status = decode_op_hdr(xdr, OP_CLOSE); 3497 status = decode_op_hdr(xdr, OP_CLOSE);
3348 if (status != -EIO) 3498 if (status != -EIO)
3349 nfs_increment_open_seqid(status, res->seqid); 3499 nfs_increment_open_seqid(status, res->seqid);
3350 if (status) 3500 if (!status)
3351 return status; 3501 status = decode_stateid(xdr, &res->stateid);
3352 READ_BUF(NFS4_STATEID_SIZE); 3502 return status;
3353 COPYMEM(res->stateid.data, NFS4_STATEID_SIZE); 3503}
3354 return 0; 3504
3505static int decode_verifier(struct xdr_stream *xdr, void *verifier)
3506{
3507 return decode_opaque_fixed(xdr, verifier, 8);
3355} 3508}
3356 3509
3357static int decode_commit(struct xdr_stream *xdr, struct nfs_writeres *res) 3510static int decode_commit(struct xdr_stream *xdr, struct nfs_writeres *res)
3358{ 3511{
3359 __be32 *p;
3360 int status; 3512 int status;
3361 3513
3362 status = decode_op_hdr(xdr, OP_COMMIT); 3514 status = decode_op_hdr(xdr, OP_COMMIT);
3363 if (status) 3515 if (!status)
3364 return status; 3516 status = decode_verifier(xdr, res->verf->verifier);
3365 READ_BUF(8); 3517 return status;
3366 COPYMEM(res->verf->verifier, 8);
3367 return 0;
3368} 3518}
3369 3519
3370static int decode_create(struct xdr_stream *xdr, struct nfs4_change_info *cinfo) 3520static int decode_create(struct xdr_stream *xdr, struct nfs4_change_info *cinfo)
@@ -3378,10 +3528,16 @@ static int decode_create(struct xdr_stream *xdr, struct nfs4_change_info *cinfo)
3378 return status; 3528 return status;
3379 if ((status = decode_change_info(xdr, cinfo))) 3529 if ((status = decode_change_info(xdr, cinfo)))
3380 return status; 3530 return status;
3381 READ_BUF(4); 3531 p = xdr_inline_decode(xdr, 4);
3382 READ32(bmlen); 3532 if (unlikely(!p))
3383 READ_BUF(bmlen << 2); 3533 goto out_overflow;
3384 return 0; 3534 bmlen = be32_to_cpup(p);
3535 p = xdr_inline_decode(xdr, bmlen << 2);
3536 if (likely(p))
3537 return 0;
3538out_overflow:
3539 print_overflow_msg(__func__, xdr);
3540 return -EIO;
3385} 3541}
3386 3542
3387static int decode_server_caps(struct xdr_stream *xdr, struct nfs4_server_caps_res *res) 3543static int decode_server_caps(struct xdr_stream *xdr, struct nfs4_server_caps_res *res)
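The new decode_opaque_fixed()/decode_stateid()/decode_verifier() helpers above let simple decoders chain the op header check and the fixed-size copy without touching the stream directly, as decode_close() and decode_commit() now do. A hypothetical decoder following that shape (decode_bar, OP_BAR and struct nfs_barres are illustrative only):

static int decode_bar(struct xdr_stream *xdr, struct nfs_barres *res)
{
	int status;

	status = decode_op_hdr(xdr, OP_BAR);
	if (!status)
		status = decode_stateid(xdr, &res->stateid);  /* copies NFS4_STATEID_SIZE bytes */
	return status;
}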
@@ -3466,7 +3622,8 @@ xdr_error:
3466 return status; 3622 return status;
3467} 3623}
3468 3624
3469static int decode_getfattr(struct xdr_stream *xdr, struct nfs_fattr *fattr, const struct nfs_server *server) 3625static int decode_getfattr(struct xdr_stream *xdr, struct nfs_fattr *fattr,
3626 const struct nfs_server *server, int may_sleep)
3470{ 3627{
3471 __be32 *savep; 3628 __be32 *savep;
3472 uint32_t attrlen, 3629 uint32_t attrlen,
@@ -3538,12 +3695,14 @@ static int decode_getfattr(struct xdr_stream *xdr, struct nfs_fattr *fattr, cons
3538 goto xdr_error; 3695 goto xdr_error;
3539 fattr->valid |= status; 3696 fattr->valid |= status;
3540 3697
3541 status = decode_attr_owner(xdr, bitmap, server->nfs_client, &fattr->uid); 3698 status = decode_attr_owner(xdr, bitmap, server->nfs_client,
3699 &fattr->uid, may_sleep);
3542 if (status < 0) 3700 if (status < 0)
3543 goto xdr_error; 3701 goto xdr_error;
3544 fattr->valid |= status; 3702 fattr->valid |= status;
3545 3703
3546 status = decode_attr_group(xdr, bitmap, server->nfs_client, &fattr->gid); 3704 status = decode_attr_group(xdr, bitmap, server->nfs_client,
3705 &fattr->gid, may_sleep);
3547 if (status < 0) 3706 if (status < 0)
3548 goto xdr_error; 3707 goto xdr_error;
3549 fattr->valid |= status; 3708 fattr->valid |= status;
@@ -3633,14 +3792,21 @@ static int decode_getfh(struct xdr_stream *xdr, struct nfs_fh *fh)
3633 if (status) 3792 if (status)
3634 return status; 3793 return status;
3635 3794
3636 READ_BUF(4); 3795 p = xdr_inline_decode(xdr, 4);
3637 READ32(len); 3796 if (unlikely(!p))
3797 goto out_overflow;
3798 len = be32_to_cpup(p);
3638 if (len > NFS4_FHSIZE) 3799 if (len > NFS4_FHSIZE)
3639 return -EIO; 3800 return -EIO;
3640 fh->size = len; 3801 fh->size = len;
3641 READ_BUF(len); 3802 p = xdr_inline_decode(xdr, len);
3642 COPYMEM(fh->data, len); 3803 if (unlikely(!p))
3804 goto out_overflow;
3805 memcpy(fh->data, p, len);
3643 return 0; 3806 return 0;
3807out_overflow:
3808 print_overflow_msg(__func__, xdr);
3809 return -EIO;
3644} 3810}
3645 3811
3646static int decode_link(struct xdr_stream *xdr, struct nfs4_change_info *cinfo) 3812static int decode_link(struct xdr_stream *xdr, struct nfs4_change_info *cinfo)
@@ -3662,10 +3828,12 @@ static int decode_lock_denied (struct xdr_stream *xdr, struct file_lock *fl)
3662 __be32 *p; 3828 __be32 *p;
3663 uint32_t namelen, type; 3829 uint32_t namelen, type;
3664 3830
3665 READ_BUF(32); 3831 p = xdr_inline_decode(xdr, 32);
3666 READ64(offset); 3832 if (unlikely(!p))
3667 READ64(length); 3833 goto out_overflow;
3668 READ32(type); 3834 p = xdr_decode_hyper(p, &offset);
3835 p = xdr_decode_hyper(p, &length);
3836 type = be32_to_cpup(p++);
3669 if (fl != NULL) { 3837 if (fl != NULL) {
3670 fl->fl_start = (loff_t)offset; 3838 fl->fl_start = (loff_t)offset;
3671 fl->fl_end = fl->fl_start + (loff_t)length - 1; 3839 fl->fl_end = fl->fl_start + (loff_t)length - 1;
@@ -3676,23 +3844,27 @@ static int decode_lock_denied (struct xdr_stream *xdr, struct file_lock *fl)
3676 fl->fl_type = F_RDLCK; 3844 fl->fl_type = F_RDLCK;
3677 fl->fl_pid = 0; 3845 fl->fl_pid = 0;
3678 } 3846 }
3679 READ64(clientid); 3847 p = xdr_decode_hyper(p, &clientid);
3680 READ32(namelen); 3848 namelen = be32_to_cpup(p);
3681 READ_BUF(namelen); 3849 p = xdr_inline_decode(xdr, namelen);
3682 return -NFS4ERR_DENIED; 3850 if (likely(p))
3851 return -NFS4ERR_DENIED;
3852out_overflow:
3853 print_overflow_msg(__func__, xdr);
3854 return -EIO;
3683} 3855}
3684 3856
3685static int decode_lock(struct xdr_stream *xdr, struct nfs_lock_res *res) 3857static int decode_lock(struct xdr_stream *xdr, struct nfs_lock_res *res)
3686{ 3858{
3687 __be32 *p;
3688 int status; 3859 int status;
3689 3860
3690 status = decode_op_hdr(xdr, OP_LOCK); 3861 status = decode_op_hdr(xdr, OP_LOCK);
3691 if (status == -EIO) 3862 if (status == -EIO)
3692 goto out; 3863 goto out;
3693 if (status == 0) { 3864 if (status == 0) {
3694 READ_BUF(NFS4_STATEID_SIZE); 3865 status = decode_stateid(xdr, &res->stateid);
3695 COPYMEM(res->stateid.data, NFS4_STATEID_SIZE); 3866 if (unlikely(status))
3867 goto out;
3696 } else if (status == -NFS4ERR_DENIED) 3868 } else if (status == -NFS4ERR_DENIED)
3697 status = decode_lock_denied(xdr, NULL); 3869 status = decode_lock_denied(xdr, NULL);
3698 if (res->open_seqid != NULL) 3870 if (res->open_seqid != NULL)
@@ -3713,16 +3885,13 @@ static int decode_lockt(struct xdr_stream *xdr, struct nfs_lockt_res *res)
3713 3885
3714static int decode_locku(struct xdr_stream *xdr, struct nfs_locku_res *res) 3886static int decode_locku(struct xdr_stream *xdr, struct nfs_locku_res *res)
3715{ 3887{
3716 __be32 *p;
3717 int status; 3888 int status;
3718 3889
3719 status = decode_op_hdr(xdr, OP_LOCKU); 3890 status = decode_op_hdr(xdr, OP_LOCKU);
3720 if (status != -EIO) 3891 if (status != -EIO)
3721 nfs_increment_lock_seqid(status, res->seqid); 3892 nfs_increment_lock_seqid(status, res->seqid);
3722 if (status == 0) { 3893 if (status == 0)
3723 READ_BUF(NFS4_STATEID_SIZE); 3894 status = decode_stateid(xdr, &res->stateid);
3724 COPYMEM(res->stateid.data, NFS4_STATEID_SIZE);
3725 }
3726 return status; 3895 return status;
3727} 3896}
3728 3897
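In decode_lock() and decode_locku() above, the open-coded READ_BUF(NFS4_STATEID_SIZE)/COPYMEM() pair becomes a call to decode_stateid(). That helper is not part of this hunk; judging from decode_sessionid() introduced later in the same patch, it is presumably a thin wrapper around the existing decode_opaque_fixed(), along the lines of:

	static int decode_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
	{
		return decode_opaque_fixed(xdr, stateid->data, NFS4_STATEID_SIZE);
	}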
@@ -3737,34 +3906,46 @@ static int decode_space_limit(struct xdr_stream *xdr, u64 *maxsize)
3737 __be32 *p; 3906 __be32 *p;
3738 uint32_t limit_type, nblocks, blocksize; 3907 uint32_t limit_type, nblocks, blocksize;
3739 3908
3740 READ_BUF(12); 3909 p = xdr_inline_decode(xdr, 12);
3741 READ32(limit_type); 3910 if (unlikely(!p))
3911 goto out_overflow;
3912 limit_type = be32_to_cpup(p++);
3742 switch (limit_type) { 3913 switch (limit_type) {
3743 case 1: 3914 case 1:
3744 READ64(*maxsize); 3915 xdr_decode_hyper(p, maxsize);
3745 break; 3916 break;
3746 case 2: 3917 case 2:
3747 READ32(nblocks); 3918 nblocks = be32_to_cpup(p++);
3748 READ32(blocksize); 3919 blocksize = be32_to_cpup(p);
3749 *maxsize = (uint64_t)nblocks * (uint64_t)blocksize; 3920 *maxsize = (uint64_t)nblocks * (uint64_t)blocksize;
3750 } 3921 }
3751 return 0; 3922 return 0;
3923out_overflow:
3924 print_overflow_msg(__func__, xdr);
3925 return -EIO;
3752} 3926}
3753 3927
3754static int decode_delegation(struct xdr_stream *xdr, struct nfs_openres *res) 3928static int decode_delegation(struct xdr_stream *xdr, struct nfs_openres *res)
3755{ 3929{
3756 __be32 *p; 3930 __be32 *p;
3757 uint32_t delegation_type; 3931 uint32_t delegation_type;
3932 int status;
3758 3933
3759 READ_BUF(4); 3934 p = xdr_inline_decode(xdr, 4);
3760 READ32(delegation_type); 3935 if (unlikely(!p))
3936 goto out_overflow;
3937 delegation_type = be32_to_cpup(p);
3761 if (delegation_type == NFS4_OPEN_DELEGATE_NONE) { 3938 if (delegation_type == NFS4_OPEN_DELEGATE_NONE) {
3762 res->delegation_type = 0; 3939 res->delegation_type = 0;
3763 return 0; 3940 return 0;
3764 } 3941 }
3765 READ_BUF(NFS4_STATEID_SIZE+4); 3942 status = decode_stateid(xdr, &res->delegation);
3766 COPYMEM(res->delegation.data, NFS4_STATEID_SIZE); 3943 if (unlikely(status))
3767 READ32(res->do_recall); 3944 return status;
3945 p = xdr_inline_decode(xdr, 4);
3946 if (unlikely(!p))
3947 goto out_overflow;
3948 res->do_recall = be32_to_cpup(p);
3768 3949
3769 switch (delegation_type) { 3950 switch (delegation_type) {
3770 case NFS4_OPEN_DELEGATE_READ: 3951 case NFS4_OPEN_DELEGATE_READ:
@@ -3776,6 +3957,9 @@ static int decode_delegation(struct xdr_stream *xdr, struct nfs_openres *res)
3776 return -EIO; 3957 return -EIO;
3777 } 3958 }
3778 return decode_ace(xdr, NULL, res->server->nfs_client); 3959 return decode_ace(xdr, NULL, res->server->nfs_client);
3960out_overflow:
3961 print_overflow_msg(__func__, xdr);
3962 return -EIO;
3779} 3963}
3780 3964
3781static int decode_open(struct xdr_stream *xdr, struct nfs_openres *res) 3965static int decode_open(struct xdr_stream *xdr, struct nfs_openres *res)
@@ -3787,23 +3971,27 @@ static int decode_open(struct xdr_stream *xdr, struct nfs_openres *res)
3787 status = decode_op_hdr(xdr, OP_OPEN); 3971 status = decode_op_hdr(xdr, OP_OPEN);
3788 if (status != -EIO) 3972 if (status != -EIO)
3789 nfs_increment_open_seqid(status, res->seqid); 3973 nfs_increment_open_seqid(status, res->seqid);
3790 if (status) 3974 if (!status)
3975 status = decode_stateid(xdr, &res->stateid);
3976 if (unlikely(status))
3791 return status; 3977 return status;
3792 READ_BUF(NFS4_STATEID_SIZE);
3793 COPYMEM(res->stateid.data, NFS4_STATEID_SIZE);
3794 3978
3795 decode_change_info(xdr, &res->cinfo); 3979 decode_change_info(xdr, &res->cinfo);
3796 3980
3797 READ_BUF(8); 3981 p = xdr_inline_decode(xdr, 8);
3798 READ32(res->rflags); 3982 if (unlikely(!p))
3799 READ32(bmlen); 3983 goto out_overflow;
3984 res->rflags = be32_to_cpup(p++);
3985 bmlen = be32_to_cpup(p);
3800 if (bmlen > 10) 3986 if (bmlen > 10)
3801 goto xdr_error; 3987 goto xdr_error;
3802 3988
3803 READ_BUF(bmlen << 2); 3989 p = xdr_inline_decode(xdr, bmlen << 2);
3990 if (unlikely(!p))
3991 goto out_overflow;
3804 savewords = min_t(uint32_t, bmlen, NFS4_BITMAP_SIZE); 3992 savewords = min_t(uint32_t, bmlen, NFS4_BITMAP_SIZE);
3805 for (i = 0; i < savewords; ++i) 3993 for (i = 0; i < savewords; ++i)
3806 READ32(res->attrset[i]); 3994 res->attrset[i] = be32_to_cpup(p++);
3807 for (; i < NFS4_BITMAP_SIZE; i++) 3995 for (; i < NFS4_BITMAP_SIZE; i++)
3808 res->attrset[i] = 0; 3996 res->attrset[i] = 0;
3809 3997
@@ -3811,36 +3999,33 @@ static int decode_open(struct xdr_stream *xdr, struct nfs_openres *res)
3811xdr_error: 3999xdr_error:
3812 dprintk("%s: Bitmap too large! Length = %u\n", __func__, bmlen); 4000 dprintk("%s: Bitmap too large! Length = %u\n", __func__, bmlen);
3813 return -EIO; 4001 return -EIO;
4002out_overflow:
4003 print_overflow_msg(__func__, xdr);
4004 return -EIO;
3814} 4005}
3815 4006
3816static int decode_open_confirm(struct xdr_stream *xdr, struct nfs_open_confirmres *res) 4007static int decode_open_confirm(struct xdr_stream *xdr, struct nfs_open_confirmres *res)
3817{ 4008{
3818 __be32 *p;
3819 int status; 4009 int status;
3820 4010
3821 status = decode_op_hdr(xdr, OP_OPEN_CONFIRM); 4011 status = decode_op_hdr(xdr, OP_OPEN_CONFIRM);
3822 if (status != -EIO) 4012 if (status != -EIO)
3823 nfs_increment_open_seqid(status, res->seqid); 4013 nfs_increment_open_seqid(status, res->seqid);
3824 if (status) 4014 if (!status)
3825 return status; 4015 status = decode_stateid(xdr, &res->stateid);
3826 READ_BUF(NFS4_STATEID_SIZE); 4016 return status;
3827 COPYMEM(res->stateid.data, NFS4_STATEID_SIZE);
3828 return 0;
3829} 4017}
3830 4018
3831static int decode_open_downgrade(struct xdr_stream *xdr, struct nfs_closeres *res) 4019static int decode_open_downgrade(struct xdr_stream *xdr, struct nfs_closeres *res)
3832{ 4020{
3833 __be32 *p;
3834 int status; 4021 int status;
3835 4022
3836 status = decode_op_hdr(xdr, OP_OPEN_DOWNGRADE); 4023 status = decode_op_hdr(xdr, OP_OPEN_DOWNGRADE);
3837 if (status != -EIO) 4024 if (status != -EIO)
3838 nfs_increment_open_seqid(status, res->seqid); 4025 nfs_increment_open_seqid(status, res->seqid);
3839 if (status) 4026 if (!status)
3840 return status; 4027 status = decode_stateid(xdr, &res->stateid);
3841 READ_BUF(NFS4_STATEID_SIZE); 4028 return status;
3842 COPYMEM(res->stateid.data, NFS4_STATEID_SIZE);
3843 return 0;
3844} 4029}
3845 4030
3846static int decode_putfh(struct xdr_stream *xdr) 4031static int decode_putfh(struct xdr_stream *xdr)
@@ -3863,9 +4048,11 @@ static int decode_read(struct xdr_stream *xdr, struct rpc_rqst *req, struct nfs_
3863 status = decode_op_hdr(xdr, OP_READ); 4048 status = decode_op_hdr(xdr, OP_READ);
3864 if (status) 4049 if (status)
3865 return status; 4050 return status;
3866 READ_BUF(8); 4051 p = xdr_inline_decode(xdr, 8);
3867 READ32(eof); 4052 if (unlikely(!p))
3868 READ32(count); 4053 goto out_overflow;
4054 eof = be32_to_cpup(p++);
4055 count = be32_to_cpup(p);
3869 hdrlen = (u8 *) p - (u8 *) iov->iov_base; 4056 hdrlen = (u8 *) p - (u8 *) iov->iov_base;
3870 recvd = req->rq_rcv_buf.len - hdrlen; 4057 recvd = req->rq_rcv_buf.len - hdrlen;
3871 if (count > recvd) { 4058 if (count > recvd) {
@@ -3878,6 +4065,9 @@ static int decode_read(struct xdr_stream *xdr, struct rpc_rqst *req, struct nfs_
3878 res->eof = eof; 4065 res->eof = eof;
3879 res->count = count; 4066 res->count = count;
3880 return 0; 4067 return 0;
4068out_overflow:
4069 print_overflow_msg(__func__, xdr);
4070 return -EIO;
3881} 4071}
3882 4072
3883static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct nfs4_readdir_res *readdir) 4073static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct nfs4_readdir_res *readdir)
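decode_read() keeps its existing sanity check: the server-supplied count is compared with recvd, the number of reply bytes actually received past the header, and clamped when the server claims more than it sent. The clamp itself sits outside the hunk; in sketch form only (not the verbatim kernel code):

	if (count > recvd) {
		/* server claims more data than was actually received */
		count = recvd;
		eof = 0;
	}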
@@ -3892,17 +4082,17 @@ static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct n
3892 int status; 4082 int status;
3893 4083
3894 status = decode_op_hdr(xdr, OP_READDIR); 4084 status = decode_op_hdr(xdr, OP_READDIR);
3895 if (status) 4085 if (!status)
4086 status = decode_verifier(xdr, readdir->verifier.data);
4087 if (unlikely(status))
3896 return status; 4088 return status;
3897 READ_BUF(8);
3898 COPYMEM(readdir->verifier.data, 8);
3899 dprintk("%s: verifier = %08x:%08x\n", 4089 dprintk("%s: verifier = %08x:%08x\n",
3900 __func__, 4090 __func__,
3901 ((u32 *)readdir->verifier.data)[0], 4091 ((u32 *)readdir->verifier.data)[0],
3902 ((u32 *)readdir->verifier.data)[1]); 4092 ((u32 *)readdir->verifier.data)[1]);
3903 4093
3904 4094
3905 hdrlen = (char *) p - (char *) iov->iov_base; 4095 hdrlen = (char *) xdr->p - (char *) iov->iov_base;
3906 recvd = rcvbuf->len - hdrlen; 4096 recvd = rcvbuf->len - hdrlen;
3907 if (pglen > recvd) 4097 if (pglen > recvd)
3908 pglen = recvd; 4098 pglen = recvd;
@@ -3990,8 +4180,10 @@ static int decode_readlink(struct xdr_stream *xdr, struct rpc_rqst *req)
3990 return status; 4180 return status;
3991 4181
3992 /* Convert length of symlink */ 4182 /* Convert length of symlink */
3993 READ_BUF(4); 4183 p = xdr_inline_decode(xdr, 4);
3994 READ32(len); 4184 if (unlikely(!p))
4185 goto out_overflow;
4186 len = be32_to_cpup(p);
3995 if (len >= rcvbuf->page_len || len <= 0) { 4187 if (len >= rcvbuf->page_len || len <= 0) {
3996 dprintk("nfs: server returned giant symlink!\n"); 4188 dprintk("nfs: server returned giant symlink!\n");
3997 return -ENAMETOOLONG; 4189 return -ENAMETOOLONG;
@@ -4015,6 +4207,9 @@ static int decode_readlink(struct xdr_stream *xdr, struct rpc_rqst *req)
4015 kaddr[len+rcvbuf->page_base] = '\0'; 4207 kaddr[len+rcvbuf->page_base] = '\0';
4016 kunmap_atomic(kaddr, KM_USER0); 4208 kunmap_atomic(kaddr, KM_USER0);
4017 return 0; 4209 return 0;
4210out_overflow:
4211 print_overflow_msg(__func__, xdr);
4212 return -EIO;
4018} 4213}
4019 4214
4020static int decode_remove(struct xdr_stream *xdr, struct nfs4_change_info *cinfo) 4215static int decode_remove(struct xdr_stream *xdr, struct nfs4_change_info *cinfo)
@@ -4112,10 +4307,16 @@ static int decode_setattr(struct xdr_stream *xdr)
4112 status = decode_op_hdr(xdr, OP_SETATTR); 4307 status = decode_op_hdr(xdr, OP_SETATTR);
4113 if (status) 4308 if (status)
4114 return status; 4309 return status;
4115 READ_BUF(4); 4310 p = xdr_inline_decode(xdr, 4);
4116 READ32(bmlen); 4311 if (unlikely(!p))
4117 READ_BUF(bmlen << 2); 4312 goto out_overflow;
4118 return 0; 4313 bmlen = be32_to_cpup(p);
4314 p = xdr_inline_decode(xdr, bmlen << 2);
4315 if (likely(p))
4316 return 0;
4317out_overflow:
4318 print_overflow_msg(__func__, xdr);
4319 return -EIO;
4119} 4320}
4120 4321
4121static int decode_setclientid(struct xdr_stream *xdr, struct nfs_client *clp) 4322static int decode_setclientid(struct xdr_stream *xdr, struct nfs_client *clp)
@@ -4124,35 +4325,50 @@ static int decode_setclientid(struct xdr_stream *xdr, struct nfs_client *clp)
4124 uint32_t opnum; 4325 uint32_t opnum;
4125 int32_t nfserr; 4326 int32_t nfserr;
4126 4327
4127 READ_BUF(8); 4328 p = xdr_inline_decode(xdr, 8);
4128 READ32(opnum); 4329 if (unlikely(!p))
4330 goto out_overflow;
4331 opnum = be32_to_cpup(p++);
4129 if (opnum != OP_SETCLIENTID) { 4332 if (opnum != OP_SETCLIENTID) {
4130 dprintk("nfs: decode_setclientid: Server returned operation" 4333 dprintk("nfs: decode_setclientid: Server returned operation"
4131 " %d\n", opnum); 4334 " %d\n", opnum);
4132 return -EIO; 4335 return -EIO;
4133 } 4336 }
4134 READ32(nfserr); 4337 nfserr = be32_to_cpup(p);
4135 if (nfserr == NFS_OK) { 4338 if (nfserr == NFS_OK) {
4136 READ_BUF(8 + NFS4_VERIFIER_SIZE); 4339 p = xdr_inline_decode(xdr, 8 + NFS4_VERIFIER_SIZE);
4137 READ64(clp->cl_clientid); 4340 if (unlikely(!p))
4138 COPYMEM(clp->cl_confirm.data, NFS4_VERIFIER_SIZE); 4341 goto out_overflow;
4342 p = xdr_decode_hyper(p, &clp->cl_clientid);
4343 memcpy(clp->cl_confirm.data, p, NFS4_VERIFIER_SIZE);
4139 } else if (nfserr == NFSERR_CLID_INUSE) { 4344 } else if (nfserr == NFSERR_CLID_INUSE) {
4140 uint32_t len; 4345 uint32_t len;
4141 4346
4142 /* skip netid string */ 4347 /* skip netid string */
4143 READ_BUF(4); 4348 p = xdr_inline_decode(xdr, 4);
4144 READ32(len); 4349 if (unlikely(!p))
4145 READ_BUF(len); 4350 goto out_overflow;
4351 len = be32_to_cpup(p);
4352 p = xdr_inline_decode(xdr, len);
4353 if (unlikely(!p))
4354 goto out_overflow;
4146 4355
4147 /* skip uaddr string */ 4356 /* skip uaddr string */
4148 READ_BUF(4); 4357 p = xdr_inline_decode(xdr, 4);
4149 READ32(len); 4358 if (unlikely(!p))
4150 READ_BUF(len); 4359 goto out_overflow;
4360 len = be32_to_cpup(p);
4361 p = xdr_inline_decode(xdr, len);
4362 if (unlikely(!p))
4363 goto out_overflow;
4151 return -NFSERR_CLID_INUSE; 4364 return -NFSERR_CLID_INUSE;
4152 } else 4365 } else
4153 return nfs4_stat_to_errno(nfserr); 4366 return nfs4_stat_to_errno(nfserr);
4154 4367
4155 return 0; 4368 return 0;
4369out_overflow:
4370 print_overflow_msg(__func__, xdr);
4371 return -EIO;
4156} 4372}
4157 4373
4158static int decode_setclientid_confirm(struct xdr_stream *xdr) 4374static int decode_setclientid_confirm(struct xdr_stream *xdr)
@@ -4169,11 +4385,16 @@ static int decode_write(struct xdr_stream *xdr, struct nfs_writeres *res)
4169 if (status) 4385 if (status)
4170 return status; 4386 return status;
4171 4387
4172 READ_BUF(16); 4388 p = xdr_inline_decode(xdr, 16);
4173 READ32(res->count); 4389 if (unlikely(!p))
4174 READ32(res->verf->committed); 4390 goto out_overflow;
4175 COPYMEM(res->verf->verifier, 8); 4391 res->count = be32_to_cpup(p++);
4392 res->verf->committed = be32_to_cpup(p++);
4393 memcpy(res->verf->verifier, p, 8);
4176 return 0; 4394 return 0;
4395out_overflow:
4396 print_overflow_msg(__func__, xdr);
4397 return -EIO;
4177} 4398}
4178 4399
4179static int decode_delegreturn(struct xdr_stream *xdr) 4400static int decode_delegreturn(struct xdr_stream *xdr)
@@ -4187,6 +4408,7 @@ static int decode_exchange_id(struct xdr_stream *xdr,
4187{ 4408{
4188 __be32 *p; 4409 __be32 *p;
4189 uint32_t dummy; 4410 uint32_t dummy;
4411 char *dummy_str;
4190 int status; 4412 int status;
4191 struct nfs_client *clp = res->client; 4413 struct nfs_client *clp = res->client;
4192 4414
@@ -4194,36 +4416,45 @@ static int decode_exchange_id(struct xdr_stream *xdr,
4194 if (status) 4416 if (status)
4195 return status; 4417 return status;
4196 4418
4197 READ_BUF(8); 4419 p = xdr_inline_decode(xdr, 8);
4198 READ64(clp->cl_ex_clid); 4420 if (unlikely(!p))
4199 READ_BUF(12); 4421 goto out_overflow;
4200 READ32(clp->cl_seqid); 4422 xdr_decode_hyper(p, &clp->cl_ex_clid);
4201 READ32(clp->cl_exchange_flags); 4423 p = xdr_inline_decode(xdr, 12);
4424 if (unlikely(!p))
4425 goto out_overflow;
4426 clp->cl_seqid = be32_to_cpup(p++);
4427 clp->cl_exchange_flags = be32_to_cpup(p++);
4202 4428
4203 /* We ask for SP4_NONE */ 4429 /* We ask for SP4_NONE */
4204 READ32(dummy); 4430 dummy = be32_to_cpup(p);
4205 if (dummy != SP4_NONE) 4431 if (dummy != SP4_NONE)
4206 return -EIO; 4432 return -EIO;
4207 4433
4208 /* Throw away minor_id */ 4434 /* Throw away minor_id */
4209 READ_BUF(8); 4435 p = xdr_inline_decode(xdr, 8);
4436 if (unlikely(!p))
4437 goto out_overflow;
4210 4438
4211 /* Throw away Major id */ 4439 /* Throw away Major id */
4212 READ_BUF(4); 4440 status = decode_opaque_inline(xdr, &dummy, &dummy_str);
4213 READ32(dummy); 4441 if (unlikely(status))
4214 READ_BUF(dummy); 4442 return status;
4215 4443
4216 /* Throw away server_scope */ 4444 /* Throw away server_scope */
4217 READ_BUF(4); 4445 status = decode_opaque_inline(xdr, &dummy, &dummy_str);
4218 READ32(dummy); 4446 if (unlikely(status))
4219 READ_BUF(dummy); 4447 return status;
4220 4448
4221 /* Throw away Implementation id array */ 4449 /* Throw away Implementation id array */
4222 READ_BUF(4); 4450 status = decode_opaque_inline(xdr, &dummy, &dummy_str);
4223 READ32(dummy); 4451 if (unlikely(status))
4224 READ_BUF(dummy); 4452 return status;
4225 4453
4226 return 0; 4454 return 0;
4455out_overflow:
4456 print_overflow_msg(__func__, xdr);
4457 return -EIO;
4227} 4458}
4228 4459
4229static int decode_chan_attrs(struct xdr_stream *xdr, 4460static int decode_chan_attrs(struct xdr_stream *xdr,
@@ -4232,22 +4463,35 @@ static int decode_chan_attrs(struct xdr_stream *xdr,
4232 __be32 *p; 4463 __be32 *p;
4233 u32 nr_attrs; 4464 u32 nr_attrs;
4234 4465
4235 READ_BUF(28); 4466 p = xdr_inline_decode(xdr, 28);
4236 READ32(attrs->headerpadsz); 4467 if (unlikely(!p))
4237 READ32(attrs->max_rqst_sz); 4468 goto out_overflow;
4238 READ32(attrs->max_resp_sz); 4469 attrs->headerpadsz = be32_to_cpup(p++);
4239 READ32(attrs->max_resp_sz_cached); 4470 attrs->max_rqst_sz = be32_to_cpup(p++);
4240 READ32(attrs->max_ops); 4471 attrs->max_resp_sz = be32_to_cpup(p++);
4241 READ32(attrs->max_reqs); 4472 attrs->max_resp_sz_cached = be32_to_cpup(p++);
4242 READ32(nr_attrs); 4473 attrs->max_ops = be32_to_cpup(p++);
4474 attrs->max_reqs = be32_to_cpup(p++);
4475 nr_attrs = be32_to_cpup(p);
4243 if (unlikely(nr_attrs > 1)) { 4476 if (unlikely(nr_attrs > 1)) {
4244 printk(KERN_WARNING "%s: Invalid rdma channel attrs count %u\n", 4477 printk(KERN_WARNING "%s: Invalid rdma channel attrs count %u\n",
4245 __func__, nr_attrs); 4478 __func__, nr_attrs);
4246 return -EINVAL; 4479 return -EINVAL;
4247 } 4480 }
4248 if (nr_attrs == 1) 4481 if (nr_attrs == 1) {
4249 READ_BUF(4); /* skip rdma_attrs */ 4482 p = xdr_inline_decode(xdr, 4); /* skip rdma_attrs */
4483 if (unlikely(!p))
4484 goto out_overflow;
4485 }
4250 return 0; 4486 return 0;
4487out_overflow:
4488 print_overflow_msg(__func__, xdr);
4489 return -EIO;
4490}
4491
4492static int decode_sessionid(struct xdr_stream *xdr, struct nfs4_sessionid *sid)
4493{
4494 return decode_opaque_fixed(xdr, sid->data, NFS4_MAX_SESSIONID_LEN);
4251} 4495}
4252 4496
4253static int decode_create_session(struct xdr_stream *xdr, 4497static int decode_create_session(struct xdr_stream *xdr,
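decode_chan_attrs() maps its 28-byte reservation straight onto the NFSv4.1 channel_attrs4 result of CREATE_SESSION, and the new decode_sessionid() shown above follows the same decode_opaque_fixed() wrapper pattern used for stateids. For reference, the seven 32-bit words consumed here (field names per the NFSv4.1 specification):

	/* channel_attrs4:
	 *   ca_headerpadsize           -> attrs->headerpadsz
	 *   ca_maxrequestsize          -> attrs->max_rqst_sz
	 *   ca_maxresponsesize         -> attrs->max_resp_sz
	 *   ca_maxresponsesize_cached  -> attrs->max_resp_sz_cached
	 *   ca_maxoperations           -> attrs->max_ops
	 *   ca_maxrequests             -> attrs->max_reqs
	 *   ca_rdma_ird<>              -> length word plus optional one-word array
	 */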
@@ -4259,24 +4503,26 @@ static int decode_create_session(struct xdr_stream *xdr,
4259 struct nfs4_session *session = clp->cl_session; 4503 struct nfs4_session *session = clp->cl_session;
4260 4504
4261 status = decode_op_hdr(xdr, OP_CREATE_SESSION); 4505 status = decode_op_hdr(xdr, OP_CREATE_SESSION);
4262 4506 if (!status)
4263 if (status) 4507 status = decode_sessionid(xdr, &session->sess_id);
4508 if (unlikely(status))
4264 return status; 4509 return status;
4265 4510
4266 /* sessionid */
4267 READ_BUF(NFS4_MAX_SESSIONID_LEN);
4268 COPYMEM(&session->sess_id, NFS4_MAX_SESSIONID_LEN);
4269
4270 /* seqid, flags */ 4511 /* seqid, flags */
4271 READ_BUF(8); 4512 p = xdr_inline_decode(xdr, 8);
4272 READ32(clp->cl_seqid); 4513 if (unlikely(!p))
4273 READ32(session->flags); 4514 goto out_overflow;
4515 clp->cl_seqid = be32_to_cpup(p++);
4516 session->flags = be32_to_cpup(p);
4274 4517
4275 /* Channel attributes */ 4518 /* Channel attributes */
4276 status = decode_chan_attrs(xdr, &session->fc_attrs); 4519 status = decode_chan_attrs(xdr, &session->fc_attrs);
4277 if (!status) 4520 if (!status)
4278 status = decode_chan_attrs(xdr, &session->bc_attrs); 4521 status = decode_chan_attrs(xdr, &session->bc_attrs);
4279 return status; 4522 return status;
4523out_overflow:
4524 print_overflow_msg(__func__, xdr);
4525 return -EIO;
4280} 4526}
4281 4527
4282static int decode_destroy_session(struct xdr_stream *xdr, void *dummy) 4528static int decode_destroy_session(struct xdr_stream *xdr, void *dummy)
@@ -4300,7 +4546,9 @@ static int decode_sequence(struct xdr_stream *xdr,
4300 return 0; 4546 return 0;
4301 4547
4302 status = decode_op_hdr(xdr, OP_SEQUENCE); 4548 status = decode_op_hdr(xdr, OP_SEQUENCE);
4303 if (status) 4549 if (!status)
4550 status = decode_sessionid(xdr, &id);
4551 if (unlikely(status))
4304 goto out_err; 4552 goto out_err;
4305 4553
4306 /* 4554 /*
@@ -4309,36 +4557,43 @@ static int decode_sequence(struct xdr_stream *xdr,
4309 */ 4557 */
4310 status = -ESERVERFAULT; 4558 status = -ESERVERFAULT;
4311 4559
4312 slot = &res->sr_session->fc_slot_table.slots[res->sr_slotid];
4313 READ_BUF(NFS4_MAX_SESSIONID_LEN + 20);
4314 COPYMEM(id.data, NFS4_MAX_SESSIONID_LEN);
4315 if (memcmp(id.data, res->sr_session->sess_id.data, 4560 if (memcmp(id.data, res->sr_session->sess_id.data,
4316 NFS4_MAX_SESSIONID_LEN)) { 4561 NFS4_MAX_SESSIONID_LEN)) {
4317 dprintk("%s Invalid session id\n", __func__); 4562 dprintk("%s Invalid session id\n", __func__);
4318 goto out_err; 4563 goto out_err;
4319 } 4564 }
4565
4566 p = xdr_inline_decode(xdr, 20);
4567 if (unlikely(!p))
4568 goto out_overflow;
4569
4320 /* seqid */ 4570 /* seqid */
4321 READ32(dummy); 4571 slot = &res->sr_session->fc_slot_table.slots[res->sr_slotid];
4572 dummy = be32_to_cpup(p++);
4322 if (dummy != slot->seq_nr) { 4573 if (dummy != slot->seq_nr) {
4323 dprintk("%s Invalid sequence number\n", __func__); 4574 dprintk("%s Invalid sequence number\n", __func__);
4324 goto out_err; 4575 goto out_err;
4325 } 4576 }
4326 /* slot id */ 4577 /* slot id */
4327 READ32(dummy); 4578 dummy = be32_to_cpup(p++);
4328 if (dummy != res->sr_slotid) { 4579 if (dummy != res->sr_slotid) {
4329 dprintk("%s Invalid slot id\n", __func__); 4580 dprintk("%s Invalid slot id\n", __func__);
4330 goto out_err; 4581 goto out_err;
4331 } 4582 }
4332 /* highest slot id - currently not processed */ 4583 /* highest slot id - currently not processed */
4333 READ32(dummy); 4584 dummy = be32_to_cpup(p++);
4334 /* target highest slot id - currently not processed */ 4585 /* target highest slot id - currently not processed */
4335 READ32(dummy); 4586 dummy = be32_to_cpup(p++);
4336 /* result flags - currently not processed */ 4587 /* result flags - currently not processed */
4337 READ32(dummy); 4588 dummy = be32_to_cpup(p);
4338 status = 0; 4589 status = 0;
4339out_err: 4590out_err:
4340 res->sr_status = status; 4591 res->sr_status = status;
4341 return status; 4592 return status;
4593out_overflow:
4594 print_overflow_msg(__func__, xdr);
4595 status = -EIO;
4596 goto out_err;
4342#else /* CONFIG_NFS_V4_1 */ 4597#else /* CONFIG_NFS_V4_1 */
4343 return 0; 4598 return 0;
4344#endif /* CONFIG_NFS_V4_1 */ 4599#endif /* CONFIG_NFS_V4_1 */
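In decode_sequence(), the session id is now read through decode_sessionid() and the remainder of the SEQUENCE result is reserved as a single 20-byte block. The five 32-bit words that follow the session id, in wire order (names per the NFSv4.1 specification):

	/* SEQUENCE4resok after sr_sessionid:
	 *   sr_sequenceid              checked against slot->seq_nr
	 *   sr_slotid                  checked against res->sr_slotid
	 *   sr_highest_slotid          decoded, not yet processed
	 *   sr_target_highest_slotid   decoded, not yet processed
	 *   sr_status_flags            decoded, not yet processed
	 */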
@@ -4370,7 +4625,8 @@ static int nfs4_xdr_dec_open_downgrade(struct rpc_rqst *rqstp, __be32 *p, struct
4370 status = decode_open_downgrade(&xdr, res); 4625 status = decode_open_downgrade(&xdr, res);
4371 if (status != 0) 4626 if (status != 0)
4372 goto out; 4627 goto out;
4373 decode_getfattr(&xdr, res->fattr, res->server); 4628 decode_getfattr(&xdr, res->fattr, res->server,
4629 !RPC_IS_ASYNC(rqstp->rq_task));
4374out: 4630out:
4375 return status; 4631 return status;
4376} 4632}
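From this point on, every decode_getfattr() call site passes !RPC_IS_ASYNC(rqstp->rq_task) as the new may_sleep argument. Owner and group attributes are translated through the idmapper, which may need to sleep waiting for an upcall, so only synchronous RPC tasks are allowed to block; an asynchronous (rpciod) decode passes 0 and presumably just leaves the name untranslated. A sketch of how a decoder might honour the flag (illustrative only, map_owner is a hypothetical helper, not the verbatim decode_attr_owner() body):

	static int map_owner(struct nfs_client *clp, const char *name,
			     size_t len, __u32 *uid, int may_sleep)
	{
		if (!may_sleep)
			return 0;	/* async context: skip the blocking upcall */
		if (nfs_map_name_to_uid(clp, name, len, uid) == 0)
			return NFS_ATTR_FATTR_OWNER;
		return 0;
	}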
@@ -4397,7 +4653,8 @@ static int nfs4_xdr_dec_access(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_ac
4397 status = decode_access(&xdr, res); 4653 status = decode_access(&xdr, res);
4398 if (status != 0) 4654 if (status != 0)
4399 goto out; 4655 goto out;
4400 decode_getfattr(&xdr, res->fattr, res->server); 4656 decode_getfattr(&xdr, res->fattr, res->server,
4657 !RPC_IS_ASYNC(rqstp->rq_task));
4401out: 4658out:
4402 return status; 4659 return status;
4403} 4660}
@@ -4424,7 +4681,8 @@ static int nfs4_xdr_dec_lookup(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_lo
4424 goto out; 4681 goto out;
4425 if ((status = decode_getfh(&xdr, res->fh)) != 0) 4682 if ((status = decode_getfh(&xdr, res->fh)) != 0)
4426 goto out; 4683 goto out;
4427 status = decode_getfattr(&xdr, res->fattr, res->server); 4684 status = decode_getfattr(&xdr, res->fattr, res->server,
 4685 !RPC_IS_ASYNC(rqstp->rq_task));
4428out: 4686out:
4429 return status; 4687 return status;
4430} 4688}
@@ -4448,7 +4706,8 @@ static int nfs4_xdr_dec_lookup_root(struct rpc_rqst *rqstp, __be32 *p, struct nf
4448 if ((status = decode_putrootfh(&xdr)) != 0) 4706 if ((status = decode_putrootfh(&xdr)) != 0)
4449 goto out; 4707 goto out;
4450 if ((status = decode_getfh(&xdr, res->fh)) == 0) 4708 if ((status = decode_getfh(&xdr, res->fh)) == 0)
4451 status = decode_getfattr(&xdr, res->fattr, res->server); 4709 status = decode_getfattr(&xdr, res->fattr, res->server,
4710 !RPC_IS_ASYNC(rqstp->rq_task));
4452out: 4711out:
4453 return status; 4712 return status;
4454} 4713}
@@ -4473,7 +4732,8 @@ static int nfs4_xdr_dec_remove(struct rpc_rqst *rqstp, __be32 *p, struct nfs_rem
4473 goto out; 4732 goto out;
4474 if ((status = decode_remove(&xdr, &res->cinfo)) != 0) 4733 if ((status = decode_remove(&xdr, &res->cinfo)) != 0)
4475 goto out; 4734 goto out;
4476 decode_getfattr(&xdr, &res->dir_attr, res->server); 4735 decode_getfattr(&xdr, &res->dir_attr, res->server,
4736 !RPC_IS_ASYNC(rqstp->rq_task));
4477out: 4737out:
4478 return status; 4738 return status;
4479} 4739}
@@ -4503,11 +4763,13 @@ static int nfs4_xdr_dec_rename(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_re
4503 if ((status = decode_rename(&xdr, &res->old_cinfo, &res->new_cinfo)) != 0) 4763 if ((status = decode_rename(&xdr, &res->old_cinfo, &res->new_cinfo)) != 0)
4504 goto out; 4764 goto out;
4505 /* Current FH is target directory */ 4765 /* Current FH is target directory */
4506 if (decode_getfattr(&xdr, res->new_fattr, res->server) != 0) 4766 if (decode_getfattr(&xdr, res->new_fattr, res->server,
4767 !RPC_IS_ASYNC(rqstp->rq_task)) != 0)
4507 goto out; 4768 goto out;
4508 if ((status = decode_restorefh(&xdr)) != 0) 4769 if ((status = decode_restorefh(&xdr)) != 0)
4509 goto out; 4770 goto out;
4510 decode_getfattr(&xdr, res->old_fattr, res->server); 4771 decode_getfattr(&xdr, res->old_fattr, res->server,
4772 !RPC_IS_ASYNC(rqstp->rq_task));
4511out: 4773out:
4512 return status; 4774 return status;
4513} 4775}
@@ -4540,11 +4802,13 @@ static int nfs4_xdr_dec_link(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_link
4540 * Note order: OP_LINK leaves the directory as the current 4802 * Note order: OP_LINK leaves the directory as the current
4541 * filehandle. 4803 * filehandle.
4542 */ 4804 */
4543 if (decode_getfattr(&xdr, res->dir_attr, res->server) != 0) 4805 if (decode_getfattr(&xdr, res->dir_attr, res->server,
4806 !RPC_IS_ASYNC(rqstp->rq_task)) != 0)
4544 goto out; 4807 goto out;
4545 if ((status = decode_restorefh(&xdr)) != 0) 4808 if ((status = decode_restorefh(&xdr)) != 0)
4546 goto out; 4809 goto out;
4547 decode_getfattr(&xdr, res->fattr, res->server); 4810 decode_getfattr(&xdr, res->fattr, res->server,
4811 !RPC_IS_ASYNC(rqstp->rq_task));
4548out: 4812out:
4549 return status; 4813 return status;
4550} 4814}
@@ -4573,11 +4837,13 @@ static int nfs4_xdr_dec_create(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_cr
4573 goto out; 4837 goto out;
4574 if ((status = decode_getfh(&xdr, res->fh)) != 0) 4838 if ((status = decode_getfh(&xdr, res->fh)) != 0)
4575 goto out; 4839 goto out;
4576 if (decode_getfattr(&xdr, res->fattr, res->server) != 0) 4840 if (decode_getfattr(&xdr, res->fattr, res->server,
4841 !RPC_IS_ASYNC(rqstp->rq_task)) != 0)
4577 goto out; 4842 goto out;
4578 if ((status = decode_restorefh(&xdr)) != 0) 4843 if ((status = decode_restorefh(&xdr)) != 0)
4579 goto out; 4844 goto out;
4580 decode_getfattr(&xdr, res->dir_fattr, res->server); 4845 decode_getfattr(&xdr, res->dir_fattr, res->server,
4846 !RPC_IS_ASYNC(rqstp->rq_task));
4581out: 4847out:
4582 return status; 4848 return status;
4583} 4849}
@@ -4609,7 +4875,8 @@ static int nfs4_xdr_dec_getattr(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_g
4609 status = decode_putfh(&xdr); 4875 status = decode_putfh(&xdr);
4610 if (status) 4876 if (status)
4611 goto out; 4877 goto out;
4612 status = decode_getfattr(&xdr, res->fattr, res->server); 4878 status = decode_getfattr(&xdr, res->fattr, res->server,
4879 !RPC_IS_ASYNC(rqstp->rq_task));
4613out: 4880out:
4614 return status; 4881 return status;
4615} 4882}
@@ -4716,7 +4983,8 @@ static int nfs4_xdr_dec_close(struct rpc_rqst *rqstp, __be32 *p, struct nfs_clos
4716 * an ESTALE error. Shouldn't be a problem, 4983 * an ESTALE error. Shouldn't be a problem,
4717 * though, since fattr->valid will remain unset. 4984 * though, since fattr->valid will remain unset.
4718 */ 4985 */
4719 decode_getfattr(&xdr, res->fattr, res->server); 4986 decode_getfattr(&xdr, res->fattr, res->server,
4987 !RPC_IS_ASYNC(rqstp->rq_task));
4720out: 4988out:
4721 return status; 4989 return status;
4722} 4990}
@@ -4748,11 +5016,13 @@ static int nfs4_xdr_dec_open(struct rpc_rqst *rqstp, __be32 *p, struct nfs_openr
4748 goto out; 5016 goto out;
4749 if (decode_getfh(&xdr, &res->fh) != 0) 5017 if (decode_getfh(&xdr, &res->fh) != 0)
4750 goto out; 5018 goto out;
4751 if (decode_getfattr(&xdr, res->f_attr, res->server) != 0) 5019 if (decode_getfattr(&xdr, res->f_attr, res->server,
5020 !RPC_IS_ASYNC(rqstp->rq_task)) != 0)
4752 goto out; 5021 goto out;
4753 if (decode_restorefh(&xdr) != 0) 5022 if (decode_restorefh(&xdr) != 0)
4754 goto out; 5023 goto out;
4755 decode_getfattr(&xdr, res->dir_attr, res->server); 5024 decode_getfattr(&xdr, res->dir_attr, res->server,
5025 !RPC_IS_ASYNC(rqstp->rq_task));
4756out: 5026out:
4757 return status; 5027 return status;
4758} 5028}
@@ -4800,7 +5070,8 @@ static int nfs4_xdr_dec_open_noattr(struct rpc_rqst *rqstp, __be32 *p, struct nf
4800 status = decode_open(&xdr, res); 5070 status = decode_open(&xdr, res);
4801 if (status) 5071 if (status)
4802 goto out; 5072 goto out;
4803 decode_getfattr(&xdr, res->f_attr, res->server); 5073 decode_getfattr(&xdr, res->f_attr, res->server,
5074 !RPC_IS_ASYNC(rqstp->rq_task));
4804out: 5075out:
4805 return status; 5076 return status;
4806} 5077}
@@ -4827,7 +5098,8 @@ static int nfs4_xdr_dec_setattr(struct rpc_rqst *rqstp, __be32 *p, struct nfs_se
4827 status = decode_setattr(&xdr); 5098 status = decode_setattr(&xdr);
4828 if (status) 5099 if (status)
4829 goto out; 5100 goto out;
4830 decode_getfattr(&xdr, res->fattr, res->server); 5101 decode_getfattr(&xdr, res->fattr, res->server,
5102 !RPC_IS_ASYNC(rqstp->rq_task));
4831out: 5103out:
4832 return status; 5104 return status;
4833} 5105}
@@ -5001,7 +5273,8 @@ static int nfs4_xdr_dec_write(struct rpc_rqst *rqstp, __be32 *p, struct nfs_writ
5001 status = decode_write(&xdr, res); 5273 status = decode_write(&xdr, res);
5002 if (status) 5274 if (status)
5003 goto out; 5275 goto out;
5004 decode_getfattr(&xdr, res->fattr, res->server); 5276 decode_getfattr(&xdr, res->fattr, res->server,
5277 !RPC_IS_ASYNC(rqstp->rq_task));
5005 if (!status) 5278 if (!status)
5006 status = res->count; 5279 status = res->count;
5007out: 5280out:
@@ -5030,7 +5303,8 @@ static int nfs4_xdr_dec_commit(struct rpc_rqst *rqstp, __be32 *p, struct nfs_wri
5030 status = decode_commit(&xdr, res); 5303 status = decode_commit(&xdr, res);
5031 if (status) 5304 if (status)
5032 goto out; 5305 goto out;
5033 decode_getfattr(&xdr, res->fattr, res->server); 5306 decode_getfattr(&xdr, res->fattr, res->server,
5307 !RPC_IS_ASYNC(rqstp->rq_task));
5034out: 5308out:
5035 return status; 5309 return status;
5036} 5310}
@@ -5194,7 +5468,8 @@ static int nfs4_xdr_dec_delegreturn(struct rpc_rqst *rqstp, __be32 *p, struct nf
5194 if (status != 0) 5468 if (status != 0)
5195 goto out; 5469 goto out;
5196 status = decode_delegreturn(&xdr); 5470 status = decode_delegreturn(&xdr);
5197 decode_getfattr(&xdr, res->fattr, res->server); 5471 decode_getfattr(&xdr, res->fattr, res->server,
5472 !RPC_IS_ASYNC(rqstp->rq_task));
5198out: 5473out:
5199 return status; 5474 return status;
5200} 5475}
@@ -5222,7 +5497,8 @@ static int nfs4_xdr_dec_fs_locations(struct rpc_rqst *req, __be32 *p,
5222 goto out; 5497 goto out;
5223 xdr_enter_page(&xdr, PAGE_SIZE); 5498 xdr_enter_page(&xdr, PAGE_SIZE);
5224 status = decode_getfattr(&xdr, &res->fs_locations->fattr, 5499 status = decode_getfattr(&xdr, &res->fs_locations->fattr,
5225 res->fs_locations->server); 5500 res->fs_locations->server,
5501 !RPC_IS_ASYNC(req->rq_task));
5226out: 5502out:
5227 return status; 5503 return status;
5228} 5504}
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 0b4cbdc60abd..867f70504531 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -73,7 +73,7 @@ enum {
73 Opt_cto, Opt_nocto, 73 Opt_cto, Opt_nocto,
74 Opt_ac, Opt_noac, 74 Opt_ac, Opt_noac,
75 Opt_lock, Opt_nolock, 75 Opt_lock, Opt_nolock,
76 Opt_v2, Opt_v3, 76 Opt_v2, Opt_v3, Opt_v4,
77 Opt_udp, Opt_tcp, Opt_rdma, 77 Opt_udp, Opt_tcp, Opt_rdma,
78 Opt_acl, Opt_noacl, 78 Opt_acl, Opt_noacl,
79 Opt_rdirplus, Opt_nordirplus, 79 Opt_rdirplus, Opt_nordirplus,
@@ -127,6 +127,7 @@ static const match_table_t nfs_mount_option_tokens = {
127 { Opt_nolock, "nolock" }, 127 { Opt_nolock, "nolock" },
128 { Opt_v2, "v2" }, 128 { Opt_v2, "v2" },
129 { Opt_v3, "v3" }, 129 { Opt_v3, "v3" },
130 { Opt_v4, "v4" },
130 { Opt_udp, "udp" }, 131 { Opt_udp, "udp" },
131 { Opt_tcp, "tcp" }, 132 { Opt_tcp, "tcp" },
132 { Opt_rdma, "rdma" }, 133 { Opt_rdma, "rdma" },
@@ -158,7 +159,7 @@ static const match_table_t nfs_mount_option_tokens = {
158 { Opt_mountvers, "mountvers=%s" }, 159 { Opt_mountvers, "mountvers=%s" },
159 { Opt_nfsvers, "nfsvers=%s" }, 160 { Opt_nfsvers, "nfsvers=%s" },
160 { Opt_nfsvers, "vers=%s" }, 161 { Opt_nfsvers, "vers=%s" },
161 { Opt_minorversion, "minorversion=%u" }, 162 { Opt_minorversion, "minorversion=%s" },
162 163
163 { Opt_sec, "sec=%s" }, 164 { Opt_sec, "sec=%s" },
164 { Opt_proto, "proto=%s" }, 165 { Opt_proto, "proto=%s" },
@@ -272,6 +273,10 @@ static const struct super_operations nfs_sops = {
272}; 273};
273 274
274#ifdef CONFIG_NFS_V4 275#ifdef CONFIG_NFS_V4
276static int nfs4_validate_text_mount_data(void *options,
277 struct nfs_parsed_mount_data *args, const char *dev_name);
278static int nfs4_try_mount(int flags, const char *dev_name,
279 struct nfs_parsed_mount_data *data, struct vfsmount *mnt);
275static int nfs4_get_sb(struct file_system_type *fs_type, 280static int nfs4_get_sb(struct file_system_type *fs_type,
276 int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt); 281 int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt);
277static int nfs4_remote_get_sb(struct file_system_type *fs_type, 282static int nfs4_remote_get_sb(struct file_system_type *fs_type,
@@ -742,127 +747,23 @@ static int nfs_verify_server_address(struct sockaddr *addr)
742 } 747 }
743 } 748 }
744 749
750 dfprintk(MOUNT, "NFS: Invalid IP address specified\n");
745 return 0; 751 return 0;
746} 752}
747 753
748static void nfs_parse_ipv4_address(char *string, size_t str_len,
749 struct sockaddr *sap, size_t *addr_len)
750{
751 struct sockaddr_in *sin = (struct sockaddr_in *)sap;
752 u8 *addr = (u8 *)&sin->sin_addr.s_addr;
753
754 if (str_len <= INET_ADDRSTRLEN) {
755 dfprintk(MOUNT, "NFS: parsing IPv4 address %*s\n",
756 (int)str_len, string);
757
758 sin->sin_family = AF_INET;
759 *addr_len = sizeof(*sin);
760 if (in4_pton(string, str_len, addr, '\0', NULL))
761 return;
762 }
763
764 sap->sa_family = AF_UNSPEC;
765 *addr_len = 0;
766}
767
768#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
769static int nfs_parse_ipv6_scope_id(const char *string, const size_t str_len,
770 const char *delim,
771 struct sockaddr_in6 *sin6)
772{
773 char *p;
774 size_t len;
775
776 if ((string + str_len) == delim)
777 return 1;
778
779 if (*delim != IPV6_SCOPE_DELIMITER)
780 return 0;
781
782 if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL))
783 return 0;
784
785 len = (string + str_len) - delim - 1;
786 p = kstrndup(delim + 1, len, GFP_KERNEL);
787 if (p) {
788 unsigned long scope_id = 0;
789 struct net_device *dev;
790
791 dev = dev_get_by_name(&init_net, p);
792 if (dev != NULL) {
793 scope_id = dev->ifindex;
794 dev_put(dev);
795 } else {
796 if (strict_strtoul(p, 10, &scope_id) == 0) {
797 kfree(p);
798 return 0;
799 }
800 }
801
802 kfree(p);
803
804 sin6->sin6_scope_id = scope_id;
805 dfprintk(MOUNT, "NFS: IPv6 scope ID = %lu\n", scope_id);
806 return 1;
807 }
808
809 return 0;
810}
811
812static void nfs_parse_ipv6_address(char *string, size_t str_len,
813 struct sockaddr *sap, size_t *addr_len)
814{
815 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
816 u8 *addr = (u8 *)&sin6->sin6_addr.in6_u;
817 const char *delim;
818
819 if (str_len <= INET6_ADDRSTRLEN) {
820 dfprintk(MOUNT, "NFS: parsing IPv6 address %*s\n",
821 (int)str_len, string);
822
823 sin6->sin6_family = AF_INET6;
824 *addr_len = sizeof(*sin6);
825 if (in6_pton(string, str_len, addr,
826 IPV6_SCOPE_DELIMITER, &delim) != 0) {
827 if (nfs_parse_ipv6_scope_id(string, str_len,
828 delim, sin6) != 0)
829 return;
830 }
831 }
832
833 sap->sa_family = AF_UNSPEC;
834 *addr_len = 0;
835}
836#else
837static void nfs_parse_ipv6_address(char *string, size_t str_len,
838 struct sockaddr *sap, size_t *addr_len)
839{
840 sap->sa_family = AF_UNSPEC;
841 *addr_len = 0;
842}
843#endif
844
845/* 754/*
846 * Construct a sockaddr based on the contents of a string that contains 755 * Select between a default port value and a user-specified port value.
847 * an IP address in presentation format. 756 * If a zero value is set, then autobind will be used.
848 *
849 * If there is a problem constructing the new sockaddr, set the address
850 * family to AF_UNSPEC.
851 */ 757 */
852void nfs_parse_ip_address(char *string, size_t str_len, 758static void nfs_set_default_port(struct sockaddr *sap, const int parsed_port,
853 struct sockaddr *sap, size_t *addr_len) 759 const unsigned short default_port)
854{ 760{
855 unsigned int i, colons; 761 unsigned short port = default_port;
856 762
857 colons = 0; 763 if (parsed_port != NFS_UNSPEC_PORT)
858 for (i = 0; i < str_len; i++) 764 port = parsed_port;
859 if (string[i] == ':')
860 colons++;
861 765
862 if (colons >= 2) 766 rpc_set_port(sap, port);
863 nfs_parse_ipv6_address(string, str_len, sap, addr_len);
864 else
865 nfs_parse_ipv4_address(string, str_len, sap, addr_len);
866} 767}
867 768
868/* 769/*
@@ -904,8 +805,6 @@ static void nfs_set_mount_transport_protocol(struct nfs_parsed_mount_data *mnt)
904 805
905/* 806/*
906 * Parse the value of the 'sec=' option. 807 * Parse the value of the 'sec=' option.
907 *
908 * The flavor_len setting is for v4 mounts.
909 */ 808 */
910static int nfs_parse_security_flavors(char *value, 809static int nfs_parse_security_flavors(char *value,
911 struct nfs_parsed_mount_data *mnt) 810 struct nfs_parsed_mount_data *mnt)
@@ -916,53 +815,43 @@ static int nfs_parse_security_flavors(char *value,
916 815
917 switch (match_token(value, nfs_secflavor_tokens, args)) { 816 switch (match_token(value, nfs_secflavor_tokens, args)) {
918 case Opt_sec_none: 817 case Opt_sec_none:
919 mnt->auth_flavor_len = 0;
920 mnt->auth_flavors[0] = RPC_AUTH_NULL; 818 mnt->auth_flavors[0] = RPC_AUTH_NULL;
921 break; 819 break;
922 case Opt_sec_sys: 820 case Opt_sec_sys:
923 mnt->auth_flavor_len = 0;
924 mnt->auth_flavors[0] = RPC_AUTH_UNIX; 821 mnt->auth_flavors[0] = RPC_AUTH_UNIX;
925 break; 822 break;
926 case Opt_sec_krb5: 823 case Opt_sec_krb5:
927 mnt->auth_flavor_len = 1;
928 mnt->auth_flavors[0] = RPC_AUTH_GSS_KRB5; 824 mnt->auth_flavors[0] = RPC_AUTH_GSS_KRB5;
929 break; 825 break;
930 case Opt_sec_krb5i: 826 case Opt_sec_krb5i:
931 mnt->auth_flavor_len = 1;
932 mnt->auth_flavors[0] = RPC_AUTH_GSS_KRB5I; 827 mnt->auth_flavors[0] = RPC_AUTH_GSS_KRB5I;
933 break; 828 break;
934 case Opt_sec_krb5p: 829 case Opt_sec_krb5p:
935 mnt->auth_flavor_len = 1;
936 mnt->auth_flavors[0] = RPC_AUTH_GSS_KRB5P; 830 mnt->auth_flavors[0] = RPC_AUTH_GSS_KRB5P;
937 break; 831 break;
938 case Opt_sec_lkey: 832 case Opt_sec_lkey:
939 mnt->auth_flavor_len = 1;
940 mnt->auth_flavors[0] = RPC_AUTH_GSS_LKEY; 833 mnt->auth_flavors[0] = RPC_AUTH_GSS_LKEY;
941 break; 834 break;
942 case Opt_sec_lkeyi: 835 case Opt_sec_lkeyi:
943 mnt->auth_flavor_len = 1;
944 mnt->auth_flavors[0] = RPC_AUTH_GSS_LKEYI; 836 mnt->auth_flavors[0] = RPC_AUTH_GSS_LKEYI;
945 break; 837 break;
946 case Opt_sec_lkeyp: 838 case Opt_sec_lkeyp:
947 mnt->auth_flavor_len = 1;
948 mnt->auth_flavors[0] = RPC_AUTH_GSS_LKEYP; 839 mnt->auth_flavors[0] = RPC_AUTH_GSS_LKEYP;
949 break; 840 break;
950 case Opt_sec_spkm: 841 case Opt_sec_spkm:
951 mnt->auth_flavor_len = 1;
952 mnt->auth_flavors[0] = RPC_AUTH_GSS_SPKM; 842 mnt->auth_flavors[0] = RPC_AUTH_GSS_SPKM;
953 break; 843 break;
954 case Opt_sec_spkmi: 844 case Opt_sec_spkmi:
955 mnt->auth_flavor_len = 1;
956 mnt->auth_flavors[0] = RPC_AUTH_GSS_SPKMI; 845 mnt->auth_flavors[0] = RPC_AUTH_GSS_SPKMI;
957 break; 846 break;
958 case Opt_sec_spkmp: 847 case Opt_sec_spkmp:
959 mnt->auth_flavor_len = 1;
960 mnt->auth_flavors[0] = RPC_AUTH_GSS_SPKMP; 848 mnt->auth_flavors[0] = RPC_AUTH_GSS_SPKMP;
961 break; 849 break;
962 default: 850 default:
963 return 0; 851 return 0;
964 } 852 }
965 853
854 mnt->auth_flavor_len = 1;
966 return 1; 855 return 1;
967} 856}
968 857
@@ -1001,7 +890,6 @@ static int nfs_parse_mount_options(char *raw,
1001 while ((p = strsep(&raw, ",")) != NULL) { 890 while ((p = strsep(&raw, ",")) != NULL) {
1002 substring_t args[MAX_OPT_ARGS]; 891 substring_t args[MAX_OPT_ARGS];
1003 unsigned long option; 892 unsigned long option;
1004 int int_option;
1005 int token; 893 int token;
1006 894
1007 if (!*p) 895 if (!*p)
@@ -1047,10 +935,18 @@ static int nfs_parse_mount_options(char *raw,
1047 break; 935 break;
1048 case Opt_v2: 936 case Opt_v2:
1049 mnt->flags &= ~NFS_MOUNT_VER3; 937 mnt->flags &= ~NFS_MOUNT_VER3;
938 mnt->version = 2;
1050 break; 939 break;
1051 case Opt_v3: 940 case Opt_v3:
1052 mnt->flags |= NFS_MOUNT_VER3; 941 mnt->flags |= NFS_MOUNT_VER3;
942 mnt->version = 3;
1053 break; 943 break;
944#ifdef CONFIG_NFS_V4
945 case Opt_v4:
946 mnt->flags &= ~NFS_MOUNT_VER3;
947 mnt->version = 4;
948 break;
949#endif
1054 case Opt_udp: 950 case Opt_udp:
1055 mnt->flags &= ~NFS_MOUNT_TCP; 951 mnt->flags &= ~NFS_MOUNT_TCP;
1056 mnt->nfs_server.protocol = XPRT_TRANSPORT_UDP; 952 mnt->nfs_server.protocol = XPRT_TRANSPORT_UDP;
@@ -1264,20 +1160,33 @@ static int nfs_parse_mount_options(char *raw,
1264 switch (option) { 1160 switch (option) {
1265 case NFS2_VERSION: 1161 case NFS2_VERSION:
1266 mnt->flags &= ~NFS_MOUNT_VER3; 1162 mnt->flags &= ~NFS_MOUNT_VER3;
1163 mnt->version = 2;
1267 break; 1164 break;
1268 case NFS3_VERSION: 1165 case NFS3_VERSION:
1269 mnt->flags |= NFS_MOUNT_VER3; 1166 mnt->flags |= NFS_MOUNT_VER3;
1167 mnt->version = 3;
1270 break; 1168 break;
1169#ifdef CONFIG_NFS_V4
1170 case NFS4_VERSION:
1171 mnt->flags &= ~NFS_MOUNT_VER3;
1172 mnt->version = 4;
1173 break;
1174#endif
1271 default: 1175 default:
1272 goto out_invalid_value; 1176 goto out_invalid_value;
1273 } 1177 }
1274 break; 1178 break;
1275 case Opt_minorversion: 1179 case Opt_minorversion:
1276 if (match_int(args, &int_option)) 1180 string = match_strdup(args);
1277 return 0; 1181 if (string == NULL)
1278 if (int_option < 0 || int_option > NFS4_MAX_MINOR_VERSION) 1182 goto out_nomem;
1279 return 0; 1183 rc = strict_strtoul(string, 10, &option);
1280 mnt->minorversion = int_option; 1184 kfree(string);
1185 if (rc != 0)
1186 goto out_invalid_value;
1187 if (option > NFS4_MAX_MINOR_VERSION)
1188 goto out_invalid_value;
1189 mnt->minorversion = option;
1281 break; 1190 break;
1282 1191
1283 /* 1192 /*
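Taken together, the two hunks above extend the text-based mount path so the plain "nfs" file system type can select NFSv4 directly: a new mnt->version field is recorded alongside the old NFS_MOUNT_VER3 flag, and minorversion= is now parsed with match_strdup()/strict_strtoul() so a malformed or out-of-range value lands in out_invalid_value instead of failing without a diagnostic. In summary (sketch of the resulting option handling; the version-4 cases depend on CONFIG_NFS_V4):

	/* "v2", "vers=2", "nfsvers=2"  -> flags &= ~NFS_MOUNT_VER3, version = 2
	 * "v3", "vers=3", "nfsvers=3"  -> flags |=  NFS_MOUNT_VER3, version = 3
	 * "v4", "vers=4", "nfsvers=4"  -> flags &= ~NFS_MOUNT_VER3, version = 4
	 * "minorversion=N"             -> accepted for 0 <= N <= NFS4_MAX_MINOR_VERSION
	 */

nfs_validate_mount_data() and nfs_get_sb() further down use version == 4 to hand the mount off to nfs4_validate_text_mount_data() and nfs4_try_mount(), so a mount such as "mount -t nfs -o vers=4 server:/export /mnt" should now reach the NFSv4 client without using the nfs4 type.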
@@ -1352,11 +1261,14 @@ static int nfs_parse_mount_options(char *raw,
1352 string = match_strdup(args); 1261 string = match_strdup(args);
1353 if (string == NULL) 1262 if (string == NULL)
1354 goto out_nomem; 1263 goto out_nomem;
1355 nfs_parse_ip_address(string, strlen(string), 1264 mnt->nfs_server.addrlen =
1356 (struct sockaddr *) 1265 rpc_pton(string, strlen(string),
1357 &mnt->nfs_server.address, 1266 (struct sockaddr *)
1358 &mnt->nfs_server.addrlen); 1267 &mnt->nfs_server.address,
1268 sizeof(mnt->nfs_server.address));
1359 kfree(string); 1269 kfree(string);
1270 if (mnt->nfs_server.addrlen == 0)
1271 goto out_invalid_address;
1360 break; 1272 break;
1361 case Opt_clientaddr: 1273 case Opt_clientaddr:
1362 string = match_strdup(args); 1274 string = match_strdup(args);
@@ -1376,11 +1288,14 @@ static int nfs_parse_mount_options(char *raw,
1376 string = match_strdup(args); 1288 string = match_strdup(args);
1377 if (string == NULL) 1289 if (string == NULL)
1378 goto out_nomem; 1290 goto out_nomem;
1379 nfs_parse_ip_address(string, strlen(string), 1291 mnt->mount_server.addrlen =
1380 (struct sockaddr *) 1292 rpc_pton(string, strlen(string),
1381 &mnt->mount_server.address, 1293 (struct sockaddr *)
1382 &mnt->mount_server.addrlen); 1294 &mnt->mount_server.address,
1295 sizeof(mnt->mount_server.address));
1383 kfree(string); 1296 kfree(string);
1297 if (mnt->mount_server.addrlen == 0)
1298 goto out_invalid_address;
1384 break; 1299 break;
1385 case Opt_lookupcache: 1300 case Opt_lookupcache:
1386 string = match_strdup(args); 1301 string = match_strdup(args);
@@ -1432,8 +1347,11 @@ static int nfs_parse_mount_options(char *raw,
1432 1347
1433 return 1; 1348 return 1;
1434 1349
1350out_invalid_address:
1351 printk(KERN_INFO "NFS: bad IP address specified: %s\n", p);
1352 return 0;
1435out_invalid_value: 1353out_invalid_value:
1436 printk(KERN_INFO "NFS: bad mount option value specified: %s \n", p); 1354 printk(KERN_INFO "NFS: bad mount option value specified: %s\n", p);
1437 return 0; 1355 return 0;
1438out_nomem: 1356out_nomem:
1439 printk(KERN_INFO "NFS: not enough memory to parse option\n"); 1357 printk(KERN_INFO "NFS: not enough memory to parse option\n");
@@ -1445,13 +1363,60 @@ out_security_failure:
1445} 1363}
1446 1364
1447/* 1365/*
1366 * Match the requested auth flavors with the list returned by
1367 * the server. Returns zero and sets the mount's authentication
1368 * flavor on success; returns -EACCES if the server does not
1369 * support the requested flavor.
1370 */
1371static int nfs_walk_authlist(struct nfs_parsed_mount_data *args,
1372 struct nfs_mount_request *request)
1373{
1374 unsigned int i, j, server_authlist_len = *(request->auth_flav_len);
1375
1376 /*
1377 * Certain releases of Linux's mountd return an empty
1378 * flavor list. To prevent behavioral regression with
1379 * these servers (i.e. rejecting mounts that used to
1380 * succeed), revert to pre-2.6.32 behavior (no checking)
1381 * if the returned flavor list is empty.
1382 */
1383 if (server_authlist_len == 0)
1384 return 0;
1385
1386 /*
1387 * We avoid sophisticated negotiating here, as there are
1388 * plenty of cases where we can get it wrong, providing
1389 * either too little or too much security.
1390 *
1391 * RFC 2623, section 2.7 suggests we SHOULD prefer the
1392 * flavor listed first. However, some servers list
1393 * AUTH_NULL first. Our caller plants AUTH_SYS, the
1394 * preferred default, in args->auth_flavors[0] if the user
1395 * didn't specify the sec= mount option.
1396 */
1397 for (i = 0; i < args->auth_flavor_len; i++)
1398 for (j = 0; j < server_authlist_len; j++)
1399 if (args->auth_flavors[i] == request->auth_flavs[j]) {
1400 dfprintk(MOUNT, "NFS: using auth flavor %d\n",
1401 request->auth_flavs[j]);
1402 args->auth_flavors[0] = request->auth_flavs[j];
1403 return 0;
1404 }
1405
1406 dfprintk(MOUNT, "NFS: server does not support requested auth flavor\n");
1407 nfs_umount(request);
1408 return -EACCES;
1409}
1410
1411/*
1448 * Use the remote server's MOUNT service to request the NFS file handle 1412 * Use the remote server's MOUNT service to request the NFS file handle
1449 * corresponding to the provided path. 1413 * corresponding to the provided path.
1450 */ 1414 */
1451static int nfs_try_mount(struct nfs_parsed_mount_data *args, 1415static int nfs_try_mount(struct nfs_parsed_mount_data *args,
1452 struct nfs_fh *root_fh) 1416 struct nfs_fh *root_fh)
1453{ 1417{
1454 unsigned int auth_flavor_len = 0; 1418 rpc_authflavor_t server_authlist[NFS_MAX_SECFLAVORS];
1419 unsigned int server_authlist_len = ARRAY_SIZE(server_authlist);
1455 struct nfs_mount_request request = { 1420 struct nfs_mount_request request = {
1456 .sap = (struct sockaddr *) 1421 .sap = (struct sockaddr *)
1457 &args->mount_server.address, 1422 &args->mount_server.address,
@@ -1459,7 +1424,8 @@ static int nfs_try_mount(struct nfs_parsed_mount_data *args,
1459 .protocol = args->mount_server.protocol, 1424 .protocol = args->mount_server.protocol,
1460 .fh = root_fh, 1425 .fh = root_fh,
1461 .noresvport = args->flags & NFS_MOUNT_NORESVPORT, 1426 .noresvport = args->flags & NFS_MOUNT_NORESVPORT,
1462 .auth_flav_len = &auth_flavor_len, 1427 .auth_flav_len = &server_authlist_len,
1428 .auth_flavs = server_authlist,
1463 }; 1429 };
1464 int status; 1430 int status;
1465 1431
@@ -1485,23 +1451,25 @@ static int nfs_try_mount(struct nfs_parsed_mount_data *args,
1485 args->mount_server.addrlen = args->nfs_server.addrlen; 1451 args->mount_server.addrlen = args->nfs_server.addrlen;
1486 } 1452 }
1487 request.salen = args->mount_server.addrlen; 1453 request.salen = args->mount_server.addrlen;
1488 1454 nfs_set_default_port(request.sap, args->mount_server.port, 0);
1489 /*
1490 * autobind will be used if mount_server.port == 0
1491 */
1492 nfs_set_port(request.sap, args->mount_server.port);
1493 1455
1494 /* 1456 /*
1495 * Now ask the mount server to map our export path 1457 * Now ask the mount server to map our export path
1496 * to a file handle. 1458 * to a file handle.
1497 */ 1459 */
1498 status = nfs_mount(&request); 1460 status = nfs_mount(&request);
1499 if (status == 0) 1461 if (status != 0) {
1500 return 0; 1462 dfprintk(MOUNT, "NFS: unable to mount server %s, error %d\n",
1463 request.hostname, status);
1464 return status;
1465 }
1501 1466
1502 dfprintk(MOUNT, "NFS: unable to mount server %s, error %d\n", 1467 /*
1503 request.hostname, status); 1468 * MNTv1 (NFSv2) does not support auth flavor negotiation.
1504 return status; 1469 */
1470 if (args->mount_server.version != NFS_MNT3_VERSION)
1471 return 0;
1472 return nfs_walk_authlist(args, &request);
1505} 1473}
1506 1474
1507static int nfs_parse_simple_hostname(const char *dev_name, 1475static int nfs_parse_simple_hostname(const char *dev_name,
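nfs_try_mount() now keeps the auth flavor list that mountd returns (server_authlist) and, for MNTv3, hands it to nfs_walk_authlist() above; MNTv1 (NFSv2) carries no flavor list, so negotiation is skipped. The selection rule is simply the first match in the client's preference order. A standalone model of that rule, outside the kernel, using real RPC flavor numbers only as example data (RPC_AUTH_NULL = 0, RPC_AUTH_UNIX = 1, RPC_AUTH_GSS_KRB5 = 390003):

	#include <stdio.h>

	/* client preference order vs. what the server advertised */
	static const unsigned int client_pref[] = { 1 };
	static const unsigned int server_list[] = { 0, 1, 390003 };

	int main(void)
	{
		unsigned int i, j;

		for (i = 0; i < sizeof(client_pref) / sizeof(client_pref[0]); i++)
			for (j = 0; j < sizeof(server_list) / sizeof(server_list[0]); j++)
				if (client_pref[i] == server_list[j]) {
					printf("using flavor %u\n", server_list[j]);
					return 0;
				}
		printf("no acceptable flavor, mount fails with EACCES\n");
		return 1;
	}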
@@ -1661,6 +1629,7 @@ static int nfs_validate_mount_data(void *options,
1661 const char *dev_name) 1629 const char *dev_name)
1662{ 1630{
1663 struct nfs_mount_data *data = (struct nfs_mount_data *)options; 1631 struct nfs_mount_data *data = (struct nfs_mount_data *)options;
1632 struct sockaddr *sap = (struct sockaddr *)&args->nfs_server.address;
1664 1633
1665 if (data == NULL) 1634 if (data == NULL)
1666 goto out_no_data; 1635 goto out_no_data;
@@ -1672,10 +1641,12 @@ static int nfs_validate_mount_data(void *options,
1672 args->acregmax = NFS_DEF_ACREGMAX; 1641 args->acregmax = NFS_DEF_ACREGMAX;
1673 args->acdirmin = NFS_DEF_ACDIRMIN; 1642 args->acdirmin = NFS_DEF_ACDIRMIN;
1674 args->acdirmax = NFS_DEF_ACDIRMAX; 1643 args->acdirmax = NFS_DEF_ACDIRMAX;
1675 args->mount_server.port = 0; /* autobind unless user sets port */ 1644 args->mount_server.port = NFS_UNSPEC_PORT;
1676 args->nfs_server.port = 0; /* autobind unless user sets port */ 1645 args->nfs_server.port = NFS_UNSPEC_PORT;
1677 args->nfs_server.protocol = XPRT_TRANSPORT_TCP; 1646 args->nfs_server.protocol = XPRT_TRANSPORT_TCP;
1678 args->auth_flavors[0] = RPC_AUTH_UNIX; 1647 args->auth_flavors[0] = RPC_AUTH_UNIX;
1648 args->auth_flavor_len = 1;
1649 args->minorversion = 0;
1679 1650
1680 switch (data->version) { 1651 switch (data->version) {
1681 case 1: 1652 case 1:
@@ -1697,8 +1668,11 @@ static int nfs_validate_mount_data(void *options,
1697 if (data->root.size > NFS3_FHSIZE || data->root.size == 0) 1668 if (data->root.size > NFS3_FHSIZE || data->root.size == 0)
1698 goto out_invalid_fh; 1669 goto out_invalid_fh;
1699 mntfh->size = data->root.size; 1670 mntfh->size = data->root.size;
1700 } else 1671 args->version = 3;
1672 } else {
1701 mntfh->size = NFS2_FHSIZE; 1673 mntfh->size = NFS2_FHSIZE;
1674 args->version = 2;
1675 }
1702 1676
1703 1677
1704 memcpy(mntfh->data, data->root.data, mntfh->size); 1678 memcpy(mntfh->data, data->root.data, mntfh->size);
@@ -1720,11 +1694,9 @@ static int nfs_validate_mount_data(void *options,
1720 args->acdirmin = data->acdirmin; 1694 args->acdirmin = data->acdirmin;
1721 args->acdirmax = data->acdirmax; 1695 args->acdirmax = data->acdirmax;
1722 1696
1723 memcpy(&args->nfs_server.address, &data->addr, 1697 memcpy(sap, &data->addr, sizeof(data->addr));
1724 sizeof(data->addr));
1725 args->nfs_server.addrlen = sizeof(data->addr); 1698 args->nfs_server.addrlen = sizeof(data->addr);
1726 if (!nfs_verify_server_address((struct sockaddr *) 1699 if (!nfs_verify_server_address(sap))
1727 &args->nfs_server.address))
1728 goto out_no_address; 1700 goto out_no_address;
1729 1701
1730 if (!(data->flags & NFS_MOUNT_TCP)) 1702 if (!(data->flags & NFS_MOUNT_TCP))
@@ -1772,12 +1744,18 @@ static int nfs_validate_mount_data(void *options,
1772 if (nfs_parse_mount_options((char *)options, args) == 0) 1744 if (nfs_parse_mount_options((char *)options, args) == 0)
1773 return -EINVAL; 1745 return -EINVAL;
1774 1746
1775 if (!nfs_verify_server_address((struct sockaddr *) 1747 if (!nfs_verify_server_address(sap))
1776 &args->nfs_server.address))
1777 goto out_no_address; 1748 goto out_no_address;
1778 1749
1779 nfs_set_port((struct sockaddr *)&args->nfs_server.address, 1750 if (args->version == 4)
1780 args->nfs_server.port); 1751#ifdef CONFIG_NFS_V4
1752 return nfs4_validate_text_mount_data(options,
1753 args, dev_name);
1754#else
1755 goto out_v4_not_compiled;
1756#endif
1757
1758 nfs_set_default_port(sap, args->nfs_server.port, 0);
1781 1759
1782 nfs_set_mount_transport_protocol(args); 1760 nfs_set_mount_transport_protocol(args);
1783 1761
@@ -1825,6 +1803,12 @@ out_v3_not_compiled:
1825 return -EPROTONOSUPPORT; 1803 return -EPROTONOSUPPORT;
1826#endif /* !CONFIG_NFS_V3 */ 1804#endif /* !CONFIG_NFS_V3 */
1827 1805
1806#ifndef CONFIG_NFS_V4
1807out_v4_not_compiled:
1808 dfprintk(MOUNT, "NFS: NFSv4 is not compiled into kernel\n");
1809 return -EPROTONOSUPPORT;
1810#endif /* !CONFIG_NFS_V4 */
1811
1828out_nomem: 1812out_nomem:
1829 dfprintk(MOUNT, "NFS: not enough memory to handle mount options\n"); 1813 dfprintk(MOUNT, "NFS: not enough memory to handle mount options\n");
1830 return -ENOMEM; 1814 return -ENOMEM;
@@ -2120,6 +2104,14 @@ static int nfs_get_sb(struct file_system_type *fs_type,
2120 if (error < 0) 2104 if (error < 0)
2121 goto out; 2105 goto out;
2122 2106
2107#ifdef CONFIG_NFS_V4
2108 if (data->version == 4) {
2109 error = nfs4_try_mount(flags, dev_name, data, mnt);
2110 kfree(data->client_address);
2111 goto out;
2112 }
2113#endif /* CONFIG_NFS_V4 */
2114
2123 /* Get a volume representation */ 2115 /* Get a volume representation */
2124 server = nfs_create_server(data, mntfh); 2116 server = nfs_create_server(data, mntfh);
2125 if (IS_ERR(server)) { 2117 if (IS_ERR(server)) {
@@ -2317,6 +2309,43 @@ static void nfs4_validate_mount_flags(struct nfs_parsed_mount_data *args)
2317 args->flags &= ~(NFS_MOUNT_NONLM|NFS_MOUNT_NOACL|NFS_MOUNT_VER3); 2309 args->flags &= ~(NFS_MOUNT_NONLM|NFS_MOUNT_NOACL|NFS_MOUNT_VER3);
2318} 2310}
2319 2311
2312static int nfs4_validate_text_mount_data(void *options,
2313 struct nfs_parsed_mount_data *args,
2314 const char *dev_name)
2315{
2316 struct sockaddr *sap = (struct sockaddr *)&args->nfs_server.address;
2317
2318 nfs_set_default_port(sap, args->nfs_server.port, NFS_PORT);
2319
2320 nfs_validate_transport_protocol(args);
2321
2322 nfs4_validate_mount_flags(args);
2323
2324 if (args->version != 4) {
2325 dfprintk(MOUNT,
2326 "NFS4: Illegal mount version\n");
2327 return -EINVAL;
2328 }
2329
2330 if (args->auth_flavor_len > 1) {
2331 dfprintk(MOUNT,
2332 "NFS4: Too many RPC auth flavours specified\n");
2333 return -EINVAL;
2334 }
2335
2336 if (args->client_address == NULL) {
2337 dfprintk(MOUNT,
2338 "NFS4: mount program didn't pass callback address\n");
2339 return -EINVAL;
2340 }
2341
2342 return nfs_parse_devname(dev_name,
2343 &args->nfs_server.hostname,
2344 NFS4_MAXNAMLEN,
2345 &args->nfs_server.export_path,
2346 NFS4_MAXPATHLEN);
2347}
2348
2320/* 2349/*
2321 * Validate NFSv4 mount options 2350 * Validate NFSv4 mount options
2322 */ 2351 */
@@ -2324,7 +2353,7 @@ static int nfs4_validate_mount_data(void *options,
2324 struct nfs_parsed_mount_data *args, 2353 struct nfs_parsed_mount_data *args,
2325 const char *dev_name) 2354 const char *dev_name)
2326{ 2355{
2327 struct sockaddr_in *ap; 2356 struct sockaddr *sap = (struct sockaddr *)&args->nfs_server.address;
2328 struct nfs4_mount_data *data = (struct nfs4_mount_data *)options; 2357 struct nfs4_mount_data *data = (struct nfs4_mount_data *)options;
2329 char *c; 2358 char *c;
2330 2359
@@ -2337,23 +2366,22 @@ static int nfs4_validate_mount_data(void *options,
2337 args->acregmax = NFS_DEF_ACREGMAX; 2366 args->acregmax = NFS_DEF_ACREGMAX;
2338 args->acdirmin = NFS_DEF_ACDIRMIN; 2367 args->acdirmin = NFS_DEF_ACDIRMIN;
2339 args->acdirmax = NFS_DEF_ACDIRMAX; 2368 args->acdirmax = NFS_DEF_ACDIRMAX;
2340 args->nfs_server.port = NFS_PORT; /* 2049 unless user set port= */ 2369 args->nfs_server.port = NFS_UNSPEC_PORT;
2341 args->auth_flavors[0] = RPC_AUTH_UNIX; 2370 args->auth_flavors[0] = RPC_AUTH_UNIX;
2342 args->auth_flavor_len = 0; 2371 args->auth_flavor_len = 1;
2372 args->version = 4;
2343 args->minorversion = 0; 2373 args->minorversion = 0;
2344 2374
2345 switch (data->version) { 2375 switch (data->version) {
2346 case 1: 2376 case 1:
2347 ap = (struct sockaddr_in *)&args->nfs_server.address;
2348 if (data->host_addrlen > sizeof(args->nfs_server.address)) 2377 if (data->host_addrlen > sizeof(args->nfs_server.address))
2349 goto out_no_address; 2378 goto out_no_address;
2350 if (data->host_addrlen == 0) 2379 if (data->host_addrlen == 0)
2351 goto out_no_address; 2380 goto out_no_address;
2352 args->nfs_server.addrlen = data->host_addrlen; 2381 args->nfs_server.addrlen = data->host_addrlen;
2353 if (copy_from_user(ap, data->host_addr, data->host_addrlen)) 2382 if (copy_from_user(sap, data->host_addr, data->host_addrlen))
2354 return -EFAULT; 2383 return -EFAULT;
2355 if (!nfs_verify_server_address((struct sockaddr *) 2384 if (!nfs_verify_server_address(sap))
2356 &args->nfs_server.address))
2357 goto out_no_address; 2385 goto out_no_address;
2358 2386
2359 if (data->auth_flavourlen) { 2387 if (data->auth_flavourlen) {
@@ -2399,39 +2427,14 @@ static int nfs4_validate_mount_data(void *options,
2399 nfs_validate_transport_protocol(args); 2427 nfs_validate_transport_protocol(args);
2400 2428
2401 break; 2429 break;
2402 default: { 2430 default:
2403 int status;
2404
2405 if (nfs_parse_mount_options((char *)options, args) == 0) 2431 if (nfs_parse_mount_options((char *)options, args) == 0)
2406 return -EINVAL; 2432 return -EINVAL;
2407 2433
2408 if (!nfs_verify_server_address((struct sockaddr *) 2434 if (!nfs_verify_server_address(sap))
2409 &args->nfs_server.address))
2410 return -EINVAL; 2435 return -EINVAL;
2411 2436
2412 nfs_set_port((struct sockaddr *)&args->nfs_server.address, 2437 return nfs4_validate_text_mount_data(options, args, dev_name);
2413 args->nfs_server.port);
2414
2415 nfs_validate_transport_protocol(args);
2416
2417 nfs4_validate_mount_flags(args);
2418
2419 if (args->auth_flavor_len > 1)
2420 goto out_inval_auth;
2421
2422 if (args->client_address == NULL)
2423 goto out_no_client_address;
2424
2425 status = nfs_parse_devname(dev_name,
2426 &args->nfs_server.hostname,
2427 NFS4_MAXNAMLEN,
2428 &args->nfs_server.export_path,
2429 NFS4_MAXPATHLEN);
2430 if (status < 0)
2431 return status;
2432
2433 break;
2434 }
2435 } 2438 }
2436 2439
2437 return 0; 2440 return 0;
@@ -2448,10 +2451,6 @@ out_inval_auth:
2448out_no_address: 2451out_no_address:
2449 dfprintk(MOUNT, "NFS4: mount program didn't pass remote address\n"); 2452 dfprintk(MOUNT, "NFS4: mount program didn't pass remote address\n");
2450 return -EINVAL; 2453 return -EINVAL;
2451
2452out_no_client_address:
2453 dfprintk(MOUNT, "NFS4: mount program didn't pass callback address\n");
2454 return -EINVAL;
2455} 2454}
2456 2455
2457/* 2456/*
@@ -2618,6 +2617,34 @@ out_err:
2618 return ret; 2617 return ret;
2619} 2618}
2620 2619
2620static int nfs4_try_mount(int flags, const char *dev_name,
2621 struct nfs_parsed_mount_data *data,
2622 struct vfsmount *mnt)
2623{
2624 char *export_path;
2625 struct vfsmount *root_mnt;
2626 int error;
2627
2628 dfprintk(MOUNT, "--> nfs4_try_mount()\n");
2629
2630 export_path = data->nfs_server.export_path;
2631 data->nfs_server.export_path = "/";
2632 root_mnt = nfs_do_root_mount(&nfs4_remote_fs_type, flags, data,
2633 data->nfs_server.hostname);
2634 data->nfs_server.export_path = export_path;
2635
2636 error = PTR_ERR(root_mnt);
2637 if (IS_ERR(root_mnt))
2638 goto out;
2639
2640 error = nfs_follow_remote_path(root_mnt, export_path, mnt);
2641
2642out:
2643 dfprintk(MOUNT, "<-- nfs4_try_mount() = %d%s\n", error,
2644 error != 0 ? " [error]" : "");
2645 return error;
2646}
2647
2621/* 2648/*
2622 * Get the superblock for an NFS4 mountpoint 2649 * Get the superblock for an NFS4 mountpoint
2623 */ 2650 */
@@ -2625,8 +2652,6 @@ static int nfs4_get_sb(struct file_system_type *fs_type,
2625 int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt) 2652 int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt)
2626{ 2653{
2627 struct nfs_parsed_mount_data *data; 2654 struct nfs_parsed_mount_data *data;
2628 char *export_path;
2629 struct vfsmount *root_mnt;
2630 int error = -ENOMEM; 2655 int error = -ENOMEM;
2631 2656
2632 data = kzalloc(sizeof(*data), GFP_KERNEL); 2657 data = kzalloc(sizeof(*data), GFP_KERNEL);
@@ -2638,17 +2663,7 @@ static int nfs4_get_sb(struct file_system_type *fs_type,
2638 if (error < 0) 2663 if (error < 0)
2639 goto out; 2664 goto out;
2640 2665
2641 export_path = data->nfs_server.export_path; 2666 error = nfs4_try_mount(flags, dev_name, data, mnt);
2642 data->nfs_server.export_path = "/";
2643 root_mnt = nfs_do_root_mount(&nfs4_remote_fs_type, flags, data,
2644 data->nfs_server.hostname);
2645 data->nfs_server.export_path = export_path;
2646
2647 error = PTR_ERR(root_mnt);
2648 if (IS_ERR(root_mnt))
2649 goto out;
2650
2651 error = nfs_follow_remote_path(root_mnt, export_path, mnt);
2652 2667
2653out: 2668out:
2654 kfree(data->client_address); 2669 kfree(data->client_address);
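The fs/nfs/super.c hunks above let a text-based mount request ("-t nfs -o vers=4") reach the NFSv4 engine: parsed options are checked by the new nfs4_validate_text_mount_data() and the mount itself is handed to the new nfs4_try_mount() helper. A minimal sketch of that dispatch, using only names that appear in the hunks above (the wrapper name and the trimmed error handling are illustrative, not part of the patch):

static int nfs_text_mount_sketch(void *options, const char *dev_name,
				 struct nfs_parsed_mount_data *args,
				 int flags, struct vfsmount *mnt)
{
	if (nfs_parse_mount_options((char *)options, args) == 0)
		return -EINVAL;

	if (args->version != 4)
		return 0;	/* v2/v3 continue on the nfs_create_server() path */

#ifdef CONFIG_NFS_V4
	{
		int error = nfs4_validate_text_mount_data(options, args,
							  dev_name);
		if (error < 0)
			return error;
		return nfs4_try_mount(flags, dev_name, args, mnt);
	}
#else
	return -EPROTONOSUPPORT;	/* NFSv4 not compiled in */
#endif
}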
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index a34fae21fe10..120acadc6a84 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -13,6 +13,7 @@
13#include <linux/file.h> 13#include <linux/file.h>
14#include <linux/writeback.h> 14#include <linux/writeback.h>
15#include <linux/swap.h> 15#include <linux/swap.h>
16#include <linux/migrate.h>
16 17
17#include <linux/sunrpc/clnt.h> 18#include <linux/sunrpc/clnt.h>
18#include <linux/nfs_fs.h> 19#include <linux/nfs_fs.h>
@@ -26,6 +27,7 @@
26#include "internal.h" 27#include "internal.h"
27#include "iostat.h" 28#include "iostat.h"
28#include "nfs4_fs.h" 29#include "nfs4_fs.h"
30#include "fscache.h"
29 31
30#define NFSDBG_FACILITY NFSDBG_PAGECACHE 32#define NFSDBG_FACILITY NFSDBG_PAGECACHE
31 33
@@ -218,24 +220,17 @@ static void nfs_end_page_writeback(struct page *page)
218 clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC); 220 clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC);
219} 221}
220 222
221/* 223static struct nfs_page *nfs_find_and_lock_request(struct page *page)
222 * Find an associated nfs write request, and prepare to flush it out
223 * May return an error if the user signalled nfs_wait_on_request().
224 */
225static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
226 struct page *page)
227{ 224{
228 struct inode *inode = page->mapping->host; 225 struct inode *inode = page->mapping->host;
229 struct nfs_page *req; 226 struct nfs_page *req;
230 int ret; 227 int ret;
231 228
232 spin_lock(&inode->i_lock); 229 spin_lock(&inode->i_lock);
233 for(;;) { 230 for (;;) {
234 req = nfs_page_find_request_locked(page); 231 req = nfs_page_find_request_locked(page);
235 if (req == NULL) { 232 if (req == NULL)
236 spin_unlock(&inode->i_lock); 233 break;
237 return 0;
238 }
239 if (nfs_set_page_tag_locked(req)) 234 if (nfs_set_page_tag_locked(req))
240 break; 235 break;
241 /* Note: If we hold the page lock, as is the case in nfs_writepage, 236 /* Note: If we hold the page lock, as is the case in nfs_writepage,
@@ -247,23 +242,40 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
247 ret = nfs_wait_on_request(req); 242 ret = nfs_wait_on_request(req);
248 nfs_release_request(req); 243 nfs_release_request(req);
249 if (ret != 0) 244 if (ret != 0)
250 return ret; 245 return ERR_PTR(ret);
251 spin_lock(&inode->i_lock); 246 spin_lock(&inode->i_lock);
252 } 247 }
253 if (test_bit(PG_CLEAN, &req->wb_flags)) {
254 spin_unlock(&inode->i_lock);
255 BUG();
256 }
257 if (nfs_set_page_writeback(page) != 0) {
258 spin_unlock(&inode->i_lock);
259 BUG();
260 }
261 spin_unlock(&inode->i_lock); 248 spin_unlock(&inode->i_lock);
249 return req;
250}
251
252/*
253 * Find an associated nfs write request, and prepare to flush it out
254 * May return an error if the user signalled nfs_wait_on_request().
255 */
256static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
257 struct page *page)
258{
259 struct nfs_page *req;
260 int ret = 0;
261
262 req = nfs_find_and_lock_request(page);
263 if (!req)
264 goto out;
265 ret = PTR_ERR(req);
266 if (IS_ERR(req))
267 goto out;
268
269 ret = nfs_set_page_writeback(page);
270 BUG_ON(ret != 0);
271 BUG_ON(test_bit(PG_CLEAN, &req->wb_flags));
272
262 if (!nfs_pageio_add_request(pgio, req)) { 273 if (!nfs_pageio_add_request(pgio, req)) {
263 nfs_redirty_request(req); 274 nfs_redirty_request(req);
264 return pgio->pg_error; 275 ret = pgio->pg_error;
265 } 276 }
266 return 0; 277out:
278 return ret;
267} 279}
268 280
269static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio) 281static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio)
@@ -1580,6 +1592,41 @@ int nfs_wb_page(struct inode *inode, struct page* page)
1580 return nfs_wb_page_priority(inode, page, FLUSH_STABLE); 1592 return nfs_wb_page_priority(inode, page, FLUSH_STABLE);
1581} 1593}
1582 1594
1595#ifdef CONFIG_MIGRATION
1596int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
1597 struct page *page)
1598{
1599 struct nfs_page *req;
1600 int ret;
1601
1602 if (PageFsCache(page))
1603 nfs_fscache_release_page(page, GFP_KERNEL);
1604
1605 req = nfs_find_and_lock_request(page);
1606 ret = PTR_ERR(req);
1607 if (IS_ERR(req))
1608 goto out;
1609
1610 ret = migrate_page(mapping, newpage, page);
1611 if (!req)
1612 goto out;
1613 if (ret)
1614 goto out_unlock;
1615 page_cache_get(newpage);
1616 req->wb_page = newpage;
1617 SetPagePrivate(newpage);
1618 set_page_private(newpage, page_private(page));
1619 ClearPagePrivate(page);
1620 set_page_private(page, 0);
1621 page_cache_release(page);
1622out_unlock:
1623 nfs_clear_page_tag_locked(req);
1624 nfs_release_request(req);
1625out:
1626 return ret;
1627}
1628#endif
1629
1583int __init nfs_init_writepagecache(void) 1630int __init nfs_init_writepagecache(void)
1584{ 1631{
1585 nfs_wdata_cachep = kmem_cache_create("nfs_write_data", 1632 nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
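The fs/nfs/write.c hunks factor the request lookup into nfs_find_and_lock_request() so that both nfs_page_async_flush() and the new CONFIG_MIGRATION handler nfs_migrate_page() can share it. The address_space_operations hookup is not part of these hunks, so the following is only an assumption about how the handler would typically be wired up:

#ifdef CONFIG_MIGRATION
extern int nfs_migrate_page(struct address_space *mapping,
			    struct page *newpage, struct page *page);
#else
#define nfs_migrate_page NULL
#endif

static const struct address_space_operations nfs_file_aops_sketch = {
	/* existing .readpage/.writepage/.writepages/... callbacks go here */
	.migratepage	= nfs_migrate_page,
};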
diff --git a/fs/nfsd/auth.c b/fs/nfsd/auth.c
index 5573508f707f..36fcabbf5186 100644
--- a/fs/nfsd/auth.c
+++ b/fs/nfsd/auth.c
@@ -34,6 +34,8 @@ int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp)
34 int flags = nfsexp_flags(rqstp, exp); 34 int flags = nfsexp_flags(rqstp, exp);
35 int ret; 35 int ret;
36 36
37 validate_process_creds();
38
37 /* discard any old override before preparing the new set */ 39 /* discard any old override before preparing the new set */
38 revert_creds(get_cred(current->real_cred)); 40 revert_creds(get_cred(current->real_cred));
39 new = prepare_creds(); 41 new = prepare_creds();
@@ -86,8 +88,10 @@ int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp)
86 else 88 else
87 new->cap_effective = cap_raise_nfsd_set(new->cap_effective, 89 new->cap_effective = cap_raise_nfsd_set(new->cap_effective,
88 new->cap_permitted); 90 new->cap_permitted);
91 validate_process_creds();
89 put_cred(override_creds(new)); 92 put_cred(override_creds(new));
90 put_cred(new); 93 put_cred(new);
94 validate_process_creds();
91 return 0; 95 return 0;
92 96
93oom: 97oom:
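The validate_process_creds() calls added to fs/nfsd/auth.c are credential-consistency assertions; as far as the cred API goes they compile away unless CONFIG_DEBUG_CREDENTIALS is enabled (an assumption, not shown in this hunk). A sketch of the override pattern they bracket, as a hypothetical helper:

static int override_to_nobody(void)		/* hypothetical helper */
{
	struct cred *new = prepare_creds();

	if (!new)
		return -ENOMEM;
	new->fsuid = 65534;			/* e.g. "nobody" */
	new->fsgid = 65534;

	validate_process_creds();		/* current creds still sane */
	put_cred(override_creds(new));		/* install new, drop the old ref */
	put_cred(new);				/* override_creds() took its own */
	validate_process_creds();		/* overridden creds are sane too */
	return 0;
}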
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index b92a27629fb7..d9462643155c 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -85,6 +85,11 @@ static void expkey_request(struct cache_detail *cd,
85 (*bpp)[-1] = '\n'; 85 (*bpp)[-1] = '\n';
86} 86}
87 87
88static int expkey_upcall(struct cache_detail *cd, struct cache_head *h)
89{
90 return sunrpc_cache_pipe_upcall(cd, h, expkey_request);
91}
92
88static struct svc_expkey *svc_expkey_update(struct svc_expkey *new, struct svc_expkey *old); 93static struct svc_expkey *svc_expkey_update(struct svc_expkey *new, struct svc_expkey *old);
89static struct svc_expkey *svc_expkey_lookup(struct svc_expkey *); 94static struct svc_expkey *svc_expkey_lookup(struct svc_expkey *);
90static struct cache_detail svc_expkey_cache; 95static struct cache_detail svc_expkey_cache;
@@ -259,7 +264,7 @@ static struct cache_detail svc_expkey_cache = {
259 .hash_table = expkey_table, 264 .hash_table = expkey_table,
260 .name = "nfsd.fh", 265 .name = "nfsd.fh",
261 .cache_put = expkey_put, 266 .cache_put = expkey_put,
262 .cache_request = expkey_request, 267 .cache_upcall = expkey_upcall,
263 .cache_parse = expkey_parse, 268 .cache_parse = expkey_parse,
264 .cache_show = expkey_show, 269 .cache_show = expkey_show,
265 .match = expkey_match, 270 .match = expkey_match,
@@ -355,6 +360,11 @@ static void svc_export_request(struct cache_detail *cd,
355 (*bpp)[-1] = '\n'; 360 (*bpp)[-1] = '\n';
356} 361}
357 362
363static int svc_export_upcall(struct cache_detail *cd, struct cache_head *h)
364{
365 return sunrpc_cache_pipe_upcall(cd, h, svc_export_request);
366}
367
358static struct svc_export *svc_export_update(struct svc_export *new, 368static struct svc_export *svc_export_update(struct svc_export *new,
359 struct svc_export *old); 369 struct svc_export *old);
360static struct svc_export *svc_export_lookup(struct svc_export *); 370static struct svc_export *svc_export_lookup(struct svc_export *);
@@ -724,7 +734,7 @@ struct cache_detail svc_export_cache = {
724 .hash_table = export_table, 734 .hash_table = export_table,
725 .name = "nfsd.export", 735 .name = "nfsd.export",
726 .cache_put = svc_export_put, 736 .cache_put = svc_export_put,
727 .cache_request = svc_export_request, 737 .cache_upcall = svc_export_upcall,
728 .cache_parse = svc_export_parse, 738 .cache_parse = svc_export_parse,
729 .cache_show = svc_export_show, 739 .cache_show = svc_export_show,
730 .match = svc_export_match, 740 .match = svc_export_match,
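The export cache conversion above replaces the .cache_request hook with .cache_upcall: each cache keeps its request formatter and adds a one-line wrapper that forwards it to sunrpc_cache_pipe_upcall(). The same pattern, sketched for a hypothetical cache named "foo":

static void foo_request(struct cache_detail *cd, struct cache_head *h,
			char **bpp, int *blen)
{
	/* format the lookup key for the userspace daemon into *bpp */
}

static int foo_upcall(struct cache_detail *cd, struct cache_head *h)
{
	return sunrpc_cache_pipe_upcall(cd, h, foo_request);
}

static struct cache_detail foo_cache = {
	.name		= "nfsd.foo",
	.cache_upcall	= foo_upcall,
	/* .cache_parse, .cache_show, .match, ... unchanged from before */
};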
diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c
index 5b398421b051..cdfa86fa1471 100644
--- a/fs/nfsd/nfs4idmap.c
+++ b/fs/nfsd/nfs4idmap.c
@@ -146,6 +146,12 @@ idtoname_request(struct cache_detail *cd, struct cache_head *ch, char **bpp,
146} 146}
147 147
148static int 148static int
149idtoname_upcall(struct cache_detail *cd, struct cache_head *ch)
150{
151 return sunrpc_cache_pipe_upcall(cd, ch, idtoname_request);
152}
153
154static int
149idtoname_match(struct cache_head *ca, struct cache_head *cb) 155idtoname_match(struct cache_head *ca, struct cache_head *cb)
150{ 156{
151 struct ent *a = container_of(ca, struct ent, h); 157 struct ent *a = container_of(ca, struct ent, h);
@@ -175,10 +181,10 @@ idtoname_show(struct seq_file *m, struct cache_detail *cd, struct cache_head *h)
175} 181}
176 182
177static void 183static void
178warn_no_idmapd(struct cache_detail *detail) 184warn_no_idmapd(struct cache_detail *detail, int has_died)
179{ 185{
180 printk("nfsd: nfsv4 idmapping failing: has idmapd %s?\n", 186 printk("nfsd: nfsv4 idmapping failing: has idmapd %s?\n",
181 detail->last_close? "died" : "not been started"); 187 has_died ? "died" : "not been started");
182} 188}
183 189
184 190
@@ -192,7 +198,7 @@ static struct cache_detail idtoname_cache = {
192 .hash_table = idtoname_table, 198 .hash_table = idtoname_table,
193 .name = "nfs4.idtoname", 199 .name = "nfs4.idtoname",
194 .cache_put = ent_put, 200 .cache_put = ent_put,
195 .cache_request = idtoname_request, 201 .cache_upcall = idtoname_upcall,
196 .cache_parse = idtoname_parse, 202 .cache_parse = idtoname_parse,
197 .cache_show = idtoname_show, 203 .cache_show = idtoname_show,
198 .warn_no_listener = warn_no_idmapd, 204 .warn_no_listener = warn_no_idmapd,
@@ -325,6 +331,12 @@ nametoid_request(struct cache_detail *cd, struct cache_head *ch, char **bpp,
325} 331}
326 332
327static int 333static int
334nametoid_upcall(struct cache_detail *cd, struct cache_head *ch)
335{
336 return sunrpc_cache_pipe_upcall(cd, ch, nametoid_request);
337}
338
339static int
328nametoid_match(struct cache_head *ca, struct cache_head *cb) 340nametoid_match(struct cache_head *ca, struct cache_head *cb)
329{ 341{
330 struct ent *a = container_of(ca, struct ent, h); 342 struct ent *a = container_of(ca, struct ent, h);
@@ -363,7 +375,7 @@ static struct cache_detail nametoid_cache = {
363 .hash_table = nametoid_table, 375 .hash_table = nametoid_table,
364 .name = "nfs4.nametoid", 376 .name = "nfs4.nametoid",
365 .cache_put = ent_put, 377 .cache_put = ent_put,
366 .cache_request = nametoid_request, 378 .cache_upcall = nametoid_upcall,
367 .cache_parse = nametoid_parse, 379 .cache_parse = nametoid_parse,
368 .cache_show = nametoid_show, 380 .cache_show = nametoid_show,
369 .warn_no_listener = warn_no_idmapd, 381 .warn_no_listener = warn_no_idmapd,
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 6d0847562d87..7e906c5b7671 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -37,6 +37,7 @@
37#include <linux/nfsd/xdr.h> 37#include <linux/nfsd/xdr.h>
38#include <linux/nfsd/syscall.h> 38#include <linux/nfsd/syscall.h>
39#include <linux/lockd/lockd.h> 39#include <linux/lockd/lockd.h>
40#include <linux/sunrpc/clnt.h>
40 41
41#include <asm/uaccess.h> 42#include <asm/uaccess.h>
42#include <net/ipv6.h> 43#include <net/ipv6.h>
@@ -490,22 +491,18 @@ static ssize_t write_getfd(struct file *file, char *buf, size_t size)
490 * 491 *
491 * Input: 492 * Input:
492 * buf: '\n'-terminated C string containing a 493 * buf: '\n'-terminated C string containing a
493 * presentation format IPv4 address 494 * presentation format IP address
494 * size: length of C string in @buf 495 * size: length of C string in @buf
495 * Output: 496 * Output:
496 * On success: returns zero if all specified locks were released; 497 * On success: returns zero if all specified locks were released;
497 * returns one if one or more locks were not released 498 * returns one if one or more locks were not released
498 * On error: return code is negative errno value 499 * On error: return code is negative errno value
499 *
500 * Note: Only AF_INET client addresses are passed in
501 */ 500 */
502static ssize_t write_unlock_ip(struct file *file, char *buf, size_t size) 501static ssize_t write_unlock_ip(struct file *file, char *buf, size_t size)
503{ 502{
504 struct sockaddr_in sin = { 503 struct sockaddr_storage address;
505 .sin_family = AF_INET, 504 struct sockaddr *sap = (struct sockaddr *)&address;
506 }; 505 size_t salen = sizeof(address);
507 int b1, b2, b3, b4;
508 char c;
509 char *fo_path; 506 char *fo_path;
510 507
511 /* sanity check */ 508 /* sanity check */
@@ -519,14 +516,10 @@ static ssize_t write_unlock_ip(struct file *file, char *buf, size_t size)
519 if (qword_get(&buf, fo_path, size) < 0) 516 if (qword_get(&buf, fo_path, size) < 0)
520 return -EINVAL; 517 return -EINVAL;
521 518
522 /* get ipv4 address */ 519 if (rpc_pton(fo_path, size, sap, salen) == 0)
523 if (sscanf(fo_path, "%u.%u.%u.%u%c", &b1, &b2, &b3, &b4, &c) != 4)
524 return -EINVAL;
525 if (b1 > 255 || b2 > 255 || b3 > 255 || b4 > 255)
526 return -EINVAL; 520 return -EINVAL;
527 sin.sin_addr.s_addr = htonl((b1 << 24) | (b2 << 16) | (b3 << 8) | b4);
528 521
529 return nlmsvc_unlock_all_by_ip((struct sockaddr *)&sin); 522 return nlmsvc_unlock_all_by_ip(sap);
530} 523}
531 524
532/** 525/**
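write_unlock_ip() now parses the client address with rpc_pton() into a sockaddr_storage, so IPv6 presentation addresses are accepted alongside IPv4 instead of the old dotted-quad sscanf(). A small sketch of the parsing step, assuming (as the hunk does) that rpc_pton() returns 0 on failure and fills in the address family on success:

static int parse_client_addr(const char *str, size_t len,
			     struct sockaddr_storage *ss)	/* hypothetical */
{
	struct sockaddr *sap = (struct sockaddr *)ss;

	if (rpc_pton(str, len, sap, sizeof(*ss)) == 0)
		return -EINVAL;		/* neither valid IPv4 nor IPv6 */

	return 0;			/* sap->sa_family is AF_INET or AF_INET6 */
}

Writing an IPv6 presentation address to nfsd's unlock_ip file should therefore behave the same way as an IPv4 one.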
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 492c79b7800b..24d58adfe5fd 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -496,7 +496,9 @@ nfsd(void *vrqstp)
496 /* Lock the export hash tables for reading. */ 496 /* Lock the export hash tables for reading. */
497 exp_readlock(); 497 exp_readlock();
498 498
499 validate_process_creds();
499 svc_process(rqstp); 500 svc_process(rqstp);
501 validate_process_creds();
500 502
501 /* Unlock export hash tables */ 503 /* Unlock export hash tables */
502 exp_readunlock(); 504 exp_readunlock();
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 23341c1063bc..8fa09bfbcba7 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -684,6 +684,8 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
684 __be32 err; 684 __be32 err;
685 int host_err; 685 int host_err;
686 686
687 validate_process_creds();
688
687 /* 689 /*
688 * If we get here, then the client has already done an "open", 690 * If we get here, then the client has already done an "open",
689 * and (hopefully) checked permission - so allow OWNER_OVERRIDE 691 * and (hopefully) checked permission - so allow OWNER_OVERRIDE
@@ -740,6 +742,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
740out_nfserr: 742out_nfserr:
741 err = nfserrno(host_err); 743 err = nfserrno(host_err);
742out: 744out:
745 validate_process_creds();
743 return err; 746 return err;
744} 747}
745 748
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
index 7e0b61be212e..c668bca579c1 100644
--- a/fs/nilfs2/btnode.c
+++ b/fs/nilfs2/btnode.c
@@ -209,6 +209,7 @@ int nilfs_btnode_prepare_change_key(struct address_space *btnc,
209 * We cannot call radix_tree_preload for the kernels older 209 * We cannot call radix_tree_preload for the kernels older
210 * than 2.6.23, because it is not exported for modules. 210 * than 2.6.23, because it is not exported for modules.
211 */ 211 */
212retry:
212 err = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM); 213 err = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
213 if (err) 214 if (err)
214 goto failed_unlock; 215 goto failed_unlock;
@@ -219,7 +220,6 @@ int nilfs_btnode_prepare_change_key(struct address_space *btnc,
219 (unsigned long long)oldkey, 220 (unsigned long long)oldkey,
220 (unsigned long long)newkey); 221 (unsigned long long)newkey);
221 222
222retry:
223 spin_lock_irq(&btnc->tree_lock); 223 spin_lock_irq(&btnc->tree_lock);
224 err = radix_tree_insert(&btnc->page_tree, newkey, obh->b_page); 224 err = radix_tree_insert(&btnc->page_tree, newkey, obh->b_page);
225 spin_unlock_irq(&btnc->tree_lock); 225 spin_unlock_irq(&btnc->tree_lock);
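The nilfs2 change moves the retry: label in front of radix_tree_preload(): the insert may consume the preloaded per-CPU nodes, so a second pass through the loop has to preload again before retaking the tree lock. The general shape of the pattern (hypothetical standalone helper; nilfs's own collision handling is not shown in this hunk):

static int insert_with_retry(struct radix_tree_root *root, unsigned long key,
			     void *item, spinlock_t *lock)	/* hypothetical */
{
	int err;

retry:
	err = radix_tree_preload(GFP_NOFS);
	if (err)
		return err;

	spin_lock(lock);
	err = radix_tree_insert(root, key, item);
	spin_unlock(lock);
	radix_tree_preload_end();

	if (err == -EEXIST) {
		/* evict or rekey the colliding entry here, then retry with
		 * a fresh preload rather than reusing the consumed one */
		goto retry;
	}
	return err;
}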
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index b401654011a2..8a1e61545f41 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -1747,8 +1747,8 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,
1747 * we know zeros will only be needed in the first and/or last cluster. 1747 * we know zeros will only be needed in the first and/or last cluster.
1748 */ 1748 */
1749 if (clusters_to_alloc || extents_to_split || 1749 if (clusters_to_alloc || extents_to_split ||
1750 wc->w_desc[0].c_needs_zero || 1750 (wc->w_clen && (wc->w_desc[0].c_needs_zero ||
1751 wc->w_desc[wc->w_clen - 1].c_needs_zero) 1751 wc->w_desc[wc->w_clen - 1].c_needs_zero)))
1752 cluster_of_pages = 1; 1752 cluster_of_pages = 1;
1753 else 1753 else
1754 cluster_of_pages = 0; 1754 cluster_of_pages = 0;
diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c
index 2f28b7de2c8d..b4957c7d9fe2 100644
--- a/fs/ocfs2/dcache.c
+++ b/fs/ocfs2/dcache.c
@@ -85,6 +85,17 @@ static int ocfs2_dentry_revalidate(struct dentry *dentry,
85 goto bail; 85 goto bail;
86 } 86 }
87 87
88 /*
89 * If the last lookup failed to create dentry lock, let us
90 * redo it.
91 */
92 if (!dentry->d_fsdata) {
93 mlog(0, "Inode %llu doesn't have dentry lock, "
94 "returning false\n",
95 (unsigned long long)OCFS2_I(inode)->ip_blkno);
96 goto bail;
97 }
98
88 ret = 1; 99 ret = 1;
89 100
90bail: 101bail:
diff --git a/fs/ocfs2/dlm/dlmfs.c b/fs/ocfs2/dlm/dlmfs.c
index 1c9efb406a96..02bf17808bdc 100644
--- a/fs/ocfs2/dlm/dlmfs.c
+++ b/fs/ocfs2/dlm/dlmfs.c
@@ -325,6 +325,7 @@ clear_fields:
325} 325}
326 326
327static struct backing_dev_info dlmfs_backing_dev_info = { 327static struct backing_dev_info dlmfs_backing_dev_info = {
328 .name = "ocfs2-dlmfs",
328 .ra_pages = 0, /* No readahead */ 329 .ra_pages = 0, /* No readahead */
329 .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK, 330 .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK,
330}; 331};
diff --git a/fs/open.c b/fs/open.c
index dd98e8076024..31191bf513e4 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -199,7 +199,7 @@ out:
199int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs, 199int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
200 struct file *filp) 200 struct file *filp)
201{ 201{
202 int err; 202 int ret;
203 struct iattr newattrs; 203 struct iattr newattrs;
204 204
205 /* Not pretty: "inode->i_size" shouldn't really be signed. But it is. */ 205 /* Not pretty: "inode->i_size" shouldn't really be signed. But it is. */
@@ -214,12 +214,14 @@ int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
214 } 214 }
215 215
216 /* Remove suid/sgid on truncate too */ 216 /* Remove suid/sgid on truncate too */
217 newattrs.ia_valid |= should_remove_suid(dentry); 217 ret = should_remove_suid(dentry);
218 if (ret)
219 newattrs.ia_valid |= ret | ATTR_FORCE;
218 220
219 mutex_lock(&dentry->d_inode->i_mutex); 221 mutex_lock(&dentry->d_inode->i_mutex);
220 err = notify_change(dentry, &newattrs); 222 ret = notify_change(dentry, &newattrs);
221 mutex_unlock(&dentry->d_inode->i_mutex); 223 mutex_unlock(&dentry->d_inode->i_mutex);
222 return err; 224 return ret;
223} 225}
224 226
225static long do_sys_truncate(const char __user *pathname, loff_t length) 227static long do_sys_truncate(const char __user *pathname, loff_t length)
@@ -957,6 +959,8 @@ struct file *dentry_open(struct dentry *dentry, struct vfsmount *mnt, int flags,
957 int error; 959 int error;
958 struct file *f; 960 struct file *f;
959 961
962 validate_creds(cred);
963
960 /* 964 /*
961 * We must always pass in a valid mount pointer. Historically 965 * We must always pass in a valid mount pointer. Historically
962 * callers got away with not passing it, but we must enforce this at 966 * callers got away with not passing it, but we must enforce this at
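In do_truncate() the suid/sgid kill bits returned by should_remove_suid() are now ORed in only when non-zero, together with ATTR_FORCE so that inode_change_ok() treats them as a kernel-initiated change rather than a user-requested one. A sketch of the resulting pattern as a standalone, hypothetical helper:

static int truncate_and_kill_suid(struct dentry *dentry, loff_t length)
{
	struct iattr newattrs;
	int ret;

	newattrs.ia_size = length;
	newattrs.ia_valid = ATTR_SIZE | ATTR_CTIME;

	ret = should_remove_suid(dentry);	/* 0 or ATTR_KILL_SUID/SGID bits */
	if (ret)
		/* ATTR_FORCE: kernel-initiated, skip the usual permission
		 * checks in inode_change_ok() for these bits */
		newattrs.ia_valid |= ret | ATTR_FORCE;

	mutex_lock(&dentry->d_inode->i_mutex);
	ret = notify_change(dentry, &newattrs);
	mutex_unlock(&dentry->d_inode->i_mutex);
	return ret;
}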
diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c
index 0ff7566c767c..a7f0110fca4c 100644
--- a/fs/ramfs/inode.c
+++ b/fs/ramfs/inode.c
@@ -46,6 +46,7 @@ static const struct super_operations ramfs_ops;
46static const struct inode_operations ramfs_dir_inode_operations; 46static const struct inode_operations ramfs_dir_inode_operations;
47 47
48static struct backing_dev_info ramfs_backing_dev_info = { 48static struct backing_dev_info ramfs_backing_dev_info = {
49 .name = "ramfs",
49 .ra_pages = 0, /* No readahead */ 50 .ra_pages = 0, /* No readahead */
50 .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK | 51 .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK |
51 BDI_CAP_MAP_DIRECT | BDI_CAP_MAP_COPY | 52 BDI_CAP_MAP_DIRECT | BDI_CAP_MAP_COPY |
diff --git a/fs/super.c b/fs/super.c
index 2761d3e22ed9..9cda337ddae2 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -62,9 +62,6 @@ static struct super_block *alloc_super(struct file_system_type *type)
62 s = NULL; 62 s = NULL;
63 goto out; 63 goto out;
64 } 64 }
65 INIT_LIST_HEAD(&s->s_dirty);
66 INIT_LIST_HEAD(&s->s_io);
67 INIT_LIST_HEAD(&s->s_more_io);
68 INIT_LIST_HEAD(&s->s_files); 65 INIT_LIST_HEAD(&s->s_files);
69 INIT_LIST_HEAD(&s->s_instances); 66 INIT_LIST_HEAD(&s->s_instances);
70 INIT_HLIST_HEAD(&s->s_anon); 67 INIT_HLIST_HEAD(&s->s_anon);
@@ -171,7 +168,7 @@ int __put_super_and_need_restart(struct super_block *sb)
171 * Drops a temporary reference, frees superblock if there's no 168 * Drops a temporary reference, frees superblock if there's no
172 * references left. 169 * references left.
173 */ 170 */
174static void put_super(struct super_block *sb) 171void put_super(struct super_block *sb)
175{ 172{
176 spin_lock(&sb_lock); 173 spin_lock(&sb_lock);
177 __put_super(sb); 174 __put_super(sb);
diff --git a/fs/sync.c b/fs/sync.c
index 3422ba61d86d..103cc7fdd3df 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -19,20 +19,22 @@
19 SYNC_FILE_RANGE_WAIT_AFTER) 19 SYNC_FILE_RANGE_WAIT_AFTER)
20 20
21/* 21/*
22 * Do the filesystem syncing work. For simple filesystems sync_inodes_sb(sb, 0) 22 * Do the filesystem syncing work. For simple filesystems
23 * just dirties buffers with inodes so we have to submit IO for these buffers 23 * writeback_inodes_sb(sb) just dirties buffers with inodes so we have to
24 * via __sync_blockdev(). This also speeds up the wait == 1 case since in that 24 * submit IO for these buffers via __sync_blockdev(). This also speeds up the
25 * case write_inode() functions do sync_dirty_buffer() and thus effectively 25 * wait == 1 case since in that case write_inode() functions do
26 * write one block at a time. 26 * sync_dirty_buffer() and thus effectively write one block at a time.
27 */ 27 */
28static int __sync_filesystem(struct super_block *sb, int wait) 28static int __sync_filesystem(struct super_block *sb, int wait)
29{ 29{
30 /* Avoid doing twice syncing and cache pruning for quota sync */ 30 /* Avoid doing twice syncing and cache pruning for quota sync */
31 if (!wait) 31 if (!wait) {
32 writeout_quota_sb(sb, -1); 32 writeout_quota_sb(sb, -1);
33 else 33 writeback_inodes_sb(sb);
34 } else {
34 sync_quota_sb(sb, -1); 35 sync_quota_sb(sb, -1);
35 sync_inodes_sb(sb, wait); 36 sync_inodes_sb(sb);
37 }
36 if (sb->s_op->sync_fs) 38 if (sb->s_op->sync_fs)
37 sb->s_op->sync_fs(sb, wait); 39 sb->s_op->sync_fs(sb, wait);
38 return __sync_blockdev(sb->s_bdev, wait); 40 return __sync_blockdev(sb->s_bdev, wait);
@@ -118,7 +120,7 @@ restart:
118 */ 120 */
119SYSCALL_DEFINE0(sync) 121SYSCALL_DEFINE0(sync)
120{ 122{
121 wakeup_pdflush(0); 123 wakeup_flusher_threads(0);
122 sync_filesystems(0); 124 sync_filesystems(0);
123 sync_filesystems(1); 125 sync_filesystems(1);
124 if (unlikely(laptop_mode)) 126 if (unlikely(laptop_mode))
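fs/sync.c now drives writeback through the per-backing-dev flusher threads: writeback_inodes_sb() starts asynchronous writeback, sync_inodes_sb() writes and waits, and sys_sync() kicks the flushers via wakeup_flusher_threads() instead of wakeup_pdflush(). A minimal sketch of how a sync_fs-style path uses the two superblock helpers (hypothetical wrapper):

static void sync_my_sb(struct super_block *sb, int wait)	/* hypothetical */
{
	if (!wait)
		writeback_inodes_sb(sb);	/* queue async writeback, return */
	else
		sync_inodes_sb(sb);		/* write everything and wait */
}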
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index 14f2d71ea3ce..0050fc40e8c9 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -760,6 +760,7 @@ static struct dentry * sysfs_lookup(struct inode *dir, struct dentry *dentry,
760const struct inode_operations sysfs_dir_inode_operations = { 760const struct inode_operations sysfs_dir_inode_operations = {
761 .lookup = sysfs_lookup, 761 .lookup = sysfs_lookup,
762 .setattr = sysfs_setattr, 762 .setattr = sysfs_setattr,
763 .setxattr = sysfs_setxattr,
763}; 764};
764 765
765static void remove_dir(struct sysfs_dirent *sd) 766static void remove_dir(struct sysfs_dirent *sd)
diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c
index 555f0ff988df..e28cecf179f5 100644
--- a/fs/sysfs/inode.c
+++ b/fs/sysfs/inode.c
@@ -18,6 +18,8 @@
18#include <linux/capability.h> 18#include <linux/capability.h>
19#include <linux/errno.h> 19#include <linux/errno.h>
20#include <linux/sched.h> 20#include <linux/sched.h>
21#include <linux/xattr.h>
22#include <linux/security.h>
21#include "sysfs.h" 23#include "sysfs.h"
22 24
23extern struct super_block * sysfs_sb; 25extern struct super_block * sysfs_sb;
@@ -29,12 +31,14 @@ static const struct address_space_operations sysfs_aops = {
29}; 31};
30 32
31static struct backing_dev_info sysfs_backing_dev_info = { 33static struct backing_dev_info sysfs_backing_dev_info = {
34 .name = "sysfs",
32 .ra_pages = 0, /* No readahead */ 35 .ra_pages = 0, /* No readahead */
33 .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK, 36 .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK,
34}; 37};
35 38
36static const struct inode_operations sysfs_inode_operations ={ 39static const struct inode_operations sysfs_inode_operations ={
37 .setattr = sysfs_setattr, 40 .setattr = sysfs_setattr,
41 .setxattr = sysfs_setxattr,
38}; 42};
39 43
40int __init sysfs_inode_init(void) 44int __init sysfs_inode_init(void)
@@ -42,18 +46,37 @@ int __init sysfs_inode_init(void)
42 return bdi_init(&sysfs_backing_dev_info); 46 return bdi_init(&sysfs_backing_dev_info);
43} 47}
44 48
49struct sysfs_inode_attrs *sysfs_init_inode_attrs(struct sysfs_dirent *sd)
50{
51 struct sysfs_inode_attrs *attrs;
52 struct iattr *iattrs;
53
54 attrs = kzalloc(sizeof(struct sysfs_inode_attrs), GFP_KERNEL);
55 if (!attrs)
56 return NULL;
57 iattrs = &attrs->ia_iattr;
58
59 /* assign default attributes */
60 iattrs->ia_mode = sd->s_mode;
61 iattrs->ia_uid = 0;
62 iattrs->ia_gid = 0;
63 iattrs->ia_atime = iattrs->ia_mtime = iattrs->ia_ctime = CURRENT_TIME;
64
65 return attrs;
66}
45int sysfs_setattr(struct dentry * dentry, struct iattr * iattr) 67int sysfs_setattr(struct dentry * dentry, struct iattr * iattr)
46{ 68{
47 struct inode * inode = dentry->d_inode; 69 struct inode * inode = dentry->d_inode;
48 struct sysfs_dirent * sd = dentry->d_fsdata; 70 struct sysfs_dirent * sd = dentry->d_fsdata;
49 struct iattr * sd_iattr; 71 struct sysfs_inode_attrs *sd_attrs;
72 struct iattr *iattrs;
50 unsigned int ia_valid = iattr->ia_valid; 73 unsigned int ia_valid = iattr->ia_valid;
51 int error; 74 int error;
52 75
53 if (!sd) 76 if (!sd)
54 return -EINVAL; 77 return -EINVAL;
55 78
56 sd_iattr = sd->s_iattr; 79 sd_attrs = sd->s_iattr;
57 80
58 error = inode_change_ok(inode, iattr); 81 error = inode_change_ok(inode, iattr);
59 if (error) 82 if (error)
@@ -65,42 +88,77 @@ int sysfs_setattr(struct dentry * dentry, struct iattr * iattr)
65 if (error) 88 if (error)
66 return error; 89 return error;
67 90
68 if (!sd_iattr) { 91 if (!sd_attrs) {
69 /* setting attributes for the first time, allocate now */ 92 /* setting attributes for the first time, allocate now */
70 sd_iattr = kzalloc(sizeof(struct iattr), GFP_KERNEL); 93 sd_attrs = sysfs_init_inode_attrs(sd);
71 if (!sd_iattr) 94 if (!sd_attrs)
72 return -ENOMEM; 95 return -ENOMEM;
73 /* assign default attributes */ 96 sd->s_iattr = sd_attrs;
74 sd_iattr->ia_mode = sd->s_mode; 97 } else {
75 sd_iattr->ia_uid = 0; 98 /* attributes were changed at least once in past */
76 sd_iattr->ia_gid = 0; 99 iattrs = &sd_attrs->ia_iattr;
77 sd_iattr->ia_atime = sd_iattr->ia_mtime = sd_iattr->ia_ctime = CURRENT_TIME; 100
78 sd->s_iattr = sd_iattr; 101 if (ia_valid & ATTR_UID)
102 iattrs->ia_uid = iattr->ia_uid;
103 if (ia_valid & ATTR_GID)
104 iattrs->ia_gid = iattr->ia_gid;
105 if (ia_valid & ATTR_ATIME)
106 iattrs->ia_atime = timespec_trunc(iattr->ia_atime,
107 inode->i_sb->s_time_gran);
108 if (ia_valid & ATTR_MTIME)
109 iattrs->ia_mtime = timespec_trunc(iattr->ia_mtime,
110 inode->i_sb->s_time_gran);
111 if (ia_valid & ATTR_CTIME)
112 iattrs->ia_ctime = timespec_trunc(iattr->ia_ctime,
113 inode->i_sb->s_time_gran);
114 if (ia_valid & ATTR_MODE) {
115 umode_t mode = iattr->ia_mode;
116
117 if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
118 mode &= ~S_ISGID;
119 iattrs->ia_mode = sd->s_mode = mode;
120 }
79 } 121 }
122 return error;
123}
80 124
81 /* attributes were changed atleast once in past */ 125int sysfs_setxattr(struct dentry *dentry, const char *name, const void *value,
82 126 size_t size, int flags)
83 if (ia_valid & ATTR_UID) 127{
84 sd_iattr->ia_uid = iattr->ia_uid; 128 struct sysfs_dirent *sd = dentry->d_fsdata;
85 if (ia_valid & ATTR_GID) 129 struct sysfs_inode_attrs *iattrs;
86 sd_iattr->ia_gid = iattr->ia_gid; 130 void *secdata;
87 if (ia_valid & ATTR_ATIME) 131 int error;
88 sd_iattr->ia_atime = timespec_trunc(iattr->ia_atime, 132 u32 secdata_len = 0;
89 inode->i_sb->s_time_gran); 133
90 if (ia_valid & ATTR_MTIME) 134 if (!sd)
91 sd_iattr->ia_mtime = timespec_trunc(iattr->ia_mtime, 135 return -EINVAL;
92 inode->i_sb->s_time_gran); 136 if (!sd->s_iattr)
93 if (ia_valid & ATTR_CTIME) 137 sd->s_iattr = sysfs_init_inode_attrs(sd);
94 sd_iattr->ia_ctime = timespec_trunc(iattr->ia_ctime, 138 if (!sd->s_iattr)
95 inode->i_sb->s_time_gran); 139 return -ENOMEM;
96 if (ia_valid & ATTR_MODE) { 140
97 umode_t mode = iattr->ia_mode; 141 iattrs = sd->s_iattr;
98 142
99 if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID)) 143 if (!strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN)) {
100 mode &= ~S_ISGID; 144 const char *suffix = name + XATTR_SECURITY_PREFIX_LEN;
101 sd_iattr->ia_mode = sd->s_mode = mode; 145 error = security_inode_setsecurity(dentry->d_inode, suffix,
102 } 146 value, size, flags);
147 if (error)
148 goto out;
149 error = security_inode_getsecctx(dentry->d_inode,
150 &secdata, &secdata_len);
151 if (error)
152 goto out;
153 if (iattrs->ia_secdata)
154 security_release_secctx(iattrs->ia_secdata,
155 iattrs->ia_secdata_len);
156 iattrs->ia_secdata = secdata;
157 iattrs->ia_secdata_len = secdata_len;
103 158
159 } else
160 return -EINVAL;
161out:
104 return error; 162 return error;
105} 163}
106 164
@@ -146,6 +204,7 @@ static int sysfs_count_nlink(struct sysfs_dirent *sd)
146static void sysfs_init_inode(struct sysfs_dirent *sd, struct inode *inode) 204static void sysfs_init_inode(struct sysfs_dirent *sd, struct inode *inode)
147{ 205{
148 struct bin_attribute *bin_attr; 206 struct bin_attribute *bin_attr;
207 struct sysfs_inode_attrs *iattrs;
149 208
150 inode->i_private = sysfs_get(sd); 209 inode->i_private = sysfs_get(sd);
151 inode->i_mapping->a_ops = &sysfs_aops; 210 inode->i_mapping->a_ops = &sysfs_aops;
@@ -154,16 +213,20 @@ static void sysfs_init_inode(struct sysfs_dirent *sd, struct inode *inode)
154 inode->i_ino = sd->s_ino; 213 inode->i_ino = sd->s_ino;
155 lockdep_set_class(&inode->i_mutex, &sysfs_inode_imutex_key); 214 lockdep_set_class(&inode->i_mutex, &sysfs_inode_imutex_key);
156 215
157 if (sd->s_iattr) { 216 iattrs = sd->s_iattr;
217 if (iattrs) {
158 /* sysfs_dirent has non-default attributes 218 /* sysfs_dirent has non-default attributes
159 * get them for the new inode from persistent copy 219 * get them for the new inode from persistent copy
160 * in sysfs_dirent 220 * in sysfs_dirent
161 */ 221 */
162 set_inode_attr(inode, sd->s_iattr); 222 set_inode_attr(inode, &iattrs->ia_iattr);
223 if (iattrs->ia_secdata)
224 security_inode_notifysecctx(inode,
225 iattrs->ia_secdata,
226 iattrs->ia_secdata_len);
163 } else 227 } else
164 set_default_inode_attr(inode, sd->s_mode); 228 set_default_inode_attr(inode, sd->s_mode);
165 229
166
167 /* initialize inode according to type */ 230 /* initialize inode according to type */
168 switch (sysfs_type(sd)) { 231 switch (sysfs_type(sd)) {
169 case SYSFS_DIR: 232 case SYSFS_DIR:
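With sysfs_setxattr() handling the XATTR_SECURITY_PREFIX namespace, security labels on sysfs nodes can be set through the ordinary setxattr path. A userspace usage sketch, with the path and label purely illustrative:

#include <stdio.h>
#include <string.h>
#include <sys/xattr.h>

int main(void)
{
	const char *label = "system_u:object_r:sysfs_t:s0";	/* illustrative */

	if (setxattr("/sys/kernel/uevent_helper", "security.selinux",
		     label, strlen(label) + 1, 0) != 0) {
		perror("setxattr");
		return 1;
	}
	return 0;
}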
diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
index 1d897ad808e0..c5081ad77026 100644
--- a/fs/sysfs/symlink.c
+++ b/fs/sysfs/symlink.c
@@ -16,6 +16,7 @@
16#include <linux/kobject.h> 16#include <linux/kobject.h>
17#include <linux/namei.h> 17#include <linux/namei.h>
18#include <linux/mutex.h> 18#include <linux/mutex.h>
19#include <linux/security.h>
19 20
20#include "sysfs.h" 21#include "sysfs.h"
21 22
@@ -209,6 +210,7 @@ static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *co
209} 210}
210 211
211const struct inode_operations sysfs_symlink_inode_operations = { 212const struct inode_operations sysfs_symlink_inode_operations = {
213 .setxattr = sysfs_setxattr,
212 .readlink = generic_readlink, 214 .readlink = generic_readlink,
213 .follow_link = sysfs_follow_link, 215 .follow_link = sysfs_follow_link,
214 .put_link = sysfs_put_link, 216 .put_link = sysfs_put_link,
diff --git a/fs/sysfs/sysfs.h b/fs/sysfs/sysfs.h
index 3fa0d98481e2..af4c4e7482ac 100644
--- a/fs/sysfs/sysfs.h
+++ b/fs/sysfs/sysfs.h
@@ -8,6 +8,8 @@
8 * This file is released under the GPLv2. 8 * This file is released under the GPLv2.
9 */ 9 */
10 10
11#include <linux/fs.h>
12
11struct sysfs_open_dirent; 13struct sysfs_open_dirent;
12 14
13/* type-specific structures for sysfs_dirent->s_* union members */ 15/* type-specific structures for sysfs_dirent->s_* union members */
@@ -31,6 +33,12 @@ struct sysfs_elem_bin_attr {
31 struct hlist_head buffers; 33 struct hlist_head buffers;
32}; 34};
33 35
36struct sysfs_inode_attrs {
37 struct iattr ia_iattr;
38 void *ia_secdata;
39 u32 ia_secdata_len;
40};
41
34/* 42/*
35 * sysfs_dirent - the building block of sysfs hierarchy. Each and 43 * sysfs_dirent - the building block of sysfs hierarchy. Each and
36 * every sysfs node is represented by single sysfs_dirent. 44 * every sysfs node is represented by single sysfs_dirent.
@@ -56,7 +64,7 @@ struct sysfs_dirent {
56 unsigned int s_flags; 64 unsigned int s_flags;
57 ino_t s_ino; 65 ino_t s_ino;
58 umode_t s_mode; 66 umode_t s_mode;
59 struct iattr *s_iattr; 67 struct sysfs_inode_attrs *s_iattr;
60}; 68};
61 69
62#define SD_DEACTIVATED_BIAS INT_MIN 70#define SD_DEACTIVATED_BIAS INT_MIN
@@ -148,6 +156,8 @@ static inline void __sysfs_put(struct sysfs_dirent *sd)
148struct inode *sysfs_get_inode(struct sysfs_dirent *sd); 156struct inode *sysfs_get_inode(struct sysfs_dirent *sd);
149void sysfs_delete_inode(struct inode *inode); 157void sysfs_delete_inode(struct inode *inode);
150int sysfs_setattr(struct dentry *dentry, struct iattr *iattr); 158int sysfs_setattr(struct dentry *dentry, struct iattr *iattr);
159int sysfs_setxattr(struct dentry *dentry, const char *name, const void *value,
160 size_t size, int flags);
151int sysfs_hash_and_remove(struct sysfs_dirent *dir_sd, const char *name); 161int sysfs_hash_and_remove(struct sysfs_dirent *dir_sd, const char *name);
152int sysfs_inode_init(void); 162int sysfs_inode_init(void);
153 163
diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c
index eaf6d891d46f..1c8991b0db13 100644
--- a/fs/ubifs/budget.c
+++ b/fs/ubifs/budget.c
@@ -65,26 +65,14 @@
65static int shrink_liability(struct ubifs_info *c, int nr_to_write) 65static int shrink_liability(struct ubifs_info *c, int nr_to_write)
66{ 66{
67 int nr_written; 67 int nr_written;
68 struct writeback_control wbc = {
69 .sync_mode = WB_SYNC_NONE,
70 .range_end = LLONG_MAX,
71 .nr_to_write = nr_to_write,
72 };
73
74 generic_sync_sb_inodes(c->vfs_sb, &wbc);
75 nr_written = nr_to_write - wbc.nr_to_write;
76 68
69 nr_written = writeback_inodes_sb(c->vfs_sb);
77 if (!nr_written) { 70 if (!nr_written) {
78 /* 71 /*
79 * Re-try again but wait on pages/inodes which are being 72 * Re-try again but wait on pages/inodes which are being
80 * written-back concurrently (e.g., by pdflush). 73 * written-back concurrently (e.g., by pdflush).
81 */ 74 */
82 memset(&wbc, 0, sizeof(struct writeback_control)); 75 nr_written = sync_inodes_sb(c->vfs_sb);
83 wbc.sync_mode = WB_SYNC_ALL;
84 wbc.range_end = LLONG_MAX;
85 wbc.nr_to_write = nr_to_write;
86 generic_sync_sb_inodes(c->vfs_sb, &wbc);
87 nr_written = nr_to_write - wbc.nr_to_write;
88 } 76 }
89 77
90 dbg_budg("%d pages were written back", nr_written); 78 dbg_budg("%d pages were written back", nr_written);
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 26d2e0d80465..51763aa8f4de 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -438,12 +438,6 @@ static int ubifs_sync_fs(struct super_block *sb, int wait)
438{ 438{
439 int i, err; 439 int i, err;
440 struct ubifs_info *c = sb->s_fs_info; 440 struct ubifs_info *c = sb->s_fs_info;
441 struct writeback_control wbc = {
442 .sync_mode = WB_SYNC_ALL,
443 .range_start = 0,
444 .range_end = LLONG_MAX,
445 .nr_to_write = LONG_MAX,
446 };
447 441
448 /* 442 /*
449 * Zero @wait is just an advisory thing to help the file system shove 443 * Zero @wait is just an advisory thing to help the file system shove
@@ -462,7 +456,7 @@ static int ubifs_sync_fs(struct super_block *sb, int wait)
462 * the user be able to get more accurate results of 'statfs()' after 456 * the user be able to get more accurate results of 'statfs()' after
463 * they synchronize the file system. 457 * they synchronize the file system.
464 */ 458 */
465 generic_sync_sb_inodes(sb, &wbc); 459 sync_inodes_sb(sb);
466 460
467 /* 461 /*
468 * Synchronize write buffers, because 'ubifs_run_commit()' does not 462 * Synchronize write buffers, because 'ubifs_run_commit()' does not
@@ -1971,6 +1965,7 @@ static int ubifs_fill_super(struct super_block *sb, void *data, int silent)
1971 * 1965 *
1972 * Read-ahead will be disabled because @c->bdi.ra_pages is 0. 1966 * Read-ahead will be disabled because @c->bdi.ra_pages is 0.
1973 */ 1967 */
1968 c->bdi.name = "ubifs",
1974 c->bdi.capabilities = BDI_CAP_MAP_COPY; 1969 c->bdi.capabilities = BDI_CAP_MAP_COPY;
1975 c->bdi.unplug_io_fn = default_unplug_io_fn; 1970 c->bdi.unplug_io_fn = default_unplug_io_fn;
1976 err = bdi_init(&c->bdi); 1971 err = bdi_init(&c->bdi);
diff --git a/fs/xattr.c b/fs/xattr.c
index 1c3d0af59ddf..6d4f6d3449fb 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -66,22 +66,28 @@ xattr_permission(struct inode *inode, const char *name, int mask)
66 return inode_permission(inode, mask); 66 return inode_permission(inode, mask);
67} 67}
68 68
69int 69/**
70vfs_setxattr(struct dentry *dentry, const char *name, const void *value, 70 * __vfs_setxattr_noperm - perform setxattr operation without performing
71 size_t size, int flags) 71 * permission checks.
72 *
73 * @dentry - object to perform setxattr on
74 * @name - xattr name to set
75 * @value - value to set @name to
76 * @size - size of @value
77 * @flags - flags to pass into filesystem operations
78 *
79 * returns the result of the internal setxattr or setsecurity operations.
80 *
81 * This function requires the caller to lock the inode's i_mutex before it
82 * is executed. It also assumes that the caller will make the appropriate
83 * permission checks.
84 */
85int __vfs_setxattr_noperm(struct dentry *dentry, const char *name,
86 const void *value, size_t size, int flags)
72{ 87{
73 struct inode *inode = dentry->d_inode; 88 struct inode *inode = dentry->d_inode;
74 int error; 89 int error = -EOPNOTSUPP;
75
76 error = xattr_permission(inode, name, MAY_WRITE);
77 if (error)
78 return error;
79 90
80 mutex_lock(&inode->i_mutex);
81 error = security_inode_setxattr(dentry, name, value, size, flags);
82 if (error)
83 goto out;
84 error = -EOPNOTSUPP;
85 if (inode->i_op->setxattr) { 91 if (inode->i_op->setxattr) {
86 error = inode->i_op->setxattr(dentry, name, value, size, flags); 92 error = inode->i_op->setxattr(dentry, name, value, size, flags);
87 if (!error) { 93 if (!error) {
@@ -97,6 +103,29 @@ vfs_setxattr(struct dentry *dentry, const char *name, const void *value,
97 if (!error) 103 if (!error)
98 fsnotify_xattr(dentry); 104 fsnotify_xattr(dentry);
99 } 105 }
106
107 return error;
108}
109
110
111int
112vfs_setxattr(struct dentry *dentry, const char *name, const void *value,
113 size_t size, int flags)
114{
115 struct inode *inode = dentry->d_inode;
116 int error;
117
118 error = xattr_permission(inode, name, MAY_WRITE);
119 if (error)
120 return error;
121
122 mutex_lock(&inode->i_mutex);
123 error = security_inode_setxattr(dentry, name, value, size, flags);
124 if (error)
125 goto out;
126
127 error = __vfs_setxattr_noperm(dentry, name, value, size, flags);
128
100out: 129out:
101 mutex_unlock(&inode->i_mutex); 130 mutex_unlock(&inode->i_mutex);
102 return error; 131 return error;
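vfs_setxattr() is split so that __vfs_setxattr_noperm() carries only the filesystem and LSM update, with the caller responsible for permission checks and for holding i_mutex. A sketch of the intended caller pattern (hypothetical helper; the xattr name is illustrative):

static int write_back_security_label(struct dentry *dentry,
				     const void *ctx, size_t ctxlen)
{
	struct inode *inode = dentry->d_inode;
	int error;

	/* caller contract: permission checks already done elsewhere */
	mutex_lock(&inode->i_mutex);
	error = __vfs_setxattr_noperm(dentry, "security.selinux",
				      ctx, ctxlen, 0);
	mutex_unlock(&inode->i_mutex);
	return error;
}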
diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c
index 0882d166239a..eafcc7c18706 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl32.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl32.c
@@ -619,7 +619,7 @@ xfs_file_compat_ioctl(
619 case XFS_IOC_GETVERSION_32: 619 case XFS_IOC_GETVERSION_32:
620 cmd = _NATIVE_IOC(cmd, long); 620 cmd = _NATIVE_IOC(cmd, long);
621 return xfs_file_ioctl(filp, cmd, p); 621 return xfs_file_ioctl(filp, cmd, p);
622 case XFS_IOC_SWAPEXT: { 622 case XFS_IOC_SWAPEXT_32: {
623 struct xfs_swapext sxp; 623 struct xfs_swapext sxp;
624 struct compat_xfs_swapext __user *sxu = arg; 624 struct compat_xfs_swapext __user *sxu = arg;
625 625
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index 8070b34cc287..6c32f1d63d8c 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -485,14 +485,6 @@ xfs_vn_put_link(
485} 485}
486 486
487STATIC int 487STATIC int
488xfs_vn_permission(
489 struct inode *inode,
490 int mask)
491{
492 return generic_permission(inode, mask, xfs_check_acl);
493}
494
495STATIC int
496xfs_vn_getattr( 488xfs_vn_getattr(
497 struct vfsmount *mnt, 489 struct vfsmount *mnt,
498 struct dentry *dentry, 490 struct dentry *dentry,
@@ -696,7 +688,7 @@ xfs_vn_fiemap(
696} 688}
697 689
698static const struct inode_operations xfs_inode_operations = { 690static const struct inode_operations xfs_inode_operations = {
699 .permission = xfs_vn_permission, 691 .check_acl = xfs_check_acl,
700 .truncate = xfs_vn_truncate, 692 .truncate = xfs_vn_truncate,
701 .getattr = xfs_vn_getattr, 693 .getattr = xfs_vn_getattr,
702 .setattr = xfs_vn_setattr, 694 .setattr = xfs_vn_setattr,
@@ -724,7 +716,7 @@ static const struct inode_operations xfs_dir_inode_operations = {
724 .rmdir = xfs_vn_unlink, 716 .rmdir = xfs_vn_unlink,
725 .mknod = xfs_vn_mknod, 717 .mknod = xfs_vn_mknod,
726 .rename = xfs_vn_rename, 718 .rename = xfs_vn_rename,
727 .permission = xfs_vn_permission, 719 .check_acl = xfs_check_acl,
728 .getattr = xfs_vn_getattr, 720 .getattr = xfs_vn_getattr,
729 .setattr = xfs_vn_setattr, 721 .setattr = xfs_vn_setattr,
730 .setxattr = generic_setxattr, 722 .setxattr = generic_setxattr,
@@ -749,7 +741,7 @@ static const struct inode_operations xfs_dir_ci_inode_operations = {
749 .rmdir = xfs_vn_unlink, 741 .rmdir = xfs_vn_unlink,
750 .mknod = xfs_vn_mknod, 742 .mknod = xfs_vn_mknod,
751 .rename = xfs_vn_rename, 743 .rename = xfs_vn_rename,
752 .permission = xfs_vn_permission, 744 .check_acl = xfs_check_acl,
753 .getattr = xfs_vn_getattr, 745 .getattr = xfs_vn_getattr,
754 .setattr = xfs_vn_setattr, 746 .setattr = xfs_vn_setattr,
755 .setxattr = generic_setxattr, 747 .setxattr = generic_setxattr,
@@ -762,7 +754,7 @@ static const struct inode_operations xfs_symlink_inode_operations = {
762 .readlink = generic_readlink, 754 .readlink = generic_readlink,
763 .follow_link = xfs_vn_follow_link, 755 .follow_link = xfs_vn_follow_link,
764 .put_link = xfs_vn_put_link, 756 .put_link = xfs_vn_put_link,
765 .permission = xfs_vn_permission, 757 .check_acl = xfs_check_acl,
766 .getattr = xfs_vn_getattr, 758 .getattr = xfs_vn_getattr,
767 .setattr = xfs_vn_setattr, 759 .setattr = xfs_vn_setattr,
768 .setxattr = generic_setxattr, 760 .setxattr = generic_setxattr,
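The XFS hunks drop the per-inode ->permission wrapper and expose the ACL checker via .check_acl instead; the assumption is that the generic VFS permission path calls ->check_acl only when the mode bits alone cannot decide. A sketch of what a filesystem supplies under this scheme (hypothetical fs):

static int foo_check_acl(struct inode *inode, int mask)	/* hypothetical fs */
{
	/* consult the on-disk POSIX ACL; return -EAGAIN to fall back to
	 * the ordinary mode-bit check when no ACL is present */
	return -EAGAIN;
}

static const struct inode_operations foo_inode_operations = {
	.check_acl	= foo_check_acl,
	/* no .permission: the default generic permission path is used */
};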
diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
index 5406a601185c..e694263445f7 100644
--- a/include/asm-generic/dma-mapping-common.h
+++ b/include/asm-generic/dma-mapping-common.h
@@ -103,7 +103,6 @@ static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
103 if (ops->sync_single_for_cpu) 103 if (ops->sync_single_for_cpu)
104 ops->sync_single_for_cpu(dev, addr, size, dir); 104 ops->sync_single_for_cpu(dev, addr, size, dir);
105 debug_dma_sync_single_for_cpu(dev, addr, size, dir); 105 debug_dma_sync_single_for_cpu(dev, addr, size, dir);
106 flush_write_buffers();
107} 106}
108 107
109static inline void dma_sync_single_for_device(struct device *dev, 108static inline void dma_sync_single_for_device(struct device *dev,
@@ -116,7 +115,6 @@ static inline void dma_sync_single_for_device(struct device *dev,
116 if (ops->sync_single_for_device) 115 if (ops->sync_single_for_device)
117 ops->sync_single_for_device(dev, addr, size, dir); 116 ops->sync_single_for_device(dev, addr, size, dir);
118 debug_dma_sync_single_for_device(dev, addr, size, dir); 117 debug_dma_sync_single_for_device(dev, addr, size, dir);
119 flush_write_buffers();
120} 118}
121 119
122static inline void dma_sync_single_range_for_cpu(struct device *dev, 120static inline void dma_sync_single_range_for_cpu(struct device *dev,
@@ -132,7 +130,6 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
132 ops->sync_single_range_for_cpu(dev, addr, offset, size, dir); 130 ops->sync_single_range_for_cpu(dev, addr, offset, size, dir);
133 debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir); 131 debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
134 132
135 flush_write_buffers();
136 } else 133 } else
137 dma_sync_single_for_cpu(dev, addr, size, dir); 134 dma_sync_single_for_cpu(dev, addr, size, dir);
138} 135}
@@ -150,7 +147,6 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
150 ops->sync_single_range_for_device(dev, addr, offset, size, dir); 147 ops->sync_single_range_for_device(dev, addr, offset, size, dir);
151 debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir); 148 debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
152 149
153 flush_write_buffers();
154 } else 150 } else
155 dma_sync_single_for_device(dev, addr, size, dir); 151 dma_sync_single_for_device(dev, addr, size, dir);
156} 152}
@@ -165,7 +161,6 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
165 if (ops->sync_sg_for_cpu) 161 if (ops->sync_sg_for_cpu)
166 ops->sync_sg_for_cpu(dev, sg, nelems, dir); 162 ops->sync_sg_for_cpu(dev, sg, nelems, dir);
167 debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir); 163 debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
168 flush_write_buffers();
169} 164}
170 165
171static inline void 166static inline void
@@ -179,7 +174,6 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
179 ops->sync_sg_for_device(dev, sg, nelems, dir); 174 ops->sync_sg_for_device(dev, sg, nelems, dir);
180 debug_dma_sync_sg_for_device(dev, sg, nelems, dir); 175 debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
181 176
182 flush_write_buffers();
183} 177}
184 178
185#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL) 179#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
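Removing flush_write_buffers() from the generic dma-mapping-common.h helpers shifts any CPU write-buffer flushing into the architecture's own dma_map_ops callbacks. A sketch of where such a flush would live under that split (hypothetical architecture op):

static void fooarch_sync_single_for_device(struct device *dev,
					   dma_addr_t addr, size_t size,
					   enum dma_data_direction dir)
{
	/* make CPU stores visible to the device before it touches addr */
	flush_write_buffers();
}

static struct dma_map_ops fooarch_dma_ops = {
	.sync_single_for_device	= fooarch_sync_single_for_device,
	/* .map_page, .unmap_page, ... as required by the architecture */
};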
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index 010545436efa..1ffb53f74d37 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -22,11 +22,9 @@ struct seq_file;
22 22
23struct crypto_type { 23struct crypto_type {
24 unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask); 24 unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask);
25 unsigned int (*extsize)(struct crypto_alg *alg, 25 unsigned int (*extsize)(struct crypto_alg *alg);
26 const struct crypto_type *frontend);
27 int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask); 26 int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask);
28 int (*init_tfm)(struct crypto_tfm *tfm, 27 int (*init_tfm)(struct crypto_tfm *tfm);
29 const struct crypto_type *frontend);
30 void (*show)(struct seq_file *m, struct crypto_alg *alg); 28 void (*show)(struct seq_file *m, struct crypto_alg *alg);
31 struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask); 29 struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask);
32 30
@@ -52,6 +50,7 @@ struct crypto_template {
52 50
53 struct crypto_instance *(*alloc)(struct rtattr **tb); 51 struct crypto_instance *(*alloc)(struct rtattr **tb);
54 void (*free)(struct crypto_instance *inst); 52 void (*free)(struct crypto_instance *inst);
53 int (*create)(struct crypto_template *tmpl, struct rtattr **tb);
55 54
56 char name[CRYPTO_MAX_ALG_NAME]; 55 char name[CRYPTO_MAX_ALG_NAME];
57}; 56};
@@ -60,6 +59,7 @@ struct crypto_spawn {
60 struct list_head list; 59 struct list_head list;
61 struct crypto_alg *alg; 60 struct crypto_alg *alg;
62 struct crypto_instance *inst; 61 struct crypto_instance *inst;
62 const struct crypto_type *frontend;
63 u32 mask; 63 u32 mask;
64}; 64};
65 65
@@ -114,11 +114,19 @@ int crypto_register_template(struct crypto_template *tmpl);
114void crypto_unregister_template(struct crypto_template *tmpl); 114void crypto_unregister_template(struct crypto_template *tmpl);
115struct crypto_template *crypto_lookup_template(const char *name); 115struct crypto_template *crypto_lookup_template(const char *name);
116 116
117int crypto_register_instance(struct crypto_template *tmpl,
118 struct crypto_instance *inst);
119
117int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg, 120int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg,
118 struct crypto_instance *inst, u32 mask); 121 struct crypto_instance *inst, u32 mask);
122int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg,
123 struct crypto_instance *inst,
124 const struct crypto_type *frontend);
125
119void crypto_drop_spawn(struct crypto_spawn *spawn); 126void crypto_drop_spawn(struct crypto_spawn *spawn);
120struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type, 127struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
121 u32 mask); 128 u32 mask);
129void *crypto_spawn_tfm2(struct crypto_spawn *spawn);
122 130
123static inline void crypto_set_spawn(struct crypto_spawn *spawn, 131static inline void crypto_set_spawn(struct crypto_spawn *spawn,
124 struct crypto_instance *inst) 132 struct crypto_instance *inst)
@@ -129,14 +137,26 @@ static inline void crypto_set_spawn(struct crypto_spawn *spawn,
129struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb); 137struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb);
130int crypto_check_attr_type(struct rtattr **tb, u32 type); 138int crypto_check_attr_type(struct rtattr **tb, u32 type);
131const char *crypto_attr_alg_name(struct rtattr *rta); 139const char *crypto_attr_alg_name(struct rtattr *rta);
132struct crypto_alg *crypto_attr_alg(struct rtattr *rta, u32 type, u32 mask); 140struct crypto_alg *crypto_attr_alg2(struct rtattr *rta,
141 const struct crypto_type *frontend,
142 u32 type, u32 mask);
143
144static inline struct crypto_alg *crypto_attr_alg(struct rtattr *rta,
145 u32 type, u32 mask)
146{
147 return crypto_attr_alg2(rta, NULL, type, mask);
148}
149
133int crypto_attr_u32(struct rtattr *rta, u32 *num); 150int crypto_attr_u32(struct rtattr *rta, u32 *num);
151void *crypto_alloc_instance2(const char *name, struct crypto_alg *alg,
152 unsigned int head);
134struct crypto_instance *crypto_alloc_instance(const char *name, 153struct crypto_instance *crypto_alloc_instance(const char *name,
135 struct crypto_alg *alg); 154 struct crypto_alg *alg);
136 155
137void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen); 156void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
138int crypto_enqueue_request(struct crypto_queue *queue, 157int crypto_enqueue_request(struct crypto_queue *queue,
139 struct crypto_async_request *request); 158 struct crypto_async_request *request);
159void *__crypto_dequeue_request(struct crypto_queue *queue, unsigned int offset);
140struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue); 160struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue);
141int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm); 161int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm);
142 162
@@ -156,12 +176,8 @@ int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
156 176
157static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm) 177static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm)
158{ 178{
159 unsigned long addr = (unsigned long)crypto_tfm_ctx(tfm); 179 return PTR_ALIGN(crypto_tfm_ctx(tfm),
160 unsigned long align = crypto_tfm_alg_alignmask(tfm); 180 crypto_tfm_alg_alignmask(tfm) + 1);
161
162 if (align <= crypto_tfm_ctx_alignment())
163 align = 1;
164 return (void *)ALIGN(addr, align);
165} 181}
166 182
167static inline struct crypto_instance *crypto_tfm_alg_instance( 183static inline struct crypto_instance *crypto_tfm_alg_instance(
diff --git a/include/crypto/cryptd.h b/include/crypto/cryptd.h
index 55fa7bbdbc71..2f65a6e8ea4d 100644
--- a/include/crypto/cryptd.h
+++ b/include/crypto/cryptd.h
@@ -7,6 +7,7 @@
7 7
8#include <linux/crypto.h> 8#include <linux/crypto.h>
9#include <linux/kernel.h> 9#include <linux/kernel.h>
10#include <crypto/hash.h>
10 11
11struct cryptd_ablkcipher { 12struct cryptd_ablkcipher {
12 struct crypto_ablkcipher base; 13 struct crypto_ablkcipher base;
@@ -24,4 +25,20 @@ struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
24struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm); 25struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm);
25void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm); 26void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm);
26 27
28struct cryptd_ahash {
29 struct crypto_ahash base;
30};
31
32static inline struct cryptd_ahash *__cryptd_ahash_cast(
33 struct crypto_ahash *tfm)
34{
35 return (struct cryptd_ahash *)tfm;
36}
37
38/* alg_name should be the name of the algorithm to be wrapped by cryptd */
39struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
40 u32 type, u32 mask);
41struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm);
42void cryptd_free_ahash(struct cryptd_ahash *tfm);
43
27#endif 44#endif
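
For orientation, a minimal sketch of how a caller might use the new cryptd ahash helpers declared above; the example_ name and the choice of "sha1" are illustrative only, not taken from this patch.

#include <linux/err.h>
#include <crypto/cryptd.h>

/* Wrap a synchronous hash driver in the cryptd workqueue so that
 * requests are processed asynchronously; cryptd_alloc_ahash() builds
 * the "cryptd(sha1)" instance internally. */
static struct cryptd_ahash *example_alloc_async_sha1(void)
{
        struct cryptd_ahash *tfm;

        tfm = cryptd_alloc_ahash("sha1", 0, 0);
        if (IS_ERR(tfm))
                return tfm;

        /* &tfm->base is an ordinary struct crypto_ahash usable with the
         * ahash request API; cryptd_ahash_child(tfm) exposes the
         * underlying shash for synchronous use, and the handle is
         * released with cryptd_free_ahash(tfm). */
        return tfm;
}
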
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index d56bb71617c3..26cb1eb16f4c 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -15,6 +15,42 @@
15 15
16#include <linux/crypto.h> 16#include <linux/crypto.h>
17 17
18struct crypto_ahash;
19
20struct hash_alg_common {
21 unsigned int digestsize;
22 unsigned int statesize;
23
24 struct crypto_alg base;
25};
26
27struct ahash_request {
28 struct crypto_async_request base;
29
30 unsigned int nbytes;
31 struct scatterlist *src;
32 u8 *result;
33
34 /* This field may only be used by the ahash API code. */
35 void *priv;
36
37 void *__ctx[] CRYPTO_MINALIGN_ATTR;
38};
39
40struct ahash_alg {
41 int (*init)(struct ahash_request *req);
42 int (*update)(struct ahash_request *req);
43 int (*final)(struct ahash_request *req);
44 int (*finup)(struct ahash_request *req);
45 int (*digest)(struct ahash_request *req);
46 int (*export)(struct ahash_request *req, void *out);
47 int (*import)(struct ahash_request *req, const void *in);
48 int (*setkey)(struct crypto_ahash *tfm, const u8 *key,
49 unsigned int keylen);
50
51 struct hash_alg_common halg;
52};
53
18struct shash_desc { 54struct shash_desc {
19 struct crypto_shash *tfm; 55 struct crypto_shash *tfm;
20 u32 flags; 56 u32 flags;
@@ -24,7 +60,6 @@ struct shash_desc {
24 60
25struct shash_alg { 61struct shash_alg {
26 int (*init)(struct shash_desc *desc); 62 int (*init)(struct shash_desc *desc);
27 int (*reinit)(struct shash_desc *desc);
28 int (*update)(struct shash_desc *desc, const u8 *data, 63 int (*update)(struct shash_desc *desc, const u8 *data,
29 unsigned int len); 64 unsigned int len);
30 int (*final)(struct shash_desc *desc, u8 *out); 65 int (*final)(struct shash_desc *desc, u8 *out);
@@ -32,38 +67,48 @@ struct shash_alg {
32 unsigned int len, u8 *out); 67 unsigned int len, u8 *out);
33 int (*digest)(struct shash_desc *desc, const u8 *data, 68 int (*digest)(struct shash_desc *desc, const u8 *data,
34 unsigned int len, u8 *out); 69 unsigned int len, u8 *out);
70 int (*export)(struct shash_desc *desc, void *out);
71 int (*import)(struct shash_desc *desc, const void *in);
35 int (*setkey)(struct crypto_shash *tfm, const u8 *key, 72 int (*setkey)(struct crypto_shash *tfm, const u8 *key,
36 unsigned int keylen); 73 unsigned int keylen);
37 74
38 unsigned int descsize; 75 unsigned int descsize;
39 unsigned int digestsize; 76
77 /* These fields must match hash_alg_common. */
78 unsigned int digestsize
79 __attribute__ ((aligned(__alignof__(struct hash_alg_common))));
80 unsigned int statesize;
40 81
41 struct crypto_alg base; 82 struct crypto_alg base;
42}; 83};
43 84
44struct crypto_ahash { 85struct crypto_ahash {
86 int (*init)(struct ahash_request *req);
87 int (*update)(struct ahash_request *req);
88 int (*final)(struct ahash_request *req);
89 int (*finup)(struct ahash_request *req);
90 int (*digest)(struct ahash_request *req);
91 int (*export)(struct ahash_request *req, void *out);
92 int (*import)(struct ahash_request *req, const void *in);
93 int (*setkey)(struct crypto_ahash *tfm, const u8 *key,
94 unsigned int keylen);
95
96 unsigned int reqsize;
45 struct crypto_tfm base; 97 struct crypto_tfm base;
46}; 98};
47 99
48struct crypto_shash { 100struct crypto_shash {
101 unsigned int descsize;
49 struct crypto_tfm base; 102 struct crypto_tfm base;
50}; 103};
51 104
52static inline struct crypto_ahash *__crypto_ahash_cast(struct crypto_tfm *tfm) 105static inline struct crypto_ahash *__crypto_ahash_cast(struct crypto_tfm *tfm)
53{ 106{
54 return (struct crypto_ahash *)tfm; 107 return container_of(tfm, struct crypto_ahash, base);
55} 108}
56 109
57static inline struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, 110struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
58 u32 type, u32 mask) 111 u32 mask);
59{
60 type &= ~CRYPTO_ALG_TYPE_MASK;
61 mask &= ~CRYPTO_ALG_TYPE_MASK;
62 type |= CRYPTO_ALG_TYPE_AHASH;
63 mask |= CRYPTO_ALG_TYPE_AHASH_MASK;
64
65 return __crypto_ahash_cast(crypto_alloc_base(alg_name, type, mask));
66}
67 112
68static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm) 113static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm)
69{ 114{
@@ -72,7 +117,7 @@ static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm)
72 117
73static inline void crypto_free_ahash(struct crypto_ahash *tfm) 118static inline void crypto_free_ahash(struct crypto_ahash *tfm)
74{ 119{
75 crypto_free_tfm(crypto_ahash_tfm(tfm)); 120 crypto_destroy_tfm(tfm, crypto_ahash_tfm(tfm));
76} 121}
77 122
78static inline unsigned int crypto_ahash_alignmask( 123static inline unsigned int crypto_ahash_alignmask(
@@ -81,14 +126,26 @@ static inline unsigned int crypto_ahash_alignmask(
81 return crypto_tfm_alg_alignmask(crypto_ahash_tfm(tfm)); 126 return crypto_tfm_alg_alignmask(crypto_ahash_tfm(tfm));
82} 127}
83 128
84static inline struct ahash_tfm *crypto_ahash_crt(struct crypto_ahash *tfm) 129static inline struct hash_alg_common *__crypto_hash_alg_common(
130 struct crypto_alg *alg)
131{
132 return container_of(alg, struct hash_alg_common, base);
133}
134
135static inline struct hash_alg_common *crypto_hash_alg_common(
136 struct crypto_ahash *tfm)
85{ 137{
86 return &crypto_ahash_tfm(tfm)->crt_ahash; 138 return __crypto_hash_alg_common(crypto_ahash_tfm(tfm)->__crt_alg);
87} 139}
88 140
89static inline unsigned int crypto_ahash_digestsize(struct crypto_ahash *tfm) 141static inline unsigned int crypto_ahash_digestsize(struct crypto_ahash *tfm)
90{ 142{
91 return crypto_ahash_crt(tfm)->digestsize; 143 return crypto_hash_alg_common(tfm)->digestsize;
144}
145
146static inline unsigned int crypto_ahash_statesize(struct crypto_ahash *tfm)
147{
148 return crypto_hash_alg_common(tfm)->statesize;
92} 149}
93 150
94static inline u32 crypto_ahash_get_flags(struct crypto_ahash *tfm) 151static inline u32 crypto_ahash_get_flags(struct crypto_ahash *tfm)
@@ -114,7 +171,7 @@ static inline struct crypto_ahash *crypto_ahash_reqtfm(
114 171
115static inline unsigned int crypto_ahash_reqsize(struct crypto_ahash *tfm) 172static inline unsigned int crypto_ahash_reqsize(struct crypto_ahash *tfm)
116{ 173{
117 return crypto_ahash_crt(tfm)->reqsize; 174 return tfm->reqsize;
118} 175}
119 176
120static inline void *ahash_request_ctx(struct ahash_request *req) 177static inline void *ahash_request_ctx(struct ahash_request *req)
@@ -122,44 +179,30 @@ static inline void *ahash_request_ctx(struct ahash_request *req)
122 return req->__ctx; 179 return req->__ctx;
123} 180}
124 181
125static inline int crypto_ahash_setkey(struct crypto_ahash *tfm, 182int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
126 const u8 *key, unsigned int keylen) 183 unsigned int keylen);
127{ 184int crypto_ahash_finup(struct ahash_request *req);
128 struct ahash_tfm *crt = crypto_ahash_crt(tfm); 185int crypto_ahash_final(struct ahash_request *req);
129 186int crypto_ahash_digest(struct ahash_request *req);
130 return crt->setkey(tfm, key, keylen);
131}
132 187
133static inline int crypto_ahash_digest(struct ahash_request *req) 188static inline int crypto_ahash_export(struct ahash_request *req, void *out)
134{ 189{
135 struct ahash_tfm *crt = crypto_ahash_crt(crypto_ahash_reqtfm(req)); 190 return crypto_ahash_reqtfm(req)->export(req, out);
136 return crt->digest(req);
137} 191}
138 192
139static inline void crypto_ahash_export(struct ahash_request *req, u8 *out) 193static inline int crypto_ahash_import(struct ahash_request *req, const void *in)
140{ 194{
141 memcpy(out, ahash_request_ctx(req), 195 return crypto_ahash_reqtfm(req)->import(req, in);
142 crypto_ahash_reqsize(crypto_ahash_reqtfm(req)));
143} 196}
144 197
145int crypto_ahash_import(struct ahash_request *req, const u8 *in);
146
147static inline int crypto_ahash_init(struct ahash_request *req) 198static inline int crypto_ahash_init(struct ahash_request *req)
148{ 199{
149 struct ahash_tfm *crt = crypto_ahash_crt(crypto_ahash_reqtfm(req)); 200 return crypto_ahash_reqtfm(req)->init(req);
150 return crt->init(req);
151} 201}
152 202
153static inline int crypto_ahash_update(struct ahash_request *req) 203static inline int crypto_ahash_update(struct ahash_request *req)
154{ 204{
155 struct ahash_tfm *crt = crypto_ahash_crt(crypto_ahash_reqtfm(req)); 205 return crypto_ahash_reqtfm(req)->update(req);
156 return crt->update(req);
157}
158
159static inline int crypto_ahash_final(struct ahash_request *req)
160{
161 struct ahash_tfm *crt = crypto_ahash_crt(crypto_ahash_reqtfm(req));
162 return crt->final(req);
163} 206}
164 207
165static inline void ahash_request_set_tfm(struct ahash_request *req, 208static inline void ahash_request_set_tfm(struct ahash_request *req,
@@ -184,7 +227,7 @@ static inline struct ahash_request *ahash_request_alloc(
184 227
185static inline void ahash_request_free(struct ahash_request *req) 228static inline void ahash_request_free(struct ahash_request *req)
186{ 229{
187 kfree(req); 230 kzfree(req);
188} 231}
189 232
190static inline struct ahash_request *ahash_request_cast( 233static inline struct ahash_request *ahash_request_cast(
@@ -251,6 +294,11 @@ static inline unsigned int crypto_shash_digestsize(struct crypto_shash *tfm)
251 return crypto_shash_alg(tfm)->digestsize; 294 return crypto_shash_alg(tfm)->digestsize;
252} 295}
253 296
297static inline unsigned int crypto_shash_statesize(struct crypto_shash *tfm)
298{
299 return crypto_shash_alg(tfm)->statesize;
300}
301
254static inline u32 crypto_shash_get_flags(struct crypto_shash *tfm) 302static inline u32 crypto_shash_get_flags(struct crypto_shash *tfm)
255{ 303{
256 return crypto_tfm_get_flags(crypto_shash_tfm(tfm)); 304 return crypto_tfm_get_flags(crypto_shash_tfm(tfm));
@@ -268,7 +316,7 @@ static inline void crypto_shash_clear_flags(struct crypto_shash *tfm, u32 flags)
268 316
269static inline unsigned int crypto_shash_descsize(struct crypto_shash *tfm) 317static inline unsigned int crypto_shash_descsize(struct crypto_shash *tfm)
270{ 318{
271 return crypto_shash_alg(tfm)->descsize; 319 return tfm->descsize;
272} 320}
273 321
274static inline void *shash_desc_ctx(struct shash_desc *desc) 322static inline void *shash_desc_ctx(struct shash_desc *desc)
@@ -281,12 +329,15 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
281int crypto_shash_digest(struct shash_desc *desc, const u8 *data, 329int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
282 unsigned int len, u8 *out); 330 unsigned int len, u8 *out);
283 331
284static inline void crypto_shash_export(struct shash_desc *desc, u8 *out) 332static inline int crypto_shash_export(struct shash_desc *desc, void *out)
285{ 333{
286 memcpy(out, shash_desc_ctx(desc), crypto_shash_descsize(desc->tfm)); 334 return crypto_shash_alg(desc->tfm)->export(desc, out);
287} 335}
288 336
289int crypto_shash_import(struct shash_desc *desc, const u8 *in); 337static inline int crypto_shash_import(struct shash_desc *desc, const void *in)
338{
339 return crypto_shash_alg(desc->tfm)->import(desc, in);
340}
290 341
291static inline int crypto_shash_init(struct shash_desc *desc) 342static inline int crypto_shash_init(struct shash_desc *desc)
292{ 343{
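
To make the reworked asynchronous hash interface above concrete, a hedged one-shot digest sketch follows; "sha1" and the example_ name are illustrative, and a real asynchronous caller would install a completion callback rather than freeing the request immediately.

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/scatterlist.h>
#include <linux/types.h>
#include <crypto/hash.h>

/* One-shot SHA-1 digest of buf[0..len) using the ahash API. */
static int example_ahash_sha1(const void *buf, unsigned int len, u8 *out)
{
        struct crypto_ahash *tfm;
        struct ahash_request *req;
        struct scatterlist sg;
        int err;

        tfm = crypto_alloc_ahash("sha1", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        req = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                crypto_free_ahash(tfm);
                return -ENOMEM;
        }

        sg_init_one(&sg, buf, len);
        ahash_request_set_crypt(req, &sg, out, len);

        /* An asynchronous driver may return -EINPROGRESS or -EBUSY here;
         * a real caller would wait for its completion callback before
         * freeing the request. */
        err = crypto_ahash_digest(req);

        ahash_request_free(req);
        crypto_free_ahash(tfm);
        return err;
}
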
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index 82b70564bcab..5bfad8c80595 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -34,6 +34,22 @@ struct crypto_hash_walk {
34 unsigned int flags; 34 unsigned int flags;
35}; 35};
36 36
37struct ahash_instance {
38 struct ahash_alg alg;
39};
40
41struct shash_instance {
42 struct shash_alg alg;
43};
44
45struct crypto_ahash_spawn {
46 struct crypto_spawn base;
47};
48
49struct crypto_shash_spawn {
50 struct crypto_spawn base;
51};
52
37extern const struct crypto_type crypto_ahash_type; 53extern const struct crypto_type crypto_ahash_type;
38 54
39int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err); 55int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err);
@@ -43,18 +59,100 @@ int crypto_hash_walk_first_compat(struct hash_desc *hdesc,
43 struct crypto_hash_walk *walk, 59 struct crypto_hash_walk *walk,
44 struct scatterlist *sg, unsigned int len); 60 struct scatterlist *sg, unsigned int len);
45 61
62static inline int crypto_hash_walk_last(struct crypto_hash_walk *walk)
63{
64 return !(walk->entrylen | walk->total);
65}
66
67int crypto_register_ahash(struct ahash_alg *alg);
68int crypto_unregister_ahash(struct ahash_alg *alg);
69int ahash_register_instance(struct crypto_template *tmpl,
70 struct ahash_instance *inst);
71void ahash_free_instance(struct crypto_instance *inst);
72
73int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
74 struct hash_alg_common *alg,
75 struct crypto_instance *inst);
76
77static inline void crypto_drop_ahash(struct crypto_ahash_spawn *spawn)
78{
79 crypto_drop_spawn(&spawn->base);
80}
81
82struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask);
83
46int crypto_register_shash(struct shash_alg *alg); 84int crypto_register_shash(struct shash_alg *alg);
47int crypto_unregister_shash(struct shash_alg *alg); 85int crypto_unregister_shash(struct shash_alg *alg);
86int shash_register_instance(struct crypto_template *tmpl,
87 struct shash_instance *inst);
88void shash_free_instance(struct crypto_instance *inst);
89
90int crypto_init_shash_spawn(struct crypto_shash_spawn *spawn,
91 struct shash_alg *alg,
92 struct crypto_instance *inst);
93
94static inline void crypto_drop_shash(struct crypto_shash_spawn *spawn)
95{
96 crypto_drop_spawn(&spawn->base);
97}
98
99struct shash_alg *shash_attr_alg(struct rtattr *rta, u32 type, u32 mask);
100
101int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc);
102int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc);
103int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc);
104
105int crypto_init_shash_ops_async(struct crypto_tfm *tfm);
48 106
49static inline void *crypto_ahash_ctx(struct crypto_ahash *tfm) 107static inline void *crypto_ahash_ctx(struct crypto_ahash *tfm)
50{ 108{
51 return crypto_tfm_ctx(&tfm->base); 109 return crypto_tfm_ctx(crypto_ahash_tfm(tfm));
110}
111
112static inline struct ahash_alg *__crypto_ahash_alg(struct crypto_alg *alg)
113{
114 return container_of(__crypto_hash_alg_common(alg), struct ahash_alg,
115 halg);
116}
117
118static inline void crypto_ahash_set_reqsize(struct crypto_ahash *tfm,
119 unsigned int reqsize)
120{
121 tfm->reqsize = reqsize;
122}
123
124static inline struct crypto_instance *ahash_crypto_instance(
125 struct ahash_instance *inst)
126{
127 return container_of(&inst->alg.halg.base, struct crypto_instance, alg);
52} 128}
53 129
54static inline struct ahash_alg *crypto_ahash_alg( 130static inline struct ahash_instance *ahash_instance(
55 struct crypto_ahash *tfm) 131 struct crypto_instance *inst)
56{ 132{
57 return &crypto_ahash_tfm(tfm)->__crt_alg->cra_ahash; 133 return container_of(&inst->alg, struct ahash_instance, alg.halg.base);
134}
135
136static inline void *ahash_instance_ctx(struct ahash_instance *inst)
137{
138 return crypto_instance_ctx(ahash_crypto_instance(inst));
139}
140
141static inline unsigned int ahash_instance_headroom(void)
142{
143 return sizeof(struct ahash_alg) - sizeof(struct crypto_alg);
144}
145
146static inline struct ahash_instance *ahash_alloc_instance(
147 const char *name, struct crypto_alg *alg)
148{
149 return crypto_alloc_instance2(name, alg, ahash_instance_headroom());
150}
151
152static inline struct crypto_ahash *crypto_spawn_ahash(
153 struct crypto_ahash_spawn *spawn)
154{
155 return crypto_spawn_tfm2(&spawn->base);
58} 156}
59 157
60static inline int ahash_enqueue_request(struct crypto_queue *queue, 158static inline int ahash_enqueue_request(struct crypto_queue *queue,
@@ -80,5 +178,46 @@ static inline void *crypto_shash_ctx(struct crypto_shash *tfm)
80 return crypto_tfm_ctx(&tfm->base); 178 return crypto_tfm_ctx(&tfm->base);
81} 179}
82 180
181static inline struct crypto_instance *shash_crypto_instance(
182 struct shash_instance *inst)
183{
184 return container_of(&inst->alg.base, struct crypto_instance, alg);
185}
186
187static inline struct shash_instance *shash_instance(
188 struct crypto_instance *inst)
189{
190 return container_of(__crypto_shash_alg(&inst->alg),
191 struct shash_instance, alg);
192}
193
194static inline void *shash_instance_ctx(struct shash_instance *inst)
195{
196 return crypto_instance_ctx(shash_crypto_instance(inst));
197}
198
199static inline struct shash_instance *shash_alloc_instance(
200 const char *name, struct crypto_alg *alg)
201{
202 return crypto_alloc_instance2(name, alg,
203 sizeof(struct shash_alg) - sizeof(*alg));
204}
205
206static inline struct crypto_shash *crypto_spawn_shash(
207 struct crypto_shash_spawn *spawn)
208{
209 return crypto_spawn_tfm2(&spawn->base);
210}
211
212static inline void *crypto_shash_ctx_aligned(struct crypto_shash *tfm)
213{
214 return crypto_tfm_ctx_aligned(&tfm->base);
215}
216
217static inline struct crypto_shash *__crypto_shash_cast(struct crypto_tfm *tfm)
218{
219 return container_of(tfm, struct crypto_shash, base);
220}
221
83#endif /* _CRYPTO_INTERNAL_HASH_H */ 222#endif /* _CRYPTO_INTERNAL_HASH_H */
84 223
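
As a hedged driver-side illustration of the helpers added above: an ahash implementation now sizes its per-request context from its init routine with crypto_ahash_set_reqsize() instead of filling in the removed crt_ahash fields. The example_ names and the context layout are invented.

#include <linux/types.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>

/* Hypothetical per-request state for an ahash driver. */
struct example_req_ctx {
        u32     partial;
        u8      buffer[64];
};

/* Would be wired up as the driver's cra_init / init_tfm hook. */
static int example_ahash_init_tfm(struct crypto_tfm *tfm)
{
        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct example_req_ctx));
        return 0;
}
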
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
index 2ba42cd7d6aa..3a748a6bf772 100644
--- a/include/crypto/internal/skcipher.h
+++ b/include/crypto/internal/skcipher.h
@@ -79,8 +79,8 @@ static inline int skcipher_enqueue_givcrypt(
79static inline struct skcipher_givcrypt_request *skcipher_dequeue_givcrypt( 79static inline struct skcipher_givcrypt_request *skcipher_dequeue_givcrypt(
80 struct crypto_queue *queue) 80 struct crypto_queue *queue)
81{ 81{
82 return container_of(ablkcipher_dequeue_request(queue), 82 return __crypto_dequeue_request(
83 struct skcipher_givcrypt_request, creq); 83 queue, offsetof(struct skcipher_givcrypt_request, creq.base));
84} 84}
85 85
86static inline void *skcipher_givcrypt_reqctx( 86static inline void *skcipher_givcrypt_reqctx(
diff --git a/include/crypto/sha.h b/include/crypto/sha.h
index c0ccc2b1a2d8..069e85ba97e1 100644
--- a/include/crypto/sha.h
+++ b/include/crypto/sha.h
@@ -5,6 +5,8 @@
5#ifndef _CRYPTO_SHA_H 5#ifndef _CRYPTO_SHA_H
6#define _CRYPTO_SHA_H 6#define _CRYPTO_SHA_H
7 7
8#include <linux/types.h>
9
8#define SHA1_DIGEST_SIZE 20 10#define SHA1_DIGEST_SIZE 20
9#define SHA1_BLOCK_SIZE 64 11#define SHA1_BLOCK_SIZE 64
10 12
@@ -62,4 +64,22 @@
62#define SHA512_H6 0x1f83d9abfb41bd6bULL 64#define SHA512_H6 0x1f83d9abfb41bd6bULL
63#define SHA512_H7 0x5be0cd19137e2179ULL 65#define SHA512_H7 0x5be0cd19137e2179ULL
64 66
67struct sha1_state {
68 u64 count;
69 u32 state[SHA1_DIGEST_SIZE / 4];
70 u8 buffer[SHA1_BLOCK_SIZE];
71};
72
73struct sha256_state {
74 u64 count;
75 u32 state[SHA256_DIGEST_SIZE / 4];
76 u8 buf[SHA256_BLOCK_SIZE];
77};
78
79struct sha512_state {
80 u64 count[2];
81 u64 state[SHA512_DIGEST_SIZE / 8];
82 u8 buf[SHA512_BLOCK_SIZE];
83};
84
65#endif 85#endif
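
The shaN_state structures introduced above are what the new export/import hooks move around. A hedged sketch of saving and resuming a partial SHA-256 computation, assuming the generic "sha256" driver whose exported state is a struct sha256_state (example_ names illustrative):

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <crypto/hash.h>
#include <crypto/sha.h>

static int example_split_sha256(const u8 *msg, unsigned int len, u8 *out)
{
        struct crypto_shash *tfm;
        struct shash_desc *desc;
        struct sha256_state state;      /* crypto_shash_statesize() bytes */
        unsigned int half = len / 2;
        int err;

        tfm = crypto_alloc_shash("sha256", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
        if (!desc) {
                crypto_free_shash(tfm);
                return -ENOMEM;
        }
        desc->tfm = tfm;

        err = crypto_shash_init(desc);
        if (!err)
                err = crypto_shash_update(desc, msg, half);
        if (!err)
                err = crypto_shash_export(desc, &state);

        /* The exported state could now be parked, handed to another
         * context, or serialized; here it is simply imported back. */
        if (!err)
                err = crypto_shash_import(desc, &state);
        if (!err)
                err = crypto_shash_finup(desc, msg + half, len - half, out);

        kfree(desc);
        crypto_free_shash(tfm);
        return err;
}
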
diff --git a/include/crypto/vmac.h b/include/crypto/vmac.h
new file mode 100644
index 000000000000..c4467c55df1e
--- /dev/null
+++ b/include/crypto/vmac.h
@@ -0,0 +1,61 @@
1/*
2 * Modified to interface to the Linux kernel
3 * Copyright (c) 2009, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
16 * Place - Suite 330, Boston, MA 02111-1307 USA.
17 */
18
19#ifndef __CRYPTO_VMAC_H
20#define __CRYPTO_VMAC_H
21
22/* --------------------------------------------------------------------------
23 * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
24 * This implementation is hereby placed in the public domain.
25 * The authors offer no warranty. Use at your own risk.
26 * Please send bug reports to the authors.
27 * Last modified: 17 APR 08, 1700 PDT
28 * ----------------------------------------------------------------------- */
29
30/*
31 * User definable settings.
32 */
33#define VMAC_TAG_LEN 64
34#define VMAC_KEY_SIZE 128/* Must be 128, 192 or 256 */
35#define VMAC_KEY_LEN (VMAC_KEY_SIZE/8)
36#define VMAC_NHBYTES 128/* Must be 2^i for some i with 3 < i < 13; standard = 128 */
37
38/*
39 * This implementation uses u32 and u64 as names for unsigned 32-
40 * and 64-bit integer types. These are defined in C99 stdint.h. The
41 * following may need adaptation if you are not running a C99 or
42 * Microsoft C environment.
43 */
44struct vmac_ctx {
45 u64 nhkey[(VMAC_NHBYTES/8)+2*(VMAC_TAG_LEN/64-1)];
46 u64 polykey[2*VMAC_TAG_LEN/64];
47 u64 l3key[2*VMAC_TAG_LEN/64];
48 u64 polytmp[2*VMAC_TAG_LEN/64];
49 u64 cached_nonce[2];
50 u64 cached_aes[2];
51 int first_block_processed;
52};
53
54typedef u64 vmac_t;
55
56struct vmac_ctx_t {
57 struct crypto_cipher *child;
58 struct vmac_ctx __vmac_ctx;
59};
60
61#endif /* __CRYPTO_VMAC_H */
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 9c75921f0c16..6299a259ed19 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -210,15 +210,25 @@ enum {
210 ATA_CMD_STANDBY = 0xE2, /* place in standby power mode */ 210 ATA_CMD_STANDBY = 0xE2, /* place in standby power mode */
211 ATA_CMD_IDLE = 0xE3, /* place in idle power mode */ 211 ATA_CMD_IDLE = 0xE3, /* place in idle power mode */
212 ATA_CMD_EDD = 0x90, /* execute device diagnostic */ 212 ATA_CMD_EDD = 0x90, /* execute device diagnostic */
213 ATA_CMD_DOWNLOAD_MICRO = 0x92,
214 ATA_CMD_NOP = 0x00,
213 ATA_CMD_FLUSH = 0xE7, 215 ATA_CMD_FLUSH = 0xE7,
214 ATA_CMD_FLUSH_EXT = 0xEA, 216 ATA_CMD_FLUSH_EXT = 0xEA,
215 ATA_CMD_ID_ATA = 0xEC, 217 ATA_CMD_ID_ATA = 0xEC,
216 ATA_CMD_ID_ATAPI = 0xA1, 218 ATA_CMD_ID_ATAPI = 0xA1,
219 ATA_CMD_SERVICE = 0xA2,
217 ATA_CMD_READ = 0xC8, 220 ATA_CMD_READ = 0xC8,
218 ATA_CMD_READ_EXT = 0x25, 221 ATA_CMD_READ_EXT = 0x25,
222 ATA_CMD_READ_QUEUED = 0x26,
223 ATA_CMD_READ_STREAM_EXT = 0x2B,
224 ATA_CMD_READ_STREAM_DMA_EXT = 0x2A,
219 ATA_CMD_WRITE = 0xCA, 225 ATA_CMD_WRITE = 0xCA,
220 ATA_CMD_WRITE_EXT = 0x35, 226 ATA_CMD_WRITE_EXT = 0x35,
227 ATA_CMD_WRITE_QUEUED = 0x36,
228 ATA_CMD_WRITE_STREAM_EXT = 0x3B,
229 ATA_CMD_WRITE_STREAM_DMA_EXT = 0x3A,
221 ATA_CMD_WRITE_FUA_EXT = 0x3D, 230 ATA_CMD_WRITE_FUA_EXT = 0x3D,
231 ATA_CMD_WRITE_QUEUED_FUA_EXT = 0x3E,
222 ATA_CMD_FPDMA_READ = 0x60, 232 ATA_CMD_FPDMA_READ = 0x60,
223 ATA_CMD_FPDMA_WRITE = 0x61, 233 ATA_CMD_FPDMA_WRITE = 0x61,
224 ATA_CMD_PIO_READ = 0x20, 234 ATA_CMD_PIO_READ = 0x20,
@@ -235,6 +245,7 @@ enum {
235 ATA_CMD_PACKET = 0xA0, 245 ATA_CMD_PACKET = 0xA0,
236 ATA_CMD_VERIFY = 0x40, 246 ATA_CMD_VERIFY = 0x40,
237 ATA_CMD_VERIFY_EXT = 0x42, 247 ATA_CMD_VERIFY_EXT = 0x42,
248 ATA_CMD_WRITE_UNCORR_EXT = 0x45,
238 ATA_CMD_STANDBYNOW1 = 0xE0, 249 ATA_CMD_STANDBYNOW1 = 0xE0,
239 ATA_CMD_IDLEIMMEDIATE = 0xE1, 250 ATA_CMD_IDLEIMMEDIATE = 0xE1,
240 ATA_CMD_SLEEP = 0xE6, 251 ATA_CMD_SLEEP = 0xE6,
@@ -243,15 +254,34 @@ enum {
243 ATA_CMD_READ_NATIVE_MAX_EXT = 0x27, 254 ATA_CMD_READ_NATIVE_MAX_EXT = 0x27,
244 ATA_CMD_SET_MAX = 0xF9, 255 ATA_CMD_SET_MAX = 0xF9,
245 ATA_CMD_SET_MAX_EXT = 0x37, 256 ATA_CMD_SET_MAX_EXT = 0x37,
246 ATA_CMD_READ_LOG_EXT = 0x2f, 257 ATA_CMD_READ_LOG_EXT = 0x2F,
258 ATA_CMD_WRITE_LOG_EXT = 0x3F,
259 ATA_CMD_READ_LOG_DMA_EXT = 0x47,
260 ATA_CMD_WRITE_LOG_DMA_EXT = 0x57,
261 ATA_CMD_TRUSTED_RCV = 0x5C,
262 ATA_CMD_TRUSTED_RCV_DMA = 0x5D,
263 ATA_CMD_TRUSTED_SND = 0x5E,
264 ATA_CMD_TRUSTED_SND_DMA = 0x5F,
247 ATA_CMD_PMP_READ = 0xE4, 265 ATA_CMD_PMP_READ = 0xE4,
248 ATA_CMD_PMP_WRITE = 0xE8, 266 ATA_CMD_PMP_WRITE = 0xE8,
249 ATA_CMD_CONF_OVERLAY = 0xB1, 267 ATA_CMD_CONF_OVERLAY = 0xB1,
268 ATA_CMD_SEC_SET_PASS = 0xF1,
269 ATA_CMD_SEC_UNLOCK = 0xF2,
270 ATA_CMD_SEC_ERASE_PREP = 0xF3,
271 ATA_CMD_SEC_ERASE_UNIT = 0xF4,
250 ATA_CMD_SEC_FREEZE_LOCK = 0xF5, 272 ATA_CMD_SEC_FREEZE_LOCK = 0xF5,
273 ATA_CMD_SEC_DISABLE_PASS = 0xF6,
274 ATA_CMD_CONFIG_STREAM = 0x51,
251 ATA_CMD_SMART = 0xB0, 275 ATA_CMD_SMART = 0xB0,
252 ATA_CMD_MEDIA_LOCK = 0xDE, 276 ATA_CMD_MEDIA_LOCK = 0xDE,
253 ATA_CMD_MEDIA_UNLOCK = 0xDF, 277 ATA_CMD_MEDIA_UNLOCK = 0xDF,
254 ATA_CMD_DSM = 0x06, 278 ATA_CMD_DSM = 0x06,
279 ATA_CMD_CHK_MED_CRD_TYP = 0xD1,
280 ATA_CMD_CFA_REQ_EXT_ERR = 0x03,
281 ATA_CMD_CFA_WRITE_NE = 0x38,
282 ATA_CMD_CFA_TRANS_SECT = 0x87,
283 ATA_CMD_CFA_ERASE = 0xC0,
284 ATA_CMD_CFA_WRITE_MULT_NE = 0xCD,
255 /* marked obsolete in the ATA/ATAPI-7 spec */ 285 /* marked obsolete in the ATA/ATAPI-7 spec */
256 ATA_CMD_RESTORE = 0x10, 286 ATA_CMD_RESTORE = 0x10,
257 287
@@ -306,6 +336,7 @@ enum {
306 /* SETFEATURE Sector counts for SATA features */ 336 /* SETFEATURE Sector counts for SATA features */
307 SATA_AN = 0x05, /* Asynchronous Notification */ 337 SATA_AN = 0x05, /* Asynchronous Notification */
308 SATA_DIPM = 0x03, /* Device Initiated Power Management */ 338 SATA_DIPM = 0x03, /* Device Initiated Power Management */
339 SATA_FPDMA_AA = 0x02, /* DMA Setup FIS Auto-Activate */
309 340
310 /* feature values for SET_MAX */ 341 /* feature values for SET_MAX */
311 ATA_SET_MAX_ADDR = 0x00, 342 ATA_SET_MAX_ADDR = 0x00,
@@ -525,6 +556,9 @@ static inline int ata_is_data(u8 prot)
525#define ata_id_has_atapi_AN(id) \ 556#define ata_id_has_atapi_AN(id) \
526 ( (((id)[76] != 0x0000) && ((id)[76] != 0xffff)) && \ 557 ( (((id)[76] != 0x0000) && ((id)[76] != 0xffff)) && \
527 ((id)[78] & (1 << 5)) ) 558 ((id)[78] & (1 << 5)) )
559#define ata_id_has_fpdma_aa(id) \
560 ( (((id)[76] != 0x0000) && ((id)[76] != 0xffff)) && \
561 ((id)[78] & (1 << 2)) )
528#define ata_id_iordy_disable(id) ((id)[ATA_ID_CAPABILITY] & (1 << 10)) 562#define ata_id_iordy_disable(id) ((id)[ATA_ID_CAPABILITY] & (1 << 10))
529#define ata_id_has_iordy(id) ((id)[ATA_ID_CAPABILITY] & (1 << 11)) 563#define ata_id_has_iordy(id) ((id)[ATA_ID_CAPABILITY] & (1 << 11))
530#define ata_id_u32(id,n) \ 564#define ata_id_u32(id,n) \
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 1d52425a6118..f169bcb90b58 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -13,6 +13,8 @@
13#include <linux/proportions.h> 13#include <linux/proportions.h>
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/fs.h> 15#include <linux/fs.h>
16#include <linux/sched.h>
17#include <linux/writeback.h>
16#include <asm/atomic.h> 18#include <asm/atomic.h>
17 19
18struct page; 20struct page;
@@ -23,9 +25,11 @@ struct dentry;
23 * Bits in backing_dev_info.state 25 * Bits in backing_dev_info.state
24 */ 26 */
25enum bdi_state { 27enum bdi_state {
26 BDI_pdflush, /* A pdflush thread is working this device */ 28 BDI_pending, /* On its way to being activated */
29 BDI_wb_alloc, /* Default embedded wb allocated */
27 BDI_async_congested, /* The async (write) queue is getting full */ 30 BDI_async_congested, /* The async (write) queue is getting full */
28 BDI_sync_congested, /* The sync queue is getting full */ 31 BDI_sync_congested, /* The sync queue is getting full */
32 BDI_registered, /* bdi_register() was done */
29 BDI_unused, /* Available bits start here */ 33 BDI_unused, /* Available bits start here */
30}; 34};
31 35
@@ -39,7 +43,22 @@ enum bdi_stat_item {
39 43
40#define BDI_STAT_BATCH (8*(1+ilog2(nr_cpu_ids))) 44#define BDI_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
41 45
46struct bdi_writeback {
47 struct list_head list; /* hangs off the bdi */
48
49 struct backing_dev_info *bdi; /* our parent bdi */
50 unsigned int nr;
51
52 unsigned long last_old_flush; /* last old data flush */
53
54 struct task_struct *task; /* writeback task */
55 struct list_head b_dirty; /* dirty inodes */
56 struct list_head b_io; /* parked for writeback */
57 struct list_head b_more_io; /* parked for more writeback */
58};
59
42struct backing_dev_info { 60struct backing_dev_info {
61 struct list_head bdi_list;
43 unsigned long ra_pages; /* max readahead in PAGE_CACHE_SIZE units */ 62 unsigned long ra_pages; /* max readahead in PAGE_CACHE_SIZE units */
44 unsigned long state; /* Always use atomic bitops on this */ 63 unsigned long state; /* Always use atomic bitops on this */
45 unsigned int capabilities; /* Device capabilities */ 64 unsigned int capabilities; /* Device capabilities */
@@ -48,6 +67,8 @@ struct backing_dev_info {
48 void (*unplug_io_fn)(struct backing_dev_info *, struct page *); 67 void (*unplug_io_fn)(struct backing_dev_info *, struct page *);
49 void *unplug_io_data; 68 void *unplug_io_data;
50 69
70 char *name;
71
51 struct percpu_counter bdi_stat[NR_BDI_STAT_ITEMS]; 72 struct percpu_counter bdi_stat[NR_BDI_STAT_ITEMS];
52 73
53 struct prop_local_percpu completions; 74 struct prop_local_percpu completions;
@@ -56,6 +77,14 @@ struct backing_dev_info {
56 unsigned int min_ratio; 77 unsigned int min_ratio;
57 unsigned int max_ratio, max_prop_frac; 78 unsigned int max_ratio, max_prop_frac;
58 79
80 struct bdi_writeback wb; /* default writeback info for this bdi */
81 spinlock_t wb_lock; /* protects update side of wb_list */
82 struct list_head wb_list; /* the flusher threads hanging off this bdi */
83 unsigned long wb_mask; /* bitmask of registered tasks */
84 unsigned int wb_cnt; /* number of registered tasks */
85
86 struct list_head work_list;
87
59 struct device *dev; 88 struct device *dev;
60 89
61#ifdef CONFIG_DEBUG_FS 90#ifdef CONFIG_DEBUG_FS
@@ -71,6 +100,19 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
71 const char *fmt, ...); 100 const char *fmt, ...);
72int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev); 101int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
73void bdi_unregister(struct backing_dev_info *bdi); 102void bdi_unregister(struct backing_dev_info *bdi);
103void bdi_start_writeback(struct writeback_control *wbc);
104int bdi_writeback_task(struct bdi_writeback *wb);
105int bdi_has_dirty_io(struct backing_dev_info *bdi);
106
107extern spinlock_t bdi_lock;
108extern struct list_head bdi_list;
109
110static inline int wb_has_dirty_io(struct bdi_writeback *wb)
111{
112 return !list_empty(&wb->b_dirty) ||
113 !list_empty(&wb->b_io) ||
114 !list_empty(&wb->b_more_io);
115}
74 116
75static inline void __add_bdi_stat(struct backing_dev_info *bdi, 117static inline void __add_bdi_stat(struct backing_dev_info *bdi,
76 enum bdi_stat_item item, s64 amount) 118 enum bdi_stat_item item, s64 amount)
@@ -261,6 +303,11 @@ static inline bool bdi_cap_swap_backed(struct backing_dev_info *bdi)
261 return bdi->capabilities & BDI_CAP_SWAP_BACKED; 303 return bdi->capabilities & BDI_CAP_SWAP_BACKED;
262} 304}
263 305
306static inline bool bdi_cap_flush_forker(struct backing_dev_info *bdi)
307{
308 return bdi == &default_backing_dev_info;
309}
310
264static inline bool mapping_cap_writeback_dirty(struct address_space *mapping) 311static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
265{ 312{
266 return bdi_cap_writeback_dirty(mapping->backing_dev_info); 313 return bdi_cap_writeback_dirty(mapping->backing_dev_info);
@@ -276,4 +323,10 @@ static inline bool mapping_cap_swap_backed(struct address_space *mapping)
276 return bdi_cap_swap_backed(mapping->backing_dev_info); 323 return bdi_cap_swap_backed(mapping->backing_dev_info);
277} 324}
278 325
326static inline int bdi_sched_wait(void *word)
327{
328 schedule();
329 return 0;
330}
331
279#endif /* _LINUX_BACKING_DEV_H */ 332#endif /* _LINUX_BACKING_DEV_H */
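
A brief, hedged illustration of the per-bdi writeback plumbing added above: a driver-owned backing_dev_info now carries a name, and registering it hooks the device into the global bdi_list that the flusher threads walk. The device name and capability choice are invented for the example.

#include <linux/backing-dev.h>

static struct backing_dev_info example_bdi = {
        .name           = "example",
        .capabilities   = BDI_CAP_MAP_COPY,
};

static int example_setup_bdi(void)
{
        int err;

        err = bdi_init(&example_bdi);
        if (err)
                return err;

        /* After this, example_bdi.wb and wb_list are what the new
         * flusher-thread code operates on for this device. */
        return bdi_register(&example_bdi, NULL, "example");
}
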
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index 61ee18c1bdb4..2046b5b8af48 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -117,6 +117,7 @@ extern int setup_arg_pages(struct linux_binprm * bprm,
117 int executable_stack); 117 int executable_stack);
118extern int bprm_mm_init(struct linux_binprm *bprm); 118extern int bprm_mm_init(struct linux_binprm *bprm);
119extern int copy_strings_kernel(int argc,char ** argv,struct linux_binprm *bprm); 119extern int copy_strings_kernel(int argc,char ** argv,struct linux_binprm *bprm);
120extern int prepare_bprm_creds(struct linux_binprm *bprm);
120extern void install_exec_creds(struct linux_binprm *bprm); 121extern void install_exec_creds(struct linux_binprm *bprm);
121extern void do_coredump(long signr, int exit_code, struct pt_regs *regs); 122extern void do_coredump(long signr, int exit_code, struct pt_regs *regs);
122extern int set_binfmt(struct linux_binfmt *new); 123extern int set_binfmt(struct linux_binfmt *new);
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 4d668e05d458..47536197ffdd 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -48,6 +48,15 @@ struct notifier_block;
48 48
49#ifdef CONFIG_SMP 49#ifdef CONFIG_SMP
50/* Need to know about CPUs going up/down? */ 50/* Need to know about CPUs going up/down? */
51#if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE)
52#define cpu_notifier(fn, pri) { \
53 static struct notifier_block fn##_nb __cpuinitdata = \
54 { .notifier_call = fn, .priority = pri }; \
55 register_cpu_notifier(&fn##_nb); \
56}
57#else /* #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
58#define cpu_notifier(fn, pri) do { (void)(fn); } while (0)
59#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
51#ifdef CONFIG_HOTPLUG_CPU 60#ifdef CONFIG_HOTPLUG_CPU
52extern int register_cpu_notifier(struct notifier_block *nb); 61extern int register_cpu_notifier(struct notifier_block *nb);
53extern void unregister_cpu_notifier(struct notifier_block *nb); 62extern void unregister_cpu_notifier(struct notifier_block *nb);
@@ -74,6 +83,8 @@ extern void cpu_maps_update_done(void);
74 83
75#else /* CONFIG_SMP */ 84#else /* CONFIG_SMP */
76 85
86#define cpu_notifier(fn, pri) do { (void)(fn); } while (0)
87
77static inline int register_cpu_notifier(struct notifier_block *nb) 88static inline int register_cpu_notifier(struct notifier_block *nb)
78{ 89{
79 return 0; 90 return 0;
@@ -99,11 +110,7 @@ extern struct sysdev_class cpu_sysdev_class;
99 110
100extern void get_online_cpus(void); 111extern void get_online_cpus(void);
101extern void put_online_cpus(void); 112extern void put_online_cpus(void);
102#define hotcpu_notifier(fn, pri) { \ 113#define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri)
103 static struct notifier_block fn##_nb __cpuinitdata = \
104 { .notifier_call = fn, .priority = pri }; \
105 register_cpu_notifier(&fn##_nb); \
106}
107#define register_hotcpu_notifier(nb) register_cpu_notifier(nb) 114#define register_hotcpu_notifier(nb) register_cpu_notifier(nb)
108#define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb) 115#define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb)
109int cpu_down(unsigned int cpu); 116int cpu_down(unsigned int cpu);
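
The new cpu_notifier() helper above behaves like hotcpu_notifier() but is also usable when CPU hotplug is not configured; a hedged usage sketch with an invented callback follows.

#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

/* With CONFIG_SMP=n, or in a module without CONFIG_HOTPLUG_CPU,
 * cpu_notifier() compiles down to a no-op. */
static int __cpuinit example_cpu_callback(struct notifier_block *nb,
                                          unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;

        if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
                pr_info("example: cpu %u is now online\n", cpu);
        return NOTIFY_OK;
}

static void __init example_register_cpu_notifier(void)
{
        cpu_notifier(example_cpu_callback, 0);
}
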
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 4fa999696310..24520a539c6f 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -114,6 +114,13 @@ struct thread_group_cred {
114 */ 114 */
115struct cred { 115struct cred {
116 atomic_t usage; 116 atomic_t usage;
117#ifdef CONFIG_DEBUG_CREDENTIALS
118 atomic_t subscribers; /* number of processes subscribed */
119 void *put_addr;
120 unsigned magic;
121#define CRED_MAGIC 0x43736564
122#define CRED_MAGIC_DEAD 0x44656144
123#endif
117 uid_t uid; /* real UID of the task */ 124 uid_t uid; /* real UID of the task */
118 gid_t gid; /* real GID of the task */ 125 gid_t gid; /* real GID of the task */
119 uid_t suid; /* saved UID of the task */ 126 uid_t suid; /* saved UID of the task */
@@ -143,7 +150,9 @@ struct cred {
143}; 150};
144 151
145extern void __put_cred(struct cred *); 152extern void __put_cred(struct cred *);
153extern void exit_creds(struct task_struct *);
146extern int copy_creds(struct task_struct *, unsigned long); 154extern int copy_creds(struct task_struct *, unsigned long);
155extern struct cred *cred_alloc_blank(void);
147extern struct cred *prepare_creds(void); 156extern struct cred *prepare_creds(void);
148extern struct cred *prepare_exec_creds(void); 157extern struct cred *prepare_exec_creds(void);
149extern struct cred *prepare_usermodehelper_creds(void); 158extern struct cred *prepare_usermodehelper_creds(void);
@@ -158,6 +167,60 @@ extern int set_security_override_from_ctx(struct cred *, const char *);
158extern int set_create_files_as(struct cred *, struct inode *); 167extern int set_create_files_as(struct cred *, struct inode *);
159extern void __init cred_init(void); 168extern void __init cred_init(void);
160 169
170/*
171 * check for validity of credentials
172 */
173#ifdef CONFIG_DEBUG_CREDENTIALS
174extern void __invalid_creds(const struct cred *, const char *, unsigned);
175extern void __validate_process_creds(struct task_struct *,
176 const char *, unsigned);
177
178static inline bool creds_are_invalid(const struct cred *cred)
179{
180 if (cred->magic != CRED_MAGIC)
181 return true;
182 if (atomic_read(&cred->usage) < atomic_read(&cred->subscribers))
183 return true;
184#ifdef CONFIG_SECURITY_SELINUX
185 if ((unsigned long) cred->security < PAGE_SIZE)
186 return true;
187 if ((*(u32*)cred->security & 0xffffff00) ==
188 (POISON_FREE << 24 | POISON_FREE << 16 | POISON_FREE << 8))
189 return true;
190#endif
191 return false;
192}
193
194static inline void __validate_creds(const struct cred *cred,
195 const char *file, unsigned line)
196{
197 if (unlikely(creds_are_invalid(cred)))
198 __invalid_creds(cred, file, line);
199}
200
201#define validate_creds(cred) \
202do { \
203 __validate_creds((cred), __FILE__, __LINE__); \
204} while(0)
205
206#define validate_process_creds() \
207do { \
208 __validate_process_creds(current, __FILE__, __LINE__); \
209} while(0)
210
211extern void validate_creds_for_do_exit(struct task_struct *);
212#else
213static inline void validate_creds(const struct cred *cred)
214{
215}
216static inline void validate_creds_for_do_exit(struct task_struct *tsk)
217{
218}
219static inline void validate_process_creds(void)
220{
221}
222#endif
223
161/** 224/**
162 * get_new_cred - Get a reference on a new set of credentials 225 * get_new_cred - Get a reference on a new set of credentials
163 * @cred: The new credentials to reference 226 * @cred: The new credentials to reference
@@ -186,7 +249,9 @@ static inline struct cred *get_new_cred(struct cred *cred)
186 */ 249 */
187static inline const struct cred *get_cred(const struct cred *cred) 250static inline const struct cred *get_cred(const struct cred *cred)
188{ 251{
189 return get_new_cred((struct cred *) cred); 252 struct cred *nonconst_cred = (struct cred *) cred;
253 validate_creds(cred);
254 return get_new_cred(nonconst_cred);
190} 255}
191 256
192/** 257/**
@@ -204,7 +269,7 @@ static inline void put_cred(const struct cred *_cred)
204{ 269{
205 struct cred *cred = (struct cred *) _cred; 270 struct cred *cred = (struct cred *) _cred;
206 271
207 BUG_ON(atomic_read(&(cred)->usage) <= 0); 272 validate_creds(cred);
208 if (atomic_dec_and_test(&(cred)->usage)) 273 if (atomic_dec_and_test(&(cred)->usage))
209 __put_cred(cred); 274 __put_cred(cred);
210} 275}
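
A small hedged sketch of what the credential-debugging additions above do for a caller; the checks are only active with CONFIG_DEBUG_CREDENTIALS=y and the example_ name is invented.

#include <linux/cred.h>
#include <linux/sched.h>

static void example_touch_creds(void)
{
        /* get_cred()/put_cred() now run validate_creds() on debug
         * builds, so a corrupted or over-freed cred is reported here
         * instead of surfacing later as a use-after-free. */
        const struct cred *cred = get_cred(current_cred());

        validate_process_creds();       /* no-op on non-debug builds */
        put_cred(cred);
}
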
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index ec29fa268b94..fd929889e8dc 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -115,7 +115,6 @@ struct crypto_async_request;
115struct crypto_aead; 115struct crypto_aead;
116struct crypto_blkcipher; 116struct crypto_blkcipher;
117struct crypto_hash; 117struct crypto_hash;
118struct crypto_ahash;
119struct crypto_rng; 118struct crypto_rng;
120struct crypto_tfm; 119struct crypto_tfm;
121struct crypto_type; 120struct crypto_type;
@@ -146,16 +145,6 @@ struct ablkcipher_request {
146 void *__ctx[] CRYPTO_MINALIGN_ATTR; 145 void *__ctx[] CRYPTO_MINALIGN_ATTR;
147}; 146};
148 147
149struct ahash_request {
150 struct crypto_async_request base;
151
152 unsigned int nbytes;
153 struct scatterlist *src;
154 u8 *result;
155
156 void *__ctx[] CRYPTO_MINALIGN_ATTR;
157};
158
159/** 148/**
160 * struct aead_request - AEAD request 149 * struct aead_request - AEAD request
161 * @base: Common attributes for async crypto requests 150 * @base: Common attributes for async crypto requests
@@ -220,18 +209,6 @@ struct ablkcipher_alg {
220 unsigned int ivsize; 209 unsigned int ivsize;
221}; 210};
222 211
223struct ahash_alg {
224 int (*init)(struct ahash_request *req);
225 int (*reinit)(struct ahash_request *req);
226 int (*update)(struct ahash_request *req);
227 int (*final)(struct ahash_request *req);
228 int (*digest)(struct ahash_request *req);
229 int (*setkey)(struct crypto_ahash *tfm, const u8 *key,
230 unsigned int keylen);
231
232 unsigned int digestsize;
233};
234
235struct aead_alg { 212struct aead_alg {
236 int (*setkey)(struct crypto_aead *tfm, const u8 *key, 213 int (*setkey)(struct crypto_aead *tfm, const u8 *key,
237 unsigned int keylen); 214 unsigned int keylen);
@@ -318,7 +295,6 @@ struct rng_alg {
318#define cra_cipher cra_u.cipher 295#define cra_cipher cra_u.cipher
319#define cra_digest cra_u.digest 296#define cra_digest cra_u.digest
320#define cra_hash cra_u.hash 297#define cra_hash cra_u.hash
321#define cra_ahash cra_u.ahash
322#define cra_compress cra_u.compress 298#define cra_compress cra_u.compress
323#define cra_rng cra_u.rng 299#define cra_rng cra_u.rng
324 300
@@ -346,7 +322,6 @@ struct crypto_alg {
346 struct cipher_alg cipher; 322 struct cipher_alg cipher;
347 struct digest_alg digest; 323 struct digest_alg digest;
348 struct hash_alg hash; 324 struct hash_alg hash;
349 struct ahash_alg ahash;
350 struct compress_alg compress; 325 struct compress_alg compress;
351 struct rng_alg rng; 326 struct rng_alg rng;
352 } cra_u; 327 } cra_u;
@@ -433,18 +408,6 @@ struct hash_tfm {
433 unsigned int digestsize; 408 unsigned int digestsize;
434}; 409};
435 410
436struct ahash_tfm {
437 int (*init)(struct ahash_request *req);
438 int (*update)(struct ahash_request *req);
439 int (*final)(struct ahash_request *req);
440 int (*digest)(struct ahash_request *req);
441 int (*setkey)(struct crypto_ahash *tfm, const u8 *key,
442 unsigned int keylen);
443
444 unsigned int digestsize;
445 unsigned int reqsize;
446};
447
448struct compress_tfm { 411struct compress_tfm {
449 int (*cot_compress)(struct crypto_tfm *tfm, 412 int (*cot_compress)(struct crypto_tfm *tfm,
450 const u8 *src, unsigned int slen, 413 const u8 *src, unsigned int slen,
@@ -465,7 +428,6 @@ struct rng_tfm {
465#define crt_blkcipher crt_u.blkcipher 428#define crt_blkcipher crt_u.blkcipher
466#define crt_cipher crt_u.cipher 429#define crt_cipher crt_u.cipher
467#define crt_hash crt_u.hash 430#define crt_hash crt_u.hash
468#define crt_ahash crt_u.ahash
469#define crt_compress crt_u.compress 431#define crt_compress crt_u.compress
470#define crt_rng crt_u.rng 432#define crt_rng crt_u.rng
471 433
@@ -479,7 +441,6 @@ struct crypto_tfm {
479 struct blkcipher_tfm blkcipher; 441 struct blkcipher_tfm blkcipher;
480 struct cipher_tfm cipher; 442 struct cipher_tfm cipher;
481 struct hash_tfm hash; 443 struct hash_tfm hash;
482 struct ahash_tfm ahash;
483 struct compress_tfm compress; 444 struct compress_tfm compress;
484 struct rng_tfm rng; 445 struct rng_tfm rng;
485 } crt_u; 446 } crt_u;
@@ -770,7 +731,7 @@ static inline struct ablkcipher_request *ablkcipher_request_alloc(
770 731
771static inline void ablkcipher_request_free(struct ablkcipher_request *req) 732static inline void ablkcipher_request_free(struct ablkcipher_request *req)
772{ 733{
773 kfree(req); 734 kzfree(req);
774} 735}
775 736
776static inline void ablkcipher_request_set_callback( 737static inline void ablkcipher_request_set_callback(
@@ -901,7 +862,7 @@ static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm,
901 862
902static inline void aead_request_free(struct aead_request *req) 863static inline void aead_request_free(struct aead_request *req)
903{ 864{
904 kfree(req); 865 kzfree(req);
905} 866}
906 867
907static inline void aead_request_set_callback(struct aead_request *req, 868static inline void aead_request_set_callback(struct aead_request *req,
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 655e7721580a..df7607e6dce8 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -91,6 +91,9 @@ typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
91 iterate_devices_callout_fn fn, 91 iterate_devices_callout_fn fn,
92 void *data); 92 void *data);
93 93
94typedef void (*dm_io_hints_fn) (struct dm_target *ti,
95 struct queue_limits *limits);
96
94/* 97/*
95 * Returns: 98 * Returns:
96 * 0: The target can handle the next I/O immediately. 99 * 0: The target can handle the next I/O immediately.
@@ -151,6 +154,7 @@ struct target_type {
151 dm_merge_fn merge; 154 dm_merge_fn merge;
152 dm_busy_fn busy; 155 dm_busy_fn busy;
153 dm_iterate_devices_fn iterate_devices; 156 dm_iterate_devices_fn iterate_devices;
157 dm_io_hints_fn io_hints;
154 158
155 /* For internal device-mapper use. */ 159 /* For internal device-mapper use. */
156 struct list_head list; 160 struct list_head list;
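
A hedged sketch of the new io_hints hook above as a target author might wire it up; the target skeleton is abbreviated and the 4 MiB optimal I/O size is invented purely for illustration.

#include <linux/device-mapper.h>
#include <linux/module.h>

/* Lets a hypothetical target adjust the queue limits device-mapper
 * assembles for its table. */
static void example_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
        limits->io_opt = 4 * 1024 * 1024;       /* bytes */
}

static struct target_type example_target = {
        .name     = "example",
        .version  = {1, 0, 0},
        .module   = THIS_MODULE,
        /* .ctr, .dtr, .map, ... omitted */
        .io_hints = example_io_hints,
};
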
diff --git a/include/linux/dm-log-userspace.h b/include/linux/dm-log-userspace.h
index 642e3017b51f..8a1f972c0fe9 100644
--- a/include/linux/dm-log-userspace.h
+++ b/include/linux/dm-log-userspace.h
@@ -371,7 +371,18 @@
371 (DM_ULOG_REQUEST_MASK & (request_type)) 371 (DM_ULOG_REQUEST_MASK & (request_type))
372 372
373struct dm_ulog_request { 373struct dm_ulog_request {
374 char uuid[DM_UUID_LEN]; /* Ties a request to a specific mirror log */ 374 /*
375 * The local unique identifier (luid) and the universally unique
376 * identifier (uuid) are used to tie a request to a specific
377 * mirror log. A single-machine log could probably make do with
378 * just the 'luid', but a cluster-aware log must use the 'uuid' and
379 * the 'luid'. The uuid is what is required for node to node
380 * communication concerning a particular log, but the 'luid' helps
381 * differentiate between logs that are being swapped and have the
382 * same 'uuid'. (Think "live" and "inactive" device-mapper tables.)
383 */
384 uint64_t luid;
385 char uuid[DM_UUID_LEN];
375 char padding[7]; /* Padding because DM_UUID_LEN = 129 */ 386 char padding[7]; /* Padding because DM_UUID_LEN = 129 */
376 387
377 int32_t error; /* Used to report back processing errors */ 388 int32_t error; /* Used to report back processing errors */
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 07dfd460d286..c0f6c3cd788c 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -98,11 +98,6 @@ static inline int is_device_dma_capable(struct device *dev)
98 return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE; 98 return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
99} 99}
100 100
101static inline int is_buffer_dma_capable(u64 mask, dma_addr_t addr, size_t size)
102{
103 return addr + size <= mask;
104}
105
106#ifdef CONFIG_HAS_DMA 101#ifdef CONFIG_HAS_DMA
107#include <asm/dma-mapping.h> 102#include <asm/dma-mapping.h>
108#else 103#else
diff --git a/include/linux/dmi.h b/include/linux/dmi.h
index bb5489c82c99..a8a3e1ac281d 100644
--- a/include/linux/dmi.h
+++ b/include/linux/dmi.h
@@ -43,7 +43,7 @@ extern const char * dmi_get_system_info(int field);
43extern const struct dmi_device * dmi_find_device(int type, const char *name, 43extern const struct dmi_device * dmi_find_device(int type, const char *name,
44 const struct dmi_device *from); 44 const struct dmi_device *from);
45extern void dmi_scan_machine(void); 45extern void dmi_scan_machine(void);
46extern int dmi_get_year(int field); 46extern bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp);
47extern int dmi_name_in_vendors(const char *str); 47extern int dmi_name_in_vendors(const char *str);
48extern int dmi_name_in_serial(const char *str); 48extern int dmi_name_in_serial(const char *str);
49extern int dmi_available; 49extern int dmi_available;
@@ -58,7 +58,16 @@ static inline const char * dmi_get_system_info(int field) { return NULL; }
58static inline const struct dmi_device * dmi_find_device(int type, const char *name, 58static inline const struct dmi_device * dmi_find_device(int type, const char *name,
59 const struct dmi_device *from) { return NULL; } 59 const struct dmi_device *from) { return NULL; }
60static inline void dmi_scan_machine(void) { return; } 60static inline void dmi_scan_machine(void) { return; }
61static inline int dmi_get_year(int year) { return 0; } 61static inline bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp)
62{
63 if (yearp)
64 *yearp = 0;
65 if (monthp)
66 *monthp = 0;
67 if (dayp)
68 *dayp = 0;
69 return false;
70}
62static inline int dmi_name_in_vendors(const char *s) { return 0; } 71static inline int dmi_name_in_vendors(const char *s) { return 0; }
63static inline int dmi_name_in_serial(const char *s) { return 0; } 72static inline int dmi_name_in_serial(const char *s) { return 0; }
64#define dmi_available 0 73#define dmi_available 0
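
A hedged before/after sketch of the dmi_get_year() to dmi_get_date() conversion above; the example_ helper and the year threshold are illustrative.

#include <linux/dmi.h>
#include <linux/types.h>

/* Old code: if (dmi_get_year(DMI_BIOS_DATE) >= 2005) ... */
static bool example_bios_from(int year)
{
        int bios_year;

        if (!dmi_get_date(DMI_BIOS_DATE, &bios_year, NULL, NULL))
                return false;   /* DMI date field not available */
        return bios_year && bios_year >= year;
}
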
diff --git a/include/linux/fips.h b/include/linux/fips.h
new file mode 100644
index 000000000000..f8fb07b0b6b8
--- /dev/null
+++ b/include/linux/fips.h
@@ -0,0 +1,10 @@
1#ifndef _FIPS_H
2#define _FIPS_H
3
4#ifdef CONFIG_CRYPTO_FIPS
5extern int fips_enabled;
6#else
7#define fips_enabled 0
8#endif
9
10#endif
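
The new fips.h above exposes the fips_enabled flag to code outside the crypto core; a hedged sketch of a typical check (the key-length threshold and function name are invented):

#include <linux/errno.h>
#include <linux/fips.h>

/* Since fips_enabled is defined to 0 without CONFIG_CRYPTO_FIPS, the
 * branch is compiled away on non-FIPS kernels. */
static int example_check_keylen(unsigned int keylen)
{
        if (fips_enabled && keylen < 16)
                return -EINVAL;
        return 0;
}
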
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 73e9b643e455..a79f48373e7e 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -715,7 +715,7 @@ struct posix_acl;
715 715
716struct inode { 716struct inode {
717 struct hlist_node i_hash; 717 struct hlist_node i_hash;
718 struct list_head i_list; 718 struct list_head i_list; /* backing dev IO list */
719 struct list_head i_sb_list; 719 struct list_head i_sb_list;
720 struct list_head i_dentry; 720 struct list_head i_dentry;
721 unsigned long i_ino; 721 unsigned long i_ino;
@@ -1336,9 +1336,6 @@ struct super_block {
1336 struct xattr_handler **s_xattr; 1336 struct xattr_handler **s_xattr;
1337 1337
1338 struct list_head s_inodes; /* all inodes */ 1338 struct list_head s_inodes; /* all inodes */
1339 struct list_head s_dirty; /* dirty inodes */
1340 struct list_head s_io; /* parked for writeback */
1341 struct list_head s_more_io; /* parked for more writeback */
1342 struct hlist_head s_anon; /* anonymous dentries for (nfs) exporting */ 1339 struct hlist_head s_anon; /* anonymous dentries for (nfs) exporting */
1343 struct list_head s_files; 1340 struct list_head s_files;
1344 /* s_dentry_lru and s_nr_dentry_unused are protected by dcache_lock */ 1341 /* s_dentry_lru and s_nr_dentry_unused are protected by dcache_lock */
@@ -1528,6 +1525,7 @@ struct inode_operations {
1528 void (*put_link) (struct dentry *, struct nameidata *, void *); 1525 void (*put_link) (struct dentry *, struct nameidata *, void *);
1529 void (*truncate) (struct inode *); 1526 void (*truncate) (struct inode *);
1530 int (*permission) (struct inode *, int); 1527 int (*permission) (struct inode *, int);
1528 int (*check_acl)(struct inode *, int);
1531 int (*setattr) (struct dentry *, struct iattr *); 1529 int (*setattr) (struct dentry *, struct iattr *);
1532 int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *); 1530 int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *);
1533 int (*setxattr) (struct dentry *, const char *,const void *,size_t,int); 1531 int (*setxattr) (struct dentry *, const char *,const void *,size_t,int);
@@ -1788,6 +1786,7 @@ extern int get_sb_pseudo(struct file_system_type *, char *,
1788 struct vfsmount *mnt); 1786 struct vfsmount *mnt);
1789extern void simple_set_mnt(struct vfsmount *mnt, struct super_block *sb); 1787extern void simple_set_mnt(struct vfsmount *mnt, struct super_block *sb);
1790int __put_super_and_need_restart(struct super_block *sb); 1788int __put_super_and_need_restart(struct super_block *sb);
1789void put_super(struct super_block *sb);
1791 1790
1792/* Alas, no aliases. Too much hassle with bringing module.h everywhere */ 1791/* Alas, no aliases. Too much hassle with bringing module.h everywhere */
1793#define fops_get(fops) \ 1792#define fops_get(fops) \
@@ -1998,12 +1997,25 @@ extern void bd_release_from_disk(struct block_device *, struct gendisk *);
1998#define CHRDEV_MAJOR_HASH_SIZE 255 1997#define CHRDEV_MAJOR_HASH_SIZE 255
1999extern int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *); 1998extern int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *);
2000extern int register_chrdev_region(dev_t, unsigned, const char *); 1999extern int register_chrdev_region(dev_t, unsigned, const char *);
2001extern int register_chrdev(unsigned int, const char *, 2000extern int __register_chrdev(unsigned int major, unsigned int baseminor,
2002 const struct file_operations *); 2001 unsigned int count, const char *name,
2003extern void unregister_chrdev(unsigned int, const char *); 2002 const struct file_operations *fops);
2003extern void __unregister_chrdev(unsigned int major, unsigned int baseminor,
2004 unsigned int count, const char *name);
2004extern void unregister_chrdev_region(dev_t, unsigned); 2005extern void unregister_chrdev_region(dev_t, unsigned);
2005extern void chrdev_show(struct seq_file *,off_t); 2006extern void chrdev_show(struct seq_file *,off_t);
2006 2007
2008static inline int register_chrdev(unsigned int major, const char *name,
2009 const struct file_operations *fops)
2010{
2011 return __register_chrdev(major, 0, 256, name, fops);
2012}
2013
2014static inline void unregister_chrdev(unsigned int major, const char *name)
2015{
2016 __unregister_chrdev(major, 0, 256, name);
2017}
2018
2007/* fs/block_dev.c */ 2019/* fs/block_dev.c */
2008#define BDEVNAME_SIZE 32 /* Largest string for a blockdev identifier */ 2020#define BDEVNAME_SIZE 32 /* Largest string for a blockdev identifier */
2009#define BDEVT_SIZE 10 /* Largest string for MAJ:MIN for blkdev */ 2021#define BDEVT_SIZE 10 /* Largest string for MAJ:MIN for blkdev */
@@ -2070,8 +2082,6 @@ static inline void invalidate_remote_inode(struct inode *inode)
2070extern int invalidate_inode_pages2(struct address_space *mapping); 2082extern int invalidate_inode_pages2(struct address_space *mapping);
2071extern int invalidate_inode_pages2_range(struct address_space *mapping, 2083extern int invalidate_inode_pages2_range(struct address_space *mapping,
2072 pgoff_t start, pgoff_t end); 2084 pgoff_t start, pgoff_t end);
2073extern void generic_sync_sb_inodes(struct super_block *sb,
2074 struct writeback_control *wbc);
2075extern int write_inode_now(struct inode *, int); 2085extern int write_inode_now(struct inode *, int);
2076extern int filemap_fdatawrite(struct address_space *); 2086extern int filemap_fdatawrite(struct address_space *);
2077extern int filemap_flush(struct address_space *); 2087extern int filemap_flush(struct address_space *);
@@ -2186,7 +2196,6 @@ extern int bdev_read_only(struct block_device *);
2186extern int set_blocksize(struct block_device *, int); 2196extern int set_blocksize(struct block_device *, int);
2187extern int sb_set_blocksize(struct super_block *, int); 2197extern int sb_set_blocksize(struct super_block *, int);
2188extern int sb_min_blocksize(struct super_block *, int); 2198extern int sb_min_blocksize(struct super_block *, int);
2189extern int sb_has_dirty_inodes(struct super_block *);
2190 2199
2191extern int generic_file_mmap(struct file *, struct vm_area_struct *); 2200extern int generic_file_mmap(struct file *, struct vm_area_struct *);
2192extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *); 2201extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
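register_chrdev() and unregister_chrdev() are now inline wrappers that pass base minor 0 and a count of 256 to the new __register_chrdev()/__unregister_chrdev(), which take an explicit minor range. A hedged sketch of a driver that only wants a small range; all names are illustrative:

#include <linux/fs.h>

extern const struct file_operations example_fops;      /* defined elsewhere */

static int example_major;

static int example_init(void)
{
        /* Claim only minors 0..15 of a dynamically allocated major. */
        example_major = __register_chrdev(0, 0, 16, "example", &example_fops);
        return example_major < 0 ? example_major : 0;
}

static void example_exit(void)
{
        __unregister_chrdev(example_major, 0, 16, "example");
}
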
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index a81170de7f6b..23f7179bf74e 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -93,16 +93,22 @@ void tracing_generic_entry_update(struct trace_entry *entry,
93 unsigned long flags, 93 unsigned long flags,
94 int pc); 94 int pc);
95struct ring_buffer_event * 95struct ring_buffer_event *
96trace_current_buffer_lock_reserve(int type, unsigned long len, 96trace_current_buffer_lock_reserve(struct ring_buffer **current_buffer,
97 int type, unsigned long len,
97 unsigned long flags, int pc); 98 unsigned long flags, int pc);
98void trace_current_buffer_unlock_commit(struct ring_buffer_event *event, 99void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
100 struct ring_buffer_event *event,
99 unsigned long flags, int pc); 101 unsigned long flags, int pc);
100void trace_nowake_buffer_unlock_commit(struct ring_buffer_event *event, 102void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
103 struct ring_buffer_event *event,
101 unsigned long flags, int pc); 104 unsigned long flags, int pc);
102void trace_current_buffer_discard_commit(struct ring_buffer_event *event); 105void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
106 struct ring_buffer_event *event);
103 107
104void tracing_record_cmdline(struct task_struct *tsk); 108void tracing_record_cmdline(struct task_struct *tsk);
105 109
110struct event_filter;
111
106struct ftrace_event_call { 112struct ftrace_event_call {
107 struct list_head list; 113 struct list_head list;
108 char *name; 114 char *name;
@@ -110,16 +116,18 @@ struct ftrace_event_call {
110 struct dentry *dir; 116 struct dentry *dir;
111 struct trace_event *event; 117 struct trace_event *event;
112 int enabled; 118 int enabled;
113 int (*regfunc)(void); 119 int (*regfunc)(void *);
114 void (*unregfunc)(void); 120 void (*unregfunc)(void *);
115 int id; 121 int id;
116 int (*raw_init)(void); 122 int (*raw_init)(void);
117 int (*show_format)(struct trace_seq *s); 123 int (*show_format)(struct ftrace_event_call *call,
118 int (*define_fields)(void); 124 struct trace_seq *s);
125 int (*define_fields)(struct ftrace_event_call *);
119 struct list_head fields; 126 struct list_head fields;
120 int filter_active; 127 int filter_active;
121 void *filter; 128 struct event_filter *filter;
122 void *mod; 129 void *mod;
130 void *data;
123 131
124 atomic_t profile_count; 132 atomic_t profile_count;
125 int (*profile_enable)(struct ftrace_event_call *); 133 int (*profile_enable)(struct ftrace_event_call *);
@@ -129,15 +137,25 @@ struct ftrace_event_call {
129#define MAX_FILTER_PRED 32 137#define MAX_FILTER_PRED 32
130#define MAX_FILTER_STR_VAL 128 138#define MAX_FILTER_STR_VAL 128
131 139
132extern int init_preds(struct ftrace_event_call *call);
133extern void destroy_preds(struct ftrace_event_call *call); 140extern void destroy_preds(struct ftrace_event_call *call);
134extern int filter_match_preds(struct ftrace_event_call *call, void *rec); 141extern int filter_match_preds(struct ftrace_event_call *call, void *rec);
135extern int filter_current_check_discard(struct ftrace_event_call *call, 142extern int filter_current_check_discard(struct ring_buffer *buffer,
143 struct ftrace_event_call *call,
136 void *rec, 144 void *rec,
137 struct ring_buffer_event *event); 145 struct ring_buffer_event *event);
138 146
139extern int trace_define_field(struct ftrace_event_call *call, char *type, 147enum {
140 char *name, int offset, int size, int is_signed); 148 FILTER_OTHER = 0,
149 FILTER_STATIC_STRING,
150 FILTER_DYN_STRING,
151 FILTER_PTR_STRING,
152};
153
154extern int trace_define_field(struct ftrace_event_call *call,
155 const char *type, const char *name,
156 int offset, int size, int is_signed,
157 int filter_type);
158extern int trace_define_common_fields(struct ftrace_event_call *call);
141 159
142#define is_signed_type(type) (((type)(-1)) < 0) 160#define is_signed_type(type) (((type)(-1)) < 0)
143 161
@@ -162,11 +180,4 @@ do { \
162 __trace_printk(ip, fmt, ##args); \ 180 __trace_printk(ip, fmt, ##args); \
163} while (0) 181} while (0)
164 182
165#define __common_field(type, item, is_signed) \
166 ret = trace_define_field(event_call, #type, "common_" #item, \
167 offsetof(typeof(field.ent), item), \
168 sizeof(field.ent.item), is_signed); \
169 if (ret) \
170 return ret;
171
172#endif /* _LINUX_FTRACE_EVENT_H */ 183#endif /* _LINUX_FTRACE_EVENT_H */
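trace_define_field() now takes const type/name strings plus a filter_type, and the removed __common_field helper is replaced by trace_define_common_fields(). A sketch of a define_fields callback under the new signature; the event structure and field are illustrative:

#include <linux/stddef.h>
#include <linux/ftrace_event.h>

struct example_trace_entry {
        struct trace_entry      ent;
        unsigned long           addr;
};

static int example_define_fields(struct ftrace_event_call *call)
{
        int ret = trace_define_common_fields(call);

        if (ret)
                return ret;

        return trace_define_field(call, "unsigned long", "addr",
                                  offsetof(struct example_trace_entry, addr),
                                  sizeof(unsigned long), 0, FILTER_OTHER);
}
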
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 8246c697863d..6d527ee82b2b 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -64,6 +64,12 @@
64#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT) 64#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
65#define NMI_OFFSET (1UL << NMI_SHIFT) 65#define NMI_OFFSET (1UL << NMI_SHIFT)
66 66
67#ifndef PREEMPT_ACTIVE
68#define PREEMPT_ACTIVE_BITS 1
69#define PREEMPT_ACTIVE_SHIFT (NMI_SHIFT + NMI_BITS)
70#define PREEMPT_ACTIVE (__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT)
71#endif
72
67#if PREEMPT_ACTIVE < (1 << (NMI_SHIFT + NMI_BITS)) 73#if PREEMPT_ACTIVE < (1 << (NMI_SHIFT + NMI_BITS))
68#error PREEMPT_ACTIVE is too low! 74#error PREEMPT_ACTIVE is too low!
69#endif 75#endif
@@ -132,7 +138,7 @@ static inline void account_system_vtime(struct task_struct *tsk)
132} 138}
133#endif 139#endif
134 140
135#if defined(CONFIG_NO_HZ) && !defined(CONFIG_CLASSIC_RCU) 141#if defined(CONFIG_NO_HZ)
136extern void rcu_irq_enter(void); 142extern void rcu_irq_enter(void);
137extern void rcu_irq_exit(void); 143extern void rcu_irq_exit(void);
138extern void rcu_nmi_enter(void); 144extern void rcu_nmi_enter(void);
@@ -142,7 +148,7 @@ extern void rcu_nmi_exit(void);
142# define rcu_irq_exit() do { } while (0) 148# define rcu_irq_exit() do { } while (0)
143# define rcu_nmi_enter() do { } while (0) 149# define rcu_nmi_enter() do { } while (0)
144# define rcu_nmi_exit() do { } while (0) 150# define rcu_nmi_exit() do { } while (0)
145#endif /* #if defined(CONFIG_NO_HZ) && !defined(CONFIG_CLASSIC_RCU) */ 151#endif /* #if defined(CONFIG_NO_HZ) */
146 152
147/* 153/*
148 * It is safe to do non-atomic ops on ->hardirq_context, 154 * It is safe to do non-atomic ops on ->hardirq_context,
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 7fc01b13be43..9e7f2e8fc66e 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -94,6 +94,16 @@ extern struct group_info init_groups;
94# define CAP_INIT_BSET CAP_INIT_EFF_SET 94# define CAP_INIT_BSET CAP_INIT_EFF_SET
95#endif 95#endif
96 96
97#ifdef CONFIG_TREE_PREEMPT_RCU
98#define INIT_TASK_RCU_PREEMPT(tsk) \
99 .rcu_read_lock_nesting = 0, \
100 .rcu_read_unlock_special = 0, \
101 .rcu_blocked_node = NULL, \
102 .rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry),
103#else
104#define INIT_TASK_RCU_PREEMPT(tsk)
105#endif
106
97extern struct cred init_cred; 107extern struct cred init_cred;
98 108
99#ifdef CONFIG_PERF_COUNTERS 109#ifdef CONFIG_PERF_COUNTERS
@@ -173,6 +183,7 @@ extern struct cred init_cred;
173 INIT_LOCKDEP \ 183 INIT_LOCKDEP \
174 INIT_FTRACE_GRAPH \ 184 INIT_FTRACE_GRAPH \
175 INIT_TRACE_RECURSION \ 185 INIT_TRACE_RECURSION \
186 INIT_TASK_RCU_PREEMPT(tsk) \
176} 187}
177 188
178 189
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 35e7df1e9f30..1ac57e522a1f 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -50,6 +50,9 @@
50 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is 50 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
51 * registered first in an shared interrupt is considered for 51 * registered first in an shared interrupt is considered for
52 * performance reasons) 52 * performance reasons)
53 * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
54 * Used by threaded interrupts which need to keep the
55 * irq line disabled until the threaded handler has been run.
53 */ 56 */
54#define IRQF_DISABLED 0x00000020 57#define IRQF_DISABLED 0x00000020
55#define IRQF_SAMPLE_RANDOM 0x00000040 58#define IRQF_SAMPLE_RANDOM 0x00000040
@@ -59,6 +62,7 @@
59#define IRQF_PERCPU 0x00000400 62#define IRQF_PERCPU 0x00000400
60#define IRQF_NOBALANCING 0x00000800 63#define IRQF_NOBALANCING 0x00000800
61#define IRQF_IRQPOLL 0x00001000 64#define IRQF_IRQPOLL 0x00001000
65#define IRQF_ONESHOT 0x00002000
62 66
63/* 67/*
64 * Bits used by threaded handlers: 68 * Bits used by threaded handlers:
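IRQF_ONESHOT keeps the interrupt line masked until the threaded handler has run, which is what a level-triggered device needs when it has no primary (hardirq) handler. A sketch of requesting such an interrupt; the device and handler names are illustrative:

#include <linux/interrupt.h>

static irqreturn_t example_thread_fn(int irq, void *dev_id)
{
        /* Talk to the (possibly slow, e.g. i2c) device here. */
        return IRQ_HANDLED;
}

static int example_setup_irq(unsigned int irq, void *dev)
{
        /* No primary handler, so the line must stay masked until the
         * thread completes: IRQF_ONESHOT is required. */
        return request_threaded_irq(irq, NULL, example_thread_fn,
                                    IRQF_ONESHOT, "example", dev);
}
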
diff --git a/include/linux/irq.h b/include/linux/irq.h
index cb2e77a3f7f7..ae9653dbcd78 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -69,6 +69,8 @@ typedef void (*irq_flow_handler_t)(unsigned int irq,
69#define IRQ_MOVE_PCNTXT 0x01000000 /* IRQ migration from process context */ 69#define IRQ_MOVE_PCNTXT 0x01000000 /* IRQ migration from process context */
70#define IRQ_AFFINITY_SET 0x02000000 /* IRQ affinity was set from userspace*/ 70#define IRQ_AFFINITY_SET 0x02000000 /* IRQ affinity was set from userspace*/
71#define IRQ_SUSPENDED 0x04000000 /* IRQ has gone through suspend sequence */ 71#define IRQ_SUSPENDED 0x04000000 /* IRQ has gone through suspend sequence */
72#define IRQ_ONESHOT 0x08000000 /* IRQ is not unmasked after hardirq */
73#define IRQ_NESTED_THREAD 0x10000000 /* IRQ is nested into another, no own handler thread */
72 74
73#ifdef CONFIG_IRQ_PER_CPU 75#ifdef CONFIG_IRQ_PER_CPU
74# define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU) 76# define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU)
@@ -100,6 +102,9 @@ struct msi_desc;
100 * @set_type: set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ 102 * @set_type: set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ
101 * @set_wake: enable/disable power-management wake-on of an IRQ 103 * @set_wake: enable/disable power-management wake-on of an IRQ
102 * 104 *
105 * @bus_lock: function to lock access to slow bus (i2c) chips
106 * @bus_sync_unlock: function to sync and unlock slow bus (i2c) chips
107 *
103 * @release: release function solely used by UML 108 * @release: release function solely used by UML
104 * @typename: obsoleted by name, kept as migration helper 109 * @typename: obsoleted by name, kept as migration helper
105 */ 110 */
@@ -123,6 +128,9 @@ struct irq_chip {
123 int (*set_type)(unsigned int irq, unsigned int flow_type); 128 int (*set_type)(unsigned int irq, unsigned int flow_type);
124 int (*set_wake)(unsigned int irq, unsigned int on); 129 int (*set_wake)(unsigned int irq, unsigned int on);
125 130
131 void (*bus_lock)(unsigned int irq);
132 void (*bus_sync_unlock)(unsigned int irq);
133
126 /* Currently used only by UML, might disappear one day.*/ 134 /* Currently used only by UML, might disappear one day.*/
127#ifdef CONFIG_IRQ_RELEASE_METHOD 135#ifdef CONFIG_IRQ_RELEASE_METHOD
128 void (*release)(unsigned int irq, void *dev_id); 136 void (*release)(unsigned int irq, void *dev_id);
@@ -220,13 +228,6 @@ static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node)
220extern struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node); 228extern struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node);
221 229
222/* 230/*
223 * Migration helpers for obsolete names, they will go away:
224 */
225#define hw_interrupt_type irq_chip
226#define no_irq_type no_irq_chip
227typedef struct irq_desc irq_desc_t;
228
229/*
230 * Pick up the arch-dependent methods: 231 * Pick up the arch-dependent methods:
231 */ 232 */
232#include <asm/hw_irq.h> 233#include <asm/hw_irq.h>
@@ -289,6 +290,7 @@ extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc);
289extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc); 290extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc);
290extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc); 291extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc);
291extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc); 292extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc);
293extern void handle_nested_irq(unsigned int irq);
292 294
293/* 295/*
294 * Monolithic do_IRQ implementation. 296 * Monolithic do_IRQ implementation.
@@ -379,6 +381,8 @@ set_irq_chained_handler(unsigned int irq,
379 __set_irq_handler(irq, handle, 1, NULL); 381 __set_irq_handler(irq, handle, 1, NULL);
380} 382}
381 383
384extern void set_irq_nested_thread(unsigned int irq, int nest);
385
382extern void set_irq_noprobe(unsigned int irq); 386extern void set_irq_noprobe(unsigned int irq);
383extern void set_irq_probe(unsigned int irq); 387extern void set_irq_probe(unsigned int irq);
384 388
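The bus_lock/bus_sync_unlock callbacks let an irq_chip that sits behind a slow bus (i2c/spi) defer its register updates until the bus can be accessed with interrupts enabled, while handle_nested_irq()/set_irq_nested_thread() support demultiplexing such chips from a threaded parent handler. A rough sketch, with all names illustrative and the register caching omitted:

#include <linux/irq.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_bus_mutex);

static void example_bus_lock(unsigned int irq)
{
        mutex_lock(&example_bus_mutex);
}

static void example_bus_sync_unlock(unsigned int irq)
{
        /* Write any cached mask/unmask state to the chip over i2c here. */
        mutex_unlock(&example_bus_mutex);
}

static struct irq_chip example_chip = {
        .name                   = "example",
        .bus_lock               = example_bus_lock,
        .bus_sync_unlock        = example_bus_sync_unlock,
        /* .mask/.unmask would only update the cached state. */
};
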
diff --git a/include/linux/irqnr.h b/include/linux/irqnr.h
index ec87b212ff7d..7bf89bc8cbca 100644
--- a/include/linux/irqnr.h
+++ b/include/linux/irqnr.h
@@ -41,6 +41,12 @@ extern struct irq_desc *irq_to_desc(unsigned int irq);
41 ; \ 41 ; \
42 else 42 else
43 43
44#ifdef CONFIG_SMP
45#define irq_node(irq) (irq_to_desc(irq)->node)
46#else
47#define irq_node(irq) 0
48#endif
49
44#endif /* CONFIG_GENERIC_HARDIRQS */ 50#endif /* CONFIG_GENERIC_HARDIRQS */
45 51
46#define for_each_irq_nr(irq) \ 52#define for_each_irq_nr(irq) \
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index d6320a3e8def..2b5b1e0899a8 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -125,7 +125,7 @@ extern int _cond_resched(void);
125#endif 125#endif
126 126
127#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP 127#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
128 void __might_sleep(char *file, int line); 128 void __might_sleep(char *file, int line, int preempt_offset);
129/** 129/**
130 * might_sleep - annotation for functions that can sleep 130 * might_sleep - annotation for functions that can sleep
131 * 131 *
@@ -137,8 +137,9 @@ extern int _cond_resched(void);
137 * supposed to. 137 * supposed to.
138 */ 138 */
139# define might_sleep() \ 139# define might_sleep() \
140 do { __might_sleep(__FILE__, __LINE__); might_resched(); } while (0) 140 do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
141#else 141#else
142 static inline void __might_sleep(char *file, int line, int preempt_offset) { }
142# define might_sleep() do { might_resched(); } while (0) 143# define might_sleep() do { might_resched(); } while (0)
143#endif 144#endif
144 145
diff --git a/include/linux/key.h b/include/linux/key.h
index e544f466d69a..cd50dfa1d4c2 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -129,7 +129,10 @@ struct key {
129 struct rw_semaphore sem; /* change vs change sem */ 129 struct rw_semaphore sem; /* change vs change sem */
130 struct key_user *user; /* owner of this key */ 130 struct key_user *user; /* owner of this key */
131 void *security; /* security data for this key */ 131 void *security; /* security data for this key */
132 time_t expiry; /* time at which key expires (or 0) */ 132 union {
133 time_t expiry; /* time at which key expires (or 0) */
134 time_t revoked_at; /* time at which key was revoked */
135 };
133 uid_t uid; 136 uid_t uid;
134 gid_t gid; 137 gid_t gid;
135 key_perm_t perm; /* access permissions */ 138 key_perm_t perm; /* access permissions */
@@ -275,6 +278,8 @@ static inline key_serial_t key_serial(struct key *key)
275extern ctl_table key_sysctls[]; 278extern ctl_table key_sysctls[];
276#endif 279#endif
277 280
281extern void key_replace_session_keyring(void);
282
278/* 283/*
279 * the userspace interface 284 * the userspace interface
280 */ 285 */
@@ -297,6 +302,7 @@ extern void key_init(void);
297#define key_fsuid_changed(t) do { } while(0) 302#define key_fsuid_changed(t) do { } while(0)
298#define key_fsgid_changed(t) do { } while(0) 303#define key_fsgid_changed(t) do { } while(0)
299#define key_init() do { } while(0) 304#define key_init() do { } while(0)
305#define key_replace_session_keyring() do { } while(0)
300 306
301#endif /* CONFIG_KEYS */ 307#endif /* CONFIG_KEYS */
302#endif /* __KERNEL__ */ 308#endif /* __KERNEL__ */
diff --git a/include/linux/keyctl.h b/include/linux/keyctl.h
index c0688eb72093..bd383f1944fb 100644
--- a/include/linux/keyctl.h
+++ b/include/linux/keyctl.h
@@ -52,5 +52,6 @@
52#define KEYCTL_SET_TIMEOUT 15 /* set key timeout */ 52#define KEYCTL_SET_TIMEOUT 15 /* set key timeout */
53#define KEYCTL_ASSUME_AUTHORITY 16 /* assume request_key() authorisation */ 53#define KEYCTL_ASSUME_AUTHORITY 16 /* assume request_key() authorisation */
54#define KEYCTL_GET_SECURITY 17 /* get key security label */ 54#define KEYCTL_GET_SECURITY 17 /* get key security label */
55#define KEYCTL_SESSION_TO_PARENT 18 /* apply session keyring to parent process */
55 56
56#endif /* _LINUX_KEYCTL_H */ 57#endif /* _LINUX_KEYCTL_H */
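KEYCTL_SESSION_TO_PARENT asks the kernel to install the caller's session keyring on its parent process, the case a login helper hits when it builds a keyring on behalf of the shell that spawned it. A hedged userspace sketch using the raw syscall (keyutils also provides a keyctl_session_to_parent() wrapper):

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/keyctl.h>

int main(void)
{
        if (syscall(__NR_keyctl, KEYCTL_SESSION_TO_PARENT) == -1) {
                perror("KEYCTL_SESSION_TO_PARENT");
                return 1;
        }
        return 0;
}
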
diff --git a/include/linux/kmemcheck.h b/include/linux/kmemcheck.h
index 47b39b7c7e84..dc2fd545db00 100644
--- a/include/linux/kmemcheck.h
+++ b/include/linux/kmemcheck.h
@@ -34,6 +34,8 @@ void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n);
34int kmemcheck_show_addr(unsigned long address); 34int kmemcheck_show_addr(unsigned long address);
35int kmemcheck_hide_addr(unsigned long address); 35int kmemcheck_hide_addr(unsigned long address);
36 36
37bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size);
38
37#else 39#else
38#define kmemcheck_enabled 0 40#define kmemcheck_enabled 0
39 41
@@ -99,6 +101,11 @@ static inline void kmemcheck_mark_initialized_pages(struct page *p,
99{ 101{
100} 102}
101 103
104static inline bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size)
105{
106 return true;
107}
108
102#endif /* CONFIG_KMEMCHECK */ 109#endif /* CONFIG_KMEMCHECK */
103 110
104/* 111/*
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
index 6a63807f714e..3c7497d46ee9 100644
--- a/include/linux/kmemleak.h
+++ b/include/linux/kmemleak.h
@@ -23,18 +23,18 @@
23 23
24#ifdef CONFIG_DEBUG_KMEMLEAK 24#ifdef CONFIG_DEBUG_KMEMLEAK
25 25
26extern void kmemleak_init(void); 26extern void kmemleak_init(void) __ref;
27extern void kmemleak_alloc(const void *ptr, size_t size, int min_count, 27extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
28 gfp_t gfp); 28 gfp_t gfp) __ref;
29extern void kmemleak_free(const void *ptr); 29extern void kmemleak_free(const void *ptr) __ref;
30extern void kmemleak_free_part(const void *ptr, size_t size); 30extern void kmemleak_free_part(const void *ptr, size_t size) __ref;
31extern void kmemleak_padding(const void *ptr, unsigned long offset, 31extern void kmemleak_padding(const void *ptr, unsigned long offset,
32 size_t size); 32 size_t size) __ref;
33extern void kmemleak_not_leak(const void *ptr); 33extern void kmemleak_not_leak(const void *ptr) __ref;
34extern void kmemleak_ignore(const void *ptr); 34extern void kmemleak_ignore(const void *ptr) __ref;
35extern void kmemleak_scan_area(const void *ptr, unsigned long offset, 35extern void kmemleak_scan_area(const void *ptr, unsigned long offset,
36 size_t length, gfp_t gfp); 36 size_t length, gfp_t gfp) __ref;
37extern void kmemleak_no_scan(const void *ptr); 37extern void kmemleak_no_scan(const void *ptr) __ref;
38 38
39static inline void kmemleak_alloc_recursive(const void *ptr, size_t size, 39static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
40 int min_count, unsigned long flags, 40 int min_count, unsigned long flags,
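The kmemleak hooks gain __ref so they may safely reference init code and data; their use is unchanged. For reference, a minimal sketch of annotating an object whose only reference lives where the scanner cannot see it (the helper is illustrative):

#include <linux/slab.h>
#include <linux/kmemleak.h>

static void *example_alloc_unscanned(size_t size)
{
        void *obj = kmalloc(size, GFP_KERNEL);

        /* The only pointer to obj is stored as a physical address the
         * scanner cannot follow, so suppress the false positive. */
        if (obj)
                kmemleak_not_leak(obj);
        return obj;
}
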
diff --git a/include/linux/libata.h b/include/linux/libata.h
index e5b6e33c6571..76319bf03e37 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -143,7 +143,6 @@ enum {
143 143
144 ATA_DFLAG_PIO = (1 << 12), /* device limited to PIO mode */ 144 ATA_DFLAG_PIO = (1 << 12), /* device limited to PIO mode */
145 ATA_DFLAG_NCQ_OFF = (1 << 13), /* device limited to non-NCQ mode */ 145 ATA_DFLAG_NCQ_OFF = (1 << 13), /* device limited to non-NCQ mode */
146 ATA_DFLAG_SPUNDOWN = (1 << 14), /* XXX: for spindown_compat */
147 ATA_DFLAG_SLEEPING = (1 << 15), /* device is sleeping */ 146 ATA_DFLAG_SLEEPING = (1 << 15), /* device is sleeping */
148 ATA_DFLAG_DUBIOUS_XFER = (1 << 16), /* data transfer not verified */ 147 ATA_DFLAG_DUBIOUS_XFER = (1 << 16), /* data transfer not verified */
149 ATA_DFLAG_NO_UNLOAD = (1 << 17), /* device doesn't support unload */ 148 ATA_DFLAG_NO_UNLOAD = (1 << 17), /* device doesn't support unload */
@@ -190,6 +189,7 @@ enum {
190 ATA_FLAG_NO_POWEROFF_SPINDOWN = (1 << 11), /* don't spindown before poweroff */ 189 ATA_FLAG_NO_POWEROFF_SPINDOWN = (1 << 11), /* don't spindown before poweroff */
191 ATA_FLAG_NO_HIBERNATE_SPINDOWN = (1 << 12), /* don't spindown before hibernation */ 190 ATA_FLAG_NO_HIBERNATE_SPINDOWN = (1 << 12), /* don't spindown before hibernation */
192 ATA_FLAG_DEBUGMSG = (1 << 13), 191 ATA_FLAG_DEBUGMSG = (1 << 13),
192 ATA_FLAG_FPDMA_AA = (1 << 14), /* driver supports Auto-Activate */
193 ATA_FLAG_IGN_SIMPLEX = (1 << 15), /* ignore SIMPLEX */ 193 ATA_FLAG_IGN_SIMPLEX = (1 << 15), /* ignore SIMPLEX */
194 ATA_FLAG_NO_IORDY = (1 << 16), /* controller lacks iordy */ 194 ATA_FLAG_NO_IORDY = (1 << 16), /* controller lacks iordy */
195 ATA_FLAG_ACPI_SATA = (1 << 17), /* need native SATA ACPI layout */ 195 ATA_FLAG_ACPI_SATA = (1 << 17), /* need native SATA ACPI layout */
@@ -386,6 +386,7 @@ enum {
386 ATA_HORKAGE_FIRMWARE_WARN = (1 << 12), /* firmware update warning */ 386 ATA_HORKAGE_FIRMWARE_WARN = (1 << 12), /* firmware update warning */
387 ATA_HORKAGE_1_5_GBPS = (1 << 13), /* force 1.5 Gbps */ 387 ATA_HORKAGE_1_5_GBPS = (1 << 13), /* force 1.5 Gbps */
388 ATA_HORKAGE_NOSETXFER = (1 << 14), /* skip SETXFER, SATA only */ 388 ATA_HORKAGE_NOSETXFER = (1 << 14), /* skip SETXFER, SATA only */
389 ATA_HORKAGE_BROKEN_FPDMA_AA = (1 << 15), /* skip AA */
389 390
390 /* DMA mask for user DMA control: User visible values; DO NOT 391 /* DMA mask for user DMA control: User visible values; DO NOT
391 renumber */ 392 renumber */
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index b25d1b53df0d..9ccf0e286b2a 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -149,6 +149,12 @@ struct lock_list {
149 struct lock_class *class; 149 struct lock_class *class;
150 struct stack_trace trace; 150 struct stack_trace trace;
151 int distance; 151 int distance;
152
153 /*
154 * The parent field is used to implement breadth-first search, and the
155 * bit 0 is reused to indicate if the lock has been accessed in BFS.
156 */
157 struct lock_list *parent;
152}; 158};
153 159
154/* 160/*
@@ -208,10 +214,12 @@ struct held_lock {
208 * interrupt context: 214 * interrupt context:
209 */ 215 */
210 unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */ 216 unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
211 unsigned int trylock:1; 217 unsigned int trylock:1; /* 16 bits */
218
212 unsigned int read:2; /* see lock_acquire() comment */ 219 unsigned int read:2; /* see lock_acquire() comment */
213 unsigned int check:2; /* see lock_acquire() comment */ 220 unsigned int check:2; /* see lock_acquire() comment */
214 unsigned int hardirqs_off:1; 221 unsigned int hardirqs_off:1;
222 unsigned int references:11; /* 32 bits */
215}; 223};
216 224
217/* 225/*
@@ -291,6 +299,10 @@ extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
291extern void lock_release(struct lockdep_map *lock, int nested, 299extern void lock_release(struct lockdep_map *lock, int nested,
292 unsigned long ip); 300 unsigned long ip);
293 301
302#define lockdep_is_held(lock) lock_is_held(&(lock)->dep_map)
303
304extern int lock_is_held(struct lockdep_map *lock);
305
294extern void lock_set_class(struct lockdep_map *lock, const char *name, 306extern void lock_set_class(struct lockdep_map *lock, const char *name,
295 struct lock_class_key *key, unsigned int subclass, 307 struct lock_class_key *key, unsigned int subclass,
296 unsigned long ip); 308 unsigned long ip);
@@ -309,6 +321,8 @@ extern void lockdep_trace_alloc(gfp_t mask);
309 321
310#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0) 322#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)
311 323
324#define lockdep_assert_held(l) WARN_ON(debug_locks && !lockdep_is_held(l))
325
312#else /* !LOCKDEP */ 326#else /* !LOCKDEP */
313 327
314static inline void lockdep_off(void) 328static inline void lockdep_off(void)
@@ -353,6 +367,8 @@ struct lock_class_key { };
353 367
354#define lockdep_depth(tsk) (0) 368#define lockdep_depth(tsk) (0)
355 369
370#define lockdep_assert_held(l) do { } while (0)
371
356#endif /* !LOCKDEP */ 372#endif /* !LOCKDEP */
357 373
358#ifdef CONFIG_LOCK_STAT 374#ifdef CONFIG_LOCK_STAT
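lock_is_held()/lockdep_is_held() and the lockdep_assert_held() macro let a function document, and with lockdep enabled verify, that its caller already holds a lock. A small sketch; the structure and function are illustrative:

#include <linux/spinlock.h>
#include <linux/lockdep.h>

struct example_dev {
        spinlock_t      lock;
        unsigned int    count;
};

/* Caller must hold dev->lock. */
static void example_update_locked(struct example_dev *dev)
{
        lockdep_assert_held(&dev->lock);
        dev->count++;
}
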
diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h
index e461b2c3d711..190c37854870 100644
--- a/include/linux/lsm_audit.h
+++ b/include/linux/lsm_audit.h
@@ -33,6 +33,7 @@ struct common_audit_data {
33#define LSM_AUDIT_DATA_IPC 4 33#define LSM_AUDIT_DATA_IPC 4
34#define LSM_AUDIT_DATA_TASK 5 34#define LSM_AUDIT_DATA_TASK 5
35#define LSM_AUDIT_DATA_KEY 6 35#define LSM_AUDIT_DATA_KEY 6
36#define LSM_AUDIT_NO_AUDIT 7
36 struct task_struct *tsk; 37 struct task_struct *tsk;
37 union { 38 union {
38 struct { 39 struct {
@@ -66,16 +67,19 @@ struct common_audit_data {
66 } key_struct; 67 } key_struct;
67#endif 68#endif
68 } u; 69 } u;
69 const char *function;
70 /* this union contains LSM specific data */ 70 /* this union contains LSM specific data */
71 union { 71 union {
72#ifdef CONFIG_SECURITY_SMACK
72 /* SMACK data */ 73 /* SMACK data */
73 struct smack_audit_data { 74 struct smack_audit_data {
75 const char *function;
74 char *subject; 76 char *subject;
75 char *object; 77 char *object;
76 char *request; 78 char *request;
77 int result; 79 int result;
78 } smack_audit_data; 80 } smack_audit_data;
81#endif
82#ifdef CONFIG_SECURITY_SELINUX
79 /* SELinux data */ 83 /* SELinux data */
80 struct { 84 struct {
81 u32 ssid; 85 u32 ssid;
@@ -83,10 +87,12 @@ struct common_audit_data {
83 u16 tclass; 87 u16 tclass;
84 u32 requested; 88 u32 requested;
85 u32 audited; 89 u32 audited;
90 u32 denied;
86 struct av_decision *avd; 91 struct av_decision *avd;
87 int result; 92 int result;
88 } selinux_audit_data; 93 } selinux_audit_data;
89 } lsm_priv; 94#endif
95 };
90 /* these callback will be implemented by a specific LSM */ 96 /* these callback will be implemented by a specific LSM */
91 void (*lsm_pre_audit)(struct audit_buffer *, void *); 97 void (*lsm_pre_audit)(struct audit_buffer *, void *);
92 void (*lsm_post_audit)(struct audit_buffer *, void *); 98 void (*lsm_post_audit)(struct audit_buffer *, void *);
@@ -104,7 +110,7 @@ int ipv6_skb_to_auditdata(struct sk_buff *skb,
104/* Initialize an LSM audit data structure. */ 110/* Initialize an LSM audit data structure. */
105#define COMMON_AUDIT_DATA_INIT(_d, _t) \ 111#define COMMON_AUDIT_DATA_INIT(_d, _t) \
106 { memset((_d), 0, sizeof(struct common_audit_data)); \ 112 { memset((_d), 0, sizeof(struct common_audit_data)); \
107 (_d)->type = LSM_AUDIT_DATA_##_t; (_d)->function = __func__; } 113 (_d)->type = LSM_AUDIT_DATA_##_t; }
108 114
109void common_lsm_audit(struct common_audit_data *a); 115void common_lsm_audit(struct common_audit_data *a);
110 116
diff --git a/include/linux/module.h b/include/linux/module.h
index 098bdb7bfacf..f8f92d015efe 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -17,10 +17,12 @@
17#include <linux/moduleparam.h> 17#include <linux/moduleparam.h>
18#include <linux/marker.h> 18#include <linux/marker.h>
19#include <linux/tracepoint.h> 19#include <linux/tracepoint.h>
20#include <asm/local.h>
21 20
21#include <asm/local.h>
22#include <asm/module.h> 22#include <asm/module.h>
23 23
24#include <trace/events/module.h>
25
24/* Not Yet Implemented */ 26/* Not Yet Implemented */
25#define MODULE_SUPPORTED_DEVICE(name) 27#define MODULE_SUPPORTED_DEVICE(name)
26 28
@@ -462,7 +464,10 @@ static inline local_t *__module_ref_addr(struct module *mod, int cpu)
462static inline void __module_get(struct module *module) 464static inline void __module_get(struct module *module)
463{ 465{
464 if (module) { 466 if (module) {
465 local_inc(__module_ref_addr(module, get_cpu())); 467 unsigned int cpu = get_cpu();
468 local_inc(__module_ref_addr(module, cpu));
469 trace_module_get(module, _THIS_IP_,
470 local_read(__module_ref_addr(module, cpu)));
466 put_cpu(); 471 put_cpu();
467 } 472 }
468} 473}
@@ -473,8 +478,11 @@ static inline int try_module_get(struct module *module)
473 478
474 if (module) { 479 if (module) {
475 unsigned int cpu = get_cpu(); 480 unsigned int cpu = get_cpu();
476 if (likely(module_is_live(module))) 481 if (likely(module_is_live(module))) {
477 local_inc(__module_ref_addr(module, cpu)); 482 local_inc(__module_ref_addr(module, cpu));
483 trace_module_get(module, _THIS_IP_,
484 local_read(__module_ref_addr(module, cpu)));
485 }
478 else 486 else
479 ret = 0; 487 ret = 0;
480 put_cpu(); 488 put_cpu();
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index bd2eba530667..33b283601f62 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -472,6 +472,7 @@ enum lock_type4 {
472 472
473#define NFSPROC4_NULL 0 473#define NFSPROC4_NULL 0
474#define NFSPROC4_COMPOUND 1 474#define NFSPROC4_COMPOUND 1
475#define NFS4_VERSION 4
475#define NFS4_MINOR_VERSION 0 476#define NFS4_MINOR_VERSION 0
476 477
477#if defined(CONFIG_NFS_V4_1) 478#if defined(CONFIG_NFS_V4_1)
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 19fe15d12042..320569eabe3b 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -167,6 +167,15 @@ struct nfs_server {
167#define NFS_CAP_SYMLINKS (1U << 2) 167#define NFS_CAP_SYMLINKS (1U << 2)
168#define NFS_CAP_ACLS (1U << 3) 168#define NFS_CAP_ACLS (1U << 3)
169#define NFS_CAP_ATOMIC_OPEN (1U << 4) 169#define NFS_CAP_ATOMIC_OPEN (1U << 4)
170#define NFS_CAP_CHANGE_ATTR (1U << 5)
171#define NFS_CAP_FILEID (1U << 6)
172#define NFS_CAP_MODE (1U << 7)
173#define NFS_CAP_NLINK (1U << 8)
174#define NFS_CAP_OWNER (1U << 9)
175#define NFS_CAP_OWNER_GROUP (1U << 10)
176#define NFS_CAP_ATIME (1U << 11)
177#define NFS_CAP_CTIME (1U << 12)
178#define NFS_CAP_MTIME (1U << 13)
170 179
171 180
172/* maximum number of slots to use */ 181/* maximum number of slots to use */
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index 29af2d5df097..b752e807adde 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -28,8 +28,23 @@ static inline void acpi_nmi_disable(void) { }
28static inline void acpi_nmi_enable(void) { } 28static inline void acpi_nmi_enable(void) { }
29#endif 29#endif
30 30
31#ifndef trigger_all_cpu_backtrace 31/*
32#define trigger_all_cpu_backtrace() do { } while (0) 32 * Create trigger_all_cpu_backtrace() out of the arch-provided
33 * base function. Return whether such support was available,
34 * to allow calling code to fall back to some other mechanism:
35 */
36#ifdef arch_trigger_all_cpu_backtrace
37static inline bool trigger_all_cpu_backtrace(void)
38{
39 arch_trigger_all_cpu_backtrace();
40
41 return true;
42}
43#else
44static inline bool trigger_all_cpu_backtrace(void)
45{
46 return false;
47}
33#endif 48#endif
34 49
35#endif 50#endif
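trigger_all_cpu_backtrace() now reports whether the architecture actually provides an all-CPU backtrace, so callers can fall back to a weaker mechanism. A sketch of the intended calling pattern; the caller and fallback are illustrative:

#include <linux/kernel.h>
#include <linux/nmi.h>

static void example_dump_all_cpus(void)
{
        /* Prefer the NMI-based all-CPU backtrace when the arch has one. */
        if (!trigger_all_cpu_backtrace())
                dump_stack();   /* otherwise, at least dump this CPU */
}
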
diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
index 1d9518bc4c58..5171639ecf0f 100644
--- a/include/linux/oprofile.h
+++ b/include/linux/oprofile.h
@@ -67,6 +67,9 @@ struct oprofile_operations {
67 67
68 /* Initiate a stack backtrace. Optional. */ 68 /* Initiate a stack backtrace. Optional. */
69 void (*backtrace)(struct pt_regs * const regs, unsigned int depth); 69 void (*backtrace)(struct pt_regs * const regs, unsigned int depth);
70
71 /* Multiplex between different events. Optional. */
72 int (*switch_events)(void);
70 /* CPU identification string. */ 73 /* CPU identification string. */
71 char * cpu_type; 74 char * cpu_type;
72}; 75};
@@ -171,7 +174,6 @@ struct op_sample;
171struct op_entry { 174struct op_entry {
172 struct ring_buffer_event *event; 175 struct ring_buffer_event *event;
173 struct op_sample *sample; 176 struct op_sample *sample;
174 unsigned long irq_flags;
175 unsigned long size; 177 unsigned long size;
176 unsigned long *data; 178 unsigned long *data;
177}; 179};
@@ -180,6 +182,7 @@ void oprofile_write_reserve(struct op_entry *entry,
180 struct pt_regs * const regs, 182 struct pt_regs * const regs,
181 unsigned long pc, int code, int size); 183 unsigned long pc, int code, int size);
182int oprofile_add_data(struct op_entry *entry, unsigned long val); 184int oprofile_add_data(struct op_entry *entry, unsigned long val);
185int oprofile_add_data64(struct op_entry *entry, u64 val);
183int oprofile_write_commit(struct op_entry *entry); 186int oprofile_write_commit(struct op_entry *entry);
184 187
185#endif /* OPROFILE_H */ 188#endif /* OPROFILE_H */
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index aec3252afcf5..ed5d7501e181 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -132,7 +132,7 @@ static inline int page_cache_get_speculative(struct page *page)
132{ 132{
133 VM_BUG_ON(in_interrupt()); 133 VM_BUG_ON(in_interrupt());
134 134
135#if !defined(CONFIG_SMP) && defined(CONFIG_CLASSIC_RCU) 135#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
136# ifdef CONFIG_PREEMPT 136# ifdef CONFIG_PREEMPT
137 VM_BUG_ON(!in_atomic()); 137 VM_BUG_ON(!in_atomic());
138# endif 138# endif
@@ -170,7 +170,7 @@ static inline int page_cache_add_speculative(struct page *page, int count)
170{ 170{
171 VM_BUG_ON(in_interrupt()); 171 VM_BUG_ON(in_interrupt());
172 172
173#if !defined(CONFIG_SMP) && defined(CONFIG_CLASSIC_RCU) 173#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
174# ifdef CONFIG_PREEMPT 174# ifdef CONFIG_PREEMPT
175 VM_BUG_ON(!in_atomic()); 175 VM_BUG_ON(!in_atomic());
176# endif 176# endif
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 73b46b6b904f..c8fdcadce437 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -376,6 +376,9 @@
376#define PCI_DEVICE_ID_ATI_IXP600_IDE 0x438c 376#define PCI_DEVICE_ID_ATI_IXP600_IDE 0x438c
377#define PCI_DEVICE_ID_ATI_IXP700_SATA 0x4390 377#define PCI_DEVICE_ID_ATI_IXP700_SATA 0x4390
378#define PCI_DEVICE_ID_ATI_IXP700_IDE 0x439c 378#define PCI_DEVICE_ID_ATI_IXP700_IDE 0x439c
379/* AMD SB Chipset */
380#define PCI_DEVICE_ID_AMD_SB900_IDE 0x780c
381#define PCI_DEVICE_ID_AMD_SB900_SATA_IDE 0x7800
379 382
380#define PCI_VENDOR_ID_VLSI 0x1004 383#define PCI_VENDOR_ID_VLSI 0x1004
381#define PCI_DEVICE_ID_VLSI_82C592 0x0005 384#define PCI_DEVICE_ID_VLSI_82C592 0x0005
@@ -537,6 +540,7 @@
537#define PCI_DEVICE_ID_AMD_8131_BRIDGE 0x7450 540#define PCI_DEVICE_ID_AMD_8131_BRIDGE 0x7450
538#define PCI_DEVICE_ID_AMD_8131_APIC 0x7451 541#define PCI_DEVICE_ID_AMD_8131_APIC 0x7451
539#define PCI_DEVICE_ID_AMD_8132_BRIDGE 0x7458 542#define PCI_DEVICE_ID_AMD_8132_BRIDGE 0x7458
543#define PCI_DEVICE_ID_AMD_CS5535_IDE 0x208F
540#define PCI_DEVICE_ID_AMD_CS5536_ISA 0x2090 544#define PCI_DEVICE_ID_AMD_CS5536_ISA 0x2090
541#define PCI_DEVICE_ID_AMD_CS5536_FLASH 0x2091 545#define PCI_DEVICE_ID_AMD_CS5536_FLASH 0x2091
542#define PCI_DEVICE_ID_AMD_CS5536_AUDIO 0x2093 546#define PCI_DEVICE_ID_AMD_CS5536_AUDIO 0x2093
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index b53f7006cc4e..972f90d7a32f 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -216,6 +216,7 @@ struct perf_counter_attr {
216#define PERF_COUNTER_IOC_REFRESH _IO ('$', 2) 216#define PERF_COUNTER_IOC_REFRESH _IO ('$', 2)
217#define PERF_COUNTER_IOC_RESET _IO ('$', 3) 217#define PERF_COUNTER_IOC_RESET _IO ('$', 3)
218#define PERF_COUNTER_IOC_PERIOD _IOW('$', 4, u64) 218#define PERF_COUNTER_IOC_PERIOD _IOW('$', 4, u64)
219#define PERF_COUNTER_IOC_SET_OUTPUT _IO ('$', 5)
219 220
220enum perf_counter_ioc_flags { 221enum perf_counter_ioc_flags {
221 PERF_IOC_FLAG_GROUP = 1U << 0, 222 PERF_IOC_FLAG_GROUP = 1U << 0,
@@ -415,6 +416,9 @@ enum perf_callchain_context {
415 PERF_CONTEXT_MAX = (__u64)-4095, 416 PERF_CONTEXT_MAX = (__u64)-4095,
416}; 417};
417 418
419#define PERF_FLAG_FD_NO_GROUP (1U << 0)
420#define PERF_FLAG_FD_OUTPUT (1U << 1)
421
418#ifdef __KERNEL__ 422#ifdef __KERNEL__
419/* 423/*
420 * Kernel-internal data types and definitions: 424 * Kernel-internal data types and definitions:
@@ -536,6 +540,7 @@ struct perf_counter {
536 struct list_head sibling_list; 540 struct list_head sibling_list;
537 int nr_siblings; 541 int nr_siblings;
538 struct perf_counter *group_leader; 542 struct perf_counter *group_leader;
543 struct perf_counter *output;
539 const struct pmu *pmu; 544 const struct pmu *pmu;
540 545
541 enum perf_counter_active_state state; 546 enum perf_counter_active_state state;
@@ -761,6 +766,8 @@ extern int sysctl_perf_counter_mlock;
761extern int sysctl_perf_counter_sample_rate; 766extern int sysctl_perf_counter_sample_rate;
762 767
763extern void perf_counter_init(void); 768extern void perf_counter_init(void);
769extern void perf_tpcounter_event(int event_id, u64 addr, u64 count,
770 void *record, int entry_size);
764 771
765#ifndef perf_misc_flags 772#ifndef perf_misc_flags
766#define perf_misc_flags(regs) (user_mode(regs) ? PERF_EVENT_MISC_USER : \ 773#define perf_misc_flags(regs) (user_mode(regs) ? PERF_EVENT_MISC_USER : \
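PERF_COUNTER_IOC_SET_OUTPUT (and the matching PERF_FLAG_FD_OUTPUT syscall flag) redirect one counter's samples into another counter's mmap buffer, so several counters can share a single ring buffer. A hedged userspace sketch, assuming fd_leader and fd_member came from earlier perf counter open calls:

#include <sys/ioctl.h>
#include <linux/perf_counter.h>

/* Route fd_member's samples into fd_leader's mmap'ed buffer. */
static int example_share_buffer(int fd_leader, int fd_member)
{
        return ioctl(fd_member, PERF_COUNTER_IOC_SET_OUTPUT, fd_leader);
}
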
diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h
deleted file mode 100644
index bfd92e1e5d2c..000000000000
--- a/include/linux/rcuclassic.h
+++ /dev/null
@@ -1,178 +0,0 @@
1/*
2 * Read-Copy Update mechanism for mutual exclusion (classic version)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright IBM Corporation, 2001
19 *
20 * Author: Dipankar Sarma <dipankar@in.ibm.com>
21 *
22 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
23 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
24 * Papers:
25 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
26 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
27 *
28 * For detailed explanation of Read-Copy Update mechanism see -
29 * Documentation/RCU
30 *
31 */
32
33#ifndef __LINUX_RCUCLASSIC_H
34#define __LINUX_RCUCLASSIC_H
35
36#include <linux/cache.h>
37#include <linux/spinlock.h>
38#include <linux/threads.h>
39#include <linux/cpumask.h>
40#include <linux/seqlock.h>
41
42#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
43#define RCU_SECONDS_TILL_STALL_CHECK (10 * HZ) /* for rcp->jiffies_stall */
44#define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ) /* for rcp->jiffies_stall */
45#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
46
47/* Global control variables for rcupdate callback mechanism. */
48struct rcu_ctrlblk {
49 long cur; /* Current batch number. */
50 long completed; /* Number of the last completed batch */
51 long pending; /* Number of the last pending batch */
52#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
53 unsigned long gp_start; /* Time at which GP started in jiffies. */
54 unsigned long jiffies_stall;
55 /* Time at which to check for CPU stalls. */
56#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
57
58 int signaled;
59
60 spinlock_t lock ____cacheline_internodealigned_in_smp;
61 DECLARE_BITMAP(cpumask, NR_CPUS); /* CPUs that need to switch for */
62 /* current batch to proceed. */
63} ____cacheline_internodealigned_in_smp;
64
65/* Is batch a before batch b ? */
66static inline int rcu_batch_before(long a, long b)
67{
68 return (a - b) < 0;
69}
70
71/* Is batch a after batch b ? */
72static inline int rcu_batch_after(long a, long b)
73{
74 return (a - b) > 0;
75}
76
77/* Per-CPU data for Read-Copy UPdate. */
78struct rcu_data {
79 /* 1) quiescent state handling : */
80 long quiescbatch; /* Batch # for grace period */
81 int passed_quiesc; /* User-mode/idle loop etc. */
82 int qs_pending; /* core waits for quiesc state */
83
84 /* 2) batch handling */
85 /*
86 * if nxtlist is not NULL, then:
87 * batch:
88 * The batch # for the last entry of nxtlist
89 * [*nxttail[1], NULL = *nxttail[2]):
90 * Entries that batch # <= batch
91 * [*nxttail[0], *nxttail[1]):
92 * Entries that batch # <= batch - 1
93 * [nxtlist, *nxttail[0]):
94 * Entries that batch # <= batch - 2
95 * The grace period for these entries has completed, and
96 * the other grace-period-completed entries may be moved
97 * here temporarily in rcu_process_callbacks().
98 */
99 long batch;
100 struct rcu_head *nxtlist;
101 struct rcu_head **nxttail[3];
102 long qlen; /* # of queued callbacks */
103 struct rcu_head *donelist;
104 struct rcu_head **donetail;
105 long blimit; /* Upper limit on a processed batch */
106 int cpu;
107 struct rcu_head barrier;
108};
109
110/*
111 * Increment the quiescent state counter.
112 * The counter is a bit degenerated: We do not need to know
113 * how many quiescent states passed, just if there was at least
114 * one since the start of the grace period. Thus just a flag.
115 */
116extern void rcu_qsctr_inc(int cpu);
117extern void rcu_bh_qsctr_inc(int cpu);
118
119extern int rcu_pending(int cpu);
120extern int rcu_needs_cpu(int cpu);
121
122#ifdef CONFIG_DEBUG_LOCK_ALLOC
123extern struct lockdep_map rcu_lock_map;
124# define rcu_read_acquire() \
125 lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
126# define rcu_read_release() lock_release(&rcu_lock_map, 1, _THIS_IP_)
127#else
128# define rcu_read_acquire() do { } while (0)
129# define rcu_read_release() do { } while (0)
130#endif
131
132#define __rcu_read_lock() \
133 do { \
134 preempt_disable(); \
135 __acquire(RCU); \
136 rcu_read_acquire(); \
137 } while (0)
138#define __rcu_read_unlock() \
139 do { \
140 rcu_read_release(); \
141 __release(RCU); \
142 preempt_enable(); \
143 } while (0)
144#define __rcu_read_lock_bh() \
145 do { \
146 local_bh_disable(); \
147 __acquire(RCU_BH); \
148 rcu_read_acquire(); \
149 } while (0)
150#define __rcu_read_unlock_bh() \
151 do { \
152 rcu_read_release(); \
153 __release(RCU_BH); \
154 local_bh_enable(); \
155 } while (0)
156
157#define __synchronize_sched() synchronize_rcu()
158
159#define call_rcu_sched(head, func) call_rcu(head, func)
160
161extern void __rcu_init(void);
162#define rcu_init_sched() do { } while (0)
163extern void rcu_check_callbacks(int cpu, int user);
164extern void rcu_restart_cpu(int cpu);
165
166extern long rcu_batches_completed(void);
167extern long rcu_batches_completed_bh(void);
168
169#define rcu_enter_nohz() do { } while (0)
170#define rcu_exit_nohz() do { } while (0)
171
172/* A context switch is a grace period for rcuclassic. */
173static inline int rcu_blocking_is_gp(void)
174{
175 return num_online_cpus() == 1;
176}
177
178#endif /* __LINUX_RCUCLASSIC_H */
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 15fbb3ca634d..95e0615f4d75 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -51,18 +51,26 @@ struct rcu_head {
51 void (*func)(struct rcu_head *head); 51 void (*func)(struct rcu_head *head);
52}; 52};
53 53
54/* Internal to kernel, but needed by rcupreempt.h. */ 54/* Exported common interfaces */
55extern void synchronize_rcu(void);
56extern void synchronize_rcu_bh(void);
57extern void rcu_barrier(void);
58extern void rcu_barrier_bh(void);
59extern void rcu_barrier_sched(void);
60extern void synchronize_sched_expedited(void);
61extern int sched_expedited_torture_stats(char *page);
62
63/* Internal to kernel */
64extern void rcu_init(void);
65extern void rcu_scheduler_starting(void);
66extern int rcu_needs_cpu(int cpu);
55extern int rcu_scheduler_active; 67extern int rcu_scheduler_active;
56 68
57#if defined(CONFIG_CLASSIC_RCU) 69#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
58#include <linux/rcuclassic.h>
59#elif defined(CONFIG_TREE_RCU)
60#include <linux/rcutree.h> 70#include <linux/rcutree.h>
61#elif defined(CONFIG_PREEMPT_RCU)
62#include <linux/rcupreempt.h>
63#else 71#else
64#error "Unknown RCU implementation specified to kernel configuration" 72#error "Unknown RCU implementation specified to kernel configuration"
65#endif /* #else #if defined(CONFIG_CLASSIC_RCU) */ 73#endif
66 74
67#define RCU_HEAD_INIT { .next = NULL, .func = NULL } 75#define RCU_HEAD_INIT { .next = NULL, .func = NULL }
68#define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT 76#define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT
@@ -70,6 +78,16 @@ extern int rcu_scheduler_active;
70 (ptr)->next = NULL; (ptr)->func = NULL; \ 78 (ptr)->next = NULL; (ptr)->func = NULL; \
71} while (0) 79} while (0)
72 80
81#ifdef CONFIG_DEBUG_LOCK_ALLOC
82extern struct lockdep_map rcu_lock_map;
83# define rcu_read_acquire() \
84 lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
85# define rcu_read_release() lock_release(&rcu_lock_map, 1, _THIS_IP_)
86#else
87# define rcu_read_acquire() do { } while (0)
88# define rcu_read_release() do { } while (0)
89#endif
90
73/** 91/**
74 * rcu_read_lock - mark the beginning of an RCU read-side critical section. 92 * rcu_read_lock - mark the beginning of an RCU read-side critical section.
75 * 93 *
@@ -99,7 +117,12 @@ extern int rcu_scheduler_active;
99 * 117 *
100 * It is illegal to block while in an RCU read-side critical section. 118 * It is illegal to block while in an RCU read-side critical section.
101 */ 119 */
102#define rcu_read_lock() __rcu_read_lock() 120static inline void rcu_read_lock(void)
121{
122 __rcu_read_lock();
123 __acquire(RCU);
124 rcu_read_acquire();
125}
103 126
104/** 127/**
105 * rcu_read_unlock - marks the end of an RCU read-side critical section. 128 * rcu_read_unlock - marks the end of an RCU read-side critical section.
@@ -116,7 +139,12 @@ extern int rcu_scheduler_active;
116 * used as well. RCU does not care how the writers keep out of each 139 * used as well. RCU does not care how the writers keep out of each
117 * others' way, as long as they do so. 140 * others' way, as long as they do so.
118 */ 141 */
119#define rcu_read_unlock() __rcu_read_unlock() 142static inline void rcu_read_unlock(void)
143{
144 rcu_read_release();
145 __release(RCU);
146 __rcu_read_unlock();
147}
120 148
121/** 149/**
122 * rcu_read_lock_bh - mark the beginning of a softirq-only RCU critical section 150 * rcu_read_lock_bh - mark the beginning of a softirq-only RCU critical section
@@ -129,14 +157,24 @@ extern int rcu_scheduler_active;
129 * can use just rcu_read_lock(). 157 * can use just rcu_read_lock().
130 * 158 *
131 */ 159 */
132#define rcu_read_lock_bh() __rcu_read_lock_bh() 160static inline void rcu_read_lock_bh(void)
161{
162 __rcu_read_lock_bh();
163 __acquire(RCU_BH);
164 rcu_read_acquire();
165}
133 166
134/* 167/*
135 * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section 168 * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
136 * 169 *
137 * See rcu_read_lock_bh() for more information. 170 * See rcu_read_lock_bh() for more information.
138 */ 171 */
139#define rcu_read_unlock_bh() __rcu_read_unlock_bh() 172static inline void rcu_read_unlock_bh(void)
173{
174 rcu_read_release();
175 __release(RCU_BH);
176 __rcu_read_unlock_bh();
177}
140 178
141/** 179/**
142 * rcu_read_lock_sched - mark the beginning of a RCU-classic critical section 180 * rcu_read_lock_sched - mark the beginning of a RCU-classic critical section
@@ -147,17 +185,34 @@ extern int rcu_scheduler_active;
147 * - call_rcu_sched() and rcu_barrier_sched() 185 * - call_rcu_sched() and rcu_barrier_sched()
148 * on the write-side to insure proper synchronization. 186 * on the write-side to insure proper synchronization.
149 */ 187 */
150#define rcu_read_lock_sched() preempt_disable() 188static inline void rcu_read_lock_sched(void)
151#define rcu_read_lock_sched_notrace() preempt_disable_notrace() 189{
190 preempt_disable();
191 __acquire(RCU_SCHED);
192 rcu_read_acquire();
193}
194static inline notrace void rcu_read_lock_sched_notrace(void)
195{
196 preempt_disable_notrace();
197 __acquire(RCU_SCHED);
198}
152 199
153/* 200/*
154 * rcu_read_unlock_sched - marks the end of a RCU-classic critical section 201 * rcu_read_unlock_sched - marks the end of a RCU-classic critical section
155 * 202 *
156 * See rcu_read_lock_sched for more information. 203 * See rcu_read_lock_sched for more information.
157 */ 204 */
158#define rcu_read_unlock_sched() preempt_enable() 205static inline void rcu_read_unlock_sched(void)
159#define rcu_read_unlock_sched_notrace() preempt_enable_notrace() 206{
160 207 rcu_read_release();
208 __release(RCU_SCHED);
209 preempt_enable();
210}
211static inline notrace void rcu_read_unlock_sched_notrace(void)
212{
213 __release(RCU_SCHED);
214 preempt_enable_notrace();
215}
161 216
162 217
163/** 218/**
@@ -259,15 +314,4 @@ extern void call_rcu(struct rcu_head *head,
259extern void call_rcu_bh(struct rcu_head *head, 314extern void call_rcu_bh(struct rcu_head *head,
260 void (*func)(struct rcu_head *head)); 315 void (*func)(struct rcu_head *head));
261 316
262/* Exported common interfaces */
263extern void synchronize_rcu(void);
264extern void rcu_barrier(void);
265extern void rcu_barrier_bh(void);
266extern void rcu_barrier_sched(void);
267
268/* Internal to kernel */
269extern void rcu_init(void);
270extern void rcu_scheduler_starting(void);
271extern int rcu_needs_cpu(int cpu);
272
273#endif /* __LINUX_RCUPDATE_H */ 317#endif /* __LINUX_RCUPDATE_H */
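rcu_read_lock() and friends become static inlines that wrap the implementation-provided __rcu_read_lock()/__rcu_read_unlock() with sparse and lockdep annotations; callers are unaffected. For reference, a minimal reader under the usual RCU pattern (the structure and pointer are illustrative):

#include <linux/rcupdate.h>

struct example_cfg {
        int     value;
};

static struct example_cfg *example_cfg; /* published via rcu_assign_pointer() */

static int example_read_value(void)
{
        struct example_cfg *cfg;
        int val = -1;

        rcu_read_lock();
        cfg = rcu_dereference(example_cfg);
        if (cfg)
                val = cfg->value;
        rcu_read_unlock();

        return val;
}
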
diff --git a/include/linux/rcupreempt.h b/include/linux/rcupreempt.h
deleted file mode 100644
index fce522782ffa..000000000000
--- a/include/linux/rcupreempt.h
+++ /dev/null
@@ -1,127 +0,0 @@
1/*
2 * Read-Copy Update mechanism for mutual exclusion (RT implementation)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright (C) IBM Corporation, 2006
19 *
20 * Author: Paul McKenney <paulmck@us.ibm.com>
21 *
22 * Based on the original work by Paul McKenney <paul.mckenney@us.ibm.com>
23 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
24 * Papers:
25 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
26 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
27 *
28 * For detailed explanation of Read-Copy Update mechanism see -
29 * Documentation/RCU
30 *
31 */
32
33#ifndef __LINUX_RCUPREEMPT_H
34#define __LINUX_RCUPREEMPT_H
35
36#include <linux/cache.h>
37#include <linux/spinlock.h>
38#include <linux/threads.h>
39#include <linux/smp.h>
40#include <linux/cpumask.h>
41#include <linux/seqlock.h>
42
43extern void rcu_qsctr_inc(int cpu);
44static inline void rcu_bh_qsctr_inc(int cpu) { }
45
46/*
47 * Someone might want to pass call_rcu_bh as a function pointer.
48 * So this needs to just be a rename and not a macro function.
49 * (no parentheses)
50 */
51#define call_rcu_bh call_rcu
52
53/**
54 * call_rcu_sched - Queue RCU callback for invocation after sched grace period.
55 * @head: structure to be used for queueing the RCU updates.
56 * @func: actual update function to be invoked after the grace period
57 *
58 * The update function will be invoked some time after a full
59 * synchronize_sched()-style grace period elapses, in other words after
60 * all currently executing preempt-disabled sections of code (including
61 * hardirq handlers, NMI handlers, and local_irq_save() blocks) have
62 * completed.
63 */
64extern void call_rcu_sched(struct rcu_head *head,
65 void (*func)(struct rcu_head *head));
66
67extern void __rcu_read_lock(void) __acquires(RCU);
68extern void __rcu_read_unlock(void) __releases(RCU);
69extern int rcu_pending(int cpu);
70extern int rcu_needs_cpu(int cpu);
71
72#define __rcu_read_lock_bh() { rcu_read_lock(); local_bh_disable(); }
73#define __rcu_read_unlock_bh() { local_bh_enable(); rcu_read_unlock(); }
74
75extern void __synchronize_sched(void);
76
77extern void __rcu_init(void);
78extern void rcu_init_sched(void);
79extern void rcu_check_callbacks(int cpu, int user);
80extern void rcu_restart_cpu(int cpu);
81extern long rcu_batches_completed(void);
82
83/*
84 * Return the number of RCU batches processed thus far. Useful for debug
85 * and statistic. The _bh variant is identifcal to straight RCU
86 */
87static inline long rcu_batches_completed_bh(void)
88{
89 return rcu_batches_completed();
90}
91
92#ifdef CONFIG_RCU_TRACE
93struct rcupreempt_trace;
94extern long *rcupreempt_flipctr(int cpu);
95extern long rcupreempt_data_completed(void);
96extern int rcupreempt_flip_flag(int cpu);
97extern int rcupreempt_mb_flag(int cpu);
98extern char *rcupreempt_try_flip_state_name(void);
99extern struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu);
100#endif
101
102struct softirq_action;
103
104#ifdef CONFIG_NO_HZ
105extern void rcu_enter_nohz(void);
106extern void rcu_exit_nohz(void);
107#else
108# define rcu_enter_nohz() do { } while (0)
109# define rcu_exit_nohz() do { } while (0)
110#endif
111
112/*
113 * A context switch is a grace period for rcupreempt synchronize_rcu()
114 * only during early boot, before the scheduler has been initialized.
115 * So, how the heck do we get a context switch? Well, if the caller
116 * invokes synchronize_rcu(), they are willing to accept a context
117 * switch, so we simply pretend that one happened.
118 *
119 * After boot, there might be a blocked or preempted task in an RCU
120 * read-side critical section, so we cannot then take the fastpath.
121 */
122static inline int rcu_blocking_is_gp(void)
123{
124 return num_online_cpus() == 1 && !rcu_scheduler_active;
125}
126
127#endif /* __LINUX_RCUPREEMPT_H */
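
As a side note on the call_rcu_sched() kerneldoc above: a typical caller embeds a
struct rcu_head in its own object and frees the object from the callback once a
sched grace period has elapsed. A rough sketch with hypothetical names (struct foo,
foo_reclaim, foo_release are illustrations, not part of this patch):

	struct foo {
		int data;
		struct rcu_head rcu;
	};

	static void foo_reclaim(struct rcu_head *head)
	{
		struct foo *fp = container_of(head, struct foo, rcu);

		kfree(fp);	/* runs only after all preempt-disabled regions finish */
	}

	static void foo_release(struct foo *fp)
	{
		call_rcu_sched(&fp->rcu, foo_reclaim);
	}

Because the callback is deferred past every currently running preempt-disabled
section, the kfree() cannot race with readers that fetched the old pointer under
preempt_disable().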
diff --git a/include/linux/rcupreempt_trace.h b/include/linux/rcupreempt_trace.h
deleted file mode 100644
index b99ae073192a..000000000000
--- a/include/linux/rcupreempt_trace.h
+++ /dev/null
@@ -1,97 +0,0 @@
1/*
2 * Read-Copy Update mechanism for mutual exclusion (RT implementation)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright (C) IBM Corporation, 2006
19 *
20 * Author: Paul McKenney <paulmck@us.ibm.com>
21 *
22 * Based on the original work by Paul McKenney <paul.mckenney@us.ibm.com>
23 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
24 * Papers:
25 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
26 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
27 *
28 * For detailed explanation of the Preemptible Read-Copy Update mechanism see -
29 * http://lwn.net/Articles/253651/
30 */
31
32#ifndef __LINUX_RCUPREEMPT_TRACE_H
33#define __LINUX_RCUPREEMPT_TRACE_H
34
35#include <linux/types.h>
36#include <linux/kernel.h>
37
38#include <asm/atomic.h>
39
40/*
41 * PREEMPT_RCU data structures.
42 */
43
44struct rcupreempt_trace {
45 long next_length;
46 long next_add;
47 long wait_length;
48 long wait_add;
49 long done_length;
50 long done_add;
51 long done_remove;
52 atomic_t done_invoked;
53 long rcu_check_callbacks;
54 atomic_t rcu_try_flip_1;
55 atomic_t rcu_try_flip_e1;
56 long rcu_try_flip_i1;
57 long rcu_try_flip_ie1;
58 long rcu_try_flip_g1;
59 long rcu_try_flip_a1;
60 long rcu_try_flip_ae1;
61 long rcu_try_flip_a2;
62 long rcu_try_flip_z1;
63 long rcu_try_flip_ze1;
64 long rcu_try_flip_z2;
65 long rcu_try_flip_m1;
66 long rcu_try_flip_me1;
67 long rcu_try_flip_m2;
68};
69
70#ifdef CONFIG_RCU_TRACE
71#define RCU_TRACE(fn, arg) fn(arg);
72#else
73#define RCU_TRACE(fn, arg)
74#endif
75
76extern void rcupreempt_trace_move2done(struct rcupreempt_trace *trace);
77extern void rcupreempt_trace_move2wait(struct rcupreempt_trace *trace);
78extern void rcupreempt_trace_try_flip_1(struct rcupreempt_trace *trace);
79extern void rcupreempt_trace_try_flip_e1(struct rcupreempt_trace *trace);
80extern void rcupreempt_trace_try_flip_i1(struct rcupreempt_trace *trace);
81extern void rcupreempt_trace_try_flip_ie1(struct rcupreempt_trace *trace);
82extern void rcupreempt_trace_try_flip_g1(struct rcupreempt_trace *trace);
83extern void rcupreempt_trace_try_flip_a1(struct rcupreempt_trace *trace);
84extern void rcupreempt_trace_try_flip_ae1(struct rcupreempt_trace *trace);
85extern void rcupreempt_trace_try_flip_a2(struct rcupreempt_trace *trace);
86extern void rcupreempt_trace_try_flip_z1(struct rcupreempt_trace *trace);
87extern void rcupreempt_trace_try_flip_ze1(struct rcupreempt_trace *trace);
88extern void rcupreempt_trace_try_flip_z2(struct rcupreempt_trace *trace);
89extern void rcupreempt_trace_try_flip_m1(struct rcupreempt_trace *trace);
90extern void rcupreempt_trace_try_flip_me1(struct rcupreempt_trace *trace);
91extern void rcupreempt_trace_try_flip_m2(struct rcupreempt_trace *trace);
92extern void rcupreempt_trace_check_callbacks(struct rcupreempt_trace *trace);
93extern void rcupreempt_trace_done_remove(struct rcupreempt_trace *trace);
94extern void rcupreempt_trace_invoke(struct rcupreempt_trace *trace);
95extern void rcupreempt_trace_next_add(struct rcupreempt_trace *trace);
96
97#endif /* __LINUX_RCUPREEMPT_TRACE_H */
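
For reference, the RCU_TRACE() wrapper removed with this file existed so that trace
calls compile away entirely when CONFIG_RCU_TRACE is off. A minimal sketch of how a
call site used it (example_invoke_path() is a hypothetical function, not from this
patch):

	static void example_invoke_path(struct rcupreempt_trace *trace)
	{
		/* Expands to rcupreempt_trace_invoke(trace); under CONFIG_RCU_TRACE,
		 * and to nothing otherwise. */
		RCU_TRACE(rcupreempt_trace_invoke, trace);
	}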
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 5a5153806c42..a89307717825 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -30,264 +30,57 @@
30#ifndef __LINUX_RCUTREE_H 30#ifndef __LINUX_RCUTREE_H
31#define __LINUX_RCUTREE_H 31#define __LINUX_RCUTREE_H
32 32
33#include <linux/cache.h> 33extern void rcu_sched_qs(int cpu);
34#include <linux/spinlock.h> 34extern void rcu_bh_qs(int cpu);
35#include <linux/threads.h>
36#include <linux/cpumask.h>
37#include <linux/seqlock.h>
38 35
39/* 36extern int rcu_needs_cpu(int cpu);
40 * Define shape of hierarchy based on NR_CPUS and CONFIG_RCU_FANOUT.
41 * In theory, it should be possible to add more levels straightforwardly.
42 * In practice, this has not been tested, so there is probably some
43 * bug somewhere.
44 */
45#define MAX_RCU_LVLS 3
46#define RCU_FANOUT (CONFIG_RCU_FANOUT)
47#define RCU_FANOUT_SQ (RCU_FANOUT * RCU_FANOUT)
48#define RCU_FANOUT_CUBE (RCU_FANOUT_SQ * RCU_FANOUT)
49
50#if NR_CPUS <= RCU_FANOUT
51# define NUM_RCU_LVLS 1
52# define NUM_RCU_LVL_0 1
53# define NUM_RCU_LVL_1 (NR_CPUS)
54# define NUM_RCU_LVL_2 0
55# define NUM_RCU_LVL_3 0
56#elif NR_CPUS <= RCU_FANOUT_SQ
57# define NUM_RCU_LVLS 2
58# define NUM_RCU_LVL_0 1
59# define NUM_RCU_LVL_1 (((NR_CPUS) + RCU_FANOUT - 1) / RCU_FANOUT)
60# define NUM_RCU_LVL_2 (NR_CPUS)
61# define NUM_RCU_LVL_3 0
62#elif NR_CPUS <= RCU_FANOUT_CUBE
63# define NUM_RCU_LVLS 3
64# define NUM_RCU_LVL_0 1
65# define NUM_RCU_LVL_1 (((NR_CPUS) + RCU_FANOUT_SQ - 1) / RCU_FANOUT_SQ)
66# define NUM_RCU_LVL_2 (((NR_CPUS) + (RCU_FANOUT) - 1) / (RCU_FANOUT))
67# define NUM_RCU_LVL_3 NR_CPUS
68#else
69# error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
70#endif /* #if (NR_CPUS) <= RCU_FANOUT */
71
72#define RCU_SUM (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3)
73#define NUM_RCU_NODES (RCU_SUM - NR_CPUS)
74
75/*
76 * Dynticks per-CPU state.
77 */
78struct rcu_dynticks {
79 int dynticks_nesting; /* Track nesting level, sort of. */
80 int dynticks; /* Even value for dynticks-idle, else odd. */
81 int dynticks_nmi; /* Even value for either dynticks-idle or */
82 /* not in nmi handler, else odd. So this */
83 /* remains even for nmi from irq handler. */
84};
85
86/*
87 * Definition for node within the RCU grace-period-detection hierarchy.
88 */
89struct rcu_node {
90 spinlock_t lock;
91 unsigned long qsmask; /* CPUs or groups that need to switch in */
92 /* order for current grace period to proceed.*/
93 unsigned long qsmaskinit;
94 /* Per-GP initialization for qsmask. */
95 unsigned long grpmask; /* Mask to apply to parent qsmask. */
96 int grplo; /* lowest-numbered CPU or group here. */
97 int grphi; /* highest-numbered CPU or group here. */
98 u8 grpnum; /* CPU/group number for next level up. */
99 u8 level; /* root is at level 0. */
100 struct rcu_node *parent;
101} ____cacheline_internodealigned_in_smp;
102
103/* Index values for nxttail array in struct rcu_data. */
104#define RCU_DONE_TAIL 0 /* Also RCU_WAIT head. */
105#define RCU_WAIT_TAIL 1 /* Also RCU_NEXT_READY head. */
106#define RCU_NEXT_READY_TAIL 2 /* Also RCU_NEXT head. */
107#define RCU_NEXT_TAIL 3
108#define RCU_NEXT_SIZE 4
109
110/* Per-CPU data for read-copy update. */
111struct rcu_data {
112 /* 1) quiescent-state and grace-period handling : */
113 long completed; /* Track rsp->completed gp number */
114 /* in order to detect GP end. */
115 long gpnum; /* Highest gp number that this CPU */
116 /* is aware of having started. */
117 long passed_quiesc_completed;
118 /* Value of completed at time of qs. */
119 bool passed_quiesc; /* User-mode/idle loop etc. */
120 bool qs_pending; /* Core waits for quiesc state. */
121 bool beenonline; /* CPU online at least once. */
122 struct rcu_node *mynode; /* This CPU's leaf of hierarchy */
123 unsigned long grpmask; /* Mask to apply to leaf qsmask. */
124
125 /* 2) batch handling */
126 /*
127 * If nxtlist is not NULL, it is partitioned as follows.
128 * Any of the partitions might be empty, in which case the
129 * pointer to that partition will be equal to the pointer for
130 * the following partition. When the list is empty, all of
131 * the nxttail elements point to nxtlist, which is NULL.
132 *
133 * [*nxttail[RCU_NEXT_READY_TAIL], NULL = *nxttail[RCU_NEXT_TAIL]):
134 * Entries that might have arrived after current GP ended
135 * [*nxttail[RCU_WAIT_TAIL], *nxttail[RCU_NEXT_READY_TAIL]):
136 * Entries known to have arrived before current GP ended
137 * [*nxttail[RCU_DONE_TAIL], *nxttail[RCU_WAIT_TAIL]):
138 * Entries that batch # <= ->completed - 1: waiting for current GP
139 * [nxtlist, *nxttail[RCU_DONE_TAIL]):
140 * Entries that batch # <= ->completed
141 * The grace period for these entries has completed, and
142 * the other grace-period-completed entries may be moved
143 * here temporarily in rcu_process_callbacks().
144 */
145 struct rcu_head *nxtlist;
146 struct rcu_head **nxttail[RCU_NEXT_SIZE];
147 long qlen; /* # of queued callbacks */
148 long blimit; /* Upper limit on a processed batch */
149
150#ifdef CONFIG_NO_HZ
151 /* 3) dynticks interface. */
152 struct rcu_dynticks *dynticks; /* Shared per-CPU dynticks state. */
153 int dynticks_snap; /* Per-GP tracking for dynticks. */
154 int dynticks_nmi_snap; /* Per-GP tracking for dynticks_nmi. */
155#endif /* #ifdef CONFIG_NO_HZ */
156
157 /* 4) reasons this CPU needed to be kicked by force_quiescent_state */
158#ifdef CONFIG_NO_HZ
159 unsigned long dynticks_fqs; /* Kicked due to dynticks idle. */
160#endif /* #ifdef CONFIG_NO_HZ */
161 unsigned long offline_fqs; /* Kicked due to being offline. */
162 unsigned long resched_ipi; /* Sent a resched IPI. */
163
164 /* 5) __rcu_pending() statistics. */
165 long n_rcu_pending; /* rcu_pending() calls since boot. */
166 long n_rp_qs_pending;
167 long n_rp_cb_ready;
168 long n_rp_cpu_needs_gp;
169 long n_rp_gp_completed;
170 long n_rp_gp_started;
171 long n_rp_need_fqs;
172 long n_rp_need_nothing;
173
174 int cpu;
175};
176
177/* Values for signaled field in struct rcu_state. */
178#define RCU_GP_INIT 0 /* Grace period being initialized. */
179#define RCU_SAVE_DYNTICK 1 /* Need to scan dyntick state. */
180#define RCU_FORCE_QS 2 /* Need to force quiescent state. */
181#ifdef CONFIG_NO_HZ
182#define RCU_SIGNAL_INIT RCU_SAVE_DYNTICK
183#else /* #ifdef CONFIG_NO_HZ */
184#define RCU_SIGNAL_INIT RCU_FORCE_QS
185#endif /* #else #ifdef CONFIG_NO_HZ */
186
187#define RCU_JIFFIES_TILL_FORCE_QS 3 /* for rsp->jiffies_force_qs */
188#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
189#define RCU_SECONDS_TILL_STALL_CHECK (10 * HZ) /* for rsp->jiffies_stall */
190#define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ) /* for rsp->jiffies_stall */
191#define RCU_STALL_RAT_DELAY 2 /* Allow other CPUs time */
192 /* to take at least one */
193 /* scheduling clock irq */
194 /* before ratting on them. */
195
196#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
197
198/*
199 * RCU global state, including node hierarchy. This hierarchy is
200 * represented in "heap" form in a dense array. The root (first level)
201 * of the hierarchy is in ->node[0] (referenced by ->level[0]), the second
202 * level in ->node[1] through ->node[m] (->node[1] referenced by ->level[1]),
203 * and the third level in ->node[m+1] and following (->node[m+1] referenced
204 * by ->level[2]). The number of levels is determined by the number of
205 * CPUs and by CONFIG_RCU_FANOUT. Small systems will have a "hierarchy"
206 * consisting of a single rcu_node.
207 */
208struct rcu_state {
209 struct rcu_node node[NUM_RCU_NODES]; /* Hierarchy. */
210 struct rcu_node *level[NUM_RCU_LVLS]; /* Hierarchy levels. */
211 u32 levelcnt[MAX_RCU_LVLS + 1]; /* # nodes in each level. */
212 u8 levelspread[NUM_RCU_LVLS]; /* kids/node in each level. */
213 struct rcu_data *rda[NR_CPUS]; /* array of rdp pointers. */
214
215 /* The following fields are guarded by the root rcu_node's lock. */
216
217 u8 signaled ____cacheline_internodealigned_in_smp;
218 /* Force QS state. */
219 long gpnum; /* Current gp number. */
220 long completed; /* # of last completed gp. */
221 spinlock_t onofflock; /* exclude on/offline and */
222 /* starting new GP. */
223 spinlock_t fqslock; /* Only one task forcing */
224 /* quiescent states. */
225 unsigned long jiffies_force_qs; /* Time at which to invoke */
226 /* force_quiescent_state(). */
227 unsigned long n_force_qs; /* Number of calls to */
228 /* force_quiescent_state(). */
229 unsigned long n_force_qs_lh; /* ~Number of calls leaving */
230 /* due to lock unavailable. */
231 unsigned long n_force_qs_ngp; /* Number of calls leaving */
232 /* due to no GP active. */
233#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
234 unsigned long gp_start; /* Time at which GP started, */
235 /* but in jiffies. */
236 unsigned long jiffies_stall; /* Time at which to check */
237 /* for CPU stalls. */
238#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
239#ifdef CONFIG_NO_HZ
240 long dynticks_completed; /* Value of completed @ snap. */
241#endif /* #ifdef CONFIG_NO_HZ */
242};
243 37
244extern void rcu_qsctr_inc(int cpu); 38#ifdef CONFIG_TREE_PREEMPT_RCU
245extern void rcu_bh_qsctr_inc(int cpu);
246 39
247extern int rcu_pending(int cpu); 40extern void __rcu_read_lock(void);
248extern int rcu_needs_cpu(int cpu); 41extern void __rcu_read_unlock(void);
42extern void exit_rcu(void);
249 43
250#ifdef CONFIG_DEBUG_LOCK_ALLOC 44#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
251extern struct lockdep_map rcu_lock_map;
252# define rcu_read_acquire() \
253 lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
254# define rcu_read_release() lock_release(&rcu_lock_map, 1, _THIS_IP_)
255#else
256# define rcu_read_acquire() do { } while (0)
257# define rcu_read_release() do { } while (0)
258#endif
259 45
260static inline void __rcu_read_lock(void) 46static inline void __rcu_read_lock(void)
261{ 47{
262 preempt_disable(); 48 preempt_disable();
263 __acquire(RCU);
264 rcu_read_acquire();
265} 49}
50
266static inline void __rcu_read_unlock(void) 51static inline void __rcu_read_unlock(void)
267{ 52{
268 rcu_read_release();
269 __release(RCU);
270 preempt_enable(); 53 preempt_enable();
271} 54}
55
56static inline void exit_rcu(void)
57{
58}
59
60#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
61
272static inline void __rcu_read_lock_bh(void) 62static inline void __rcu_read_lock_bh(void)
273{ 63{
274 local_bh_disable(); 64 local_bh_disable();
275 __acquire(RCU_BH);
276 rcu_read_acquire();
277} 65}
278static inline void __rcu_read_unlock_bh(void) 66static inline void __rcu_read_unlock_bh(void)
279{ 67{
280 rcu_read_release();
281 __release(RCU_BH);
282 local_bh_enable(); 68 local_bh_enable();
283} 69}
284 70
285#define __synchronize_sched() synchronize_rcu() 71#define __synchronize_sched() synchronize_rcu()
286 72
287#define call_rcu_sched(head, func) call_rcu(head, func) 73extern void call_rcu_sched(struct rcu_head *head,
74 void (*func)(struct rcu_head *rcu));
288 75
289static inline void rcu_init_sched(void) 76static inline void synchronize_rcu_expedited(void)
290{ 77{
78 synchronize_sched_expedited();
79}
80
81static inline void synchronize_rcu_bh_expedited(void)
82{
83 synchronize_sched_expedited();
291} 84}
292 85
293extern void __rcu_init(void); 86extern void __rcu_init(void);
@@ -296,6 +89,11 @@ extern void rcu_restart_cpu(int cpu);
296 89
297extern long rcu_batches_completed(void); 90extern long rcu_batches_completed(void);
298extern long rcu_batches_completed_bh(void); 91extern long rcu_batches_completed_bh(void);
92extern long rcu_batches_completed_sched(void);
93
94static inline void rcu_init_sched(void)
95{
96}
299 97
300#ifdef CONFIG_NO_HZ 98#ifdef CONFIG_NO_HZ
301void rcu_enter_nohz(void); 99void rcu_enter_nohz(void);
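
With CONFIG_TREE_PREEMPT_RCU disabled, the __rcu_read_lock()/__rcu_read_unlock()
definitions above reduce to preempt_disable()/preempt_enable(), so the usual
read-side pattern still applies. A hedged sketch (gbl_foo and foo_get_a() are
hypothetical names, not from this patch):

	static struct foo *gbl_foo;	/* RCU-protected pointer, updated elsewhere */

	static int foo_get_a(void)
	{
		struct foo *fp;
		int ret;

		rcu_read_lock();	/* preempt_disable() without TREE_PREEMPT_RCU */
		fp = rcu_dereference(gbl_foo);
		ret = fp ? fp->a : -1;
		rcu_read_unlock();	/* preempt_enable() without TREE_PREEMPT_RCU */
		return ret;
	}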
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 29f8599e6bea..5fcc31ed5771 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -75,20 +75,6 @@ ring_buffer_event_time_delta(struct ring_buffer_event *event)
75} 75}
76 76
77/* 77/*
78 * ring_buffer_event_discard can discard any event in the ring buffer.
79 * it is up to the caller to protect against a reader from
80 * consuming it or a writer from wrapping and replacing it.
81 *
82 * No external protection is needed if this is called before
83 * the event is committed. But in that case it would be better to
84 * use ring_buffer_discard_commit.
85 *
86 * Note, if an event that has not been committed is discarded
87 * with ring_buffer_event_discard, it must still be committed.
88 */
89void ring_buffer_event_discard(struct ring_buffer_event *event);
90
91/*
92 * ring_buffer_discard_commit will remove an event that has not 78 * ring_buffer_discard_commit will remove an event that has not
93 * been committed yet. If this is used, then ring_buffer_unlock_commit 79 * been committed yet. If this is used, then ring_buffer_unlock_commit
94 * must not be called on the discarded event. This function 80 * must not be called on the discarded event. This function
@@ -154,8 +140,17 @@ unsigned long ring_buffer_size(struct ring_buffer *buffer);
154void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu); 140void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu);
155void ring_buffer_reset(struct ring_buffer *buffer); 141void ring_buffer_reset(struct ring_buffer *buffer);
156 142
143#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
157int ring_buffer_swap_cpu(struct ring_buffer *buffer_a, 144int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
158 struct ring_buffer *buffer_b, int cpu); 145 struct ring_buffer *buffer_b, int cpu);
146#else
147static inline int
148ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
149 struct ring_buffer *buffer_b, int cpu)
150{
151 return -ENODEV;
152}
153#endif
159 154
160int ring_buffer_empty(struct ring_buffer *buffer); 155int ring_buffer_empty(struct ring_buffer *buffer);
161int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu); 156int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu);
@@ -170,7 +165,6 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer);
170unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu); 165unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu);
171unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu); 166unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu);
172unsigned long ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu); 167unsigned long ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu);
173unsigned long ring_buffer_nmi_dropped_cpu(struct ring_buffer *buffer, int cpu);
174 168
175u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu); 169u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu);
176void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer, 170void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
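
Since ring_buffer_swap_cpu() now compiles to a stub returning -ENODEV when
CONFIG_RING_BUFFER_ALLOW_SWAP is not set, callers have to tolerate that error. A
rough caller sketch (update_max_snapshot() and the buffer names are hypothetical,
not from this patch):

	static void update_max_snapshot(struct ring_buffer *max_buf,
					struct ring_buffer *main_buf, int cpu)
	{
		int ret = ring_buffer_swap_cpu(max_buf, main_buf, cpu);

		if (ret == -ENODEV)		/* per-cpu swap compiled out */
			pr_debug("ring buffer swap not available\n");
		else if (ret)
			pr_debug("ring buffer swap failed: %d\n", ret);
	}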
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 0f1ea4a66957..f3d74bd04d18 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -38,6 +38,8 @@
38#define SCHED_BATCH 3 38#define SCHED_BATCH 3
39/* SCHED_ISO: reserved but not implemented yet */ 39/* SCHED_ISO: reserved but not implemented yet */
40#define SCHED_IDLE 5 40#define SCHED_IDLE 5
41/* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */
42#define SCHED_RESET_ON_FORK 0x40000000
41 43
42#ifdef __KERNEL__ 44#ifdef __KERNEL__
43 45
@@ -796,18 +798,19 @@ enum cpu_idle_type {
796#define SCHED_LOAD_SCALE_FUZZ SCHED_LOAD_SCALE 798#define SCHED_LOAD_SCALE_FUZZ SCHED_LOAD_SCALE
797 799
798#ifdef CONFIG_SMP 800#ifdef CONFIG_SMP
799#define SD_LOAD_BALANCE 1 /* Do load balancing on this domain. */ 801#define SD_LOAD_BALANCE 0x0001 /* Do load balancing on this domain. */
800#define SD_BALANCE_NEWIDLE 2 /* Balance when about to become idle */ 802#define SD_BALANCE_NEWIDLE 0x0002 /* Balance when about to become idle */
801#define SD_BALANCE_EXEC 4 /* Balance on exec */ 803#define SD_BALANCE_EXEC 0x0004 /* Balance on exec */
802#define SD_BALANCE_FORK 8 /* Balance on fork, clone */ 804#define SD_BALANCE_FORK 0x0008 /* Balance on fork, clone */
803#define SD_WAKE_IDLE 16 /* Wake to idle CPU on task wakeup */ 805#define SD_WAKE_IDLE 0x0010 /* Wake to idle CPU on task wakeup */
804#define SD_WAKE_AFFINE 32 /* Wake task to waking CPU */ 806#define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */
805#define SD_WAKE_BALANCE 64 /* Perform balancing at task wakeup */ 807#define SD_WAKE_BALANCE 0x0040 /* Perform balancing at task wakeup */
806#define SD_SHARE_CPUPOWER 128 /* Domain members share cpu power */ 808#define SD_SHARE_CPUPOWER 0x0080 /* Domain members share cpu power */
807#define SD_POWERSAVINGS_BALANCE 256 /* Balance for power savings */ 809#define SD_POWERSAVINGS_BALANCE 0x0100 /* Balance for power savings */
808#define SD_SHARE_PKG_RESOURCES 512 /* Domain members share cpu pkg resources */ 810#define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */
809#define SD_SERIALIZE 1024 /* Only a single load balancing instance */ 811#define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */
810#define SD_WAKE_IDLE_FAR 2048 /* Gain latency sacrificing cache hit */ 812#define SD_WAKE_IDLE_FAR 0x0800 /* Gain latency sacrificing cache hit */
813#define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */
811 814
812enum powersavings_balance_level { 815enum powersavings_balance_level {
813 POWERSAVINGS_BALANCE_NONE = 0, /* No power saving load balance */ 816 POWERSAVINGS_BALANCE_NONE = 0, /* No power saving load balance */
@@ -827,7 +830,7 @@ static inline int sd_balance_for_mc_power(void)
827 if (sched_smt_power_savings) 830 if (sched_smt_power_savings)
828 return SD_POWERSAVINGS_BALANCE; 831 return SD_POWERSAVINGS_BALANCE;
829 832
830 return 0; 833 return SD_PREFER_SIBLING;
831} 834}
832 835
833static inline int sd_balance_for_package_power(void) 836static inline int sd_balance_for_package_power(void)
@@ -835,7 +838,7 @@ static inline int sd_balance_for_package_power(void)
835 if (sched_mc_power_savings | sched_smt_power_savings) 838 if (sched_mc_power_savings | sched_smt_power_savings)
836 return SD_POWERSAVINGS_BALANCE; 839 return SD_POWERSAVINGS_BALANCE;
837 840
838 return 0; 841 return SD_PREFER_SIBLING;
839} 842}
840 843
841/* 844/*
@@ -857,15 +860,9 @@ struct sched_group {
857 860
858 /* 861 /*
859 * CPU power of this group, SCHED_LOAD_SCALE being max power for a 862 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
860 * single CPU. This is read only (except for setup, hotplug CPU). 863 * single CPU.
861 * Note : Never change cpu_power without recompute its reciprocal
862 */ 864 */
863 unsigned int __cpu_power; 865 unsigned int cpu_power;
864 /*
865 * reciprocal value of cpu_power to avoid expensive divides
866 * (see include/linux/reciprocal_div.h)
867 */
868 u32 reciprocal_cpu_power;
869 866
870 /* 867 /*
871 * The CPUs this group covers. 868 * The CPUs this group covers.
@@ -918,6 +915,7 @@ struct sched_domain {
918 unsigned int newidle_idx; 915 unsigned int newidle_idx;
919 unsigned int wake_idx; 916 unsigned int wake_idx;
920 unsigned int forkexec_idx; 917 unsigned int forkexec_idx;
918 unsigned int smt_gain;
921 int flags; /* See SD_* */ 919 int flags; /* See SD_* */
922 enum sched_domain_level level; 920 enum sched_domain_level level;
923 921
@@ -1045,7 +1043,6 @@ struct sched_class {
1045 struct rq *busiest, struct sched_domain *sd, 1043 struct rq *busiest, struct sched_domain *sd,
1046 enum cpu_idle_type idle); 1044 enum cpu_idle_type idle);
1047 void (*pre_schedule) (struct rq *this_rq, struct task_struct *task); 1045 void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
1048 int (*needs_post_schedule) (struct rq *this_rq);
1049 void (*post_schedule) (struct rq *this_rq); 1046 void (*post_schedule) (struct rq *this_rq);
1050 void (*task_wake_up) (struct rq *this_rq, struct task_struct *task); 1047 void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
1051 1048
@@ -1110,6 +1107,8 @@ struct sched_entity {
1110 u64 wait_max; 1107 u64 wait_max;
1111 u64 wait_count; 1108 u64 wait_count;
1112 u64 wait_sum; 1109 u64 wait_sum;
1110 u64 iowait_count;
1111 u64 iowait_sum;
1113 1112
1114 u64 sleep_start; 1113 u64 sleep_start;
1115 u64 sleep_max; 1114 u64 sleep_max;
@@ -1163,6 +1162,8 @@ struct sched_rt_entity {
1163#endif 1162#endif
1164}; 1163};
1165 1164
1165struct rcu_node;
1166
1166struct task_struct { 1167struct task_struct {
1167 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ 1168 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
1168 void *stack; 1169 void *stack;
@@ -1206,10 +1207,12 @@ struct task_struct {
1206 unsigned int policy; 1207 unsigned int policy;
1207 cpumask_t cpus_allowed; 1208 cpumask_t cpus_allowed;
1208 1209
1209#ifdef CONFIG_PREEMPT_RCU 1210#ifdef CONFIG_TREE_PREEMPT_RCU
1210 int rcu_read_lock_nesting; 1211 int rcu_read_lock_nesting;
1211 int rcu_flipctr_idx; 1212 char rcu_read_unlock_special;
1212#endif /* #ifdef CONFIG_PREEMPT_RCU */ 1213 struct rcu_node *rcu_blocked_node;
1214 struct list_head rcu_node_entry;
1215#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
1213 1216
1214#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) 1217#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
1215 struct sched_info sched_info; 1218 struct sched_info sched_info;
@@ -1230,11 +1233,19 @@ struct task_struct {
1230 unsigned did_exec:1; 1233 unsigned did_exec:1;
1231 unsigned in_execve:1; /* Tell the LSMs that the process is doing an 1234 unsigned in_execve:1; /* Tell the LSMs that the process is doing an
1232 * execve */ 1235 * execve */
1236 unsigned in_iowait:1;
1237
1238
1239 /* Revert to default priority/policy when forking */
1240 unsigned sched_reset_on_fork:1;
1241
1233 pid_t pid; 1242 pid_t pid;
1234 pid_t tgid; 1243 pid_t tgid;
1235 1244
1245#ifdef CONFIG_CC_STACKPROTECTOR
1236 /* Canary value for the -fstack-protector gcc feature */ 1246 /* Canary value for the -fstack-protector gcc feature */
1237 unsigned long stack_canary; 1247 unsigned long stack_canary;
1248#endif
1238 1249
1239 /* 1250 /*
1240 * pointers to (original) parent process, youngest child, younger sibling, 1251 * pointers to (original) parent process, youngest child, younger sibling,
@@ -1292,6 +1303,7 @@ struct task_struct {
1292 struct mutex cred_guard_mutex; /* guard against foreign influences on 1303 struct mutex cred_guard_mutex; /* guard against foreign influences on
1293 * credential calculations 1304 * credential calculations
1294 * (notably. ptrace) */ 1305 * (notably. ptrace) */
1306 struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
1295 1307
1296 char comm[TASK_COMM_LEN]; /* executable name excluding path 1308 char comm[TASK_COMM_LEN]; /* executable name excluding path
1297 - access with [gs]et_task_comm (which lock 1309 - access with [gs]et_task_comm (which lock
@@ -1724,6 +1736,28 @@ extern cputime_t task_gtime(struct task_struct *p);
1724#define tsk_used_math(p) ((p)->flags & PF_USED_MATH) 1736#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
1725#define used_math() tsk_used_math(current) 1737#define used_math() tsk_used_math(current)
1726 1738
1739#ifdef CONFIG_TREE_PREEMPT_RCU
1740
1741#define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
1742#define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */
1743#define RCU_READ_UNLOCK_GOT_QS (1 << 2) /* CPU has responded to RCU core. */
1744
1745static inline void rcu_copy_process(struct task_struct *p)
1746{
1747 p->rcu_read_lock_nesting = 0;
1748 p->rcu_read_unlock_special = 0;
1749 p->rcu_blocked_node = NULL;
1750 INIT_LIST_HEAD(&p->rcu_node_entry);
1751}
1752
1753#else
1754
1755static inline void rcu_copy_process(struct task_struct *p)
1756{
1757}
1758
1759#endif
1760
1727#ifdef CONFIG_SMP 1761#ifdef CONFIG_SMP
1728extern int set_cpus_allowed_ptr(struct task_struct *p, 1762extern int set_cpus_allowed_ptr(struct task_struct *p,
1729 const struct cpumask *new_mask); 1763 const struct cpumask *new_mask);
@@ -1813,11 +1847,12 @@ extern unsigned int sysctl_sched_min_granularity;
1813extern unsigned int sysctl_sched_wakeup_granularity; 1847extern unsigned int sysctl_sched_wakeup_granularity;
1814extern unsigned int sysctl_sched_shares_ratelimit; 1848extern unsigned int sysctl_sched_shares_ratelimit;
1815extern unsigned int sysctl_sched_shares_thresh; 1849extern unsigned int sysctl_sched_shares_thresh;
1816#ifdef CONFIG_SCHED_DEBUG
1817extern unsigned int sysctl_sched_child_runs_first; 1850extern unsigned int sysctl_sched_child_runs_first;
1851#ifdef CONFIG_SCHED_DEBUG
1818extern unsigned int sysctl_sched_features; 1852extern unsigned int sysctl_sched_features;
1819extern unsigned int sysctl_sched_migration_cost; 1853extern unsigned int sysctl_sched_migration_cost;
1820extern unsigned int sysctl_sched_nr_migrate; 1854extern unsigned int sysctl_sched_nr_migrate;
1855extern unsigned int sysctl_sched_time_avg;
1821extern unsigned int sysctl_timer_migration; 1856extern unsigned int sysctl_timer_migration;
1822 1857
1823int sched_nr_latency_handler(struct ctl_table *table, int write, 1858int sched_nr_latency_handler(struct ctl_table *table, int write,
@@ -2077,7 +2112,7 @@ static inline unsigned long wait_task_inactive(struct task_struct *p,
2077#define for_each_process(p) \ 2112#define for_each_process(p) \
2078 for (p = &init_task ; (p = next_task(p)) != &init_task ; ) 2113 for (p = &init_task ; (p = next_task(p)) != &init_task ; )
2079 2114
2080extern bool is_single_threaded(struct task_struct *); 2115extern bool current_is_single_threaded(void);
2081 2116
2082/* 2117/*
2083 * Careful: do_each_thread/while_each_thread is a double loop so 2118 * Careful: do_each_thread/while_each_thread is a double loop so
@@ -2281,23 +2316,31 @@ static inline int need_resched(void)
2281 * cond_resched_softirq() will enable bhs before scheduling. 2316 * cond_resched_softirq() will enable bhs before scheduling.
2282 */ 2317 */
2283extern int _cond_resched(void); 2318extern int _cond_resched(void);
2284#ifdef CONFIG_PREEMPT_BKL 2319
2285static inline int cond_resched(void) 2320#define cond_resched() ({ \
2286{ 2321 __might_sleep(__FILE__, __LINE__, 0); \
2287 return 0; 2322 _cond_resched(); \
2288} 2323})
2324
2325extern int __cond_resched_lock(spinlock_t *lock);
2326
2327#ifdef CONFIG_PREEMPT
2328#define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET
2289#else 2329#else
2290static inline int cond_resched(void) 2330#define PREEMPT_LOCK_OFFSET 0
2291{
2292 return _cond_resched();
2293}
2294#endif 2331#endif
2295extern int cond_resched_lock(spinlock_t * lock); 2332
2296extern int cond_resched_softirq(void); 2333#define cond_resched_lock(lock) ({ \
2297static inline int cond_resched_bkl(void) 2334 __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \
2298{ 2335 __cond_resched_lock(lock); \
2299 return _cond_resched(); 2336})
2300} 2337
2338extern int __cond_resched_softirq(void);
2339
2340#define cond_resched_softirq() ({ \
2341 __might_sleep(__FILE__, __LINE__, SOFTIRQ_OFFSET); \
2342 __cond_resched_softirq(); \
2343})
2301 2344
2302/* 2345/*
2303 * Does a critical section need to be broken due to another 2346 * Does a critical section need to be broken due to another
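
The SCHED_RESET_ON_FORK bit added near the top of this sched.h hunk is ORed into the
policy passed to sched_setscheduler(), so a realtime task can keep its children from
inheriting the realtime policy. A hedged userspace-style sketch; the local #define is
only needed if the libc headers do not yet carry the flag, and the function name is
an illustration:

	#include <sched.h>

	#ifndef SCHED_RESET_ON_FORK
	#define SCHED_RESET_ON_FORK 0x40000000	/* value from the hunk above */
	#endif

	static int become_rt_without_rt_children(void)
	{
		struct sched_param sp = { .sched_priority = 10 };

		/* This task runs SCHED_FIFO; children forked later revert to
		 * SCHED_NORMAL with default priority. */
		return sched_setscheduler(0, SCHED_FIFO | SCHED_RESET_ON_FORK, &sp);
	}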
diff --git a/include/linux/security.h b/include/linux/security.h
index 1f16eea2017b..d050b66ab9ef 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -53,7 +53,7 @@ struct audit_krule;
53extern int cap_capable(struct task_struct *tsk, const struct cred *cred, 53extern int cap_capable(struct task_struct *tsk, const struct cred *cred,
54 int cap, int audit); 54 int cap, int audit);
55extern int cap_settime(struct timespec *ts, struct timezone *tz); 55extern int cap_settime(struct timespec *ts, struct timezone *tz);
56extern int cap_ptrace_may_access(struct task_struct *child, unsigned int mode); 56extern int cap_ptrace_access_check(struct task_struct *child, unsigned int mode);
57extern int cap_ptrace_traceme(struct task_struct *parent); 57extern int cap_ptrace_traceme(struct task_struct *parent);
58extern int cap_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted); 58extern int cap_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted);
59extern int cap_capset(struct cred *new, const struct cred *old, 59extern int cap_capset(struct cred *new, const struct cred *old,
@@ -653,6 +653,11 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
653 * manual page for definitions of the @clone_flags. 653 * manual page for definitions of the @clone_flags.
654 * @clone_flags contains the flags indicating what should be shared. 654 * @clone_flags contains the flags indicating what should be shared.
655 * Return 0 if permission is granted. 655 * Return 0 if permission is granted.
656 * @cred_alloc_blank:
657 * @cred points to the credentials.
658 * @gfp indicates the atomicity of any memory allocations.
659 * Only allocate sufficient memory and attach to @cred such that
660 * cred_transfer() will not get ENOMEM.
656 * @cred_free: 661 * @cred_free:
657 * @cred points to the credentials. 662 * @cred points to the credentials.
658 * Deallocate and clear the cred->security field in a set of credentials. 663 * Deallocate and clear the cred->security field in a set of credentials.
@@ -665,6 +670,10 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
665 * @new points to the new credentials. 670 * @new points to the new credentials.
666 * @old points to the original credentials. 671 * @old points to the original credentials.
667 * Install a new set of credentials. 672 * Install a new set of credentials.
673 * @cred_transfer:
674 * @new points to the new credentials.
675 * @old points to the original credentials.
676 * Transfer data from original creds to new creds
668 * @kernel_act_as: 677 * @kernel_act_as:
669 * Set the credentials for a kernel service to act as (subjective context). 678 * Set the credentials for a kernel service to act as (subjective context).
670 * @new points to the credentials to be modified. 679 * @new points to the credentials to be modified.
@@ -678,6 +687,10 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
678 * @inode points to the inode to use as a reference. 687 * @inode points to the inode to use as a reference.
679 * The current task must be the one that nominated @inode. 688 * The current task must be the one that nominated @inode.
680 * Return 0 if successful. 689 * Return 0 if successful.
690 * @kernel_module_request:
691 * Ability to trigger the kernel to automatically upcall to userspace for
692 * userspace to load a kernel module with the given name.
693 * Return 0 if successful.
681 * @task_setuid: 694 * @task_setuid:
682 * Check permission before setting one or more of the user identity 695 * Check permission before setting one or more of the user identity
683 * attributes of the current process. The @flags parameter indicates 696 * attributes of the current process. The @flags parameter indicates
@@ -994,6 +1007,17 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
994 * Sets the connection's peersid to the secmark on skb. 1007 * Sets the connection's peersid to the secmark on skb.
995 * @req_classify_flow: 1008 * @req_classify_flow:
996 * Sets the flow's sid to the openreq sid. 1009 * Sets the flow's sid to the openreq sid.
1010 * @tun_dev_create:
1011 * Check permissions prior to creating a new TUN device.
1012 * @tun_dev_post_create:
1013 * This hook allows a module to update or allocate a per-socket security
1014 * structure.
1015 * @sk contains the newly created sock structure.
1016 * @tun_dev_attach:
1017 * Check permissions prior to attaching to a persistent TUN device. This
1018 * hook can also be used by the module to update any security state
1019 * associated with the TUN device's sock structure.
1020 * @sk contains the existing sock structure.
997 * 1021 *
998 * Security hooks for XFRM operations. 1022 * Security hooks for XFRM operations.
999 * 1023 *
@@ -1088,6 +1112,13 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
1088 * Return the length of the string (including terminating NUL) or -ve if 1112 * Return the length of the string (including terminating NUL) or -ve if
1089 * an error. 1113 * an error.
1090 * May also return 0 (and a NULL buffer pointer) if there is no label. 1114 * May also return 0 (and a NULL buffer pointer) if there is no label.
1115 * @key_session_to_parent:
1116 * Forcibly assign the session keyring from a process to its parent
1117 * process.
1118 * @cred: Pointer to process's credentials
1119 * @parent_cred: Pointer to parent process's credentials
1120 * @keyring: Proposed new session keyring
1121 * Return 0 if permission is granted, -ve error otherwise.
1091 * 1122 *
1092 * Security hooks affecting all System V IPC operations. 1123 * Security hooks affecting all System V IPC operations.
1093 * 1124 *
@@ -1229,7 +1260,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
1229 * @alter contains the flag indicating whether changes are to be made. 1260 * @alter contains the flag indicating whether changes are to be made.
1230 * Return 0 if permission is granted. 1261 * Return 0 if permission is granted.
1231 * 1262 *
1232 * @ptrace_may_access: 1263 * @ptrace_access_check:
1233 * Check permission before allowing the current process to trace the 1264 * Check permission before allowing the current process to trace the
1234 * @child process. 1265 * @child process.
1235 * Security modules may also want to perform a process tracing check 1266 * Security modules may also want to perform a process tracing check
@@ -1244,7 +1275,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
1244 * Check that the @parent process has sufficient permission to trace the 1275 * Check that the @parent process has sufficient permission to trace the
1245 * current process before allowing the current process to present itself 1276 * current process before allowing the current process to present itself
1246 * to the @parent process for tracing. 1277 * to the @parent process for tracing.
1247 * The parent process will still have to undergo the ptrace_may_access 1278 * The parent process will still have to undergo the ptrace_access_check
1248 * checks before it is allowed to trace this one. 1279 * checks before it is allowed to trace this one.
1249 * @parent contains the task_struct structure for debugger process. 1280 * @parent contains the task_struct structure for debugger process.
1250 * Return 0 if permission is granted. 1281 * Return 0 if permission is granted.
@@ -1351,12 +1382,47 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
1351 * audit_rule_init. 1382 * audit_rule_init.
1352 * @rule contains the allocated rule 1383 * @rule contains the allocated rule
1353 * 1384 *
1385 * @inode_notifysecctx:
1386 * Notify the security module of what the security context of an inode
1387 * should be. Initializes the incore security context managed by the
1388 * security module for this inode. Example usage: NFS client invokes
1389 * this hook to initialize the security context in its incore inode to the
1390 * value provided by the server for the file when the server returned the
1391 * file's attributes to the client.
1392 *
1393 * Must be called with inode->i_mutex locked.
1394 *
1395 * @inode we wish to set the security context of.
1396 * @ctx contains the string which we wish to set in the inode.
1397 * @ctxlen contains the length of @ctx.
1398 *
1399 * @inode_setsecctx:
1400 * Change the security context of an inode. Updates the
1401 * incore security context managed by the security module and invokes the
1402 * fs code as needed (via __vfs_setxattr_noperm) to update any backing
1403 * xattrs that represent the context. Example usage: NFS server invokes
1404 * this hook to change the security context in its incore inode and on the
1405 * backing filesystem to a value provided by the client on a SETATTR
1406 * operation.
1407 *
1408 * Must be called with inode->i_mutex locked.
1409 *
1410 * @dentry contains the inode we wish to set the security context of.
1411 * @ctx contains the string which we wish to set in the inode.
1412 * @ctxlen contains the length of @ctx.
1413 *
1414 * @inode_getsecctx:
1415 * Returns a string containing all relevant security context information
1416 *
1417 * @inode we wish to get the security context of.
1418 * @ctx is a pointer in which to place the allocated security context.
1419 * @ctxlen points to the place to put the length of @ctx.
1354 * This is the main security structure. 1420 * This is the main security structure.
1355 */ 1421 */
1356struct security_operations { 1422struct security_operations {
1357 char name[SECURITY_NAME_MAX + 1]; 1423 char name[SECURITY_NAME_MAX + 1];
1358 1424
1359 int (*ptrace_may_access) (struct task_struct *child, unsigned int mode); 1425 int (*ptrace_access_check) (struct task_struct *child, unsigned int mode);
1360 int (*ptrace_traceme) (struct task_struct *parent); 1426 int (*ptrace_traceme) (struct task_struct *parent);
1361 int (*capget) (struct task_struct *target, 1427 int (*capget) (struct task_struct *target,
1362 kernel_cap_t *effective, 1428 kernel_cap_t *effective,
@@ -1483,12 +1549,15 @@ struct security_operations {
1483 int (*dentry_open) (struct file *file, const struct cred *cred); 1549 int (*dentry_open) (struct file *file, const struct cred *cred);
1484 1550
1485 int (*task_create) (unsigned long clone_flags); 1551 int (*task_create) (unsigned long clone_flags);
1552 int (*cred_alloc_blank) (struct cred *cred, gfp_t gfp);
1486 void (*cred_free) (struct cred *cred); 1553 void (*cred_free) (struct cred *cred);
1487 int (*cred_prepare)(struct cred *new, const struct cred *old, 1554 int (*cred_prepare)(struct cred *new, const struct cred *old,
1488 gfp_t gfp); 1555 gfp_t gfp);
1489 void (*cred_commit)(struct cred *new, const struct cred *old); 1556 void (*cred_commit)(struct cred *new, const struct cred *old);
1557 void (*cred_transfer)(struct cred *new, const struct cred *old);
1490 int (*kernel_act_as)(struct cred *new, u32 secid); 1558 int (*kernel_act_as)(struct cred *new, u32 secid);
1491 int (*kernel_create_files_as)(struct cred *new, struct inode *inode); 1559 int (*kernel_create_files_as)(struct cred *new, struct inode *inode);
1560 int (*kernel_module_request)(void);
1492 int (*task_setuid) (uid_t id0, uid_t id1, uid_t id2, int flags); 1561 int (*task_setuid) (uid_t id0, uid_t id1, uid_t id2, int flags);
1493 int (*task_fix_setuid) (struct cred *new, const struct cred *old, 1562 int (*task_fix_setuid) (struct cred *new, const struct cred *old,
1494 int flags); 1563 int flags);
@@ -1556,6 +1625,10 @@ struct security_operations {
1556 int (*secctx_to_secid) (const char *secdata, u32 seclen, u32 *secid); 1625 int (*secctx_to_secid) (const char *secdata, u32 seclen, u32 *secid);
1557 void (*release_secctx) (char *secdata, u32 seclen); 1626 void (*release_secctx) (char *secdata, u32 seclen);
1558 1627
1628 int (*inode_notifysecctx)(struct inode *inode, void *ctx, u32 ctxlen);
1629 int (*inode_setsecctx)(struct dentry *dentry, void *ctx, u32 ctxlen);
1630 int (*inode_getsecctx)(struct inode *inode, void **ctx, u32 *ctxlen);
1631
1559#ifdef CONFIG_SECURITY_NETWORK 1632#ifdef CONFIG_SECURITY_NETWORK
1560 int (*unix_stream_connect) (struct socket *sock, 1633 int (*unix_stream_connect) (struct socket *sock,
1561 struct socket *other, struct sock *newsk); 1634 struct socket *other, struct sock *newsk);
@@ -1592,6 +1665,9 @@ struct security_operations {
1592 void (*inet_csk_clone) (struct sock *newsk, const struct request_sock *req); 1665 void (*inet_csk_clone) (struct sock *newsk, const struct request_sock *req);
1593 void (*inet_conn_established) (struct sock *sk, struct sk_buff *skb); 1666 void (*inet_conn_established) (struct sock *sk, struct sk_buff *skb);
1594 void (*req_classify_flow) (const struct request_sock *req, struct flowi *fl); 1667 void (*req_classify_flow) (const struct request_sock *req, struct flowi *fl);
1668 int (*tun_dev_create)(void);
1669 void (*tun_dev_post_create)(struct sock *sk);
1670 int (*tun_dev_attach)(struct sock *sk);
1595#endif /* CONFIG_SECURITY_NETWORK */ 1671#endif /* CONFIG_SECURITY_NETWORK */
1596 1672
1597#ifdef CONFIG_SECURITY_NETWORK_XFRM 1673#ifdef CONFIG_SECURITY_NETWORK_XFRM
@@ -1620,6 +1696,9 @@ struct security_operations {
1620 const struct cred *cred, 1696 const struct cred *cred,
1621 key_perm_t perm); 1697 key_perm_t perm);
1622 int (*key_getsecurity)(struct key *key, char **_buffer); 1698 int (*key_getsecurity)(struct key *key, char **_buffer);
1699 int (*key_session_to_parent)(const struct cred *cred,
1700 const struct cred *parent_cred,
1701 struct key *key);
1623#endif /* CONFIG_KEYS */ 1702#endif /* CONFIG_KEYS */
1624 1703
1625#ifdef CONFIG_AUDIT 1704#ifdef CONFIG_AUDIT
@@ -1637,7 +1716,7 @@ extern int security_module_enable(struct security_operations *ops);
1637extern int register_security(struct security_operations *ops); 1716extern int register_security(struct security_operations *ops);
1638 1717
1639/* Security operations */ 1718/* Security operations */
1640int security_ptrace_may_access(struct task_struct *child, unsigned int mode); 1719int security_ptrace_access_check(struct task_struct *child, unsigned int mode);
1641int security_ptrace_traceme(struct task_struct *parent); 1720int security_ptrace_traceme(struct task_struct *parent);
1642int security_capget(struct task_struct *target, 1721int security_capget(struct task_struct *target,
1643 kernel_cap_t *effective, 1722 kernel_cap_t *effective,
@@ -1736,11 +1815,14 @@ int security_file_send_sigiotask(struct task_struct *tsk,
1736int security_file_receive(struct file *file); 1815int security_file_receive(struct file *file);
1737int security_dentry_open(struct file *file, const struct cred *cred); 1816int security_dentry_open(struct file *file, const struct cred *cred);
1738int security_task_create(unsigned long clone_flags); 1817int security_task_create(unsigned long clone_flags);
1818int security_cred_alloc_blank(struct cred *cred, gfp_t gfp);
1739void security_cred_free(struct cred *cred); 1819void security_cred_free(struct cred *cred);
1740int security_prepare_creds(struct cred *new, const struct cred *old, gfp_t gfp); 1820int security_prepare_creds(struct cred *new, const struct cred *old, gfp_t gfp);
1741void security_commit_creds(struct cred *new, const struct cred *old); 1821void security_commit_creds(struct cred *new, const struct cred *old);
1822void security_transfer_creds(struct cred *new, const struct cred *old);
1742int security_kernel_act_as(struct cred *new, u32 secid); 1823int security_kernel_act_as(struct cred *new, u32 secid);
1743int security_kernel_create_files_as(struct cred *new, struct inode *inode); 1824int security_kernel_create_files_as(struct cred *new, struct inode *inode);
1825int security_kernel_module_request(void);
1744int security_task_setuid(uid_t id0, uid_t id1, uid_t id2, int flags); 1826int security_task_setuid(uid_t id0, uid_t id1, uid_t id2, int flags);
1745int security_task_fix_setuid(struct cred *new, const struct cred *old, 1827int security_task_fix_setuid(struct cred *new, const struct cred *old,
1746 int flags); 1828 int flags);
@@ -1796,6 +1878,9 @@ int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen);
1796int security_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid); 1878int security_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid);
1797void security_release_secctx(char *secdata, u32 seclen); 1879void security_release_secctx(char *secdata, u32 seclen);
1798 1880
1881int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen);
1882int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen);
1883int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen);
1799#else /* CONFIG_SECURITY */ 1884#else /* CONFIG_SECURITY */
1800struct security_mnt_opts { 1885struct security_mnt_opts {
1801}; 1886};
@@ -1818,10 +1903,10 @@ static inline int security_init(void)
1818 return 0; 1903 return 0;
1819} 1904}
1820 1905
1821static inline int security_ptrace_may_access(struct task_struct *child, 1906static inline int security_ptrace_access_check(struct task_struct *child,
1822 unsigned int mode) 1907 unsigned int mode)
1823{ 1908{
1824 return cap_ptrace_may_access(child, mode); 1909 return cap_ptrace_access_check(child, mode);
1825} 1910}
1826 1911
1827static inline int security_ptrace_traceme(struct task_struct *parent) 1912static inline int security_ptrace_traceme(struct task_struct *parent)
@@ -2266,6 +2351,11 @@ static inline int security_task_create(unsigned long clone_flags)
2266 return 0; 2351 return 0;
2267} 2352}
2268 2353
2354static inline int security_cred_alloc_blank(struct cred *cred, gfp_t gfp)
2355{
2356 return 0;
2357}
2358
2269static inline void security_cred_free(struct cred *cred) 2359static inline void security_cred_free(struct cred *cred)
2270{ } 2360{ }
2271 2361
@@ -2281,6 +2371,11 @@ static inline void security_commit_creds(struct cred *new,
2281{ 2371{
2282} 2372}
2283 2373
2374static inline void security_transfer_creds(struct cred *new,
2375 const struct cred *old)
2376{
2377}
2378
2284static inline int security_kernel_act_as(struct cred *cred, u32 secid) 2379static inline int security_kernel_act_as(struct cred *cred, u32 secid)
2285{ 2380{
2286 return 0; 2381 return 0;
@@ -2292,6 +2387,11 @@ static inline int security_kernel_create_files_as(struct cred *cred,
2292 return 0; 2387 return 0;
2293} 2388}
2294 2389
2390static inline int security_kernel_module_request(void)
2391{
2392 return 0;
2393}
2394
2295static inline int security_task_setuid(uid_t id0, uid_t id1, uid_t id2, 2395static inline int security_task_setuid(uid_t id0, uid_t id1, uid_t id2,
2296 int flags) 2396 int flags)
2297{ 2397{
@@ -2537,6 +2637,19 @@ static inline int security_secctx_to_secid(const char *secdata,
2537static inline void security_release_secctx(char *secdata, u32 seclen) 2637static inline void security_release_secctx(char *secdata, u32 seclen)
2538{ 2638{
2539} 2639}
2640
2641static inline int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen)
2642{
2643 return -EOPNOTSUPP;
2644}
2645static inline int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen)
2646{
2647 return -EOPNOTSUPP;
2648}
2649static inline int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
2650{
2651 return -EOPNOTSUPP;
2652}
2540#endif /* CONFIG_SECURITY */ 2653#endif /* CONFIG_SECURITY */
2541 2654
2542#ifdef CONFIG_SECURITY_NETWORK 2655#ifdef CONFIG_SECURITY_NETWORK
@@ -2575,6 +2688,9 @@ void security_inet_csk_clone(struct sock *newsk,
2575 const struct request_sock *req); 2688 const struct request_sock *req);
2576void security_inet_conn_established(struct sock *sk, 2689void security_inet_conn_established(struct sock *sk,
2577 struct sk_buff *skb); 2690 struct sk_buff *skb);
2691int security_tun_dev_create(void);
2692void security_tun_dev_post_create(struct sock *sk);
2693int security_tun_dev_attach(struct sock *sk);
2578 2694
2579#else /* CONFIG_SECURITY_NETWORK */ 2695#else /* CONFIG_SECURITY_NETWORK */
2580static inline int security_unix_stream_connect(struct socket *sock, 2696static inline int security_unix_stream_connect(struct socket *sock,
@@ -2725,6 +2841,20 @@ static inline void security_inet_conn_established(struct sock *sk,
2725 struct sk_buff *skb) 2841 struct sk_buff *skb)
2726{ 2842{
2727} 2843}
2844
2845static inline int security_tun_dev_create(void)
2846{
2847 return 0;
2848}
2849
2850static inline void security_tun_dev_post_create(struct sock *sk)
2851{
2852}
2853
2854static inline int security_tun_dev_attach(struct sock *sk)
2855{
2856 return 0;
2857}
2728#endif /* CONFIG_SECURITY_NETWORK */ 2858#endif /* CONFIG_SECURITY_NETWORK */
2729 2859
2730#ifdef CONFIG_SECURITY_NETWORK_XFRM 2860#ifdef CONFIG_SECURITY_NETWORK_XFRM
@@ -2881,6 +3011,9 @@ void security_key_free(struct key *key);
2881int security_key_permission(key_ref_t key_ref, 3011int security_key_permission(key_ref_t key_ref,
2882 const struct cred *cred, key_perm_t perm); 3012 const struct cred *cred, key_perm_t perm);
2883int security_key_getsecurity(struct key *key, char **_buffer); 3013int security_key_getsecurity(struct key *key, char **_buffer);
3014int security_key_session_to_parent(const struct cred *cred,
3015 const struct cred *parent_cred,
3016 struct key *key);
2884 3017
2885#else 3018#else
2886 3019
@@ -2908,6 +3041,13 @@ static inline int security_key_getsecurity(struct key *key, char **_buffer)
2908 return 0; 3041 return 0;
2909} 3042}
2910 3043
3044static inline int security_key_session_to_parent(const struct cred *cred,
3045 const struct cred *parent_cred,
3046 struct key *key)
3047{
3048 return 0;
3049}
3050
2911#endif 3051#endif
2912#endif /* CONFIG_KEYS */ 3052#endif /* CONFIG_KEYS */
2913 3053
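
The CONFIG_SECURITY=n stubs above return -EOPNOTSUPP for the new inode_*secctx
helpers, so a filesystem calling them is expected to treat that as "no label
available" rather than a hard failure. A rough caller sketch (export_inode_label()
is a hypothetical helper, not from this patch):

	static int export_inode_label(struct inode *inode, void **ctx, u32 *ctxlen)
	{
		int err = security_inode_getsecctx(inode, ctx, ctxlen);

		if (err == -EOPNOTSUPP) {	/* no LSM label to export */
			*ctx = NULL;
			*ctxlen = 0;
			return 0;
		}
		return err;
	}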
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index abff6c9b413c..6d3f2f449ead 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -39,7 +39,7 @@ static inline struct shmem_inode_info *SHMEM_I(struct inode *inode)
39} 39}
40 40
41#ifdef CONFIG_TMPFS_POSIX_ACL 41#ifdef CONFIG_TMPFS_POSIX_ACL
42int shmem_permission(struct inode *, int); 42int shmem_check_acl(struct inode *, int);
43int shmem_acl_init(struct inode *, struct inode *); 43int shmem_acl_init(struct inode *, struct inode *);
44 44
45extern struct xattr_handler shmem_xattr_acl_access_handler; 45extern struct xattr_handler shmem_xattr_acl_access_handler;
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 4be57ab03478..f0ca7a7a1757 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -143,15 +143,6 @@ static inline void smp_mb__after_lock(void) { smp_mb(); }
143 */ 143 */
144#define spin_unlock_wait(lock) __raw_spin_unlock_wait(&(lock)->raw_lock) 144#define spin_unlock_wait(lock) __raw_spin_unlock_wait(&(lock)->raw_lock)
145 145
146/*
147 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
148 */
149#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
150# include <linux/spinlock_api_smp.h>
151#else
152# include <linux/spinlock_api_up.h>
153#endif
154
155#ifdef CONFIG_DEBUG_SPINLOCK 146#ifdef CONFIG_DEBUG_SPINLOCK
156 extern void _raw_spin_lock(spinlock_t *lock); 147 extern void _raw_spin_lock(spinlock_t *lock);
157#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) 148#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
@@ -268,50 +259,16 @@ static inline void smp_mb__after_lock(void) { smp_mb(); }
268 259
269#define spin_lock_irq(lock) _spin_lock_irq(lock) 260#define spin_lock_irq(lock) _spin_lock_irq(lock)
270#define spin_lock_bh(lock) _spin_lock_bh(lock) 261#define spin_lock_bh(lock) _spin_lock_bh(lock)
271
272#define read_lock_irq(lock) _read_lock_irq(lock) 262#define read_lock_irq(lock) _read_lock_irq(lock)
273#define read_lock_bh(lock) _read_lock_bh(lock) 263#define read_lock_bh(lock) _read_lock_bh(lock)
274
275#define write_lock_irq(lock) _write_lock_irq(lock) 264#define write_lock_irq(lock) _write_lock_irq(lock)
276#define write_lock_bh(lock) _write_lock_bh(lock) 265#define write_lock_bh(lock) _write_lock_bh(lock)
277 266#define spin_unlock(lock) _spin_unlock(lock)
278/* 267#define read_unlock(lock) _read_unlock(lock)
279 * We inline the unlock functions in the nondebug case: 268#define write_unlock(lock) _write_unlock(lock)
280 */ 269#define spin_unlock_irq(lock) _spin_unlock_irq(lock)
281#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) || \ 270#define read_unlock_irq(lock) _read_unlock_irq(lock)
282 !defined(CONFIG_SMP) 271#define write_unlock_irq(lock) _write_unlock_irq(lock)
283# define spin_unlock(lock) _spin_unlock(lock)
284# define read_unlock(lock) _read_unlock(lock)
285# define write_unlock(lock) _write_unlock(lock)
286# define spin_unlock_irq(lock) _spin_unlock_irq(lock)
287# define read_unlock_irq(lock) _read_unlock_irq(lock)
288# define write_unlock_irq(lock) _write_unlock_irq(lock)
289#else
290# define spin_unlock(lock) \
291 do {__raw_spin_unlock(&(lock)->raw_lock); __release(lock); } while (0)
292# define read_unlock(lock) \
293 do {__raw_read_unlock(&(lock)->raw_lock); __release(lock); } while (0)
294# define write_unlock(lock) \
295 do {__raw_write_unlock(&(lock)->raw_lock); __release(lock); } while (0)
296# define spin_unlock_irq(lock) \
297do { \
298 __raw_spin_unlock(&(lock)->raw_lock); \
299 __release(lock); \
300 local_irq_enable(); \
301} while (0)
302# define read_unlock_irq(lock) \
303do { \
304 __raw_read_unlock(&(lock)->raw_lock); \
305 __release(lock); \
306 local_irq_enable(); \
307} while (0)
308# define write_unlock_irq(lock) \
309do { \
310 __raw_write_unlock(&(lock)->raw_lock); \
311 __release(lock); \
312 local_irq_enable(); \
313} while (0)
314#endif
315 272
316#define spin_unlock_irqrestore(lock, flags) \ 273#define spin_unlock_irqrestore(lock, flags) \
317 do { \ 274 do { \
@@ -380,4 +337,13 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
380 */ 337 */
381#define spin_can_lock(lock) (!spin_is_locked(lock)) 338#define spin_can_lock(lock) (!spin_is_locked(lock))
382 339
340/*
341 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
342 */
343#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
344# include <linux/spinlock_api_smp.h>
345#else
346# include <linux/spinlock_api_up.h>
347#endif
348
383#endif /* __LINUX_SPINLOCK_H */ 349#endif /* __LINUX_SPINLOCK_H */
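After this reorganisation the spin_unlock()/read_unlock()/write_unlock() wrappers always expand to _spin_unlock() and friends; whether those end up out-of-line or inlined is now decided in spinlock_api_smp.h (next hunk) rather than here. Callers are unaffected. A minimal usage sketch, with my_lock and my_count as placeholder names:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);
static unsigned long my_count;

static void bump_count(void)
{
	unsigned long flags;

	spin_lock_irqsave(&my_lock, flags);      /* expands to _spin_lock_irqsave() */
	my_count++;
	spin_unlock_irqrestore(&my_lock, flags); /* expands to _spin_unlock_irqrestore() */
}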
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index d79845d034b5..7a7e18fc2415 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -60,4 +60,398 @@ void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
60void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) 60void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
61 __releases(lock); 61 __releases(lock);
62 62
63/*
64 * We inline the unlock functions in the nondebug case:
65 */
66#if !defined(CONFIG_DEBUG_SPINLOCK) && !defined(CONFIG_PREEMPT)
67#define __always_inline__spin_unlock
68#define __always_inline__read_unlock
69#define __always_inline__write_unlock
70#define __always_inline__spin_unlock_irq
71#define __always_inline__read_unlock_irq
72#define __always_inline__write_unlock_irq
73#endif
74
75#ifndef CONFIG_DEBUG_SPINLOCK
76#ifndef CONFIG_GENERIC_LOCKBREAK
77
78#ifdef __always_inline__spin_lock
79#define _spin_lock(lock) __spin_lock(lock)
80#endif
81
82#ifdef __always_inline__read_lock
83#define _read_lock(lock) __read_lock(lock)
84#endif
85
86#ifdef __always_inline__write_lock
87#define _write_lock(lock) __write_lock(lock)
88#endif
89
90#ifdef __always_inline__spin_lock_bh
91#define _spin_lock_bh(lock) __spin_lock_bh(lock)
92#endif
93
94#ifdef __always_inline__read_lock_bh
95#define _read_lock_bh(lock) __read_lock_bh(lock)
96#endif
97
98#ifdef __always_inline__write_lock_bh
99#define _write_lock_bh(lock) __write_lock_bh(lock)
100#endif
101
102#ifdef __always_inline__spin_lock_irq
103#define _spin_lock_irq(lock) __spin_lock_irq(lock)
104#endif
105
106#ifdef __always_inline__read_lock_irq
107#define _read_lock_irq(lock) __read_lock_irq(lock)
108#endif
109
110#ifdef __always_inline__write_lock_irq
111#define _write_lock_irq(lock) __write_lock_irq(lock)
112#endif
113
114#ifdef __always_inline__spin_lock_irqsave
115#define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock)
116#endif
117
118#ifdef __always_inline__read_lock_irqsave
119#define _read_lock_irqsave(lock) __read_lock_irqsave(lock)
120#endif
121
122#ifdef __always_inline__write_lock_irqsave
123#define _write_lock_irqsave(lock) __write_lock_irqsave(lock)
124#endif
125
126#endif /* !CONFIG_GENERIC_LOCKBREAK */
127
128#ifdef __always_inline__spin_trylock
129#define _spin_trylock(lock) __spin_trylock(lock)
130#endif
131
132#ifdef __always_inline__read_trylock
133#define _read_trylock(lock) __read_trylock(lock)
134#endif
135
136#ifdef __always_inline__write_trylock
137#define _write_trylock(lock) __write_trylock(lock)
138#endif
139
140#ifdef __always_inline__spin_trylock_bh
141#define _spin_trylock_bh(lock) __spin_trylock_bh(lock)
142#endif
143
144#ifdef __always_inline__spin_unlock
145#define _spin_unlock(lock) __spin_unlock(lock)
146#endif
147
148#ifdef __always_inline__read_unlock
149#define _read_unlock(lock) __read_unlock(lock)
150#endif
151
152#ifdef __always_inline__write_unlock
153#define _write_unlock(lock) __write_unlock(lock)
154#endif
155
156#ifdef __always_inline__spin_unlock_bh
157#define _spin_unlock_bh(lock) __spin_unlock_bh(lock)
158#endif
159
160#ifdef __always_inline__read_unlock_bh
161#define _read_unlock_bh(lock) __read_unlock_bh(lock)
162#endif
163
164#ifdef __always_inline__write_unlock_bh
165#define _write_unlock_bh(lock) __write_unlock_bh(lock)
166#endif
167
168#ifdef __always_inline__spin_unlock_irq
169#define _spin_unlock_irq(lock) __spin_unlock_irq(lock)
170#endif
171
172#ifdef __always_inline__read_unlock_irq
173#define _read_unlock_irq(lock) __read_unlock_irq(lock)
174#endif
175
176#ifdef __always_inline__write_unlock_irq
177#define _write_unlock_irq(lock) __write_unlock_irq(lock)
178#endif
179
180#ifdef __always_inline__spin_unlock_irqrestore
181#define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags)
182#endif
183
184#ifdef __always_inline__read_unlock_irqrestore
185#define _read_unlock_irqrestore(lock, flags) __read_unlock_irqrestore(lock, flags)
186#endif
187
188#ifdef __always_inline__write_unlock_irqrestore
189#define _write_unlock_irqrestore(lock, flags) __write_unlock_irqrestore(lock, flags)
190#endif
191
192#endif /* CONFIG_DEBUG_SPINLOCK */
193
194static inline int __spin_trylock(spinlock_t *lock)
195{
196 preempt_disable();
197 if (_raw_spin_trylock(lock)) {
198 spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
199 return 1;
200 }
201 preempt_enable();
202 return 0;
203}
204
205static inline int __read_trylock(rwlock_t *lock)
206{
207 preempt_disable();
208 if (_raw_read_trylock(lock)) {
209 rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
210 return 1;
211 }
212 preempt_enable();
213 return 0;
214}
215
216static inline int __write_trylock(rwlock_t *lock)
217{
218 preempt_disable();
219 if (_raw_write_trylock(lock)) {
220 rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
221 return 1;
222 }
223 preempt_enable();
224 return 0;
225}
226
227/*
228 * If lockdep is enabled then we use the non-preemption spin-ops
229 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
230 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
231 */
232#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
233
234static inline void __read_lock(rwlock_t *lock)
235{
236 preempt_disable();
237 rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
238 LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
239}
240
241static inline unsigned long __spin_lock_irqsave(spinlock_t *lock)
242{
243 unsigned long flags;
244
245 local_irq_save(flags);
246 preempt_disable();
247 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
248 /*
249	 * On lockdep we don't want the hand-coded irq-enable of
250 * _raw_spin_lock_flags() code, because lockdep assumes
251 * that interrupts are not re-enabled during lock-acquire:
252 */
253#ifdef CONFIG_LOCKDEP
254 LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
255#else
256 _raw_spin_lock_flags(lock, &flags);
257#endif
258 return flags;
259}
260
261static inline void __spin_lock_irq(spinlock_t *lock)
262{
263 local_irq_disable();
264 preempt_disable();
265 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
266 LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
267}
268
269static inline void __spin_lock_bh(spinlock_t *lock)
270{
271 local_bh_disable();
272 preempt_disable();
273 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
274 LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
275}
276
277static inline unsigned long __read_lock_irqsave(rwlock_t *lock)
278{
279 unsigned long flags;
280
281 local_irq_save(flags);
282 preempt_disable();
283 rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
284 LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock,
285 _raw_read_lock_flags, &flags);
286 return flags;
287}
288
289static inline void __read_lock_irq(rwlock_t *lock)
290{
291 local_irq_disable();
292 preempt_disable();
293 rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
294 LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
295}
296
297static inline void __read_lock_bh(rwlock_t *lock)
298{
299 local_bh_disable();
300 preempt_disable();
301 rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
302 LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
303}
304
305static inline unsigned long __write_lock_irqsave(rwlock_t *lock)
306{
307 unsigned long flags;
308
309 local_irq_save(flags);
310 preempt_disable();
311 rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
312 LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock,
313 _raw_write_lock_flags, &flags);
314 return flags;
315}
316
317static inline void __write_lock_irq(rwlock_t *lock)
318{
319 local_irq_disable();
320 preempt_disable();
321 rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
322 LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
323}
324
325static inline void __write_lock_bh(rwlock_t *lock)
326{
327 local_bh_disable();
328 preempt_disable();
329 rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
330 LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
331}
332
333static inline void __spin_lock(spinlock_t *lock)
334{
335 preempt_disable();
336 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
337 LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
338}
339
340static inline void __write_lock(rwlock_t *lock)
341{
342 preempt_disable();
343 rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
344 LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
345}
346
347#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */
348
349static inline void __spin_unlock(spinlock_t *lock)
350{
351 spin_release(&lock->dep_map, 1, _RET_IP_);
352 _raw_spin_unlock(lock);
353 preempt_enable();
354}
355
356static inline void __write_unlock(rwlock_t *lock)
357{
358 rwlock_release(&lock->dep_map, 1, _RET_IP_);
359 _raw_write_unlock(lock);
360 preempt_enable();
361}
362
363static inline void __read_unlock(rwlock_t *lock)
364{
365 rwlock_release(&lock->dep_map, 1, _RET_IP_);
366 _raw_read_unlock(lock);
367 preempt_enable();
368}
369
370static inline void __spin_unlock_irqrestore(spinlock_t *lock,
371 unsigned long flags)
372{
373 spin_release(&lock->dep_map, 1, _RET_IP_);
374 _raw_spin_unlock(lock);
375 local_irq_restore(flags);
376 preempt_enable();
377}
378
379static inline void __spin_unlock_irq(spinlock_t *lock)
380{
381 spin_release(&lock->dep_map, 1, _RET_IP_);
382 _raw_spin_unlock(lock);
383 local_irq_enable();
384 preempt_enable();
385}
386
387static inline void __spin_unlock_bh(spinlock_t *lock)
388{
389 spin_release(&lock->dep_map, 1, _RET_IP_);
390 _raw_spin_unlock(lock);
391 preempt_enable_no_resched();
392 local_bh_enable_ip((unsigned long)__builtin_return_address(0));
393}
394
395static inline void __read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
396{
397 rwlock_release(&lock->dep_map, 1, _RET_IP_);
398 _raw_read_unlock(lock);
399 local_irq_restore(flags);
400 preempt_enable();
401}
402
403static inline void __read_unlock_irq(rwlock_t *lock)
404{
405 rwlock_release(&lock->dep_map, 1, _RET_IP_);
406 _raw_read_unlock(lock);
407 local_irq_enable();
408 preempt_enable();
409}
410
411static inline void __read_unlock_bh(rwlock_t *lock)
412{
413 rwlock_release(&lock->dep_map, 1, _RET_IP_);
414 _raw_read_unlock(lock);
415 preempt_enable_no_resched();
416 local_bh_enable_ip((unsigned long)__builtin_return_address(0));
417}
418
419static inline void __write_unlock_irqrestore(rwlock_t *lock,
420 unsigned long flags)
421{
422 rwlock_release(&lock->dep_map, 1, _RET_IP_);
423 _raw_write_unlock(lock);
424 local_irq_restore(flags);
425 preempt_enable();
426}
427
428static inline void __write_unlock_irq(rwlock_t *lock)
429{
430 rwlock_release(&lock->dep_map, 1, _RET_IP_);
431 _raw_write_unlock(lock);
432 local_irq_enable();
433 preempt_enable();
434}
435
436static inline void __write_unlock_bh(rwlock_t *lock)
437{
438 rwlock_release(&lock->dep_map, 1, _RET_IP_);
439 _raw_write_unlock(lock);
440 preempt_enable_no_resched();
441 local_bh_enable_ip((unsigned long)__builtin_return_address(0));
442}
443
444static inline int __spin_trylock_bh(spinlock_t *lock)
445{
446 local_bh_disable();
447 preempt_disable();
448 if (_raw_spin_trylock(lock)) {
449 spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
450 return 1;
451 }
452 preempt_enable_no_resched();
453 local_bh_enable_ip((unsigned long)__builtin_return_address(0));
454 return 0;
455}
456
63#endif /* __LINUX_SPINLOCK_API_SMP_H */ 457#endif /* __LINUX_SPINLOCK_API_SMP_H */
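The block of #ifdef __always_inline__* tests above is an opt-in scheme: an architecture (or the !PREEMPT/!DEBUG case near the top of the hunk) defines the marker macro __always_inline__<op>, and the corresponding _<op>() entry point is then redirected to the inline __<op>() defined later in this file; anything left undefined stays an out-of-line call in kernel/spinlock.c. A generic sketch of the same pattern, with made-up names:

/* Opt-in inlining: whoever wants the fast path defines the marker macro. */
#define __always_inline__my_op		/* opt this operation in */

static inline void __my_op(void)
{
	/* fast, inlinable implementation */
}

void my_op_out_of_line(void);		/* slow path, defined elsewhere */

#ifdef __always_inline__my_op
# define my_op()	__my_op()
#else
# define my_op()	my_op_out_of_line()
#endif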
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
index 2d8b211b9324..6f52b4d7c447 100644
--- a/include/linux/sunrpc/cache.h
+++ b/include/linux/sunrpc/cache.h
@@ -59,6 +59,15 @@ struct cache_head {
59 59
60#define CACHE_NEW_EXPIRY 120 /* keep new things pending confirmation for 120 seconds */ 60#define CACHE_NEW_EXPIRY 120 /* keep new things pending confirmation for 120 seconds */
61 61
62struct cache_detail_procfs {
63 struct proc_dir_entry *proc_ent;
64 struct proc_dir_entry *flush_ent, *channel_ent, *content_ent;
65};
66
67struct cache_detail_pipefs {
68 struct dentry *dir;
69};
70
62struct cache_detail { 71struct cache_detail {
63 struct module * owner; 72 struct module * owner;
64 int hash_size; 73 int hash_size;
@@ -70,15 +79,17 @@ struct cache_detail {
70 char *name; 79 char *name;
71 void (*cache_put)(struct kref *); 80 void (*cache_put)(struct kref *);
72 81
73 void (*cache_request)(struct cache_detail *cd, 82 int (*cache_upcall)(struct cache_detail *,
74 struct cache_head *h, 83 struct cache_head *);
75 char **bpp, int *blen); 84
76 int (*cache_parse)(struct cache_detail *, 85 int (*cache_parse)(struct cache_detail *,
77 char *buf, int len); 86 char *buf, int len);
78 87
79 int (*cache_show)(struct seq_file *m, 88 int (*cache_show)(struct seq_file *m,
80 struct cache_detail *cd, 89 struct cache_detail *cd,
81 struct cache_head *h); 90 struct cache_head *h);
91 void (*warn_no_listener)(struct cache_detail *cd,
92 int has_died);
82 93
83 struct cache_head * (*alloc)(void); 94 struct cache_head * (*alloc)(void);
84 int (*match)(struct cache_head *orig, struct cache_head *new); 95 int (*match)(struct cache_head *orig, struct cache_head *new);
@@ -96,13 +107,15 @@ struct cache_detail {
96 107
97 /* fields for communication over channel */ 108 /* fields for communication over channel */
98 struct list_head queue; 109 struct list_head queue;
99 struct proc_dir_entry *proc_ent;
100 struct proc_dir_entry *flush_ent, *channel_ent, *content_ent;
101 110
102	atomic_t		readers;		/* how many times is /channel open */	111
103 time_t last_close; /* if no readers, when did last close */ 112 time_t last_close; /* if no readers, when did last close */
104 time_t last_warn; /* when we last warned about no readers */ 113 time_t last_warn; /* when we last warned about no readers */
105 void (*warn_no_listener)(struct cache_detail *cd); 114
115 union {
116 struct cache_detail_procfs procfs;
117 struct cache_detail_pipefs pipefs;
118 } u;
106}; 119};
107 120
108 121
@@ -127,6 +140,10 @@ struct cache_deferred_req {
127}; 140};
128 141
129 142
143extern const struct file_operations cache_file_operations_pipefs;
144extern const struct file_operations content_file_operations_pipefs;
145extern const struct file_operations cache_flush_operations_pipefs;
146
130extern struct cache_head * 147extern struct cache_head *
131sunrpc_cache_lookup(struct cache_detail *detail, 148sunrpc_cache_lookup(struct cache_detail *detail,
132 struct cache_head *key, int hash); 149 struct cache_head *key, int hash);
@@ -134,6 +151,13 @@ extern struct cache_head *
134sunrpc_cache_update(struct cache_detail *detail, 151sunrpc_cache_update(struct cache_detail *detail,
135 struct cache_head *new, struct cache_head *old, int hash); 152 struct cache_head *new, struct cache_head *old, int hash);
136 153
154extern int
155sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h,
156 void (*cache_request)(struct cache_detail *,
157 struct cache_head *,
158 char **,
159 int *));
160
137 161
138extern void cache_clean_deferred(void *owner); 162extern void cache_clean_deferred(void *owner);
139 163
@@ -171,6 +195,10 @@ extern void cache_purge(struct cache_detail *detail);
171extern int cache_register(struct cache_detail *cd); 195extern int cache_register(struct cache_detail *cd);
172extern void cache_unregister(struct cache_detail *cd); 196extern void cache_unregister(struct cache_detail *cd);
173 197
198extern int sunrpc_cache_register_pipefs(struct dentry *parent, const char *,
199 mode_t, struct cache_detail *);
200extern void sunrpc_cache_unregister_pipefs(struct cache_detail *);
201
174extern void qword_add(char **bpp, int *lp, char *str); 202extern void qword_add(char **bpp, int *lp, char *str);
175extern void qword_addhex(char **bpp, int *lp, char *buf, int blen); 203extern void qword_addhex(char **bpp, int *lp, char *buf, int blen);
176extern int qword_get(char **bpp, char *dest, int bufsize); 204extern int qword_get(char **bpp, char *dest, int bufsize);
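With this change a cache no longer hands the framework a cache_request formatter directly; it implements cache_upcall instead, and a procfs-based cache simply forwards to sunrpc_cache_pipe_upcall() with its formatter. A sketch under assumed names (my_cache_request/my_cache_upcall are hypothetical; qword_add() is the helper declared just above):

static void my_cache_request(struct cache_detail *cd, struct cache_head *h,
			     char **bpp, int *blen)
{
	/* format the upcall line that is written to the channel file */
	qword_add(bpp, blen, "example-key");
	(*bpp)[-1] = '\n';		/* terminate the record */
}

static int my_cache_upcall(struct cache_detail *cd, struct cache_head *h)
{
	return sunrpc_cache_pipe_upcall(cd, h, my_cache_request);
}

/* ... and in the cache_detail initialiser: .cache_upcall = my_cache_upcall, */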
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 37881f1a0bd7..ab3f6e90caa5 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -9,6 +9,10 @@
9#ifndef _LINUX_SUNRPC_CLNT_H 9#ifndef _LINUX_SUNRPC_CLNT_H
10#define _LINUX_SUNRPC_CLNT_H 10#define _LINUX_SUNRPC_CLNT_H
11 11
12#include <linux/socket.h>
13#include <linux/in.h>
14#include <linux/in6.h>
15
12#include <linux/sunrpc/msg_prot.h> 16#include <linux/sunrpc/msg_prot.h>
13#include <linux/sunrpc/sched.h> 17#include <linux/sunrpc/sched.h>
14#include <linux/sunrpc/xprt.h> 18#include <linux/sunrpc/xprt.h>
@@ -17,6 +21,7 @@
17#include <linux/sunrpc/xdr.h> 21#include <linux/sunrpc/xdr.h>
18#include <linux/sunrpc/timer.h> 22#include <linux/sunrpc/timer.h>
19#include <asm/signal.h> 23#include <asm/signal.h>
24#include <linux/path.h>
20 25
21struct rpc_inode; 26struct rpc_inode;
22 27
@@ -50,9 +55,7 @@ struct rpc_clnt {
50 55
51 int cl_nodelen; /* nodename length */ 56 int cl_nodelen; /* nodename length */
52 char cl_nodename[UNX_MAXNODENAME]; 57 char cl_nodename[UNX_MAXNODENAME];
53 char cl_pathname[30];/* Path in rpc_pipe_fs */ 58 struct path cl_path;
54 struct vfsmount * cl_vfsmnt;
55 struct dentry * cl_dentry; /* inode */
56 struct rpc_clnt * cl_parent; /* Points to parent of clones */ 59 struct rpc_clnt * cl_parent; /* Points to parent of clones */
57 struct rpc_rtt cl_rtt_default; 60 struct rpc_rtt cl_rtt_default;
58 struct rpc_timeout cl_timeout_default; 61 struct rpc_timeout cl_timeout_default;
@@ -151,5 +154,39 @@ void rpc_force_rebind(struct rpc_clnt *);
151size_t rpc_peeraddr(struct rpc_clnt *, struct sockaddr *, size_t); 154size_t rpc_peeraddr(struct rpc_clnt *, struct sockaddr *, size_t);
152const char *rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t); 155const char *rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t);
153 156
157size_t rpc_ntop(const struct sockaddr *, char *, const size_t);
158size_t rpc_pton(const char *, const size_t,
159 struct sockaddr *, const size_t);
160char * rpc_sockaddr2uaddr(const struct sockaddr *);
161size_t rpc_uaddr2sockaddr(const char *, const size_t,
162 struct sockaddr *, const size_t);
163
164static inline unsigned short rpc_get_port(const struct sockaddr *sap)
165{
166 switch (sap->sa_family) {
167 case AF_INET:
168 return ntohs(((struct sockaddr_in *)sap)->sin_port);
169 case AF_INET6:
170 return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
171 }
172 return 0;
173}
174
175static inline void rpc_set_port(struct sockaddr *sap,
176 const unsigned short port)
177{
178 switch (sap->sa_family) {
179 case AF_INET:
180 ((struct sockaddr_in *)sap)->sin_port = htons(port);
181 break;
182 case AF_INET6:
183 ((struct sockaddr_in6 *)sap)->sin6_port = htons(port);
184 break;
185 }
186}
187
188#define IPV6_SCOPE_DELIMITER '%'
189#define IPV6_SCOPE_ID_LEN sizeof("%nnnnnnnnnn")
190
154#endif /* __KERNEL__ */ 191#endif /* __KERNEL__ */
155#endif /* _LINUX_SUNRPC_CLNT_H */ 192#endif /* _LINUX_SUNRPC_CLNT_H */
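rpc_get_port()/rpc_set_port() hide the AF_INET vs AF_INET6 difference when reading or patching the port inside a sockaddr. A small usage sketch (the function name and the default port value are only illustrative):

#include <linux/sunrpc/clnt.h>

static unsigned short example_fill_port(struct sockaddr *sap)
{
	/* Fall back to the well-known NFS port if none was supplied. */
	if (rpc_get_port(sap) == 0)
		rpc_set_port(sap, 2049);
	return rpc_get_port(sap);
}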
diff --git a/include/linux/sunrpc/msg_prot.h b/include/linux/sunrpc/msg_prot.h
index 70df4f1d8847..77e624883393 100644
--- a/include/linux/sunrpc/msg_prot.h
+++ b/include/linux/sunrpc/msg_prot.h
@@ -189,7 +189,22 @@ typedef __be32 rpc_fraghdr;
189 * Additionally, the two alternative forms specified in Section 2.2 of 189 * Additionally, the two alternative forms specified in Section 2.2 of
190 * [RFC2373] are also acceptable. 190 * [RFC2373] are also acceptable.
191 */ 191 */
192#define RPCBIND_MAXUADDRLEN (56u) 192
193#include <linux/inet.h>
194
195/* Maximum size of the port number part of a universal address */
196#define RPCBIND_MAXUADDRPLEN sizeof(".255.255")
197
198/* Maximum size of an IPv4 universal address */
199#define RPCBIND_MAXUADDR4LEN \
200 (INET_ADDRSTRLEN + RPCBIND_MAXUADDRPLEN)
201
202/* Maximum size of an IPv6 universal address */
203#define RPCBIND_MAXUADDR6LEN \
204 (INET6_ADDRSTRLEN + RPCBIND_MAXUADDRPLEN)
205
206/* Assume INET6_ADDRSTRLEN will always be larger than INET_ADDRSTRLEN... */
207#define RPCBIND_MAXUADDRLEN RPCBIND_MAXUADDR6LEN
193 208
194#endif /* __KERNEL__ */ 209#endif /* __KERNEL__ */
195#endif /* _LINUX_SUNRPC_MSGPROT_H_ */ 210#endif /* _LINUX_SUNRPC_MSGPROT_H_ */
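A universal address is the presentation form of the IP address followed by two extra dotted octets carrying the port, high byte first. For example, port 2049 splits into 2049 / 256 = 8 and 2049 % 256 = 1, so 192.0.2.5:2049 becomes "192.0.2.5.8.1"; the sizeof(".255.255") term above is just the worst case of that suffix. A sketch of the suffix encoding (rpc_sockaddr2uaddr(), declared in clnt.h above, builds the full string for real):

#include <linux/kernel.h>

/* Build the ".hi.lo" port suffix of a universal address by hand. */
static void uaddr_port_suffix(unsigned short port, char *buf, size_t len)
{
	snprintf(buf, len, ".%u.%u", port >> 8, port & 0xff);
	/* port 2049 -> ".8.1"; never longer than sizeof(".255.255") */
}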
diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h
index cea764c2359f..cf14db975da0 100644
--- a/include/linux/sunrpc/rpc_pipe_fs.h
+++ b/include/linux/sunrpc/rpc_pipe_fs.h
@@ -3,6 +3,8 @@
3 3
4#ifdef __KERNEL__ 4#ifdef __KERNEL__
5 5
6#include <linux/workqueue.h>
7
6struct rpc_pipe_msg { 8struct rpc_pipe_msg {
7 struct list_head list; 9 struct list_head list;
8 void *data; 10 void *data;
@@ -32,8 +34,8 @@ struct rpc_inode {
32 wait_queue_head_t waitq; 34 wait_queue_head_t waitq;
33#define RPC_PIPE_WAIT_FOR_OPEN 1 35#define RPC_PIPE_WAIT_FOR_OPEN 1
34 int flags; 36 int flags;
35 struct rpc_pipe_ops *ops;
36 struct delayed_work queue_timeout; 37 struct delayed_work queue_timeout;
38 const struct rpc_pipe_ops *ops;
37}; 39};
38 40
39static inline struct rpc_inode * 41static inline struct rpc_inode *
@@ -44,9 +46,19 @@ RPC_I(struct inode *inode)
44 46
45extern int rpc_queue_upcall(struct inode *, struct rpc_pipe_msg *); 47extern int rpc_queue_upcall(struct inode *, struct rpc_pipe_msg *);
46 48
47extern struct dentry *rpc_mkdir(char *, struct rpc_clnt *); 49struct rpc_clnt;
48extern int rpc_rmdir(struct dentry *); 50extern struct dentry *rpc_create_client_dir(struct dentry *, struct qstr *, struct rpc_clnt *);
49extern struct dentry *rpc_mkpipe(struct dentry *, const char *, void *, struct rpc_pipe_ops *, int flags); 51extern int rpc_remove_client_dir(struct dentry *);
52
53struct cache_detail;
54extern struct dentry *rpc_create_cache_dir(struct dentry *,
55 struct qstr *,
56 mode_t umode,
57 struct cache_detail *);
58extern void rpc_remove_cache_dir(struct dentry *);
59
60extern struct dentry *rpc_mkpipe(struct dentry *, const char *, void *,
61 const struct rpc_pipe_ops *, int flags);
50extern int rpc_unlink(struct dentry *); 62extern int rpc_unlink(struct dentry *);
51extern struct vfsmount *rpc_get_mount(void); 63extern struct vfsmount *rpc_get_mount(void);
52extern void rpc_put_mount(void); 64extern void rpc_put_mount(void);
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index b99c625fddfe..7da466ba4b0d 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -117,17 +117,15 @@ static inline __be32 *xdr_encode_array(__be32 *p, const void *s, unsigned int le
117static inline __be32 * 117static inline __be32 *
118xdr_encode_hyper(__be32 *p, __u64 val) 118xdr_encode_hyper(__be32 *p, __u64 val)
119{ 119{
120 *p++ = htonl(val >> 32); 120 *(__be64 *)p = cpu_to_be64(val);
121 *p++ = htonl(val & 0xFFFFFFFF); 121 return p + 2;
122 return p;
123} 122}
124 123
125static inline __be32 * 124static inline __be32 *
126xdr_decode_hyper(__be32 *p, __u64 *valp) 125xdr_decode_hyper(__be32 *p, __u64 *valp)
127{ 126{
128 *valp = ((__u64) ntohl(*p++)) << 32; 127 *valp = be64_to_cpup((__be64 *)p);
129 *valp |= ntohl(*p++); 128 return p + 2;
130 return p;
131} 129}
132 130
133/* 131/*
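The rewritten helpers store and load the 64-bit value with a single byte-swapping access instead of two 32-bit ones; callers still advance by two XDR words. A round-trip sketch:

#include <linux/sunrpc/xdr.h>

static void xdr_hyper_roundtrip(void)
{
	/* keep 8-byte alignment, since the helpers access the buffer as __be64 */
	__be32 buf[2] __aligned(8);
	__u64 in = 0x0123456789abcdefULL, out;

	xdr_encode_hyper(buf, in);	/* writes 8 big-endian bytes */
	xdr_decode_hyper(buf, &out);	/* out == in */
}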
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 1175d58efc2e..c090df442572 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -38,10 +38,8 @@ enum rpc_display_format_t {
38 RPC_DISPLAY_ADDR = 0, 38 RPC_DISPLAY_ADDR = 0,
39 RPC_DISPLAY_PORT, 39 RPC_DISPLAY_PORT,
40 RPC_DISPLAY_PROTO, 40 RPC_DISPLAY_PROTO,
41 RPC_DISPLAY_ALL,
42 RPC_DISPLAY_HEX_ADDR, 41 RPC_DISPLAY_HEX_ADDR,
43 RPC_DISPLAY_HEX_PORT, 42 RPC_DISPLAY_HEX_PORT,
44 RPC_DISPLAY_UNIVERSAL_ADDR,
45 RPC_DISPLAY_NETID, 43 RPC_DISPLAY_NETID,
46 RPC_DISPLAY_MAX, 44 RPC_DISPLAY_MAX,
47}; 45};
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index cb1a6631b8f4..73b1f1cec423 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -14,7 +14,6 @@ struct scatterlist;
14 */ 14 */
15#define IO_TLB_SEGSIZE 128 15#define IO_TLB_SEGSIZE 128
16 16
17
18/* 17/*
19 * log of the size of each IO TLB slab. The number of slabs is command line 18 * log of the size of each IO TLB slab. The number of slabs is command line
20 * controllable. 19 * controllable.
@@ -24,16 +23,6 @@ struct scatterlist;
24extern void 23extern void
25swiotlb_init(void); 24swiotlb_init(void);
26 25
27extern void *swiotlb_alloc_boot(size_t bytes, unsigned long nslabs);
28extern void *swiotlb_alloc(unsigned order, unsigned long nslabs);
29
30extern dma_addr_t swiotlb_phys_to_bus(struct device *hwdev,
31 phys_addr_t address);
32extern phys_addr_t swiotlb_bus_to_phys(struct device *hwdev,
33 dma_addr_t address);
34
35extern int swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size);
36
37extern void 26extern void
38*swiotlb_alloc_coherent(struct device *hwdev, size_t size, 27*swiotlb_alloc_coherent(struct device *hwdev, size_t size,
39 dma_addr_t *dma_handle, gfp_t flags); 28 dma_addr_t *dma_handle, gfp_t flags);
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 80de7003d8c2..a8e37821cc60 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -64,6 +64,7 @@ struct perf_counter_attr;
64#include <linux/sem.h> 64#include <linux/sem.h>
65#include <asm/siginfo.h> 65#include <asm/siginfo.h>
66#include <asm/signal.h> 66#include <asm/signal.h>
67#include <linux/unistd.h>
67#include <linux/quota.h> 68#include <linux/quota.h>
68#include <linux/key.h> 69#include <linux/key.h>
69#include <trace/syscall.h> 70#include <trace/syscall.h>
@@ -97,6 +98,53 @@ struct perf_counter_attr;
97#define __SC_TEST5(t5, a5, ...) __SC_TEST(t5); __SC_TEST4(__VA_ARGS__) 98#define __SC_TEST5(t5, a5, ...) __SC_TEST(t5); __SC_TEST4(__VA_ARGS__)
98#define __SC_TEST6(t6, a6, ...) __SC_TEST(t6); __SC_TEST5(__VA_ARGS__) 99#define __SC_TEST6(t6, a6, ...) __SC_TEST(t6); __SC_TEST5(__VA_ARGS__)
99 100
101#ifdef CONFIG_EVENT_PROFILE
102#define TRACE_SYS_ENTER_PROFILE(sname) \
103static int prof_sysenter_enable_##sname(struct ftrace_event_call *event_call) \
104{ \
105 int ret = 0; \
106 if (!atomic_inc_return(&event_enter_##sname.profile_count)) \
107 ret = reg_prof_syscall_enter("sys"#sname); \
108 return ret; \
109} \
110 \
111static void prof_sysenter_disable_##sname(struct ftrace_event_call *event_call)\
112{ \
113 if (atomic_add_negative(-1, &event_enter_##sname.profile_count)) \
114 unreg_prof_syscall_enter("sys"#sname); \
115}
116
117#define TRACE_SYS_EXIT_PROFILE(sname) \
118static int prof_sysexit_enable_##sname(struct ftrace_event_call *event_call) \
119{ \
120 int ret = 0; \
121 if (!atomic_inc_return(&event_exit_##sname.profile_count)) \
122 ret = reg_prof_syscall_exit("sys"#sname); \
123 return ret; \
124} \
125 \
126static void prof_sysexit_disable_##sname(struct ftrace_event_call *event_call) \
127{ \
128 if (atomic_add_negative(-1, &event_exit_##sname.profile_count)) \
129 unreg_prof_syscall_exit("sys"#sname); \
130}
131
132#define TRACE_SYS_ENTER_PROFILE_INIT(sname) \
133 .profile_count = ATOMIC_INIT(-1), \
134 .profile_enable = prof_sysenter_enable_##sname, \
135 .profile_disable = prof_sysenter_disable_##sname,
136
137#define TRACE_SYS_EXIT_PROFILE_INIT(sname) \
138 .profile_count = ATOMIC_INIT(-1), \
139 .profile_enable = prof_sysexit_enable_##sname, \
140 .profile_disable = prof_sysexit_disable_##sname,
141#else
142#define TRACE_SYS_ENTER_PROFILE(sname)
143#define TRACE_SYS_ENTER_PROFILE_INIT(sname)
144#define TRACE_SYS_EXIT_PROFILE(sname)
145#define TRACE_SYS_EXIT_PROFILE_INIT(sname)
146#endif
147
100#ifdef CONFIG_FTRACE_SYSCALLS 148#ifdef CONFIG_FTRACE_SYSCALLS
101#define __SC_STR_ADECL1(t, a) #a 149#define __SC_STR_ADECL1(t, a) #a
102#define __SC_STR_ADECL2(t, a, ...) #a, __SC_STR_ADECL1(__VA_ARGS__) 150#define __SC_STR_ADECL2(t, a, ...) #a, __SC_STR_ADECL1(__VA_ARGS__)
@@ -112,7 +160,81 @@ struct perf_counter_attr;
112#define __SC_STR_TDECL5(t, a, ...) #t, __SC_STR_TDECL4(__VA_ARGS__) 160#define __SC_STR_TDECL5(t, a, ...) #t, __SC_STR_TDECL4(__VA_ARGS__)
113#define __SC_STR_TDECL6(t, a, ...) #t, __SC_STR_TDECL5(__VA_ARGS__) 161#define __SC_STR_TDECL6(t, a, ...) #t, __SC_STR_TDECL5(__VA_ARGS__)
114 162
163#define SYSCALL_TRACE_ENTER_EVENT(sname) \
164 static struct ftrace_event_call event_enter_##sname; \
165 struct trace_event enter_syscall_print_##sname = { \
166 .trace = print_syscall_enter, \
167 }; \
168 static int init_enter_##sname(void) \
169 { \
170 int num, id; \
171 num = syscall_name_to_nr("sys"#sname); \
172 if (num < 0) \
173 return -ENOSYS; \
174 id = register_ftrace_event(&enter_syscall_print_##sname);\
175 if (!id) \
176 return -ENODEV; \
177 event_enter_##sname.id = id; \
178 set_syscall_enter_id(num, id); \
179 INIT_LIST_HEAD(&event_enter_##sname.fields); \
180 return 0; \
181 } \
182 TRACE_SYS_ENTER_PROFILE(sname); \
183 static struct ftrace_event_call __used \
184 __attribute__((__aligned__(4))) \
185 __attribute__((section("_ftrace_events"))) \
186 event_enter_##sname = { \
187 .name = "sys_enter"#sname, \
188 .system = "syscalls", \
189 .event = &event_syscall_enter, \
190 .raw_init = init_enter_##sname, \
191 .show_format = syscall_enter_format, \
192 .define_fields = syscall_enter_define_fields, \
193 .regfunc = reg_event_syscall_enter, \
194 .unregfunc = unreg_event_syscall_enter, \
195 .data = "sys"#sname, \
196 TRACE_SYS_ENTER_PROFILE_INIT(sname) \
197 }
198
199#define SYSCALL_TRACE_EXIT_EVENT(sname) \
200 static struct ftrace_event_call event_exit_##sname; \
201 struct trace_event exit_syscall_print_##sname = { \
202 .trace = print_syscall_exit, \
203 }; \
204 static int init_exit_##sname(void) \
205 { \
206 int num, id; \
207 num = syscall_name_to_nr("sys"#sname); \
208 if (num < 0) \
209 return -ENOSYS; \
210 id = register_ftrace_event(&exit_syscall_print_##sname);\
211 if (!id) \
212 return -ENODEV; \
213 event_exit_##sname.id = id; \
214 set_syscall_exit_id(num, id); \
215 INIT_LIST_HEAD(&event_exit_##sname.fields); \
216 return 0; \
217 } \
218 TRACE_SYS_EXIT_PROFILE(sname); \
219 static struct ftrace_event_call __used \
220 __attribute__((__aligned__(4))) \
221 __attribute__((section("_ftrace_events"))) \
222 event_exit_##sname = { \
223 .name = "sys_exit"#sname, \
224 .system = "syscalls", \
225 .event = &event_syscall_exit, \
226 .raw_init = init_exit_##sname, \
227 .show_format = syscall_exit_format, \
228 .define_fields = syscall_exit_define_fields, \
229 .regfunc = reg_event_syscall_exit, \
230 .unregfunc = unreg_event_syscall_exit, \
231 .data = "sys"#sname, \
232 TRACE_SYS_EXIT_PROFILE_INIT(sname) \
233 }
234
115#define SYSCALL_METADATA(sname, nb) \ 235#define SYSCALL_METADATA(sname, nb) \
236 SYSCALL_TRACE_ENTER_EVENT(sname); \
237 SYSCALL_TRACE_EXIT_EVENT(sname); \
116 static const struct syscall_metadata __used \ 238 static const struct syscall_metadata __used \
117 __attribute__((__aligned__(4))) \ 239 __attribute__((__aligned__(4))) \
118 __attribute__((section("__syscalls_metadata"))) \ 240 __attribute__((section("__syscalls_metadata"))) \
@@ -121,18 +243,23 @@ struct perf_counter_attr;
121 .nb_args = nb, \ 243 .nb_args = nb, \
122 .types = types_##sname, \ 244 .types = types_##sname, \
123 .args = args_##sname, \ 245 .args = args_##sname, \
124 } 246 .enter_event = &event_enter_##sname, \
247 .exit_event = &event_exit_##sname, \
248 };
125 249
126#define SYSCALL_DEFINE0(sname) \ 250#define SYSCALL_DEFINE0(sname) \
251 SYSCALL_TRACE_ENTER_EVENT(_##sname); \
252 SYSCALL_TRACE_EXIT_EVENT(_##sname); \
127 static const struct syscall_metadata __used \ 253 static const struct syscall_metadata __used \
128 __attribute__((__aligned__(4))) \ 254 __attribute__((__aligned__(4))) \
129 __attribute__((section("__syscalls_metadata"))) \ 255 __attribute__((section("__syscalls_metadata"))) \
130 __syscall_meta_##sname = { \ 256 __syscall_meta_##sname = { \
131 .name = "sys_"#sname, \ 257 .name = "sys_"#sname, \
132 .nb_args = 0, \ 258 .nb_args = 0, \
259 .enter_event = &event_enter__##sname, \
260 .exit_event = &event_exit__##sname, \
133 }; \ 261 }; \
134 asmlinkage long sys_##sname(void) 262 asmlinkage long sys_##sname(void)
135
136#else 263#else
137#define SYSCALL_DEFINE0(name) asmlinkage long sys_##name(void) 264#define SYSCALL_DEFINE0(name) asmlinkage long sys_##name(void)
138#endif 265#endif
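Concretely, for a zero-argument call such as SYSCALL_DEFINE0(getpid) the helper macros are invoked with "_getpid", so they emit ftrace_event_call objects named event_enter__getpid and event_exit__getpid whose event names are "sys_enter_getpid" and "sys_exit_getpid", and the metadata initialiser points .enter_event/.exit_event at them. A rough, trimmed illustration of the generated pieces (section/alignment attributes and the init/profile glue are omitted; this is not the literal expansion):

static struct ftrace_event_call event_enter__getpid = {
	.name	= "sys_enter_getpid",
	.system	= "syscalls",
	/* .raw_init, .regfunc, ... as set up by SYSCALL_TRACE_ENTER_EVENT */
};
static struct ftrace_event_call event_exit__getpid = {
	.name	= "sys_exit_getpid",
	.system	= "syscalls",
};
static const struct syscall_metadata __syscall_meta_getpid = {
	.name		= "sys_getpid",
	.nb_args	= 0,
	.enter_event	= &event_enter__getpid,
	.exit_event	= &event_exit__getpid,
};
asmlinkage long sys_getpid(void);	/* the function body follows the macro */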
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 7402c1a27c4f..85e8cf7d393c 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -85,20 +85,29 @@ int arch_update_cpu_topology(void);
85#define ARCH_HAS_SCHED_WAKE_IDLE 85#define ARCH_HAS_SCHED_WAKE_IDLE
86/* Common values for SMT siblings */ 86/* Common values for SMT siblings */
87#ifndef SD_SIBLING_INIT 87#ifndef SD_SIBLING_INIT
88#define SD_SIBLING_INIT (struct sched_domain) { \ 88#define SD_SIBLING_INIT (struct sched_domain) { \
89 .min_interval = 1, \ 89 .min_interval = 1, \
90 .max_interval = 2, \ 90 .max_interval = 2, \
91 .busy_factor = 64, \ 91 .busy_factor = 64, \
92 .imbalance_pct = 110, \ 92 .imbalance_pct = 110, \
93 .flags = SD_LOAD_BALANCE \ 93 \
94 | SD_BALANCE_NEWIDLE \ 94 .flags = 1*SD_LOAD_BALANCE \
95 | SD_BALANCE_FORK \ 95 | 1*SD_BALANCE_NEWIDLE \
96 | SD_BALANCE_EXEC \ 96 | 1*SD_BALANCE_EXEC \
97 | SD_WAKE_AFFINE \ 97 | 1*SD_BALANCE_FORK \
98 | SD_WAKE_BALANCE \ 98 | 0*SD_WAKE_IDLE \
99 | SD_SHARE_CPUPOWER, \ 99 | 1*SD_WAKE_AFFINE \
100 .last_balance = jiffies, \ 100 | 1*SD_WAKE_BALANCE \
101 .balance_interval = 1, \ 101 | 1*SD_SHARE_CPUPOWER \
102 | 0*SD_POWERSAVINGS_BALANCE \
103 | 0*SD_SHARE_PKG_RESOURCES \
104 | 0*SD_SERIALIZE \
105 | 0*SD_WAKE_IDLE_FAR \
106 | 0*SD_PREFER_SIBLING \
107 , \
108 .last_balance = jiffies, \
109 .balance_interval = 1, \
110 .smt_gain = 1178, /* 15% */ \
102} 111}
103#endif 112#endif
104#endif /* CONFIG_SCHED_SMT */ 113#endif /* CONFIG_SCHED_SMT */
@@ -106,69 +115,94 @@ int arch_update_cpu_topology(void);
106#ifdef CONFIG_SCHED_MC 115#ifdef CONFIG_SCHED_MC
107/* Common values for MC siblings. for now mostly derived from SD_CPU_INIT */ 116/* Common values for MC siblings. for now mostly derived from SD_CPU_INIT */
108#ifndef SD_MC_INIT 117#ifndef SD_MC_INIT
109#define SD_MC_INIT (struct sched_domain) { \ 118#define SD_MC_INIT (struct sched_domain) { \
110 .min_interval = 1, \ 119 .min_interval = 1, \
111 .max_interval = 4, \ 120 .max_interval = 4, \
112 .busy_factor = 64, \ 121 .busy_factor = 64, \
113 .imbalance_pct = 125, \ 122 .imbalance_pct = 125, \
114 .cache_nice_tries = 1, \ 123 .cache_nice_tries = 1, \
115 .busy_idx = 2, \ 124 .busy_idx = 2, \
116 .wake_idx = 1, \ 125 .wake_idx = 1, \
117 .forkexec_idx = 1, \ 126 .forkexec_idx = 1, \
118 .flags = SD_LOAD_BALANCE \ 127 \
119 | SD_BALANCE_FORK \ 128 .flags = 1*SD_LOAD_BALANCE \
120 | SD_BALANCE_EXEC \ 129 | 1*SD_BALANCE_NEWIDLE \
121 | SD_WAKE_AFFINE \ 130 | 1*SD_BALANCE_EXEC \
122 | SD_WAKE_BALANCE \ 131 | 1*SD_BALANCE_FORK \
123 | SD_SHARE_PKG_RESOURCES\ 132 | 1*SD_WAKE_IDLE \
124 | sd_balance_for_mc_power()\ 133 | 1*SD_WAKE_AFFINE \
125 | sd_power_saving_flags(),\ 134 | 1*SD_WAKE_BALANCE \
126 .last_balance = jiffies, \ 135 | 0*SD_SHARE_CPUPOWER \
127 .balance_interval = 1, \ 136 | 1*SD_SHARE_PKG_RESOURCES \
137 | 0*SD_SERIALIZE \
138 | 0*SD_WAKE_IDLE_FAR \
139 | sd_balance_for_mc_power() \
140 | sd_power_saving_flags() \
141 , \
142 .last_balance = jiffies, \
143 .balance_interval = 1, \
128} 144}
129#endif 145#endif
130#endif /* CONFIG_SCHED_MC */ 146#endif /* CONFIG_SCHED_MC */
131 147
132/* Common values for CPUs */ 148/* Common values for CPUs */
133#ifndef SD_CPU_INIT 149#ifndef SD_CPU_INIT
134#define SD_CPU_INIT (struct sched_domain) { \ 150#define SD_CPU_INIT (struct sched_domain) { \
135 .min_interval = 1, \ 151 .min_interval = 1, \
136 .max_interval = 4, \ 152 .max_interval = 4, \
137 .busy_factor = 64, \ 153 .busy_factor = 64, \
138 .imbalance_pct = 125, \ 154 .imbalance_pct = 125, \
139 .cache_nice_tries = 1, \ 155 .cache_nice_tries = 1, \
140 .busy_idx = 2, \ 156 .busy_idx = 2, \
141 .idle_idx = 1, \ 157 .idle_idx = 1, \
142 .newidle_idx = 2, \ 158 .newidle_idx = 2, \
143 .wake_idx = 1, \ 159 .wake_idx = 1, \
144 .forkexec_idx = 1, \ 160 .forkexec_idx = 1, \
145 .flags = SD_LOAD_BALANCE \ 161 \
146 | SD_BALANCE_EXEC \ 162 .flags = 1*SD_LOAD_BALANCE \
147 | SD_BALANCE_FORK \ 163 | 1*SD_BALANCE_NEWIDLE \
148 | SD_WAKE_AFFINE \ 164 | 1*SD_BALANCE_EXEC \
149 | SD_WAKE_BALANCE \ 165 | 1*SD_BALANCE_FORK \
150 | sd_balance_for_package_power()\ 166 | 1*SD_WAKE_IDLE \
151 | sd_power_saving_flags(),\ 167 | 0*SD_WAKE_AFFINE \
152 .last_balance = jiffies, \ 168 | 1*SD_WAKE_BALANCE \
153 .balance_interval = 1, \ 169 | 0*SD_SHARE_CPUPOWER \
170 | 0*SD_SHARE_PKG_RESOURCES \
171 | 0*SD_SERIALIZE \
172 | 0*SD_WAKE_IDLE_FAR \
173 | sd_balance_for_package_power() \
174 | sd_power_saving_flags() \
175 , \
176 .last_balance = jiffies, \
177 .balance_interval = 1, \
154} 178}
155#endif 179#endif
156 180
157/* sched_domains SD_ALLNODES_INIT for NUMA machines */ 181/* sched_domains SD_ALLNODES_INIT for NUMA machines */
158#define SD_ALLNODES_INIT (struct sched_domain) { \ 182#define SD_ALLNODES_INIT (struct sched_domain) { \
159 .min_interval = 64, \ 183 .min_interval = 64, \
160 .max_interval = 64*num_online_cpus(), \ 184 .max_interval = 64*num_online_cpus(), \
161 .busy_factor = 128, \ 185 .busy_factor = 128, \
162 .imbalance_pct = 133, \ 186 .imbalance_pct = 133, \
163 .cache_nice_tries = 1, \ 187 .cache_nice_tries = 1, \
164 .busy_idx = 3, \ 188 .busy_idx = 3, \
165 .idle_idx = 3, \ 189 .idle_idx = 3, \
166 .flags = SD_LOAD_BALANCE \ 190 .flags = 1*SD_LOAD_BALANCE \
167 | SD_BALANCE_NEWIDLE \ 191 | 1*SD_BALANCE_NEWIDLE \
168 | SD_WAKE_AFFINE \ 192 | 0*SD_BALANCE_EXEC \
169 | SD_SERIALIZE, \ 193 | 0*SD_BALANCE_FORK \
170 .last_balance = jiffies, \ 194 | 0*SD_WAKE_IDLE \
171 .balance_interval = 64, \ 195 | 1*SD_WAKE_AFFINE \
196 | 0*SD_WAKE_BALANCE \
197 | 0*SD_SHARE_CPUPOWER \
198 | 0*SD_POWERSAVINGS_BALANCE \
199 | 0*SD_SHARE_PKG_RESOURCES \
200 | 1*SD_SERIALIZE \
201 | 1*SD_WAKE_IDLE_FAR \
202 | 0*SD_PREFER_SIBLING \
203 , \
204 .last_balance = jiffies, \
205 .balance_interval = 64, \
172} 206}
173 207
174#ifdef CONFIG_NUMA 208#ifdef CONFIG_NUMA
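The 1*FLAG / 0*FLAG notation is purely arithmetic documentation: 0*SD_SERIALIZE contributes nothing to the OR while 1*SD_WAKE_AFFINE is just SD_WAKE_AFFINE, so every available flag can be listed once per domain and toggled by flipping a single digit. For instance:

#include <linux/sched.h>

static void flag_notation_demo(void)
{
	unsigned int a = 1*SD_LOAD_BALANCE | 0*SD_SERIALIZE | 1*SD_WAKE_AFFINE;
	unsigned int b =   SD_LOAD_BALANCE                   | SD_WAKE_AFFINE;

	/* a == b, because 0*x == 0 and 1*x == x */
	(void)a;
	(void)b;
}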
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index b9dc4ca0246f..63a3f7a80580 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -23,6 +23,8 @@ struct tracepoint;
23struct tracepoint { 23struct tracepoint {
24 const char *name; /* Tracepoint name */ 24 const char *name; /* Tracepoint name */
25 int state; /* State. */ 25 int state; /* State. */
26 void (*regfunc)(void);
27 void (*unregfunc)(void);
26 void **funcs; 28 void **funcs;
27} __attribute__((aligned(32))); /* 29} __attribute__((aligned(32))); /*
28 * Aligned on 32 bytes because it is 30 * Aligned on 32 bytes because it is
@@ -78,12 +80,16 @@ struct tracepoint {
78 return tracepoint_probe_unregister(#name, (void *)probe);\ 80 return tracepoint_probe_unregister(#name, (void *)probe);\
79 } 81 }
80 82
81#define DEFINE_TRACE(name) \ 83
84#define DEFINE_TRACE_FN(name, reg, unreg) \
82 static const char __tpstrtab_##name[] \ 85 static const char __tpstrtab_##name[] \
83 __attribute__((section("__tracepoints_strings"))) = #name; \ 86 __attribute__((section("__tracepoints_strings"))) = #name; \
84 struct tracepoint __tracepoint_##name \ 87 struct tracepoint __tracepoint_##name \
85 __attribute__((section("__tracepoints"), aligned(32))) = \ 88 __attribute__((section("__tracepoints"), aligned(32))) = \
86 { __tpstrtab_##name, 0, NULL } 89 { __tpstrtab_##name, 0, reg, unreg, NULL }
90
91#define DEFINE_TRACE(name) \
92 DEFINE_TRACE_FN(name, NULL, NULL);
87 93
88#define EXPORT_TRACEPOINT_SYMBOL_GPL(name) \ 94#define EXPORT_TRACEPOINT_SYMBOL_GPL(name) \
89 EXPORT_SYMBOL_GPL(__tracepoint_##name) 95 EXPORT_SYMBOL_GPL(__tracepoint_##name)
@@ -108,6 +114,7 @@ extern void tracepoint_update_probe_range(struct tracepoint *begin,
108 return -ENOSYS; \ 114 return -ENOSYS; \
109 } 115 }
110 116
117#define DEFINE_TRACE_FN(name, reg, unreg)
111#define DEFINE_TRACE(name) 118#define DEFINE_TRACE(name)
112#define EXPORT_TRACEPOINT_SYMBOL_GPL(name) 119#define EXPORT_TRACEPOINT_SYMBOL_GPL(name)
113#define EXPORT_TRACEPOINT_SYMBOL(name) 120#define EXPORT_TRACEPOINT_SYMBOL(name)
@@ -158,6 +165,15 @@ static inline void tracepoint_synchronize_unregister(void)
158 165
159#define PARAMS(args...) args 166#define PARAMS(args...) args
160 167
168#endif /* _LINUX_TRACEPOINT_H */
169
170/*
171 * Note: we keep the TRACE_EVENT outside the include file ifdef protection.
172 * This is due to the way trace events work. If a file includes two
173 * trace event headers under one "CREATE_TRACE_POINTS" the first include
174 * will override the TRACE_EVENT and break the second include.
175 */
176
161#ifndef TRACE_EVENT 177#ifndef TRACE_EVENT
162/* 178/*
163 * For use with the TRACE_EVENT macro: 179 * For use with the TRACE_EVENT macro:
@@ -259,10 +275,15 @@ static inline void tracepoint_synchronize_unregister(void)
 259 * can also be used by generic instrumentation like SystemTap), and 275
260 * it is also used to expose a structured trace record in 276 * it is also used to expose a structured trace record in
261 * /sys/kernel/debug/tracing/events/. 277 * /sys/kernel/debug/tracing/events/.
278 *
279 * A set of (un)registration functions can be passed to the variant
280 * TRACE_EVENT_FN to perform any (un)registration work.
262 */ 281 */
263 282
264#define TRACE_EVENT(name, proto, args, struct, assign, print) \ 283#define TRACE_EVENT(name, proto, args, struct, assign, print) \
265 DECLARE_TRACE(name, PARAMS(proto), PARAMS(args)) 284 DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
266#endif 285#define TRACE_EVENT_FN(name, proto, args, struct, \
286 assign, print, reg, unreg) \
287 DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
267 288
268#endif 289#endif /* ifdef TRACE_EVENT (see note above) */
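DEFINE_TRACE_FN() behaves like DEFINE_TRACE() but records a registration and an unregistration callback in the tracepoint, called when it switches from inactive to active and back (DEFINE_TRACE(name) is now just DEFINE_TRACE_FN(name, NULL, NULL)). A sketch; my_event and the two callbacks are hypothetical names, and TP_PROTO()/TP_ARGS() are the wrappers used by the TRACE_EVENT headers:

#include <linux/tracepoint.h>

DECLARE_TRACE(my_event, TP_PROTO(int value), TP_ARGS(value));

static void my_event_reg(void)
{
	/* e.g. enable hardware or bookkeeping that the probes rely on */
}

static void my_event_unreg(void)
{
	/* undo whatever my_event_reg() set up */
}

DEFINE_TRACE_FN(my_event, my_event_reg, my_event_unreg);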
diff --git a/include/linux/tty.h b/include/linux/tty.h
index e8c6c9136c97..0d3974f59c53 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -23,7 +23,7 @@
23 */ 23 */
24#define NR_UNIX98_PTY_DEFAULT 4096 /* Default maximum for Unix98 ptys */ 24#define NR_UNIX98_PTY_DEFAULT 4096 /* Default maximum for Unix98 ptys */
25#define NR_UNIX98_PTY_MAX (1 << MINORBITS) /* Absolute limit */ 25#define NR_UNIX98_PTY_MAX (1 << MINORBITS) /* Absolute limit */
26#define NR_LDISCS 19 26#define NR_LDISCS 20
27 27
28/* line disciplines */ 28/* line disciplines */
29#define N_TTY 0 29#define N_TTY 0
@@ -47,6 +47,8 @@
47#define N_SLCAN 17 /* Serial / USB serial CAN Adaptors */ 47#define N_SLCAN 17 /* Serial / USB serial CAN Adaptors */
48#define N_PPS 18 /* Pulse per Second */ 48#define N_PPS 18 /* Pulse per Second */
49 49
50#define N_V253 19 /* Codec control over voice modem */
51
50/* 52/*
51 * This character is the same as _POSIX_VDISABLE: it cannot be used as 53 * This character is the same as _POSIX_VDISABLE: it cannot be used as
52 * a c_cc[] character, but indicates that a particular special character 54 * a c_cc[] character, but indicates that a particular special character
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 13e1adf55c4c..6273fa97b527 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -240,6 +240,21 @@ static inline int cancel_delayed_work(struct delayed_work *work)
240 return ret; 240 return ret;
241} 241}
242 242
243/*
 244 * Like cancel_delayed_work() above, but uses del_timer() instead of
 245 * del_timer_sync(). This means that if it returns 0, the timer function may
 246 * still be running and the queueing may still be in progress.
247 */
248static inline int __cancel_delayed_work(struct delayed_work *work)
249{
250 int ret;
251
252 ret = del_timer(&work->timer);
253 if (ret)
254 work_clear_pending(&work->work);
255 return ret;
256}
257
243extern int cancel_delayed_work_sync(struct delayed_work *work); 258extern int cancel_delayed_work_sync(struct delayed_work *work);
244 259
245/* Obsolete. use cancel_delayed_work_sync() */ 260/* Obsolete. use cancel_delayed_work_sync() */
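__cancel_delayed_work() is meant for callers that cannot sleep, for example while holding a spinlock: it only tries to stop the timer, so a work item whose queueing has already started may still run. A sketch with hypothetical names (my_dwork is assumed to have been set up elsewhere with INIT_DELAYED_WORK()):

#include <linux/workqueue.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);
static struct delayed_work my_dwork;

static void stop_it_atomically(void)
{
	spin_lock_bh(&my_lock);
	__cancel_delayed_work(&my_dwork);	/* never sleeps; may miss a racing run */
	spin_unlock_bh(&my_lock);
}

static void stop_it_for_sure(void)
{
	cancel_delayed_work_sync(&my_dwork);	/* may sleep; guarantees it is not running */
}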
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 3224820c8514..78b1e4684cc9 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -14,17 +14,6 @@ extern struct list_head inode_in_use;
14extern struct list_head inode_unused; 14extern struct list_head inode_unused;
15 15
16/* 16/*
17 * Yes, writeback.h requires sched.h
18 * No, sched.h is not included from here.
19 */
20static inline int task_is_pdflush(struct task_struct *task)
21{
22 return task->flags & PF_FLUSHER;
23}
24
25#define current_is_pdflush() task_is_pdflush(current)
26
27/*
28 * fs/fs-writeback.c 17 * fs/fs-writeback.c
29 */ 18 */
30enum writeback_sync_modes { 19enum writeback_sync_modes {
@@ -40,6 +29,8 @@ enum writeback_sync_modes {
40struct writeback_control { 29struct writeback_control {
41 struct backing_dev_info *bdi; /* If !NULL, only write back this 30 struct backing_dev_info *bdi; /* If !NULL, only write back this
42 queue */ 31 queue */
32 struct super_block *sb; /* if !NULL, only write inodes from
33 this super_block */
43 enum writeback_sync_modes sync_mode; 34 enum writeback_sync_modes sync_mode;
44 unsigned long *older_than_this; /* If !NULL, only write back inodes 35 unsigned long *older_than_this; /* If !NULL, only write back inodes
45 older than this */ 36 older than this */
@@ -76,9 +67,13 @@ struct writeback_control {
76/* 67/*
77 * fs/fs-writeback.c 68 * fs/fs-writeback.c
78 */ 69 */
79void writeback_inodes(struct writeback_control *wbc); 70struct bdi_writeback;
80int inode_wait(void *); 71int inode_wait(void *);
81void sync_inodes_sb(struct super_block *, int wait); 72long writeback_inodes_sb(struct super_block *);
73long sync_inodes_sb(struct super_block *);
74void writeback_inodes_wbc(struct writeback_control *wbc);
75long wb_do_writeback(struct bdi_writeback *wb, int force_wait);
76void wakeup_flusher_threads(long nr_pages);
82 77
83/* writeback.h requires fs.h; it, too, is not included from here. */ 78/* writeback.h requires fs.h; it, too, is not included from here. */
84static inline void wait_on_inode(struct inode *inode) 79static inline void wait_on_inode(struct inode *inode)
@@ -98,7 +93,6 @@ static inline void inode_sync_wait(struct inode *inode)
98/* 93/*
99 * mm/page-writeback.c 94 * mm/page-writeback.c
100 */ 95 */
101int wakeup_pdflush(long nr_pages);
102void laptop_io_completion(void); 96void laptop_io_completion(void);
103void laptop_sync_completion(void); 97void laptop_sync_completion(void);
104void throttle_vm_writeout(gfp_t gfp_mask); 98void throttle_vm_writeout(gfp_t gfp_mask);
@@ -150,7 +144,6 @@ balance_dirty_pages_ratelimited(struct address_space *mapping)
150typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc, 144typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
151 void *data); 145 void *data);
152 146
153int pdflush_operation(void (*fn)(unsigned long), unsigned long arg0);
154int generic_writepages(struct address_space *mapping, 147int generic_writepages(struct address_space *mapping,
155 struct writeback_control *wbc); 148 struct writeback_control *wbc);
156int write_cache_pages(struct address_space *mapping, 149int write_cache_pages(struct address_space *mapping,
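The per-superblock entry points replace the old writeback_inodes()/pdflush interfaces: writeback_inodes_sb() starts writeback of a super_block's dirty inodes without waiting, sync_inodes_sb() writes them out and waits, and wakeup_flusher_threads() takes over the "flush roughly this many pages, system-wide" role of the removed wakeup_pdflush(). A sketch of a sync path using them (the function name is fictitious):

#include <linux/fs.h>
#include <linux/writeback.h>

static void example_sync_fs(struct super_block *sb, int wait)
{
	if (wait)
		sync_inodes_sb(sb);		/* write out and wait for completion */
	else
		writeback_inodes_sb(sb);	/* just kick off writeback */
}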
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
index d131e352cfe1..5c84af8c5f6f 100644
--- a/include/linux/xattr.h
+++ b/include/linux/xattr.h
@@ -49,6 +49,7 @@ struct xattr_handler {
49ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t); 49ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
50ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t); 50ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
51ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size); 51ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
52int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
52int vfs_setxattr(struct dentry *, const char *, const void *, size_t, int); 53int vfs_setxattr(struct dentry *, const char *, const void *, size_t, int);
53int vfs_removexattr(struct dentry *, const char *); 54int vfs_removexattr(struct dentry *, const char *);
54 55
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 7eafb8d54470..82a3191375f5 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -61,8 +61,8 @@ psched_tdiff_bounded(psched_time_t tv1, psched_time_t tv2, psched_time_t bound)
61} 61}
62 62
63struct qdisc_watchdog { 63struct qdisc_watchdog {
64 struct tasklet_hrtimer timer; 64 struct hrtimer timer;
65 struct Qdisc *qdisc; 65 struct Qdisc *qdisc;
66}; 66};
67 67
68extern void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc); 68extern void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc);
diff --git a/include/sound/ac97_codec.h b/include/sound/ac97_codec.h
index 251fc1cd5002..3dae3f799b9b 100644
--- a/include/sound/ac97_codec.h
+++ b/include/sound/ac97_codec.h
@@ -32,6 +32,9 @@
32#include "control.h" 32#include "control.h"
33#include "info.h" 33#include "info.h"
34 34
35/* maximum number of devices on the AC97 bus */
36#define AC97_BUS_MAX_DEVICES 4
37
35/* 38/*
36 * AC'97 codec registers 39 * AC'97 codec registers
37 */ 40 */
@@ -642,4 +645,10 @@ int snd_ac97_pcm_double_rate_rules(struct snd_pcm_runtime *runtime);
642/* ad hoc AC97 device driver access */ 645/* ad hoc AC97 device driver access */
643extern struct bus_type ac97_bus_type; 646extern struct bus_type ac97_bus_type;
644 647
648/* AC97 platform_data adding function */
649static inline void snd_ac97_dev_add_pdata(struct snd_ac97 *ac97, void *data)
650{
651 ac97->dev.platform_data = data;
652}
653
645#endif /* __SOUND_AC97_CODEC_H */ 654#endif /* __SOUND_AC97_CODEC_H */
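snd_ac97_dev_add_pdata() simply attaches driver-specific platform data to the AC97 device, so a codec driver can later pick it up from ac97->dev.platform_data. A minimal sketch; the my_codec_pdata structure and its field are placeholders:

#include <sound/ac97_codec.h>

static struct my_codec_pdata {
	int gpio_reset;
} my_pdata = { .gpio_reset = 42 };

/* called once the snd_ac97 device has been created: */
static void attach_pdata(struct snd_ac97 *ac97)
{
	snd_ac97_dev_add_pdata(ac97, &my_pdata);
}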
diff --git a/include/sound/asound.h b/include/sound/asound.h
index 82aed3f47534..1f57bb92eb5a 100644
--- a/include/sound/asound.h
+++ b/include/sound/asound.h
@@ -138,7 +138,7 @@ struct snd_hwdep_dsp_image {
138 * * 138 * *
139 *****************************************************************************/ 139 *****************************************************************************/
140 140
141#define SNDRV_PCM_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 9) 141#define SNDRV_PCM_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 10)
142 142
143typedef unsigned long snd_pcm_uframes_t; 143typedef unsigned long snd_pcm_uframes_t;
144typedef signed long snd_pcm_sframes_t; 144typedef signed long snd_pcm_sframes_t;
diff --git a/include/sound/core.h b/include/sound/core.h
index 309cb9659a05..a61499c22b0b 100644
--- a/include/sound/core.h
+++ b/include/sound/core.h
@@ -93,15 +93,6 @@ struct snd_device {
93 93
94#define snd_device(n) list_entry(n, struct snd_device, list) 94#define snd_device(n) list_entry(n, struct snd_device, list)
95 95
96/* monitor files for graceful shutdown (hotplug) */
97
98struct snd_monitor_file {
99 struct file *file;
100 const struct file_operations *disconnected_f_op;
101 struct list_head shutdown_list; /* still need to shutdown */
102 struct list_head list; /* link of monitor files */
103};
104
105/* main structure for soundcard */ 96/* main structure for soundcard */
106 97
107struct snd_card { 98struct snd_card {
@@ -311,9 +302,7 @@ int snd_component_add(struct snd_card *card, const char *component);
311int snd_card_file_add(struct snd_card *card, struct file *file); 302int snd_card_file_add(struct snd_card *card, struct file *file);
312int snd_card_file_remove(struct snd_card *card, struct file *file); 303int snd_card_file_remove(struct snd_card *card, struct file *file);
313 304
314#ifndef snd_card_set_dev
315#define snd_card_set_dev(card, devptr) ((card)->dev = (devptr)) 305#define snd_card_set_dev(card, devptr) ((card)->dev = (devptr))
316#endif
317 306
318/* device.c */ 307/* device.c */
319 308
@@ -340,18 +329,17 @@ unsigned int snd_dma_pointer(unsigned long dma, unsigned int size);
340struct resource; 329struct resource;
341void release_and_free_resource(struct resource *res); 330void release_and_free_resource(struct resource *res);
342 331
343#ifdef CONFIG_SND_VERBOSE_PRINTK
344void snd_verbose_printk(const char *file, int line, const char *format, ...)
345 __attribute__ ((format (printf, 3, 4)));
346#endif
347#if defined(CONFIG_SND_DEBUG) && defined(CONFIG_SND_VERBOSE_PRINTK)
348void snd_verbose_printd(const char *file, int line, const char *format, ...)
349 __attribute__ ((format (printf, 3, 4)));
350#endif
351
352/* --- */ 332/* --- */
353 333
354#ifdef CONFIG_SND_VERBOSE_PRINTK 334#if defined(CONFIG_SND_DEBUG) || defined(CONFIG_SND_VERBOSE_PRINTK)
335void __snd_printk(unsigned int level, const char *file, int line,
336 const char *format, ...)
337 __attribute__ ((format (printf, 4, 5)));
338#else
339#define __snd_printk(level, file, line, format, args...) \
340 printk(format, ##args)
341#endif
342
355/** 343/**
356 * snd_printk - printk wrapper 344 * snd_printk - printk wrapper
357 * @fmt: format string 345 * @fmt: format string
@@ -360,15 +348,9 @@ void snd_verbose_printd(const char *file, int line, const char *format, ...)
360 * when configured with CONFIG_SND_VERBOSE_PRINTK. 348 * when configured with CONFIG_SND_VERBOSE_PRINTK.
361 */ 349 */
362#define snd_printk(fmt, args...) \ 350#define snd_printk(fmt, args...) \
363 snd_verbose_printk(__FILE__, __LINE__, fmt ,##args) 351 __snd_printk(0, __FILE__, __LINE__, fmt, ##args)
364#else
365#define snd_printk(fmt, args...) \
366 printk(fmt ,##args)
367#endif
368 352
369#ifdef CONFIG_SND_DEBUG 353#ifdef CONFIG_SND_DEBUG
370
371#ifdef CONFIG_SND_VERBOSE_PRINTK
372/** 354/**
373 * snd_printd - debug printk 355 * snd_printd - debug printk
374 * @fmt: format string 356 * @fmt: format string
@@ -377,11 +359,7 @@ void snd_verbose_printd(const char *file, int line, const char *format, ...)
377 * Ignored when CONFIG_SND_DEBUG is not set. 359 * Ignored when CONFIG_SND_DEBUG is not set.
378 */ 360 */
379#define snd_printd(fmt, args...) \ 361#define snd_printd(fmt, args...) \
380 snd_verbose_printd(__FILE__, __LINE__, fmt ,##args) 362 __snd_printk(1, __FILE__, __LINE__, fmt, ##args)
381#else
382#define snd_printd(fmt, args...) \
383 printk(fmt ,##args)
384#endif
385 363
386/** 364/**
387 * snd_BUG - give a BUG warning message and stack trace 365 * snd_BUG - give a BUG warning message and stack trace
@@ -428,9 +406,10 @@ static inline int __snd_bug_on(int cond)
428 * Works like snd_printk() for debugging purposes. 406 * Works like snd_printk() for debugging purposes.
429 * Ignored when CONFIG_SND_DEBUG_VERBOSE is not set. 407 * Ignored when CONFIG_SND_DEBUG_VERBOSE is not set.
430 */ 408 */
431#define snd_printdd(format, args...) snd_printk(format, ##args) 409#define snd_printdd(format, args...) \
410 __snd_printk(2, __FILE__, __LINE__, format, ##args)
432#else 411#else
433#define snd_printdd(format, args...) /* nothing */ 412#define snd_printdd(format, args...) do { } while (0)
434#endif 413#endif
435 414
436 415
@@ -438,12 +417,10 @@ static inline int __snd_bug_on(int cond)
438 417
439/* for easier backward-porting */ 418/* for easier backward-porting */
440#if defined(CONFIG_GAMEPORT) || defined(CONFIG_GAMEPORT_MODULE) 419#if defined(CONFIG_GAMEPORT) || defined(CONFIG_GAMEPORT_MODULE)
441#ifndef gameport_set_dev_parent
442#define gameport_set_dev_parent(gp,xdev) ((gp)->dev.parent = (xdev)) 420#define gameport_set_dev_parent(gp,xdev) ((gp)->dev.parent = (xdev))
443#define gameport_set_port_data(gp,r) ((gp)->port_data = (r)) 421#define gameport_set_port_data(gp,r) ((gp)->port_data = (r))
444#define gameport_get_port_data(gp) (gp)->port_data 422#define gameport_get_port_data(gp) (gp)->port_data
445#endif 423#endif
446#endif
447 424
448/* PCI quirk list helper */ 425/* PCI quirk list helper */
449struct snd_pci_quirk { 426struct snd_pci_quirk {
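
As a quick orientation to the unified logging path above: snd_printk() always prints, snd_printd() requires CONFIG_SND_DEBUG, snd_printdd() additionally requires CONFIG_SND_DEBUG_VERBOSE, and all three now route through __snd_printk() with levels 0, 1 and 2. A minimal, hypothetical driver sketch (the function name and error value are invented):

	static int example_probe(void)
	{
		int err = -ENODEV;	/* assumed failure, for illustration only */

		snd_printk(KERN_ERR "example: probe failed: %d\n", err);	/* level 0 */
		snd_printd("example: debug detail\n");				/* level 1 */
		snd_printdd("example: verbose detail\n");			/* level 2 */
		return err;
	}
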
diff --git a/include/sound/info.h b/include/sound/info.h
index 7c2ee1a21b00..112e8949e1a7 100644
--- a/include/sound/info.h
+++ b/include/sound/info.h
@@ -110,13 +110,13 @@ void snd_card_info_read_oss(struct snd_info_buffer *buffer);
110static inline void snd_card_info_read_oss(struct snd_info_buffer *buffer) {} 110static inline void snd_card_info_read_oss(struct snd_info_buffer *buffer) {}
111#endif 111#endif
112 112
113int snd_iprintf(struct snd_info_buffer *buffer, char *fmt, ...) \ 113int snd_iprintf(struct snd_info_buffer *buffer, const char *fmt, ...) \
114 __attribute__ ((format (printf, 2, 3))); 114 __attribute__ ((format (printf, 2, 3)));
115int snd_info_init(void); 115int snd_info_init(void);
116int snd_info_done(void); 116int snd_info_done(void);
117 117
118int snd_info_get_line(struct snd_info_buffer *buffer, char *line, int len); 118int snd_info_get_line(struct snd_info_buffer *buffer, char *line, int len);
119char *snd_info_get_str(char *dest, char *src, int len); 119const char *snd_info_get_str(char *dest, const char *src, int len);
120struct snd_info_entry *snd_info_create_module_entry(struct module *module, 120struct snd_info_entry *snd_info_create_module_entry(struct module *module,
121 const char *name, 121 const char *name,
122 struct snd_info_entry *parent); 122 struct snd_info_entry *parent);
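
A sketch of a proc text callback written against the constified snd_iprintf() prototype above; the callback name is an assumption and the entry registration is not shown:

	static void example_proc_read(struct snd_info_entry *entry,
				      struct snd_info_buffer *buffer)
	{
		snd_iprintf(buffer, "status: %s\n", "ok");	/* fmt is now const char * */
	}
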
diff --git a/include/sound/memalloc.h b/include/sound/memalloc.h
index 7ccce94a5255..c42506212649 100644
--- a/include/sound/memalloc.h
+++ b/include/sound/memalloc.h
@@ -47,7 +47,11 @@ struct snd_dma_device {
47#define SNDRV_DMA_TYPE_UNKNOWN 0 /* not defined */ 47#define SNDRV_DMA_TYPE_UNKNOWN 0 /* not defined */
48#define SNDRV_DMA_TYPE_CONTINUOUS 1 /* continuous no-DMA memory */ 48#define SNDRV_DMA_TYPE_CONTINUOUS 1 /* continuous no-DMA memory */
49#define SNDRV_DMA_TYPE_DEV 2 /* generic device continuous */ 49#define SNDRV_DMA_TYPE_DEV 2 /* generic device continuous */
50#ifdef CONFIG_SND_DMA_SGBUF
50#define SNDRV_DMA_TYPE_DEV_SG 3 /* generic device SG-buffer */ 51#define SNDRV_DMA_TYPE_DEV_SG 3 /* generic device SG-buffer */
52#else
53#define SNDRV_DMA_TYPE_DEV_SG SNDRV_DMA_TYPE_DEV /* no SG-buf support */
54#endif
51 55
52/* 56/*
53 * info for buffer allocation 57 * info for buffer allocation
@@ -60,6 +64,7 @@ struct snd_dma_buffer {
60 void *private_data; /* private for allocator; don't touch */ 64 void *private_data; /* private for allocator; don't touch */
61}; 65};
62 66
67#ifdef CONFIG_SND_DMA_SGBUF
63/* 68/*
64 * Scatter-Gather generic device pages 69 * Scatter-Gather generic device pages
65 */ 70 */
@@ -107,6 +112,7 @@ static inline void *snd_sgbuf_get_ptr(struct snd_sg_buf *sgbuf, size_t offset)
107{ 112{
108 return sgbuf->table[offset >> PAGE_SHIFT].buf + offset % PAGE_SIZE; 113 return sgbuf->table[offset >> PAGE_SHIFT].buf + offset % PAGE_SIZE;
109} 114}
115#endif /* CONFIG_SND_DMA_SGBUF */
110 116
111/* allocate/release a buffer */ 117/* allocate/release a buffer */
112int snd_dma_alloc_pages(int type, struct device *dev, size_t size, 118int snd_dma_alloc_pages(int type, struct device *dev, size_t size,
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index 23893523dc8c..de6d981de5d6 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -902,6 +902,7 @@ int snd_pcm_lib_preallocate_pages_for_all(struct snd_pcm *pcm,
902int snd_pcm_lib_malloc_pages(struct snd_pcm_substream *substream, size_t size); 902int snd_pcm_lib_malloc_pages(struct snd_pcm_substream *substream, size_t size);
903int snd_pcm_lib_free_pages(struct snd_pcm_substream *substream); 903int snd_pcm_lib_free_pages(struct snd_pcm_substream *substream);
904 904
905#ifdef CONFIG_SND_DMA_SGBUF
905/* 906/*
906 * SG-buffer handling 907 * SG-buffer handling
907 */ 908 */
@@ -927,6 +928,28 @@ struct page *snd_pcm_sgbuf_ops_page(struct snd_pcm_substream *substream,
927unsigned int snd_pcm_sgbuf_get_chunk_size(struct snd_pcm_substream *substream, 928unsigned int snd_pcm_sgbuf_get_chunk_size(struct snd_pcm_substream *substream,
928 unsigned int ofs, unsigned int size); 929 unsigned int ofs, unsigned int size);
929 930
931#else /* !SND_DMA_SGBUF */
932/*
933 * fake using a continuous buffer
934 */
935static inline dma_addr_t
936snd_pcm_sgbuf_get_addr(struct snd_pcm_substream *substream, unsigned int ofs)
937{
938 return substream->runtime->dma_addr + ofs;
939}
940
941static inline void *
942snd_pcm_sgbuf_get_ptr(struct snd_pcm_substream *substream, unsigned int ofs)
943{
944 return substream->runtime->dma_area + ofs;
945}
946
947#define snd_pcm_sgbuf_ops_page NULL
948
949#define snd_pcm_sgbuf_get_chunk_size(subs, ofs, size) (size)
950
951#endif /* SND_DMA_SGBUF */
952
930/* handle mmap counter - PCM mmap callback should handle this counter properly */ 953/* handle mmap counter - PCM mmap callback should handle this counter properly */
931static inline void snd_pcm_mmap_data_open(struct vm_area_struct *area) 954static inline void snd_pcm_mmap_data_open(struct vm_area_struct *area)
932{ 955{
@@ -965,4 +988,6 @@ static inline void snd_pcm_limit_isa_dma_size(int dma, size_t *max)
965 988
966#define PCM_RUNTIME_CHECK(sub) snd_BUG_ON(!(sub) || !(sub)->runtime) 989#define PCM_RUNTIME_CHECK(sub) snd_BUG_ON(!(sub) || !(sub)->runtime)
967 990
991const char *snd_pcm_format_name(snd_pcm_format_t format);
992
968#endif /* __SOUND_PCM_H */ 993#endif /* __SOUND_PCM_H */
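
With the !CONFIG_SND_DMA_SGBUF fallbacks above, drivers can call the SG-buffer helpers unconditionally. A hedged sketch of a routine touching the buffer either way (the function name and the memset stand-in are placeholders; includes omitted):

	static void example_fill(struct snd_pcm_substream *substream,
				 unsigned int ofs, unsigned int bytes)
	{
		dma_addr_t addr = snd_pcm_sgbuf_get_addr(substream, ofs);	/* falls back to dma_addr + ofs */
		void *ptr = snd_pcm_sgbuf_get_ptr(substream, ofs);		/* falls back to dma_area + ofs */

		memset(ptr, 0, bytes);	/* stand-in for real data handling */
		(void)addr;
	}
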
diff --git a/include/sound/sh_fsi.h b/include/sound/sh_fsi.h
new file mode 100644
index 000000000000..c0227361a876
--- /dev/null
+++ b/include/sound/sh_fsi.h
@@ -0,0 +1,83 @@
1#ifndef __SOUND_FSI_H
2#define __SOUND_FSI_H
3
4/*
5 * Fifo-attached Serial Interface (FSI) support for SH7724
6 *
7 * Copyright (C) 2009 Renesas Solutions Corp.
8 * Kuninori Morimoto <morimoto.kuninori@renesas.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */
14
15/* flags format
16
17 * 0xABCDEEFF
18 *
19 * A: channel size for TDM (input)
 20 * B: channel size for TDM (output)
21 * C: inversion
22 * D: mode
23 * E: input format
24 * F: output format
25 */
26
27#include <linux/clk.h>
28#include <sound/soc.h>
29
30/* TDM channel */
31#define SH_FSI_SET_CH_I(x) ((x & 0xF) << 28)
32#define SH_FSI_SET_CH_O(x) ((x & 0xF) << 24)
33
34#define SH_FSI_CH_IMASK 0xF0000000
35#define SH_FSI_CH_OMASK 0x0F000000
36#define SH_FSI_GET_CH_I(x) ((x & SH_FSI_CH_IMASK) >> 28)
37#define SH_FSI_GET_CH_O(x) ((x & SH_FSI_CH_OMASK) >> 24)
38
39/* clock inversion */
40#define SH_FSI_INVERSION_MASK 0x00F00000
41#define SH_FSI_LRM_INV (1 << 20)
42#define SH_FSI_BRM_INV (1 << 21)
43#define SH_FSI_LRS_INV (1 << 22)
44#define SH_FSI_BRS_INV (1 << 23)
45
46/* mode */
47#define SH_FSI_MODE_MASK 0x000F0000
48#define SH_FSI_IN_SLAVE_MODE (1 << 16) /* default master mode */
49#define SH_FSI_OUT_SLAVE_MODE (1 << 17) /* default master mode */
50
51/* DI format */
52#define SH_FSI_FMT_MASK 0x000000FF
53#define SH_FSI_IFMT(x) (((SH_FSI_FMT_ ## x) & SH_FSI_FMT_MASK) << 8)
54#define SH_FSI_OFMT(x) (((SH_FSI_FMT_ ## x) & SH_FSI_FMT_MASK) << 0)
55#define SH_FSI_GET_IFMT(x) ((x >> 8) & SH_FSI_FMT_MASK)
56#define SH_FSI_GET_OFMT(x) ((x >> 0) & SH_FSI_FMT_MASK)
57
58#define SH_FSI_FMT_MONO (1 << 0)
59#define SH_FSI_FMT_MONO_DELAY (1 << 1)
60#define SH_FSI_FMT_PCM (1 << 2)
61#define SH_FSI_FMT_I2S (1 << 3)
62#define SH_FSI_FMT_TDM (1 << 4)
63#define SH_FSI_FMT_TDM_DELAY (1 << 5)
64
65#define SH_FSI_IFMT_TDM_CH(x) \
66 (SH_FSI_IFMT(TDM) | SH_FSI_SET_CH_I(x))
67#define SH_FSI_IFMT_TDM_DELAY_CH(x) \
68 (SH_FSI_IFMT(TDM_DELAY) | SH_FSI_SET_CH_I(x))
69
70#define SH_FSI_OFMT_TDM_CH(x) \
71 (SH_FSI_OFMT(TDM) | SH_FSI_SET_CH_O(x))
72#define SH_FSI_OFMT_TDM_DELAY_CH(x) \
73 (SH_FSI_OFMT(TDM_DELAY) | SH_FSI_SET_CH_O(x))
74
75struct sh_fsi_platform_info {
76 unsigned long porta_flags;
77 unsigned long portb_flags;
78};
79
80extern struct snd_soc_dai fsi_soc_dai[2];
81extern struct snd_soc_platform fsi_soc_platform;
82
83#endif /* __SOUND_FSI_H */
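
A sketch of how a board file might compose the flag word defined in this new header for port A; the channel count and clocking choice are purely illustrative:

	static struct sh_fsi_platform_info example_fsi_info = {
		/* I2S output as clock slave, 2-channel TDM input */
		.porta_flags = SH_FSI_OFMT(I2S) |
			       SH_FSI_OUT_SLAVE_MODE |
			       SH_FSI_IFMT_TDM_CH(2),
	};
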
diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h
index 352d7eee9b6d..97ca9af414dc 100644
--- a/include/sound/soc-dai.h
+++ b/include/sound/soc-dai.h
@@ -27,8 +27,8 @@ struct snd_pcm_substream;
27#define SND_SOC_DAIFMT_I2S 0 /* I2S mode */ 27#define SND_SOC_DAIFMT_I2S 0 /* I2S mode */
28#define SND_SOC_DAIFMT_RIGHT_J 1 /* Right Justified mode */ 28#define SND_SOC_DAIFMT_RIGHT_J 1 /* Right Justified mode */
29#define SND_SOC_DAIFMT_LEFT_J 2 /* Left Justified mode */ 29#define SND_SOC_DAIFMT_LEFT_J 2 /* Left Justified mode */
30#define SND_SOC_DAIFMT_DSP_A 3 /* L data msb after FRM LRC */ 30#define SND_SOC_DAIFMT_DSP_A 3 /* L data MSB after FRM LRC */
31#define SND_SOC_DAIFMT_DSP_B 4 /* L data msb during FRM LRC */ 31#define SND_SOC_DAIFMT_DSP_B 4 /* L data MSB during FRM LRC */
32#define SND_SOC_DAIFMT_AC97 5 /* AC97 */ 32#define SND_SOC_DAIFMT_AC97 5 /* AC97 */
33 33
34/* left and right justified also known as MSB and LSB respectively */ 34/* left and right justified also known as MSB and LSB respectively */
@@ -38,7 +38,7 @@ struct snd_pcm_substream;
38/* 38/*
39 * DAI Clock gating. 39 * DAI Clock gating.
40 * 40 *
 41 * DAI bit clocks can be be gated (disabled) when not the DAI is not 41 * DAI bit clocks can be gated (disabled) when the DAI is not
42 * sending or receiving PCM data in a frame. This can be used to save power. 42 * sending or receiving PCM data in a frame. This can be used to save power.
43 */ 43 */
44#define SND_SOC_DAIFMT_CONT (0 << 4) /* continuous clock */ 44#define SND_SOC_DAIFMT_CONT (0 << 4) /* continuous clock */
@@ -51,21 +51,21 @@ struct snd_pcm_substream;
51 * format. 51 * format.
52 */ 52 */
53#define SND_SOC_DAIFMT_NB_NF (0 << 8) /* normal bit clock + frame */ 53#define SND_SOC_DAIFMT_NB_NF (0 << 8) /* normal bit clock + frame */
54#define SND_SOC_DAIFMT_NB_IF (1 << 8) /* normal bclk + inv frm */ 54#define SND_SOC_DAIFMT_NB_IF (1 << 8) /* normal BCLK + inv FRM */
55#define SND_SOC_DAIFMT_IB_NF (2 << 8) /* invert bclk + nor frm */ 55#define SND_SOC_DAIFMT_IB_NF (2 << 8) /* invert BCLK + nor FRM */
56#define SND_SOC_DAIFMT_IB_IF (3 << 8) /* invert bclk + frm */ 56#define SND_SOC_DAIFMT_IB_IF (3 << 8) /* invert BCLK + FRM */
57 57
58/* 58/*
59 * DAI hardware clock masters. 59 * DAI hardware clock masters.
60 * 60 *
61 * This is wrt the codec, the inverse is true for the interface 61 * This is wrt the codec, the inverse is true for the interface
62 * i.e. if the codec is clk and frm master then the interface is 62 * i.e. if the codec is clk and FRM master then the interface is
63 * clk and frame slave. 63 * clk and frame slave.
64 */ 64 */
65#define SND_SOC_DAIFMT_CBM_CFM (0 << 12) /* codec clk & frm master */ 65#define SND_SOC_DAIFMT_CBM_CFM (0 << 12) /* codec clk & FRM master */
66#define SND_SOC_DAIFMT_CBS_CFM (1 << 12) /* codec clk slave & frm master */ 66#define SND_SOC_DAIFMT_CBS_CFM (1 << 12) /* codec clk slave & FRM master */
67#define SND_SOC_DAIFMT_CBM_CFS (2 << 12) /* codec clk master & frame slave */ 67#define SND_SOC_DAIFMT_CBM_CFS (2 << 12) /* codec clk master & frame slave */
68#define SND_SOC_DAIFMT_CBS_CFS (3 << 12) /* codec clk & frm slave */ 68#define SND_SOC_DAIFMT_CBS_CFS (3 << 12) /* codec clk & FRM slave */
69 69
70#define SND_SOC_DAIFMT_FORMAT_MASK 0x000f 70#define SND_SOC_DAIFMT_FORMAT_MASK 0x000f
71#define SND_SOC_DAIFMT_CLOCK_MASK 0x00f0 71#define SND_SOC_DAIFMT_CLOCK_MASK 0x00f0
@@ -78,7 +78,13 @@ struct snd_pcm_substream;
78#define SND_SOC_CLOCK_IN 0 78#define SND_SOC_CLOCK_IN 0
79#define SND_SOC_CLOCK_OUT 1 79#define SND_SOC_CLOCK_OUT 1
80 80
81#define SND_SOC_STD_AC97_FMTS (SNDRV_PCM_FMTBIT_S16_LE |\ 81#define SND_SOC_STD_AC97_FMTS (SNDRV_PCM_FMTBIT_S8 |\
82 SNDRV_PCM_FMTBIT_S16_LE |\
83 SNDRV_PCM_FMTBIT_S16_BE |\
84 SNDRV_PCM_FMTBIT_S20_3LE |\
85 SNDRV_PCM_FMTBIT_S20_3BE |\
86 SNDRV_PCM_FMTBIT_S24_3LE |\
87 SNDRV_PCM_FMTBIT_S24_3BE |\
82 SNDRV_PCM_FMTBIT_S32_LE |\ 88 SNDRV_PCM_FMTBIT_S32_LE |\
83 SNDRV_PCM_FMTBIT_S32_BE) 89 SNDRV_PCM_FMTBIT_S32_BE)
84 90
@@ -106,7 +112,7 @@ int snd_soc_dai_set_pll(struct snd_soc_dai *dai,
106int snd_soc_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt); 112int snd_soc_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt);
107 113
108int snd_soc_dai_set_tdm_slot(struct snd_soc_dai *dai, 114int snd_soc_dai_set_tdm_slot(struct snd_soc_dai *dai,
109 unsigned int mask, int slots); 115 unsigned int tx_mask, unsigned int rx_mask, int slots, int slot_width);
110 116
111int snd_soc_dai_set_tristate(struct snd_soc_dai *dai, int tristate); 117int snd_soc_dai_set_tristate(struct snd_soc_dai *dai, int tristate);
112 118
@@ -116,12 +122,12 @@ int snd_soc_dai_digital_mute(struct snd_soc_dai *dai, int mute);
116/* 122/*
117 * Digital Audio Interface. 123 * Digital Audio Interface.
118 * 124 *
119 * Describes the Digital Audio Interface in terms of it's ALSA, DAI and AC97 125 * Describes the Digital Audio Interface in terms of its ALSA, DAI and AC97
120 * operations an capabilities. Codec and platfom drivers will register a this 126 * operations and capabilities. Codec and platform drivers will register this
121 * structure for every DAI they have. 127 * structure for every DAI they have.
122 * 128 *
 123 * This structure covers the clocking, formatting and ALSA operations for each 129 * This structure covers the clocking, formatting and ALSA operations for each
124 * interface a 130 * interface.
125 */ 131 */
126struct snd_soc_dai_ops { 132struct snd_soc_dai_ops {
127 /* 133 /*
@@ -140,7 +146,8 @@ struct snd_soc_dai_ops {
140 */ 146 */
141 int (*set_fmt)(struct snd_soc_dai *dai, unsigned int fmt); 147 int (*set_fmt)(struct snd_soc_dai *dai, unsigned int fmt);
142 int (*set_tdm_slot)(struct snd_soc_dai *dai, 148 int (*set_tdm_slot)(struct snd_soc_dai *dai,
143 unsigned int mask, int slots); 149 unsigned int tx_mask, unsigned int rx_mask,
150 int slots, int slot_width);
144 int (*set_tristate)(struct snd_soc_dai *dai, int tristate); 151 int (*set_tristate)(struct snd_soc_dai *dai, int tristate);
145 152
146 /* 153 /*
@@ -179,6 +186,7 @@ struct snd_soc_dai {
179 int ac97_control; 186 int ac97_control;
180 187
181 struct device *dev; 188 struct device *dev;
189 void *ac97_pdata; /* platform_data for the ac97 codec */
182 190
183 /* DAI callbacks */ 191 /* DAI callbacks */
184 int (*probe)(struct platform_device *pdev, 192 int (*probe)(struct platform_device *pdev,
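
The widened snd_soc_dai_set_tdm_slot() prototype above now takes separate TX/RX masks plus a slot width. A hypothetical machine-driver call site (the DAI pointer and slot layout are assumptions):

	static int example_set_tdm(struct snd_soc_dai *cpu_dai)
	{
		/* 4 slots of 32 bits: transmit on slots 0-1, receive on slots 2-3 */
		return snd_soc_dai_set_tdm_slot(cpu_dai, 0x3, 0xc, 4, 32);
	}
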
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index ec8a45f9a069..c1410e3191e3 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -137,6 +137,12 @@
137 .event_flags = SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD} 137 .event_flags = SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD}
138 138
139/* stream domain */ 139/* stream domain */
140#define SND_SOC_DAPM_AIF_IN(wname, stname, wslot, wreg, wshift, winvert) \
141{ .id = snd_soc_dapm_aif_in, .name = wname, .sname = stname, \
142 .reg = wreg, .shift = wshift, .invert = winvert }
143#define SND_SOC_DAPM_AIF_OUT(wname, stname, wslot, wreg, wshift, winvert) \
144{ .id = snd_soc_dapm_aif_out, .name = wname, .sname = stname, \
145 .reg = wreg, .shift = wshift, .invert = winvert }
140#define SND_SOC_DAPM_DAC(wname, stname, wreg, wshift, winvert) \ 146#define SND_SOC_DAPM_DAC(wname, stname, wreg, wshift, winvert) \
141{ .id = snd_soc_dapm_dac, .name = wname, .sname = stname, .reg = wreg, \ 147{ .id = snd_soc_dapm_dac, .name = wname, .sname = stname, .reg = wreg, \
142 .shift = wshift, .invert = winvert} 148 .shift = wshift, .invert = winvert}
@@ -279,9 +285,11 @@ int snd_soc_dapm_add_routes(struct snd_soc_codec *codec,
279/* dapm events */ 285/* dapm events */
280int snd_soc_dapm_stream_event(struct snd_soc_codec *codec, char *stream, 286int snd_soc_dapm_stream_event(struct snd_soc_codec *codec, char *stream,
281 int event); 287 int event);
288void snd_soc_dapm_shutdown(struct snd_soc_device *socdev);
282 289
283/* dapm sys fs - used by the core */ 290/* dapm sys fs - used by the core */
284int snd_soc_dapm_sys_add(struct device *dev); 291int snd_soc_dapm_sys_add(struct device *dev);
292void snd_soc_dapm_debugfs_init(struct snd_soc_codec *codec);
285 293
286/* dapm audio pin control and status */ 294/* dapm audio pin control and status */
287int snd_soc_dapm_enable_pin(struct snd_soc_codec *codec, const char *pin); 295int snd_soc_dapm_enable_pin(struct snd_soc_codec *codec, const char *pin);
@@ -311,6 +319,8 @@ enum snd_soc_dapm_type {
311 snd_soc_dapm_pre, /* machine specific pre widget - exec first */ 319 snd_soc_dapm_pre, /* machine specific pre widget - exec first */
312 snd_soc_dapm_post, /* machine specific post widget - exec last */ 320 snd_soc_dapm_post, /* machine specific post widget - exec last */
313 snd_soc_dapm_supply, /* power/clock supply */ 321 snd_soc_dapm_supply, /* power/clock supply */
322 snd_soc_dapm_aif_in, /* audio interface input */
323 snd_soc_dapm_aif_out, /* audio interface output */
314}; 324};
315 325
316/* 326/*
diff --git a/include/sound/soc.h b/include/sound/soc.h
index cf6111d72b17..475cb7ed6bec 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -135,6 +135,28 @@
135 .info = snd_soc_info_volsw, \ 135 .info = snd_soc_info_volsw, \
136 .get = xhandler_get, .put = xhandler_put, \ 136 .get = xhandler_get, .put = xhandler_put, \
137 .private_value = SOC_SINGLE_VALUE(xreg, xshift, xmax, xinvert) } 137 .private_value = SOC_SINGLE_VALUE(xreg, xshift, xmax, xinvert) }
138#define SOC_DOUBLE_EXT_TLV(xname, xreg, shift_left, shift_right, xmax, xinvert,\
139 xhandler_get, xhandler_put, tlv_array) \
140{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \
141 .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \
142 SNDRV_CTL_ELEM_ACCESS_READWRITE, \
143 .tlv.p = (tlv_array), \
144 .info = snd_soc_info_volsw, \
145 .get = xhandler_get, .put = xhandler_put, \
146 .private_value = (unsigned long)&(struct soc_mixer_control) \
147 {.reg = xreg, .shift = shift_left, .rshift = shift_right, \
148 .max = xmax, .invert = xinvert} }
149#define SOC_DOUBLE_R_EXT_TLV(xname, reg_left, reg_right, xshift, xmax, xinvert,\
150 xhandler_get, xhandler_put, tlv_array) \
151{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \
152 .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \
153 SNDRV_CTL_ELEM_ACCESS_READWRITE, \
154 .tlv.p = (tlv_array), \
155 .info = snd_soc_info_volsw_2r, \
156 .get = xhandler_get, .put = xhandler_put, \
157 .private_value = (unsigned long)&(struct soc_mixer_control) \
158 {.reg = reg_left, .rreg = reg_right, .shift = xshift, \
159 .max = xmax, .invert = xinvert} }
138#define SOC_SINGLE_BOOL_EXT(xname, xdata, xhandler_get, xhandler_put) \ 160#define SOC_SINGLE_BOOL_EXT(xname, xdata, xhandler_get, xhandler_put) \
139{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \ 161{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
140 .info = snd_soc_info_bool_ext, \ 162 .info = snd_soc_info_bool_ext, \
@@ -183,14 +205,28 @@ struct snd_soc_jack_gpio;
183#endif 205#endif
184 206
185typedef int (*hw_write_t)(void *,const char* ,int); 207typedef int (*hw_write_t)(void *,const char* ,int);
186typedef int (*hw_read_t)(void *,char* ,int);
187 208
188extern struct snd_ac97_bus_ops soc_ac97_ops; 209extern struct snd_ac97_bus_ops soc_ac97_ops;
189 210
211enum snd_soc_control_type {
212 SND_SOC_CUSTOM,
213 SND_SOC_I2C,
214 SND_SOC_SPI,
215};
216
190int snd_soc_register_platform(struct snd_soc_platform *platform); 217int snd_soc_register_platform(struct snd_soc_platform *platform);
191void snd_soc_unregister_platform(struct snd_soc_platform *platform); 218void snd_soc_unregister_platform(struct snd_soc_platform *platform);
192int snd_soc_register_codec(struct snd_soc_codec *codec); 219int snd_soc_register_codec(struct snd_soc_codec *codec);
193void snd_soc_unregister_codec(struct snd_soc_codec *codec); 220void snd_soc_unregister_codec(struct snd_soc_codec *codec);
221int snd_soc_codec_volatile_register(struct snd_soc_codec *codec, int reg);
222int snd_soc_codec_set_cache_io(struct snd_soc_codec *codec,
223 int addr_bits, int data_bits,
224 enum snd_soc_control_type control);
225
226#ifdef CONFIG_PM
227int snd_soc_suspend_device(struct device *dev);
228int snd_soc_resume_device(struct device *dev);
229#endif
194 230
195/* pcm <-> DAI connect */ 231/* pcm <-> DAI connect */
196void snd_soc_free_pcms(struct snd_soc_device *socdev); 232void snd_soc_free_pcms(struct snd_soc_device *socdev);
@@ -216,9 +252,9 @@ void snd_soc_jack_free_gpios(struct snd_soc_jack *jack, int count,
216 252
217/* codec register bit access */ 253/* codec register bit access */
218int snd_soc_update_bits(struct snd_soc_codec *codec, unsigned short reg, 254int snd_soc_update_bits(struct snd_soc_codec *codec, unsigned short reg,
219 unsigned short mask, unsigned short value); 255 unsigned int mask, unsigned int value);
220int snd_soc_test_bits(struct snd_soc_codec *codec, unsigned short reg, 256int snd_soc_test_bits(struct snd_soc_codec *codec, unsigned short reg,
221 unsigned short mask, unsigned short value); 257 unsigned int mask, unsigned int value);
222 258
223int snd_soc_new_ac97_codec(struct snd_soc_codec *codec, 259int snd_soc_new_ac97_codec(struct snd_soc_codec *codec,
224 struct snd_ac97_bus_ops *ops, int num); 260 struct snd_ac97_bus_ops *ops, int num);
@@ -356,8 +392,10 @@ struct snd_soc_codec {
356 int (*write)(struct snd_soc_codec *, unsigned int, unsigned int); 392 int (*write)(struct snd_soc_codec *, unsigned int, unsigned int);
357 int (*display_register)(struct snd_soc_codec *, char *, 393 int (*display_register)(struct snd_soc_codec *, char *,
358 size_t, unsigned int); 394 size_t, unsigned int);
395 int (*volatile_register)(unsigned int);
396 int (*readable_register)(unsigned int);
359 hw_write_t hw_write; 397 hw_write_t hw_write;
360 hw_read_t hw_read; 398 unsigned int (*hw_read)(struct snd_soc_codec *, unsigned int);
361 void *reg_cache; 399 void *reg_cache;
362 short reg_cache_size; 400 short reg_cache_size;
363 short reg_cache_step; 401 short reg_cache_step;
@@ -369,8 +407,6 @@ struct snd_soc_codec {
369 enum snd_soc_bias_level bias_level; 407 enum snd_soc_bias_level bias_level;
370 enum snd_soc_bias_level suspend_bias_level; 408 enum snd_soc_bias_level suspend_bias_level;
371 struct delayed_work delayed_work; 409 struct delayed_work delayed_work;
372 struct list_head up_list;
373 struct list_head down_list;
374 410
375 /* codec DAI's */ 411 /* codec DAI's */
376 struct snd_soc_dai *dai; 412 struct snd_soc_dai *dai;
@@ -379,6 +415,7 @@ struct snd_soc_codec {
379#ifdef CONFIG_DEBUG_FS 415#ifdef CONFIG_DEBUG_FS
380 struct dentry *debugfs_reg; 416 struct dentry *debugfs_reg;
381 struct dentry *debugfs_pop_time; 417 struct dentry *debugfs_pop_time;
418 struct dentry *debugfs_dapm;
382#endif 419#endif
383}; 420};
384 421
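
One plausible use of the new cache-I/O setup and control-type enum above, from a hypothetical codec probe (the 7-bit address / 9-bit data split is only an example):

	static int example_codec_probe(struct snd_soc_codec *codec)
	{
		return snd_soc_codec_set_cache_io(codec, 7, 9, SND_SOC_I2C);
	}
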
diff --git a/include/sound/tlv.h b/include/sound/tlv.h
index d136ea2181ed..9fd5b19ccf5c 100644
--- a/include/sound/tlv.h
+++ b/include/sound/tlv.h
@@ -35,6 +35,8 @@
35#define SNDRV_CTL_TLVT_DB_SCALE 1 /* dB scale */ 35#define SNDRV_CTL_TLVT_DB_SCALE 1 /* dB scale */
36#define SNDRV_CTL_TLVT_DB_LINEAR 2 /* linear volume */ 36#define SNDRV_CTL_TLVT_DB_LINEAR 2 /* linear volume */
37#define SNDRV_CTL_TLVT_DB_RANGE 3 /* dB range container */ 37#define SNDRV_CTL_TLVT_DB_RANGE 3 /* dB range container */
38#define SNDRV_CTL_TLVT_DB_MINMAX 4 /* dB scale with min/max */
39#define SNDRV_CTL_TLVT_DB_MINMAX_MUTE 5 /* dB scale with min/max with mute */
38 40
39#define TLV_DB_SCALE_ITEM(min, step, mute) \ 41#define TLV_DB_SCALE_ITEM(min, step, mute) \
40 SNDRV_CTL_TLVT_DB_SCALE, 2 * sizeof(unsigned int), \ 42 SNDRV_CTL_TLVT_DB_SCALE, 2 * sizeof(unsigned int), \
@@ -42,6 +44,18 @@
42#define DECLARE_TLV_DB_SCALE(name, min, step, mute) \ 44#define DECLARE_TLV_DB_SCALE(name, min, step, mute) \
43 unsigned int name[] = { TLV_DB_SCALE_ITEM(min, step, mute) } 45 unsigned int name[] = { TLV_DB_SCALE_ITEM(min, step, mute) }
44 46
47/* dB scale specified with min/max values instead of step */
48#define TLV_DB_MINMAX_ITEM(min_dB, max_dB) \
49 SNDRV_CTL_TLVT_DB_MINMAX, 2 * sizeof(unsigned int), \
50 (min_dB), (max_dB)
51#define TLV_DB_MINMAX_MUTE_ITEM(min_dB, max_dB) \
52 SNDRV_CTL_TLVT_DB_MINMAX_MUTE, 2 * sizeof(unsigned int), \
53 (min_dB), (max_dB)
54#define DECLARE_TLV_DB_MINMAX(name, min_dB, max_dB) \
55 unsigned int name[] = { TLV_DB_MINMAX_ITEM(min_dB, max_dB) }
56#define DECLARE_TLV_DB_MINMAX_MUTE(name, min_dB, max_dB) \
57 unsigned int name[] = { TLV_DB_MINMAX_MUTE_ITEM(min_dB, max_dB) }
58
45/* linear volume between min_dB and max_dB (.01dB unit) */ 59/* linear volume between min_dB and max_dB (.01dB unit) */
46#define TLV_DB_LINEAR_ITEM(min_dB, max_dB) \ 60#define TLV_DB_LINEAR_ITEM(min_dB, max_dB) \
47 SNDRV_CTL_TLVT_DB_LINEAR, 2 * sizeof(unsigned int), \ 61 SNDRV_CTL_TLVT_DB_LINEAR, 2 * sizeof(unsigned int), \
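
A sketch of the new min/max form next to the existing scale and linear items; values are in 0.01 dB units as elsewhere in this header, and the mixer control that would reference example_vol_tlv is not shown:

	/* -71.25 dB .. 0 dB described by endpoints only */
	static const DECLARE_TLV_DB_MINMAX(example_vol_tlv, -7125, 0);
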
diff --git a/include/sound/uda1380.h b/include/sound/uda1380.h
new file mode 100644
index 000000000000..381319c7000c
--- /dev/null
+++ b/include/sound/uda1380.h
@@ -0,0 +1,22 @@
1/*
2 * UDA1380 ALSA SoC Codec driver
3 *
4 * Copyright 2009 Philipp Zabel
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#ifndef __UDA1380_H
12#define __UDA1380_H
13
14struct uda1380_platform_data {
15 int gpio_power;
16 int gpio_reset;
17 int dac_clk;
18#define UDA1380_DAC_CLK_SYSCLK 0
19#define UDA1380_DAC_CLK_WSPLL 1
20};
21
22#endif /* __UDA1380_H */
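
A hypothetical board definition using the new platform data; the GPIO numbers are placeholders:

	static struct uda1380_platform_data example_uda1380_info = {
		.gpio_power = 116,
		.gpio_reset = 117,
		.dac_clk    = UDA1380_DAC_CLK_WSPLL,
	};
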
diff --git a/include/sound/version.h b/include/sound/version.h
index 456f1359e1c0..22939142dd23 100644
--- a/include/sound/version.h
+++ b/include/sound/version.h
@@ -1,3 +1,3 @@
1/* include/version.h */ 1/* include/version.h */
2#define CONFIG_SND_VERSION "1.0.20" 2#define CONFIG_SND_VERSION "1.0.21"
3#define CONFIG_SND_DATE "" 3#define CONFIG_SND_DATE ""
diff --git a/include/sound/wm8993.h b/include/sound/wm8993.h
new file mode 100644
index 000000000000..9c661f2f8cda
--- /dev/null
+++ b/include/sound/wm8993.h
@@ -0,0 +1,44 @@
1/*
2 * linux/sound/wm8993.h -- Platform data for WM8993
3 *
4 * Copyright 2009 Wolfson Microelectronics. PLC.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#ifndef __LINUX_SND_WM8993_H
12#define __LINUX_SND_WM8993_H
13
14/* Note that EQ1 only contains the enable/disable bit so will be
15 ignored but is included for simplicity.
16 */
17struct wm8993_retune_mobile_setting {
18 const char *name;
19 unsigned int rate;
20 u16 config[24];
21};
22
23struct wm8993_platform_data {
24 struct wm8993_retune_mobile_setting *retune_configs;
25 int num_retune_configs;
26
27 /* LINEOUT can be differential or single ended */
28 unsigned int lineout1_diff:1;
29 unsigned int lineout2_diff:1;
30
31 /* Common mode feedback */
32 unsigned int lineout1fb:1;
33 unsigned int lineout2fb:1;
34
 35 /* Microphone biases: 0=0.9*AVDD1 1=0.65*AVDD1 */
36 unsigned int micbias1_lvl:1;
37 unsigned int micbias2_lvl:1;
38
 39 /* Jack detect threshold levels, see datasheet for values */
40 unsigned int jd_scthr:2;
41 unsigned int jd_thr:2;
42};
43
44#endif
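
Similarly, a hypothetical wm8993 configuration exercising a few of the bitfields above (no ReTune Mobile tables supplied):

	static struct wm8993_platform_data example_wm8993_pdata = {
		.lineout1_diff = 1,	/* differential LINEOUT1 */
		.micbias1_lvl  = 1,	/* 0.65 * AVDD1 */
		.micbias2_lvl  = 1,
	};
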
diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
index 05ead6698434..444cd6ba0ba7 100644
--- a/include/sound/ymfpci.h
+++ b/include/sound/ymfpci.h
@@ -331,6 +331,7 @@ struct snd_ymfpci {
331 struct snd_ac97 *ac97; 331 struct snd_ac97 *ac97;
332 struct snd_rawmidi *rawmidi; 332 struct snd_rawmidi *rawmidi;
333 struct snd_timer *timer; 333 struct snd_timer *timer;
334 unsigned int timer_ticks;
334 335
335 struct pci_dev *pci; 336 struct pci_dev *pci;
336 struct snd_card *card; 337 struct snd_card *card;
diff --git a/include/trace/define_trace.h b/include/trace/define_trace.h
index f7a7ae1e8f90..2a4b3bf74033 100644
--- a/include/trace/define_trace.h
+++ b/include/trace/define_trace.h
@@ -26,6 +26,11 @@
26#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \ 26#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
27 DEFINE_TRACE(name) 27 DEFINE_TRACE(name)
28 28
29#undef TRACE_EVENT_FN
30#define TRACE_EVENT_FN(name, proto, args, tstruct, \
31 assign, print, reg, unreg) \
32 DEFINE_TRACE_FN(name, reg, unreg)
33
29#undef DECLARE_TRACE 34#undef DECLARE_TRACE
30#define DECLARE_TRACE(name, proto, args) \ 35#define DECLARE_TRACE(name, proto, args) \
31 DEFINE_TRACE(name) 36 DEFINE_TRACE(name)
@@ -56,6 +61,8 @@
56#include <trace/ftrace.h> 61#include <trace/ftrace.h>
57#endif 62#endif
58 63
64#undef TRACE_EVENT
65#undef TRACE_EVENT_FN
59#undef TRACE_HEADER_MULTI_READ 66#undef TRACE_HEADER_MULTI_READ
60 67
61/* Only undef what we defined in this file */ 68/* Only undef what we defined in this file */
diff --git a/include/trace/events/module.h b/include/trace/events/module.h
new file mode 100644
index 000000000000..84160fb18478
--- /dev/null
+++ b/include/trace/events/module.h
@@ -0,0 +1,126 @@
1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM module
3
4#if !defined(_TRACE_MODULE_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_MODULE_H
6
7#include <linux/tracepoint.h>
8
9#ifdef CONFIG_MODULES
10
11struct module;
12
13#define show_module_flags(flags) __print_flags(flags, "", \
14 { (1UL << TAINT_PROPRIETARY_MODULE), "P" }, \
15 { (1UL << TAINT_FORCED_MODULE), "F" }, \
16 { (1UL << TAINT_CRAP), "C" })
17
18TRACE_EVENT(module_load,
19
20 TP_PROTO(struct module *mod),
21
22 TP_ARGS(mod),
23
24 TP_STRUCT__entry(
25 __field( unsigned int, taints )
26 __string( name, mod->name )
27 ),
28
29 TP_fast_assign(
30 __entry->taints = mod->taints;
31 __assign_str(name, mod->name);
32 ),
33
34 TP_printk("%s %s", __get_str(name), show_module_flags(__entry->taints))
35);
36
37TRACE_EVENT(module_free,
38
39 TP_PROTO(struct module *mod),
40
41 TP_ARGS(mod),
42
43 TP_STRUCT__entry(
44 __string( name, mod->name )
45 ),
46
47 TP_fast_assign(
48 __assign_str(name, mod->name);
49 ),
50
51 TP_printk("%s", __get_str(name))
52);
53
54TRACE_EVENT(module_get,
55
56 TP_PROTO(struct module *mod, unsigned long ip, int refcnt),
57
58 TP_ARGS(mod, ip, refcnt),
59
60 TP_STRUCT__entry(
61 __field( unsigned long, ip )
62 __field( int, refcnt )
63 __string( name, mod->name )
64 ),
65
66 TP_fast_assign(
67 __entry->ip = ip;
68 __entry->refcnt = refcnt;
69 __assign_str(name, mod->name);
70 ),
71
72 TP_printk("%s call_site=%pf refcnt=%d",
73 __get_str(name), (void *)__entry->ip, __entry->refcnt)
74);
75
76TRACE_EVENT(module_put,
77
78 TP_PROTO(struct module *mod, unsigned long ip, int refcnt),
79
80 TP_ARGS(mod, ip, refcnt),
81
82 TP_STRUCT__entry(
83 __field( unsigned long, ip )
84 __field( int, refcnt )
85 __string( name, mod->name )
86 ),
87
88 TP_fast_assign(
89 __entry->ip = ip;
90 __entry->refcnt = refcnt;
91 __assign_str(name, mod->name);
92 ),
93
94 TP_printk("%s call_site=%pf refcnt=%d",
95 __get_str(name), (void *)__entry->ip, __entry->refcnt)
96);
97
98TRACE_EVENT(module_request,
99
100 TP_PROTO(char *name, bool wait, unsigned long ip),
101
102 TP_ARGS(name, wait, ip),
103
104 TP_STRUCT__entry(
105 __field( bool, wait )
106 __field( unsigned long, ip )
107 __string( name, name )
108 ),
109
110 TP_fast_assign(
111 __entry->wait = wait;
112 __entry->ip = ip;
113 __assign_str(name, name);
114 ),
115
116 TP_printk("%s wait=%d call_site=%pf",
117 __get_str(name), (int)__entry->wait, (void *)__entry->ip)
118);
119
120#endif /* CONFIG_MODULES */
121
122#endif /* _TRACE_MODULE_H */
123
124/* This part must be outside protection */
125#include <trace/define_trace.h>
126
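
The TRACE_EVENT() definitions above generate trace_module_load(), trace_module_free(), trace_module_get(), trace_module_put() and trace_module_request() calls; a sketch of firing one of them from core module code (the wrapper function is invented):

	static void example_notify_load(struct module *mod)
	{
		trace_module_load(mod);		/* records taint flags and module name */
	}
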
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 8949bb7eb082..b48f1ad7c946 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -94,6 +94,7 @@ TRACE_EVENT(sched_wakeup,
94 __field( pid_t, pid ) 94 __field( pid_t, pid )
95 __field( int, prio ) 95 __field( int, prio )
96 __field( int, success ) 96 __field( int, success )
97 __field( int, cpu )
97 ), 98 ),
98 99
99 TP_fast_assign( 100 TP_fast_assign(
@@ -101,11 +102,12 @@ TRACE_EVENT(sched_wakeup,
101 __entry->pid = p->pid; 102 __entry->pid = p->pid;
102 __entry->prio = p->prio; 103 __entry->prio = p->prio;
103 __entry->success = success; 104 __entry->success = success;
105 __entry->cpu = task_cpu(p);
104 ), 106 ),
105 107
106 TP_printk("task %s:%d [%d] success=%d", 108 TP_printk("task %s:%d [%d] success=%d [%03d]",
107 __entry->comm, __entry->pid, __entry->prio, 109 __entry->comm, __entry->pid, __entry->prio,
108 __entry->success) 110 __entry->success, __entry->cpu)
109); 111);
110 112
111/* 113/*
@@ -125,6 +127,7 @@ TRACE_EVENT(sched_wakeup_new,
125 __field( pid_t, pid ) 127 __field( pid_t, pid )
126 __field( int, prio ) 128 __field( int, prio )
127 __field( int, success ) 129 __field( int, success )
130 __field( int, cpu )
128 ), 131 ),
129 132
130 TP_fast_assign( 133 TP_fast_assign(
@@ -132,11 +135,12 @@ TRACE_EVENT(sched_wakeup_new,
132 __entry->pid = p->pid; 135 __entry->pid = p->pid;
133 __entry->prio = p->prio; 136 __entry->prio = p->prio;
134 __entry->success = success; 137 __entry->success = success;
138 __entry->cpu = task_cpu(p);
135 ), 139 ),
136 140
137 TP_printk("task %s:%d [%d] success=%d", 141 TP_printk("task %s:%d [%d] success=%d [%03d]",
138 __entry->comm, __entry->pid, __entry->prio, 142 __entry->comm, __entry->pid, __entry->prio,
139 __entry->success) 143 __entry->success, __entry->cpu)
140); 144);
141 145
142/* 146/*
@@ -340,6 +344,101 @@ TRACE_EVENT(sched_signal_send,
340 __entry->sig, __entry->comm, __entry->pid) 344 __entry->sig, __entry->comm, __entry->pid)
341); 345);
342 346
347/*
348 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
349 * adding sched_stat support to SCHED_FIFO/RR would be welcome.
350 */
351
352/*
353 * Tracepoint for accounting wait time (time the task is runnable
354 * but not actually running due to scheduler contention).
355 */
356TRACE_EVENT(sched_stat_wait,
357
358 TP_PROTO(struct task_struct *tsk, u64 delay),
359
360 TP_ARGS(tsk, delay),
361
362 TP_STRUCT__entry(
363 __array( char, comm, TASK_COMM_LEN )
364 __field( pid_t, pid )
365 __field( u64, delay )
366 ),
367
368 TP_fast_assign(
369 memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
370 __entry->pid = tsk->pid;
371 __entry->delay = delay;
372 )
373 TP_perf_assign(
374 __perf_count(delay);
375 ),
376
377 TP_printk("task: %s:%d wait: %Lu [ns]",
378 __entry->comm, __entry->pid,
379 (unsigned long long)__entry->delay)
380);
381
382/*
383 * Tracepoint for accounting sleep time (time the task is not runnable,
384 * including iowait, see below).
385 */
386TRACE_EVENT(sched_stat_sleep,
387
388 TP_PROTO(struct task_struct *tsk, u64 delay),
389
390 TP_ARGS(tsk, delay),
391
392 TP_STRUCT__entry(
393 __array( char, comm, TASK_COMM_LEN )
394 __field( pid_t, pid )
395 __field( u64, delay )
396 ),
397
398 TP_fast_assign(
399 memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
400 __entry->pid = tsk->pid;
401 __entry->delay = delay;
402 )
403 TP_perf_assign(
404 __perf_count(delay);
405 ),
406
407 TP_printk("task: %s:%d sleep: %Lu [ns]",
408 __entry->comm, __entry->pid,
409 (unsigned long long)__entry->delay)
410);
411
412/*
413 * Tracepoint for accounting iowait time (time the task is not runnable
414 * due to waiting on IO to complete).
415 */
416TRACE_EVENT(sched_stat_iowait,
417
418 TP_PROTO(struct task_struct *tsk, u64 delay),
419
420 TP_ARGS(tsk, delay),
421
422 TP_STRUCT__entry(
423 __array( char, comm, TASK_COMM_LEN )
424 __field( pid_t, pid )
425 __field( u64, delay )
426 ),
427
428 TP_fast_assign(
429 memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
430 __entry->pid = tsk->pid;
431 __entry->delay = delay;
432 )
433 TP_perf_assign(
434 __perf_count(delay);
435 ),
436
437 TP_printk("task: %s:%d iowait: %Lu [ns]",
438 __entry->comm, __entry->pid,
439 (unsigned long long)__entry->delay)
440);
441
343#endif /* _TRACE_SCHED_H */ 442#endif /* _TRACE_SCHED_H */
344 443
345/* This part must be outside protection */ 444/* This part must be outside protection */
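
A sketch of feeding the new sched_stat tracepoints; the wrapper and the way the delta is obtained are assumptions, only the tracepoint call itself comes from the definitions above:

	static void example_account_wait(struct task_struct *tsk, u64 start_ns, u64 now_ns)
	{
		trace_sched_stat_wait(tsk, now_ns - start_ns);	/* delay in nanoseconds */
	}
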
diff --git a/include/trace/events/syscalls.h b/include/trace/events/syscalls.h
new file mode 100644
index 000000000000..397dff2dbd5a
--- /dev/null
+++ b/include/trace/events/syscalls.h
@@ -0,0 +1,70 @@
1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM syscalls
3
4#if !defined(_TRACE_EVENTS_SYSCALLS_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_EVENTS_SYSCALLS_H
6
7#include <linux/tracepoint.h>
8
9#include <asm/ptrace.h>
10#include <asm/syscall.h>
11
12
13#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
14
15extern void syscall_regfunc(void);
16extern void syscall_unregfunc(void);
17
18TRACE_EVENT_FN(sys_enter,
19
20 TP_PROTO(struct pt_regs *regs, long id),
21
22 TP_ARGS(regs, id),
23
24 TP_STRUCT__entry(
25 __field( long, id )
26 __array( unsigned long, args, 6 )
27 ),
28
29 TP_fast_assign(
30 __entry->id = id;
31 syscall_get_arguments(current, regs, 0, 6, __entry->args);
32 ),
33
34 TP_printk("NR %ld (%lx, %lx, %lx, %lx, %lx, %lx)",
35 __entry->id,
36 __entry->args[0], __entry->args[1], __entry->args[2],
37 __entry->args[3], __entry->args[4], __entry->args[5]),
38
39 syscall_regfunc, syscall_unregfunc
40);
41
42TRACE_EVENT_FN(sys_exit,
43
44 TP_PROTO(struct pt_regs *regs, long ret),
45
46 TP_ARGS(regs, ret),
47
48 TP_STRUCT__entry(
49 __field( long, id )
50 __field( long, ret )
51 ),
52
53 TP_fast_assign(
54 __entry->id = syscall_get_nr(current, regs);
55 __entry->ret = ret;
56 ),
57
58 TP_printk("NR %ld = %ld",
59 __entry->id, __entry->ret),
60
61 syscall_regfunc, syscall_unregfunc
62);
63
64#endif /* CONFIG_HAVE_SYSCALL_TRACEPOINTS */
65
66#endif /* _TRACE_EVENTS_SYSCALLS_H */
67
68/* This part must be outside protection */
69#include <trace/define_trace.h>
70
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index f64fbaae781a..308bafd93325 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -21,11 +21,14 @@
21#undef __field 21#undef __field
22#define __field(type, item) type item; 22#define __field(type, item) type item;
23 23
24#undef __field_ext
25#define __field_ext(type, item, filter_type) type item;
26
24#undef __array 27#undef __array
25#define __array(type, item, len) type item[len]; 28#define __array(type, item, len) type item[len];
26 29
27#undef __dynamic_array 30#undef __dynamic_array
28#define __dynamic_array(type, item, len) unsigned short __data_loc_##item; 31#define __dynamic_array(type, item, len) u32 __data_loc_##item;
29 32
30#undef __string 33#undef __string
31#define __string(item, src) __dynamic_array(char, item, -1) 34#define __string(item, src) __dynamic_array(char, item, -1)
@@ -42,6 +45,16 @@
42 }; \ 45 }; \
43 static struct ftrace_event_call event_##name 46 static struct ftrace_event_call event_##name
44 47
48#undef __cpparg
49#define __cpparg(arg...) arg
50
51/* Callbacks are meaningless to ftrace. */
52#undef TRACE_EVENT_FN
53#define TRACE_EVENT_FN(name, proto, args, tstruct, \
54 assign, print, reg, unreg) \
55 TRACE_EVENT(name, __cpparg(proto), __cpparg(args), \
56 __cpparg(tstruct), __cpparg(assign), __cpparg(print)) \
57
45#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) 58#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
46 59
47 60
@@ -51,23 +64,27 @@
51 * Include the following: 64 * Include the following:
52 * 65 *
53 * struct ftrace_data_offsets_<call> { 66 * struct ftrace_data_offsets_<call> {
54 * int <item1>; 67 * u32 <item1>;
55 * int <item2>; 68 * u32 <item2>;
56 * [...] 69 * [...]
57 * }; 70 * };
58 * 71 *
59 * The __dynamic_array() macro will create each int <item>, this is 72 * The __dynamic_array() macro will create each u32 <item>, this is
60 * to keep the offset of each array from the beginning of the event. 73 * to keep the offset of each array from the beginning of the event.
74 * The size of an array is also encoded, in the higher 16 bits of <item>.
61 */ 75 */
62 76
63#undef __field 77#undef __field
64#define __field(type, item); 78#define __field(type, item)
79
80#undef __field_ext
81#define __field_ext(type, item, filter_type)
65 82
66#undef __array 83#undef __array
67#define __array(type, item, len) 84#define __array(type, item, len)
68 85
69#undef __dynamic_array 86#undef __dynamic_array
70#define __dynamic_array(type, item, len) int item; 87#define __dynamic_array(type, item, len) u32 item;
71 88
72#undef __string 89#undef __string
73#define __string(item, src) __dynamic_array(char, item, -1) 90#define __string(item, src) __dynamic_array(char, item, -1)
@@ -109,6 +126,9 @@
109 if (!ret) \ 126 if (!ret) \
110 return 0; 127 return 0;
111 128
129#undef __field_ext
130#define __field_ext(type, item, filter_type) __field(type, item)
131
112#undef __array 132#undef __array
113#define __array(type, item, len) \ 133#define __array(type, item, len) \
114 ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \ 134 ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \
@@ -120,7 +140,7 @@
120 140
121#undef __dynamic_array 141#undef __dynamic_array
122#define __dynamic_array(type, item, len) \ 142#define __dynamic_array(type, item, len) \
123 ret = trace_seq_printf(s, "\tfield:__data_loc " #item ";\t" \ 143 ret = trace_seq_printf(s, "\tfield:__data_loc " #type "[] " #item ";\t"\
124 "offset:%u;\tsize:%u;\n", \ 144 "offset:%u;\tsize:%u;\n", \
125 (unsigned int)offsetof(typeof(field), \ 145 (unsigned int)offsetof(typeof(field), \
126 __data_loc_##item), \ 146 __data_loc_##item), \
@@ -150,7 +170,8 @@
150#undef TRACE_EVENT 170#undef TRACE_EVENT
151#define TRACE_EVENT(call, proto, args, tstruct, func, print) \ 171#define TRACE_EVENT(call, proto, args, tstruct, func, print) \
152static int \ 172static int \
153ftrace_format_##call(struct trace_seq *s) \ 173ftrace_format_##call(struct ftrace_event_call *unused, \
174 struct trace_seq *s) \
154{ \ 175{ \
155 struct ftrace_raw_##call field __attribute__((unused)); \ 176 struct ftrace_raw_##call field __attribute__((unused)); \
156 int ret = 0; \ 177 int ret = 0; \
@@ -210,7 +231,7 @@ ftrace_format_##call(struct trace_seq *s) \
210 231
211#undef __get_dynamic_array 232#undef __get_dynamic_array
212#define __get_dynamic_array(field) \ 233#define __get_dynamic_array(field) \
213 ((void *)__entry + __entry->__data_loc_##field) 234 ((void *)__entry + (__entry->__data_loc_##field & 0xffff))
214 235
215#undef __get_str 236#undef __get_str
216#define __get_str(field) (char *)__get_dynamic_array(field) 237#define __get_str(field) (char *)__get_dynamic_array(field)
@@ -263,28 +284,33 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
263 284
264#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) 285#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
265 286
266#undef __field 287#undef __field_ext
267#define __field(type, item) \ 288#define __field_ext(type, item, filter_type) \
268 ret = trace_define_field(event_call, #type, #item, \ 289 ret = trace_define_field(event_call, #type, #item, \
269 offsetof(typeof(field), item), \ 290 offsetof(typeof(field), item), \
270 sizeof(field.item), is_signed_type(type)); \ 291 sizeof(field.item), \
292 is_signed_type(type), filter_type); \
271 if (ret) \ 293 if (ret) \
272 return ret; 294 return ret;
273 295
296#undef __field
297#define __field(type, item) __field_ext(type, item, FILTER_OTHER)
298
274#undef __array 299#undef __array
275#define __array(type, item, len) \ 300#define __array(type, item, len) \
276 BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \ 301 BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \
277 ret = trace_define_field(event_call, #type "[" #len "]", #item, \ 302 ret = trace_define_field(event_call, #type "[" #len "]", #item, \
278 offsetof(typeof(field), item), \ 303 offsetof(typeof(field), item), \
279 sizeof(field.item), 0); \ 304 sizeof(field.item), 0, FILTER_OTHER); \
280 if (ret) \ 305 if (ret) \
281 return ret; 306 return ret;
282 307
283#undef __dynamic_array 308#undef __dynamic_array
284#define __dynamic_array(type, item, len) \ 309#define __dynamic_array(type, item, len) \
285 ret = trace_define_field(event_call, "__data_loc" "[" #type "]", #item,\ 310 ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \
286 offsetof(typeof(field), __data_loc_##item), \ 311 offsetof(typeof(field), __data_loc_##item), \
287 sizeof(field.__data_loc_##item), 0); 312 sizeof(field.__data_loc_##item), 0, \
313 FILTER_OTHER);
288 314
289#undef __string 315#undef __string
290#define __string(item, src) __dynamic_array(char, item, -1) 316#define __string(item, src) __dynamic_array(char, item, -1)
@@ -292,17 +318,14 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
292#undef TRACE_EVENT 318#undef TRACE_EVENT
293#define TRACE_EVENT(call, proto, args, tstruct, func, print) \ 319#define TRACE_EVENT(call, proto, args, tstruct, func, print) \
294int \ 320int \
295ftrace_define_fields_##call(void) \ 321ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
296{ \ 322{ \
297 struct ftrace_raw_##call field; \ 323 struct ftrace_raw_##call field; \
298 struct ftrace_event_call *event_call = &event_##call; \
299 int ret; \ 324 int ret; \
300 \ 325 \
301 __common_field(int, type, 1); \ 326 ret = trace_define_common_fields(event_call); \
302 __common_field(unsigned char, flags, 0); \ 327 if (ret) \
303 __common_field(unsigned char, preempt_count, 0); \ 328 return ret; \
304 __common_field(int, pid, 1); \
305 __common_field(int, tgid, 1); \
306 \ 329 \
307 tstruct; \ 330 tstruct; \
308 \ 331 \
@@ -321,6 +344,9 @@ ftrace_define_fields_##call(void) \
321#undef __field 344#undef __field
322#define __field(type, item) 345#define __field(type, item)
323 346
347#undef __field_ext
348#define __field_ext(type, item, filter_type)
349
324#undef __array 350#undef __array
325#define __array(type, item, len) 351#define __array(type, item, len)
326 352
@@ -328,6 +354,7 @@ ftrace_define_fields_##call(void) \
328#define __dynamic_array(type, item, len) \ 354#define __dynamic_array(type, item, len) \
329 __data_offsets->item = __data_size + \ 355 __data_offsets->item = __data_size + \
330 offsetof(typeof(*entry), __data); \ 356 offsetof(typeof(*entry), __data); \
357 __data_offsets->item |= (len * sizeof(type)) << 16; \
331 __data_size += (len) * sizeof(type); 358 __data_size += (len) * sizeof(type);
332 359
333#undef __string 360#undef __string
@@ -433,13 +460,15 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
433 * { 460 * {
434 * struct ring_buffer_event *event; 461 * struct ring_buffer_event *event;
435 * struct ftrace_raw_<call> *entry; <-- defined in stage 1 462 * struct ftrace_raw_<call> *entry; <-- defined in stage 1
463 * struct ring_buffer *buffer;
436 * unsigned long irq_flags; 464 * unsigned long irq_flags;
437 * int pc; 465 * int pc;
438 * 466 *
439 * local_save_flags(irq_flags); 467 * local_save_flags(irq_flags);
440 * pc = preempt_count(); 468 * pc = preempt_count();
441 * 469 *
442 * event = trace_current_buffer_lock_reserve(event_<call>.id, 470 * event = trace_current_buffer_lock_reserve(&buffer,
471 * event_<call>.id,
443 * sizeof(struct ftrace_raw_<call>), 472 * sizeof(struct ftrace_raw_<call>),
444 * irq_flags, pc); 473 * irq_flags, pc);
445 * if (!event) 474 * if (!event)
@@ -449,7 +478,7 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
449 * <assign>; <-- Here we assign the entries by the __field and 478 * <assign>; <-- Here we assign the entries by the __field and
450 * __array macros. 479 * __array macros.
451 * 480 *
452 * trace_current_buffer_unlock_commit(event, irq_flags, pc); 481 * trace_current_buffer_unlock_commit(buffer, event, irq_flags, pc);
453 * } 482 * }
454 * 483 *
455 * static int ftrace_raw_reg_event_<call>(void) 484 * static int ftrace_raw_reg_event_<call>(void)
@@ -541,6 +570,7 @@ static void ftrace_raw_event_##call(proto) \
541 struct ftrace_event_call *event_call = &event_##call; \ 570 struct ftrace_event_call *event_call = &event_##call; \
542 struct ring_buffer_event *event; \ 571 struct ring_buffer_event *event; \
543 struct ftrace_raw_##call *entry; \ 572 struct ftrace_raw_##call *entry; \
573 struct ring_buffer *buffer; \
544 unsigned long irq_flags; \ 574 unsigned long irq_flags; \
545 int __data_size; \ 575 int __data_size; \
546 int pc; \ 576 int pc; \
@@ -550,7 +580,8 @@ static void ftrace_raw_event_##call(proto) \
550 \ 580 \
551 __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \ 581 __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
552 \ 582 \
553 event = trace_current_buffer_lock_reserve(event_##call.id, \ 583 event = trace_current_buffer_lock_reserve(&buffer, \
584 event_##call.id, \
554 sizeof(*entry) + __data_size, \ 585 sizeof(*entry) + __data_size, \
555 irq_flags, pc); \ 586 irq_flags, pc); \
556 if (!event) \ 587 if (!event) \
@@ -562,11 +593,12 @@ static void ftrace_raw_event_##call(proto) \
562 \ 593 \
563 { assign; } \ 594 { assign; } \
564 \ 595 \
565 if (!filter_current_check_discard(event_call, entry, event)) \ 596 if (!filter_current_check_discard(buffer, event_call, entry, event)) \
566 trace_nowake_buffer_unlock_commit(event, irq_flags, pc); \ 597 trace_nowake_buffer_unlock_commit(buffer, \
598 event, irq_flags, pc); \
567} \ 599} \
568 \ 600 \
569static int ftrace_raw_reg_event_##call(void) \ 601static int ftrace_raw_reg_event_##call(void *ptr) \
570{ \ 602{ \
571 int ret; \ 603 int ret; \
572 \ 604 \
@@ -577,7 +609,7 @@ static int ftrace_raw_reg_event_##call(void) \
577 return ret; \ 609 return ret; \
578} \ 610} \
579 \ 611 \
580static void ftrace_raw_unreg_event_##call(void) \ 612static void ftrace_raw_unreg_event_##call(void *ptr) \
581{ \ 613{ \
582 unregister_trace_##call(ftrace_raw_event_##call); \ 614 unregister_trace_##call(ftrace_raw_event_##call); \
583} \ 615} \
@@ -595,7 +627,6 @@ static int ftrace_raw_init_event_##call(void) \
595 return -ENODEV; \ 627 return -ENODEV; \
596 event_##call.id = id; \ 628 event_##call.id = id; \
597 INIT_LIST_HEAD(&event_##call.fields); \ 629 INIT_LIST_HEAD(&event_##call.fields); \
598 init_preds(&event_##call); \
599 return 0; \ 630 return 0; \
600} \ 631} \
601 \ 632 \
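
The __dynamic_array() change above packs the array length into the upper 16 bits of the u32 __data_loc word and keeps the byte offset in the lower 16, which is why __get_dynamic_array() now masks with 0xffff. A small decoding sketch (the helper name is made up):

	static inline void example_decode_data_loc(u32 data_loc,
						   unsigned int *offset,
						   unsigned int *len)
	{
		*offset = data_loc & 0xffff;	/* offset of the array within the event record */
		*len    = data_loc >> 16;	/* array size in bytes */
	}
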
diff --git a/include/trace/syscall.h b/include/trace/syscall.h
index 8cfe515cbc47..5dc283ba5ae0 100644
--- a/include/trace/syscall.h
+++ b/include/trace/syscall.h
@@ -1,8 +1,13 @@
1#ifndef _TRACE_SYSCALL_H 1#ifndef _TRACE_SYSCALL_H
2#define _TRACE_SYSCALL_H 2#define _TRACE_SYSCALL_H
3 3
4#include <linux/tracepoint.h>
5#include <linux/unistd.h>
6#include <linux/ftrace_event.h>
7
4#include <asm/ptrace.h> 8#include <asm/ptrace.h>
5 9
10
6/* 11/*
7 * A syscall entry in the ftrace syscalls array. 12 * A syscall entry in the ftrace syscalls array.
8 * 13 *
@@ -10,26 +15,49 @@
10 * @nb_args: number of parameters it takes 15 * @nb_args: number of parameters it takes
11 * @types: list of types as strings 16 * @types: list of types as strings
12 * @args: list of args as strings (args[i] matches types[i]) 17 * @args: list of args as strings (args[i] matches types[i])
18 * @enter_id: associated ftrace enter event id
19 * @exit_id: associated ftrace exit event id
20 * @enter_event: associated syscall_enter trace event
21 * @exit_event: associated syscall_exit trace event
13 */ 22 */
14struct syscall_metadata { 23struct syscall_metadata {
15 const char *name; 24 const char *name;
16 int nb_args; 25 int nb_args;
17 const char **types; 26 const char **types;
18 const char **args; 27 const char **args;
28 int enter_id;
29 int exit_id;
30
31 struct ftrace_event_call *enter_event;
32 struct ftrace_event_call *exit_event;
19}; 33};
20 34
21#ifdef CONFIG_FTRACE_SYSCALLS 35#ifdef CONFIG_FTRACE_SYSCALLS
22extern void arch_init_ftrace_syscalls(void);
23extern struct syscall_metadata *syscall_nr_to_meta(int nr); 36extern struct syscall_metadata *syscall_nr_to_meta(int nr);
24extern void start_ftrace_syscalls(void); 37extern int syscall_name_to_nr(char *name);
25extern void stop_ftrace_syscalls(void); 38void set_syscall_enter_id(int num, int id);
26extern void ftrace_syscall_enter(struct pt_regs *regs); 39void set_syscall_exit_id(int num, int id);
27extern void ftrace_syscall_exit(struct pt_regs *regs); 40extern struct trace_event event_syscall_enter;
28#else 41extern struct trace_event event_syscall_exit;
29static inline void start_ftrace_syscalls(void) { } 42extern int reg_event_syscall_enter(void *ptr);
30static inline void stop_ftrace_syscalls(void) { } 43extern void unreg_event_syscall_enter(void *ptr);
31static inline void ftrace_syscall_enter(struct pt_regs *regs) { } 44extern int reg_event_syscall_exit(void *ptr);
32static inline void ftrace_syscall_exit(struct pt_regs *regs) { } 45extern void unreg_event_syscall_exit(void *ptr);
46extern int syscall_enter_format(struct ftrace_event_call *call,
47 struct trace_seq *s);
48extern int syscall_exit_format(struct ftrace_event_call *call,
49 struct trace_seq *s);
50extern int syscall_enter_define_fields(struct ftrace_event_call *call);
51extern int syscall_exit_define_fields(struct ftrace_event_call *call);
52enum print_line_t print_syscall_enter(struct trace_iterator *iter, int flags);
53enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags);
54#endif
55#ifdef CONFIG_EVENT_PROFILE
56int reg_prof_syscall_enter(char *name);
57void unreg_prof_syscall_enter(char *name);
58int reg_prof_syscall_exit(char *name);
59void unreg_prof_syscall_exit(char *name);
60
33#endif 61#endif
34 62
35#endif /* _TRACE_SYSCALL_H */ 63#endif /* _TRACE_SYSCALL_H */
diff --git a/init/Kconfig b/init/Kconfig
index 3f7e60995c80..8e8b76d8a272 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -316,38 +316,28 @@ choice
316 prompt "RCU Implementation" 316 prompt "RCU Implementation"
317 default TREE_RCU 317 default TREE_RCU
318 318
319config CLASSIC_RCU
320 bool "Classic RCU"
321 help
322 This option selects the classic RCU implementation that is
323 designed for best read-side performance on non-realtime
324 systems.
325
326 Select this option if you are unsure.
327
328config TREE_RCU 319config TREE_RCU
329 bool "Tree-based hierarchical RCU" 320 bool "Tree-based hierarchical RCU"
330 help 321 help
331 This option selects the RCU implementation that is 322 This option selects the RCU implementation that is
332 designed for very large SMP system with hundreds or 323 designed for very large SMP system with hundreds or
333 thousands of CPUs. 324 thousands of CPUs. It also scales down nicely to
325 smaller systems.
334 326
335config PREEMPT_RCU 327config TREE_PREEMPT_RCU
336 bool "Preemptible RCU" 328 bool "Preemptable tree-based hierarchical RCU"
337 depends on PREEMPT 329 depends on PREEMPT
338 help 330 help
339 This option reduces the latency of the kernel by making certain 331 This option selects the RCU implementation that is
340 RCU sections preemptible. Normally RCU code is non-preemptible, if 332 designed for very large SMP systems with hundreds or
341 this option is selected then read-only RCU sections become 333 thousands of CPUs, but for which real-time response
342 preemptible. This helps latency, but may expose bugs due to 334 is also required.
343 now-naive assumptions about each RCU read-side critical section
344 remaining on a given CPU through its execution.
345 335
346endchoice 336endchoice
347 337
348config RCU_TRACE 338config RCU_TRACE
349 bool "Enable tracing for RCU" 339 bool "Enable tracing for RCU"
350 depends on TREE_RCU || PREEMPT_RCU 340 depends on TREE_RCU || TREE_PREEMPT_RCU
351 help 341 help
352 This option provides tracing in RCU which presents stats 342 This option provides tracing in RCU which presents stats
353 in debugfs for debugging RCU implementation. 343 in debugfs for debugging RCU implementation.
@@ -359,7 +349,7 @@ config RCU_FANOUT
359 int "Tree-based hierarchical RCU fanout value" 349 int "Tree-based hierarchical RCU fanout value"
360 range 2 64 if 64BIT 350 range 2 64 if 64BIT
361 range 2 32 if !64BIT 351 range 2 32 if !64BIT
362 depends on TREE_RCU 352 depends on TREE_RCU || TREE_PREEMPT_RCU
363 default 64 if 64BIT 353 default 64 if 64BIT
364 default 32 if !64BIT 354 default 32 if !64BIT
365 help 355 help
@@ -374,7 +364,7 @@ config RCU_FANOUT
374 364
375config RCU_FANOUT_EXACT 365config RCU_FANOUT_EXACT
376 bool "Disable tree-based hierarchical RCU auto-balancing" 366 bool "Disable tree-based hierarchical RCU auto-balancing"
377 depends on TREE_RCU 367 depends on TREE_RCU || TREE_PREEMPT_RCU
378 default n 368 default n
379 help 369 help
380 This option forces use of the exact RCU_FANOUT value specified, 370 This option forces use of the exact RCU_FANOUT value specified,
@@ -387,18 +377,12 @@ config RCU_FANOUT_EXACT
387 Say N if unsure. 377 Say N if unsure.
388 378
389config TREE_RCU_TRACE 379config TREE_RCU_TRACE
390 def_bool RCU_TRACE && TREE_RCU 380 def_bool RCU_TRACE && ( TREE_RCU || TREE_PREEMPT_RCU )
391 select DEBUG_FS
392 help
393 This option provides tracing for the TREE_RCU implementation,
394 permitting Makefile to trivially select kernel/rcutree_trace.c.
395
396config PREEMPT_RCU_TRACE
397 def_bool RCU_TRACE && PREEMPT_RCU
398 select DEBUG_FS 381 select DEBUG_FS
399 help 382 help
400 This option provides tracing for the PREEMPT_RCU implementation, 383 This option provides tracing for the TREE_RCU and
401 permitting Makefile to trivially select kernel/rcupreempt_trace.c. 384 TREE_PREEMPT_RCU implementations, permitting Makefile to
385 trivially select kernel/rcutree_trace.c.
402 386
403endmenu # "RCU Subsystem" 387endmenu # "RCU Subsystem"
404 388
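The Kconfig change above replaces CLASSIC_RCU and PREEMPT_RCU with the two tree variants, but the read-side API is unchanged, so code built against either choice looks the same. A minimal sketch of that common usage, with a hypothetical struct demo_cfg that is not part of this patch:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_cfg {
	int value;
	struct rcu_head rcu;
};

static struct demo_cfg *cur_cfg;	/* hypothetical RCU-protected pointer */

static int demo_read(void)
{
	struct demo_cfg *cfg;
	int v = -1;

	rcu_read_lock();		/* preemptible under TREE_PREEMPT_RCU */
	cfg = rcu_dereference(cur_cfg);
	if (cfg)
		v = cfg->value;
	rcu_read_unlock();
	return v;
}

static void demo_free(struct rcu_head *head)
{
	kfree(container_of(head, struct demo_cfg, rcu));
}

static void demo_update(int value)	/* updates assumed serialized by caller */
{
	struct demo_cfg *new_cfg, *old_cfg;

	new_cfg = kmalloc(sizeof(*new_cfg), GFP_KERNEL);
	if (!new_cfg)
		return;
	new_cfg->value = value;
	old_cfg = cur_cfg;
	rcu_assign_pointer(cur_cfg, new_cfg);
	if (old_cfg)
		call_rcu(&old_cfg->rcu, demo_free);	/* free after a grace period */
}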
diff --git a/init/main.c b/init/main.c
index 11f4f145be3f..b34fd8e5edef 100644
--- a/init/main.c
+++ b/init/main.c
@@ -451,6 +451,7 @@ static noinline void __init_refok rest_init(void)
451{ 451{
452 int pid; 452 int pid;
453 453
454 rcu_scheduler_starting();
454 kernel_thread(kernel_init, NULL, CLONE_FS | CLONE_SIGHAND); 455 kernel_thread(kernel_init, NULL, CLONE_FS | CLONE_SIGHAND);
455 numa_default_policy(); 456 numa_default_policy();
456 pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES); 457 pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES);
@@ -462,7 +463,6 @@ static noinline void __init_refok rest_init(void)
462 * at least once to get things moving: 463 * at least once to get things moving:
463 */ 464 */
464 init_idle_bootup_task(current); 465 init_idle_bootup_task(current);
465 rcu_scheduler_starting();
466 preempt_enable_no_resched(); 466 preempt_enable_no_resched();
467 schedule(); 467 schedule();
468 preempt_disable(); 468 preempt_disable();
@@ -631,7 +631,6 @@ asmlinkage void __init start_kernel(void)
631 softirq_init(); 631 softirq_init();
632 timekeeping_init(); 632 timekeeping_init();
633 time_init(); 633 time_init();
634 sched_clock_init();
635 profile_init(); 634 profile_init();
636 if (!irqs_disabled()) 635 if (!irqs_disabled())
637 printk(KERN_CRIT "start_kernel(): bug: interrupts were " 636 printk(KERN_CRIT "start_kernel(): bug: interrupts were "
@@ -682,6 +681,7 @@ asmlinkage void __init start_kernel(void)
682 numa_policy_init(); 681 numa_policy_init();
683 if (late_time_init) 682 if (late_time_init)
684 late_time_init(); 683 late_time_init();
684 sched_clock_init();
685 calibrate_delay(); 685 calibrate_delay();
686 pidmap_init(); 686 pidmap_init();
687 anon_vma_init(); 687 anon_vma_init();
diff --git a/kernel/Makefile b/kernel/Makefile
index 2093a691f1c2..b833bd5cc127 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -80,11 +80,9 @@ obj-$(CONFIG_DETECT_HUNG_TASK) += hung_task.o
80obj-$(CONFIG_GENERIC_HARDIRQS) += irq/ 80obj-$(CONFIG_GENERIC_HARDIRQS) += irq/
81obj-$(CONFIG_SECCOMP) += seccomp.o 81obj-$(CONFIG_SECCOMP) += seccomp.o
82obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o 82obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
83obj-$(CONFIG_CLASSIC_RCU) += rcuclassic.o
84obj-$(CONFIG_TREE_RCU) += rcutree.o 83obj-$(CONFIG_TREE_RCU) += rcutree.o
85obj-$(CONFIG_PREEMPT_RCU) += rcupreempt.o 84obj-$(CONFIG_TREE_PREEMPT_RCU) += rcutree.o
86obj-$(CONFIG_TREE_RCU_TRACE) += rcutree_trace.o 85obj-$(CONFIG_TREE_RCU_TRACE) += rcutree_trace.o
87obj-$(CONFIG_PREEMPT_RCU_TRACE) += rcupreempt_trace.o
88obj-$(CONFIG_RELAY) += relay.o 86obj-$(CONFIG_RELAY) += relay.o
89obj-$(CONFIG_SYSCTL) += utsname_sysctl.o 87obj-$(CONFIG_SYSCTL) += utsname_sysctl.o
90obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o 88obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o
diff --git a/kernel/acct.c b/kernel/acct.c
index 9f3391090b3e..9a4715a2f6bf 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -491,13 +491,17 @@ static void do_acct_process(struct bsd_acct_struct *acct,
491 u64 run_time; 491 u64 run_time;
492 struct timespec uptime; 492 struct timespec uptime;
493 struct tty_struct *tty; 493 struct tty_struct *tty;
494 const struct cred *orig_cred;
495
496 /* Perform file operations on behalf of whoever enabled accounting */
497 orig_cred = override_creds(file->f_cred);
494 498
495 /* 499 /*
496 * First check to see if there is enough free_space to continue 500 * First check to see if there is enough free_space to continue
497 * the process accounting system. 501 * the process accounting system.
498 */ 502 */
499 if (!check_free_space(acct, file)) 503 if (!check_free_space(acct, file))
500 return; 504 goto out;
501 505
502 /* 506 /*
503 * Fill the accounting struct with the needed info as recorded 507 * Fill the accounting struct with the needed info as recorded
@@ -578,6 +582,8 @@ static void do_acct_process(struct bsd_acct_struct *acct,
578 sizeof(acct_t), &file->f_pos); 582 sizeof(acct_t), &file->f_pos);
579 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim; 583 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
580 set_fs(fs); 584 set_fs(fs);
585out:
586 revert_creds(orig_cred);
581} 587}
582 588
583/** 589/**
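The do_acct_process() change above performs the accounting write under override_creds(file->f_cred) and restores the caller with revert_creds() on every exit path, so the file I/O runs with the credentials of whoever enabled accounting rather than those of the exiting task. The same pattern applies to any temporary credential override; a minimal sketch (the helper is hypothetical, not from this patch):

#include <linux/cred.h>
#include <linux/fs.h>

/* Write to a file with the credentials of the task that opened it. */
static ssize_t demo_write_as_opener(struct file *file, const char __user *buf,
				    size_t count, loff_t *pos)
{
	const struct cred *orig_cred;
	ssize_t ret;

	orig_cred = override_creds(file->f_cred);	/* swap in the opener's creds */
	ret = vfs_write(file, buf, count, pos);
	revert_creds(orig_cred);			/* always restore, even on error */
	return ret;
}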
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index b6eadfe30e7b..c7ece8f027f2 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -600,6 +600,7 @@ static struct inode_operations cgroup_dir_inode_operations;
600static struct file_operations proc_cgroupstats_operations; 600static struct file_operations proc_cgroupstats_operations;
601 601
602static struct backing_dev_info cgroup_backing_dev_info = { 602static struct backing_dev_info cgroup_backing_dev_info = {
603 .name = "cgroup",
603 .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK, 604 .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK,
604}; 605};
605 606
diff --git a/kernel/cred.c b/kernel/cred.c
index 1bb4d7e5d616..006fcab009d5 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -18,6 +18,18 @@
18#include <linux/cn_proc.h> 18#include <linux/cn_proc.h>
19#include "cred-internals.h" 19#include "cred-internals.h"
20 20
21#if 0
22#define kdebug(FMT, ...) \
23 printk("[%-5.5s%5u] "FMT"\n", current->comm, current->pid ,##__VA_ARGS__)
24#else
25static inline __attribute__((format(printf, 1, 2)))
26void no_printk(const char *fmt, ...)
27{
28}
29#define kdebug(FMT, ...) \
30 no_printk("[%-5.5s%5u] "FMT"\n", current->comm, current->pid ,##__VA_ARGS__)
31#endif
32
21static struct kmem_cache *cred_jar; 33static struct kmem_cache *cred_jar;
22 34
23/* 35/*
@@ -36,6 +48,10 @@ static struct thread_group_cred init_tgcred = {
36 */ 48 */
37struct cred init_cred = { 49struct cred init_cred = {
38 .usage = ATOMIC_INIT(4), 50 .usage = ATOMIC_INIT(4),
51#ifdef CONFIG_DEBUG_CREDENTIALS
52 .subscribers = ATOMIC_INIT(2),
53 .magic = CRED_MAGIC,
54#endif
39 .securebits = SECUREBITS_DEFAULT, 55 .securebits = SECUREBITS_DEFAULT,
40 .cap_inheritable = CAP_INIT_INH_SET, 56 .cap_inheritable = CAP_INIT_INH_SET,
41 .cap_permitted = CAP_FULL_SET, 57 .cap_permitted = CAP_FULL_SET,
@@ -48,6 +64,31 @@ struct cred init_cred = {
48#endif 64#endif
49}; 65};
50 66
67static inline void set_cred_subscribers(struct cred *cred, int n)
68{
69#ifdef CONFIG_DEBUG_CREDENTIALS
70 atomic_set(&cred->subscribers, n);
71#endif
72}
73
74static inline int read_cred_subscribers(const struct cred *cred)
75{
76#ifdef CONFIG_DEBUG_CREDENTIALS
77 return atomic_read(&cred->subscribers);
78#else
79 return 0;
80#endif
81}
82
83static inline void alter_cred_subscribers(const struct cred *_cred, int n)
84{
85#ifdef CONFIG_DEBUG_CREDENTIALS
86 struct cred *cred = (struct cred *) _cred;
87
88 atomic_add(n, &cred->subscribers);
89#endif
90}
91
51/* 92/*
52 * Dispose of the shared task group credentials 93 * Dispose of the shared task group credentials
53 */ 94 */
@@ -85,9 +126,22 @@ static void put_cred_rcu(struct rcu_head *rcu)
85{ 126{
86 struct cred *cred = container_of(rcu, struct cred, rcu); 127 struct cred *cred = container_of(rcu, struct cred, rcu);
87 128
129 kdebug("put_cred_rcu(%p)", cred);
130
131#ifdef CONFIG_DEBUG_CREDENTIALS
132 if (cred->magic != CRED_MAGIC_DEAD ||
133 atomic_read(&cred->usage) != 0 ||
134 read_cred_subscribers(cred) != 0)
135 panic("CRED: put_cred_rcu() sees %p with"
136 " mag %x, put %p, usage %d, subscr %d\n",
137 cred, cred->magic, cred->put_addr,
138 atomic_read(&cred->usage),
139 read_cred_subscribers(cred));
140#else
88 if (atomic_read(&cred->usage) != 0) 141 if (atomic_read(&cred->usage) != 0)
89 panic("CRED: put_cred_rcu() sees %p with usage %d\n", 142 panic("CRED: put_cred_rcu() sees %p with usage %d\n",
90 cred, atomic_read(&cred->usage)); 143 cred, atomic_read(&cred->usage));
144#endif
91 145
92 security_cred_free(cred); 146 security_cred_free(cred);
93 key_put(cred->thread_keyring); 147 key_put(cred->thread_keyring);
@@ -106,12 +160,90 @@ static void put_cred_rcu(struct rcu_head *rcu)
106 */ 160 */
107void __put_cred(struct cred *cred) 161void __put_cred(struct cred *cred)
108{ 162{
163 kdebug("__put_cred(%p{%d,%d})", cred,
164 atomic_read(&cred->usage),
165 read_cred_subscribers(cred));
166
109 BUG_ON(atomic_read(&cred->usage) != 0); 167 BUG_ON(atomic_read(&cred->usage) != 0);
168#ifdef CONFIG_DEBUG_CREDENTIALS
169 BUG_ON(read_cred_subscribers(cred) != 0);
170 cred->magic = CRED_MAGIC_DEAD;
171 cred->put_addr = __builtin_return_address(0);
172#endif
173 BUG_ON(cred == current->cred);
174 BUG_ON(cred == current->real_cred);
110 175
111 call_rcu(&cred->rcu, put_cred_rcu); 176 call_rcu(&cred->rcu, put_cred_rcu);
112} 177}
113EXPORT_SYMBOL(__put_cred); 178EXPORT_SYMBOL(__put_cred);
114 179
180/*
181 * Clean up a task's credentials when it exits
182 */
183void exit_creds(struct task_struct *tsk)
184{
185 struct cred *cred;
186
187 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
188 atomic_read(&tsk->cred->usage),
189 read_cred_subscribers(tsk->cred));
190
191 cred = (struct cred *) tsk->real_cred;
192 tsk->real_cred = NULL;
193 validate_creds(cred);
194 alter_cred_subscribers(cred, -1);
195 put_cred(cred);
196
197 cred = (struct cred *) tsk->cred;
198 tsk->cred = NULL;
199 validate_creds(cred);
200 alter_cred_subscribers(cred, -1);
201 put_cred(cred);
202
203 cred = (struct cred *) tsk->replacement_session_keyring;
204 if (cred) {
205 tsk->replacement_session_keyring = NULL;
206 validate_creds(cred);
207 put_cred(cred);
208 }
209}
210
211/*
212 * Allocate blank credentials, such that the credentials can be filled in at a
213 * later date without risk of ENOMEM.
214 */
215struct cred *cred_alloc_blank(void)
216{
217 struct cred *new;
218
219 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
220 if (!new)
221 return NULL;
222
223#ifdef CONFIG_KEYS
224 new->tgcred = kzalloc(sizeof(*new->tgcred), GFP_KERNEL);
225 if (!new->tgcred) {
226 kfree(new);
227 return NULL;
228 }
229 atomic_set(&new->tgcred->usage, 1);
230#endif
231
232 atomic_set(&new->usage, 1);
233
234 if (security_cred_alloc_blank(new, GFP_KERNEL) < 0)
235 goto error;
236
237#ifdef CONFIG_DEBUG_CREDENTIALS
238 new->magic = CRED_MAGIC;
239#endif
240 return new;
241
242error:
243 abort_creds(new);
244 return NULL;
245}
246
115/** 247/**
116 * prepare_creds - Prepare a new set of credentials for modification 248 * prepare_creds - Prepare a new set of credentials for modification
117 * 249 *
@@ -132,16 +264,19 @@ struct cred *prepare_creds(void)
132 const struct cred *old; 264 const struct cred *old;
133 struct cred *new; 265 struct cred *new;
134 266
135 BUG_ON(atomic_read(&task->real_cred->usage) < 1); 267 validate_process_creds();
136 268
137 new = kmem_cache_alloc(cred_jar, GFP_KERNEL); 269 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
138 if (!new) 270 if (!new)
139 return NULL; 271 return NULL;
140 272
273 kdebug("prepare_creds() alloc %p", new);
274
141 old = task->cred; 275 old = task->cred;
142 memcpy(new, old, sizeof(struct cred)); 276 memcpy(new, old, sizeof(struct cred));
143 277
144 atomic_set(&new->usage, 1); 278 atomic_set(&new->usage, 1);
279 set_cred_subscribers(new, 0);
145 get_group_info(new->group_info); 280 get_group_info(new->group_info);
146 get_uid(new->user); 281 get_uid(new->user);
147 282
@@ -157,6 +292,7 @@ struct cred *prepare_creds(void)
157 292
158 if (security_prepare_creds(new, old, GFP_KERNEL) < 0) 293 if (security_prepare_creds(new, old, GFP_KERNEL) < 0)
159 goto error; 294 goto error;
295 validate_creds(new);
160 return new; 296 return new;
161 297
162error: 298error:
@@ -229,9 +365,12 @@ struct cred *prepare_usermodehelper_creds(void)
229 if (!new) 365 if (!new)
230 return NULL; 366 return NULL;
231 367
368 kdebug("prepare_usermodehelper_creds() alloc %p", new);
369
232 memcpy(new, &init_cred, sizeof(struct cred)); 370 memcpy(new, &init_cred, sizeof(struct cred));
233 371
234 atomic_set(&new->usage, 1); 372 atomic_set(&new->usage, 1);
373 set_cred_subscribers(new, 0);
235 get_group_info(new->group_info); 374 get_group_info(new->group_info);
236 get_uid(new->user); 375 get_uid(new->user);
237 376
@@ -250,6 +389,7 @@ struct cred *prepare_usermodehelper_creds(void)
250#endif 389#endif
251 if (security_prepare_creds(new, &init_cred, GFP_ATOMIC) < 0) 390 if (security_prepare_creds(new, &init_cred, GFP_ATOMIC) < 0)
252 goto error; 391 goto error;
392 validate_creds(new);
253 393
254 BUG_ON(atomic_read(&new->usage) != 1); 394 BUG_ON(atomic_read(&new->usage) != 1);
255 return new; 395 return new;
@@ -286,6 +426,10 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
286 ) { 426 ) {
287 p->real_cred = get_cred(p->cred); 427 p->real_cred = get_cred(p->cred);
288 get_cred(p->cred); 428 get_cred(p->cred);
429 alter_cred_subscribers(p->cred, 2);
430 kdebug("share_creds(%p{%d,%d})",
431 p->cred, atomic_read(&p->cred->usage),
432 read_cred_subscribers(p->cred));
289 atomic_inc(&p->cred->user->processes); 433 atomic_inc(&p->cred->user->processes);
290 return 0; 434 return 0;
291 } 435 }
@@ -331,6 +475,8 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
331 475
332 atomic_inc(&new->user->processes); 476 atomic_inc(&new->user->processes);
333 p->cred = p->real_cred = get_cred(new); 477 p->cred = p->real_cred = get_cred(new);
478 alter_cred_subscribers(new, 2);
479 validate_creds(new);
334 return 0; 480 return 0;
335 481
336error_put: 482error_put:
@@ -355,13 +501,20 @@ error_put:
355int commit_creds(struct cred *new) 501int commit_creds(struct cred *new)
356{ 502{
357 struct task_struct *task = current; 503 struct task_struct *task = current;
358 const struct cred *old; 504 const struct cred *old = task->real_cred;
359 505
360 BUG_ON(task->cred != task->real_cred); 506 kdebug("commit_creds(%p{%d,%d})", new,
361 BUG_ON(atomic_read(&task->real_cred->usage) < 2); 507 atomic_read(&new->usage),
508 read_cred_subscribers(new));
509
510 BUG_ON(task->cred != old);
511#ifdef CONFIG_DEBUG_CREDENTIALS
512 BUG_ON(read_cred_subscribers(old) < 2);
513 validate_creds(old);
514 validate_creds(new);
515#endif
362 BUG_ON(atomic_read(&new->usage) < 1); 516 BUG_ON(atomic_read(&new->usage) < 1);
363 517
364 old = task->real_cred;
365 security_commit_creds(new, old); 518 security_commit_creds(new, old);
366 519
367 get_cred(new); /* we will require a ref for the subj creds too */ 520 get_cred(new); /* we will require a ref for the subj creds too */
@@ -390,12 +543,14 @@ int commit_creds(struct cred *new)
390 * cheaply with the new uid cache, so if it matters 543 * cheaply with the new uid cache, so if it matters
391 * we should be checking for it. -DaveM 544 * we should be checking for it. -DaveM
392 */ 545 */
546 alter_cred_subscribers(new, 2);
393 if (new->user != old->user) 547 if (new->user != old->user)
394 atomic_inc(&new->user->processes); 548 atomic_inc(&new->user->processes);
395 rcu_assign_pointer(task->real_cred, new); 549 rcu_assign_pointer(task->real_cred, new);
396 rcu_assign_pointer(task->cred, new); 550 rcu_assign_pointer(task->cred, new);
397 if (new->user != old->user) 551 if (new->user != old->user)
398 atomic_dec(&old->user->processes); 552 atomic_dec(&old->user->processes);
553 alter_cred_subscribers(old, -2);
399 554
400 sched_switch_user(task); 555 sched_switch_user(task);
401 556
@@ -428,6 +583,13 @@ EXPORT_SYMBOL(commit_creds);
428 */ 583 */
429void abort_creds(struct cred *new) 584void abort_creds(struct cred *new)
430{ 585{
586 kdebug("abort_creds(%p{%d,%d})", new,
587 atomic_read(&new->usage),
588 read_cred_subscribers(new));
589
590#ifdef CONFIG_DEBUG_CREDENTIALS
591 BUG_ON(read_cred_subscribers(new) != 0);
592#endif
431 BUG_ON(atomic_read(&new->usage) < 1); 593 BUG_ON(atomic_read(&new->usage) < 1);
432 put_cred(new); 594 put_cred(new);
433} 595}
@@ -444,7 +606,20 @@ const struct cred *override_creds(const struct cred *new)
444{ 606{
445 const struct cred *old = current->cred; 607 const struct cred *old = current->cred;
446 608
447 rcu_assign_pointer(current->cred, get_cred(new)); 609 kdebug("override_creds(%p{%d,%d})", new,
610 atomic_read(&new->usage),
611 read_cred_subscribers(new));
612
613 validate_creds(old);
614 validate_creds(new);
615 get_cred(new);
616 alter_cred_subscribers(new, 1);
617 rcu_assign_pointer(current->cred, new);
618 alter_cred_subscribers(old, -1);
619
620 kdebug("override_creds() = %p{%d,%d}", old,
621 atomic_read(&old->usage),
622 read_cred_subscribers(old));
448 return old; 623 return old;
449} 624}
450EXPORT_SYMBOL(override_creds); 625EXPORT_SYMBOL(override_creds);
@@ -460,7 +635,15 @@ void revert_creds(const struct cred *old)
460{ 635{
461 const struct cred *override = current->cred; 636 const struct cred *override = current->cred;
462 637
638 kdebug("revert_creds(%p{%d,%d})", old,
639 atomic_read(&old->usage),
640 read_cred_subscribers(old));
641
642 validate_creds(old);
643 validate_creds(override);
644 alter_cred_subscribers(old, 1);
463 rcu_assign_pointer(current->cred, old); 645 rcu_assign_pointer(current->cred, old);
646 alter_cred_subscribers(override, -1);
464 put_cred(override); 647 put_cred(override);
465} 648}
466EXPORT_SYMBOL(revert_creds); 649EXPORT_SYMBOL(revert_creds);
@@ -502,11 +685,15 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
502 if (!new) 685 if (!new)
503 return NULL; 686 return NULL;
504 687
688 kdebug("prepare_kernel_cred() alloc %p", new);
689
505 if (daemon) 690 if (daemon)
506 old = get_task_cred(daemon); 691 old = get_task_cred(daemon);
507 else 692 else
508 old = get_cred(&init_cred); 693 old = get_cred(&init_cred);
509 694
695 validate_creds(old);
696
510 *new = *old; 697 *new = *old;
511 get_uid(new->user); 698 get_uid(new->user);
512 get_group_info(new->group_info); 699 get_group_info(new->group_info);
@@ -526,7 +713,9 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
526 goto error; 713 goto error;
527 714
528 atomic_set(&new->usage, 1); 715 atomic_set(&new->usage, 1);
716 set_cred_subscribers(new, 0);
529 put_cred(old); 717 put_cred(old);
718 validate_creds(new);
530 return new; 719 return new;
531 720
532error: 721error:
@@ -589,3 +778,95 @@ int set_create_files_as(struct cred *new, struct inode *inode)
589 return security_kernel_create_files_as(new, inode); 778 return security_kernel_create_files_as(new, inode);
590} 779}
591EXPORT_SYMBOL(set_create_files_as); 780EXPORT_SYMBOL(set_create_files_as);
781
782#ifdef CONFIG_DEBUG_CREDENTIALS
783
784/*
785 * dump invalid credentials
786 */
787static void dump_invalid_creds(const struct cred *cred, const char *label,
788 const struct task_struct *tsk)
789{
790 printk(KERN_ERR "CRED: %s credentials: %p %s%s%s\n",
791 label, cred,
792 cred == &init_cred ? "[init]" : "",
793 cred == tsk->real_cred ? "[real]" : "",
794 cred == tsk->cred ? "[eff]" : "");
795 printk(KERN_ERR "CRED: ->magic=%x, put_addr=%p\n",
796 cred->magic, cred->put_addr);
797 printk(KERN_ERR "CRED: ->usage=%d, subscr=%d\n",
798 atomic_read(&cred->usage),
799 read_cred_subscribers(cred));
800 printk(KERN_ERR "CRED: ->*uid = { %d,%d,%d,%d }\n",
801 cred->uid, cred->euid, cred->suid, cred->fsuid);
802 printk(KERN_ERR "CRED: ->*gid = { %d,%d,%d,%d }\n",
803 cred->gid, cred->egid, cred->sgid, cred->fsgid);
804#ifdef CONFIG_SECURITY
805 printk(KERN_ERR "CRED: ->security is %p\n", cred->security);
806 if ((unsigned long) cred->security >= PAGE_SIZE &&
807 (((unsigned long) cred->security & 0xffffff00) !=
808 (POISON_FREE << 24 | POISON_FREE << 16 | POISON_FREE << 8)))
809 printk(KERN_ERR "CRED: ->security {%x, %x}\n",
810 ((u32*)cred->security)[0],
811 ((u32*)cred->security)[1]);
812#endif
813}
814
815/*
816 * report use of invalid credentials
817 */
818void __invalid_creds(const struct cred *cred, const char *file, unsigned line)
819{
820 printk(KERN_ERR "CRED: Invalid credentials\n");
821 printk(KERN_ERR "CRED: At %s:%u\n", file, line);
822 dump_invalid_creds(cred, "Specified", current);
823 BUG();
824}
825EXPORT_SYMBOL(__invalid_creds);
826
827/*
828 * check the credentials on a process
829 */
830void __validate_process_creds(struct task_struct *tsk,
831 const char *file, unsigned line)
832{
833 if (tsk->cred == tsk->real_cred) {
834 if (unlikely(read_cred_subscribers(tsk->cred) < 2 ||
835 creds_are_invalid(tsk->cred)))
836 goto invalid_creds;
837 } else {
838 if (unlikely(read_cred_subscribers(tsk->real_cred) < 1 ||
839 read_cred_subscribers(tsk->cred) < 1 ||
840 creds_are_invalid(tsk->real_cred) ||
841 creds_are_invalid(tsk->cred)))
842 goto invalid_creds;
843 }
844 return;
845
846invalid_creds:
847 printk(KERN_ERR "CRED: Invalid process credentials\n");
848 printk(KERN_ERR "CRED: At %s:%u\n", file, line);
849
850 dump_invalid_creds(tsk->real_cred, "Real", tsk);
851 if (tsk->cred != tsk->real_cred)
852 dump_invalid_creds(tsk->cred, "Effective", tsk);
853 else
854 printk(KERN_ERR "CRED: Effective creds == Real creds\n");
855 BUG();
856}
857EXPORT_SYMBOL(__validate_process_creds);
858
859/*
860 * check creds for do_exit()
861 */
862void validate_creds_for_do_exit(struct task_struct *tsk)
863{
864 kdebug("validate_creds_for_do_exit(%p,%p{%d,%d})",
865 tsk->real_cred, tsk->cred,
866 atomic_read(&tsk->cred->usage),
867 read_cred_subscribers(tsk->cred));
868
869 __validate_process_creds(tsk, __FILE__, __LINE__);
870}
871
872#endif /* CONFIG_DEBUG_CREDENTIALS */
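Beyond the CONFIG_DEBUG_CREDENTIALS bookkeeping (subscriber counts, magic values, validate_creds() checks), the hunks above add cred_alloc_blank() and exit_creds(); the life cycle for changing a task's own credentials is still prepare_creds()/commit_creds(), with abort_creds() on failure, which is exactly what the new validation hooks verify. A minimal sketch of that life cycle (demo_drop_fsuid is hypothetical):

#include <linux/cred.h>

static int demo_drop_fsuid(uid_t fsuid)
{
	struct cred *new;

	new = prepare_creds();	/* private copy of current->cred, usage == 1 */
	if (!new)
		return -ENOMEM;

	new->fsuid = fsuid;

	/* commit_creds() consumes the reference and installs the new creds;
	 * an error path would call abort_creds(new) instead. */
	return commit_creds(new);
}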
diff --git a/kernel/exit.c b/kernel/exit.c
index 869dc221733e..ae5d8660ddff 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -901,6 +901,8 @@ NORET_TYPE void do_exit(long code)
901 901
902 tracehook_report_exit(&code); 902 tracehook_report_exit(&code);
903 903
904 validate_creds_for_do_exit(tsk);
905
904 /* 906 /*
905 * We're taking recursive faults here in do_exit. Safest is to just 907 * We're taking recursive faults here in do_exit. Safest is to just
906 * leave this task alone and wait for reboot. 908 * leave this task alone and wait for reboot.
@@ -1009,7 +1011,10 @@ NORET_TYPE void do_exit(long code)
1009 if (tsk->splice_pipe) 1011 if (tsk->splice_pipe)
1010 __free_pipe_info(tsk->splice_pipe); 1012 __free_pipe_info(tsk->splice_pipe);
1011 1013
1014 validate_creds_for_do_exit(tsk);
1015
1012 preempt_disable(); 1016 preempt_disable();
1017 exit_rcu();
1013 /* causes final put_task_struct in finish_task_switch(). */ 1018 /* causes final put_task_struct in finish_task_switch(). */
1014 tsk->state = TASK_DEAD; 1019 tsk->state = TASK_DEAD;
1015 schedule(); 1020 schedule();
diff --git a/kernel/fork.c b/kernel/fork.c
index e6c04d462ab2..bfee931ee3fb 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -152,8 +152,7 @@ void __put_task_struct(struct task_struct *tsk)
152 WARN_ON(atomic_read(&tsk->usage)); 152 WARN_ON(atomic_read(&tsk->usage));
153 WARN_ON(tsk == current); 153 WARN_ON(tsk == current);
154 154
155 put_cred(tsk->real_cred); 155 exit_creds(tsk);
156 put_cred(tsk->cred);
157 delayacct_tsk_free(tsk); 156 delayacct_tsk_free(tsk);
158 157
159 if (!profile_handoff_task(tsk)) 158 if (!profile_handoff_task(tsk))
@@ -1008,10 +1007,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1008 copy_flags(clone_flags, p); 1007 copy_flags(clone_flags, p);
1009 INIT_LIST_HEAD(&p->children); 1008 INIT_LIST_HEAD(&p->children);
1010 INIT_LIST_HEAD(&p->sibling); 1009 INIT_LIST_HEAD(&p->sibling);
1011#ifdef CONFIG_PREEMPT_RCU 1010 rcu_copy_process(p);
1012 p->rcu_read_lock_nesting = 0;
1013 p->rcu_flipctr_idx = 0;
1014#endif /* #ifdef CONFIG_PREEMPT_RCU */
1015 p->vfork_done = NULL; 1011 p->vfork_done = NULL;
1016 spin_lock_init(&p->alloc_lock); 1012 spin_lock_init(&p->alloc_lock);
1017 1013
@@ -1297,8 +1293,7 @@ bad_fork_cleanup_put_domain:
1297 module_put(task_thread_info(p)->exec_domain->module); 1293 module_put(task_thread_info(p)->exec_domain->module);
1298bad_fork_cleanup_count: 1294bad_fork_cleanup_count:
1299 atomic_dec(&p->cred->user->processes); 1295 atomic_dec(&p->cred->user->processes);
1300 put_cred(p->real_cred); 1296 exit_creds(p);
1301 put_cred(p->cred);
1302bad_fork_free: 1297bad_fork_free:
1303 free_task(p); 1298 free_task(p);
1304fork_out: 1299fork_out:
diff --git a/kernel/futex.c b/kernel/futex.c
index e18cfbdc7190..248dd119a86e 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -115,6 +115,9 @@ struct futex_q {
115 /* rt_waiter storage for requeue_pi: */ 115 /* rt_waiter storage for requeue_pi: */
116 struct rt_mutex_waiter *rt_waiter; 116 struct rt_mutex_waiter *rt_waiter;
117 117
118 /* The expected requeue pi target futex key: */
119 union futex_key *requeue_pi_key;
120
118 /* Bitset for the optional bitmasked wakeup */ 121 /* Bitset for the optional bitmasked wakeup */
119 u32 bitset; 122 u32 bitset;
120}; 123};
@@ -1089,6 +1092,10 @@ static int futex_proxy_trylock_atomic(u32 __user *pifutex,
1089 if (!top_waiter) 1092 if (!top_waiter)
1090 return 0; 1093 return 0;
1091 1094
1095 /* Ensure we requeue to the expected futex. */
1096 if (!match_futex(top_waiter->requeue_pi_key, key2))
1097 return -EINVAL;
1098
1092 /* 1099 /*
1093 * Try to take the lock for top_waiter. Set the FUTEX_WAITERS bit in 1100 * Try to take the lock for top_waiter. Set the FUTEX_WAITERS bit in
1094 * the contended case or if set_waiters is 1. The pi_state is returned 1101 * the contended case or if set_waiters is 1. The pi_state is returned
@@ -1276,6 +1283,12 @@ retry_private:
1276 continue; 1283 continue;
1277 } 1284 }
1278 1285
1286 /* Ensure we requeue to the expected futex for requeue_pi. */
1287 if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) {
1288 ret = -EINVAL;
1289 break;
1290 }
1291
1279 /* 1292 /*
1280 * Requeue nr_requeue waiters and possibly one more in the case 1293 * Requeue nr_requeue waiters and possibly one more in the case
1281 * of requeue_pi if we couldn't acquire the lock atomically. 1294 * of requeue_pi if we couldn't acquire the lock atomically.
@@ -1751,6 +1764,7 @@ static int futex_wait(u32 __user *uaddr, int fshared,
1751 q.pi_state = NULL; 1764 q.pi_state = NULL;
1752 q.bitset = bitset; 1765 q.bitset = bitset;
1753 q.rt_waiter = NULL; 1766 q.rt_waiter = NULL;
1767 q.requeue_pi_key = NULL;
1754 1768
1755 if (abs_time) { 1769 if (abs_time) {
1756 to = &timeout; 1770 to = &timeout;
@@ -1858,6 +1872,7 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared,
1858 1872
1859 q.pi_state = NULL; 1873 q.pi_state = NULL;
1860 q.rt_waiter = NULL; 1874 q.rt_waiter = NULL;
1875 q.requeue_pi_key = NULL;
1861retry: 1876retry:
1862 q.key = FUTEX_KEY_INIT; 1877 q.key = FUTEX_KEY_INIT;
1863 ret = get_futex_key(uaddr, fshared, &q.key, VERIFY_WRITE); 1878 ret = get_futex_key(uaddr, fshared, &q.key, VERIFY_WRITE);
@@ -2118,11 +2133,11 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
2118 * We call schedule in futex_wait_queue_me() when we enqueue and return there 2133 * We call schedule in futex_wait_queue_me() when we enqueue and return there
2119 * via the following: 2134 * via the following:
2120 * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue() 2135 * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
2121 * 2) wakeup on uaddr2 after a requeue and subsequent unlock 2136 * 2) wakeup on uaddr2 after a requeue
2122 * 3) signal (before or after requeue) 2137 * 3) signal
2123 * 4) timeout (before or after requeue) 2138 * 4) timeout
2124 * 2139 *
2125 * If 3, we setup a restart_block with futex_wait_requeue_pi() as the function. 2140 * If 3, cleanup and return -ERESTARTNOINTR.
2126 * 2141 *
2127 * If 2, we may then block on trying to take the rt_mutex and return via: 2142 * If 2, we may then block on trying to take the rt_mutex and return via:
2128 * 5) successful lock 2143 * 5) successful lock
@@ -2130,7 +2145,7 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
2130 * 7) timeout 2145 * 7) timeout
2131 * 8) other lock acquisition failure 2146 * 8) other lock acquisition failure
2132 * 2147 *
2133 * If 6, we setup a restart_block with futex_lock_pi() as the function. 2148 * If 6, return -EWOULDBLOCK (restarting the syscall would do the same).
2134 * 2149 *
2135 * If 4 or 7, we cleanup and return with -ETIMEDOUT. 2150 * If 4 or 7, we cleanup and return with -ETIMEDOUT.
2136 * 2151 *
@@ -2169,15 +2184,16 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
2169 debug_rt_mutex_init_waiter(&rt_waiter); 2184 debug_rt_mutex_init_waiter(&rt_waiter);
2170 rt_waiter.task = NULL; 2185 rt_waiter.task = NULL;
2171 2186
2172 q.pi_state = NULL;
2173 q.bitset = bitset;
2174 q.rt_waiter = &rt_waiter;
2175
2176 key2 = FUTEX_KEY_INIT; 2187 key2 = FUTEX_KEY_INIT;
2177 ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE); 2188 ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE);
2178 if (unlikely(ret != 0)) 2189 if (unlikely(ret != 0))
2179 goto out; 2190 goto out;
2180 2191
2192 q.pi_state = NULL;
2193 q.bitset = bitset;
2194 q.rt_waiter = &rt_waiter;
2195 q.requeue_pi_key = &key2;
2196
2181 /* Prepare to wait on uaddr. */ 2197 /* Prepare to wait on uaddr. */
2182 ret = futex_wait_setup(uaddr, val, fshared, &q, &hb); 2198 ret = futex_wait_setup(uaddr, val, fshared, &q, &hb);
2183 if (ret) 2199 if (ret)
@@ -2248,14 +2264,11 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
2248 rt_mutex_unlock(pi_mutex); 2264 rt_mutex_unlock(pi_mutex);
2249 } else if (ret == -EINTR) { 2265 } else if (ret == -EINTR) {
2250 /* 2266 /*
2251 * We've already been requeued, but we have no way to 2267 * We've already been requeued, but cannot restart by calling
2252 * restart by calling futex_lock_pi() directly. We 2268 * futex_lock_pi() directly. We could restart this syscall, but
2253 * could restart the syscall, but that will look at 2269 * it would detect that the user space "val" changed and return
2254 * the user space value and return right away. So we 2270 * -EWOULDBLOCK. Save the overhead of the restart and return
2255 * drop back with EWOULDBLOCK to tell user space that 2271 * -EWOULDBLOCK directly.
2256 * "val" has been changed. That's the same what the
2257 * restart of the syscall would do in
2258 * futex_wait_setup().
2259 */ 2272 */
2260 ret = -EWOULDBLOCK; 2273 ret = -EWOULDBLOCK;
2261 } 2274 }
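The futex changes record the intended requeue target in q->requeue_pi_key so that futex_requeue() can reject a FUTEX_CMP_REQUEUE_PI whose uaddr2 differs from the one the waiter passed to FUTEX_WAIT_REQUEUE_PI. From user space the pairing looks roughly like the sketch below (raw syscalls, error handling omitted, names illustrative; assumes headers that expose these futex opcodes):

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Waiter side: wait on uaddr, expecting to be requeued onto the PI futex
 * uaddr2. After this patch, the waker must name the same uaddr2. */
static long demo_wait_requeue_pi(int *uaddr, int val, int *uaddr2)
{
	return syscall(SYS_futex, uaddr, FUTEX_WAIT_REQUEUE_PI, val,
		       NULL /* no timeout */, uaddr2, 0);
}

/* Waker side: wake one waiter and requeue the rest onto uaddr2, provided
 * *uaddr still contains cmpval. A mismatched uaddr2 now yields -EINVAL. */
static long demo_cmp_requeue_pi(int *uaddr, int cmpval, int *uaddr2,
				int nr_requeue)
{
	return syscall(SYS_futex, uaddr, FUTEX_CMP_REQUEUE_PI, 1 /* nr_wake */,
		       nr_requeue, uaddr2, cmpval);
}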
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 13c68e71b726..c1660194d115 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -222,6 +222,34 @@ int set_irq_chip_data(unsigned int irq, void *data)
222} 222}
223EXPORT_SYMBOL(set_irq_chip_data); 223EXPORT_SYMBOL(set_irq_chip_data);
224 224
225/**
226 * set_irq_nested_thread - Set/Reset the IRQ_NESTED_THREAD flag of an irq
227 *
228 * @irq: Interrupt number
229 * @nest: 0 to clear / 1 to set the IRQ_NESTED_THREAD flag
230 *
231 * The IRQ_NESTED_THREAD flag indicates that on
232 * request_threaded_irq() no separate interrupt thread should be
233 * created for the irq as the handler is called nested in the
234 * context of a demultiplexing interrupt handler thread.
235 */
236void set_irq_nested_thread(unsigned int irq, int nest)
237{
238 struct irq_desc *desc = irq_to_desc(irq);
239 unsigned long flags;
240
241 if (!desc)
242 return;
243
244 spin_lock_irqsave(&desc->lock, flags);
245 if (nest)
246 desc->status |= IRQ_NESTED_THREAD;
247 else
248 desc->status &= ~IRQ_NESTED_THREAD;
249 spin_unlock_irqrestore(&desc->lock, flags);
250}
251EXPORT_SYMBOL_GPL(set_irq_nested_thread);
252
225/* 253/*
226 * default enable function 254 * default enable function
227 */ 255 */
@@ -299,6 +327,45 @@ static inline void mask_ack_irq(struct irq_desc *desc, int irq)
299 } 327 }
300} 328}
301 329
330/*
331 * handle_nested_irq - Handle a nested irq from a irq thread
332 * @irq: the interrupt number
333 *
334 * Handle interrupts which are nested into a threaded interrupt
335 * handler. The handler function is called inside the calling
336 * thread's context.
337 */
338void handle_nested_irq(unsigned int irq)
339{
340 struct irq_desc *desc = irq_to_desc(irq);
341 struct irqaction *action;
342 irqreturn_t action_ret;
343
344 might_sleep();
345
346 spin_lock_irq(&desc->lock);
347
348 kstat_incr_irqs_this_cpu(irq, desc);
349
350 action = desc->action;
351 if (unlikely(!action || (desc->status & IRQ_DISABLED)))
352 goto out_unlock;
353
354 desc->status |= IRQ_INPROGRESS;
355 spin_unlock_irq(&desc->lock);
356
357 action_ret = action->thread_fn(action->irq, action->dev_id);
358 if (!noirqdebug)
359 note_interrupt(irq, desc, action_ret);
360
361 spin_lock_irq(&desc->lock);
362 desc->status &= ~IRQ_INPROGRESS;
363
364out_unlock:
365 spin_unlock_irq(&desc->lock);
366}
367EXPORT_SYMBOL_GPL(handle_nested_irq);
368
302/** 369/**
303 * handle_simple_irq - Simple and software-decoded IRQs. 370 * handle_simple_irq - Simple and software-decoded IRQs.
304 * @irq: the interrupt number 371 * @irq: the interrupt number
@@ -382,7 +449,10 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
382 449
383 spin_lock(&desc->lock); 450 spin_lock(&desc->lock);
384 desc->status &= ~IRQ_INPROGRESS; 451 desc->status &= ~IRQ_INPROGRESS;
385 if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask) 452
453 if (unlikely(desc->status & IRQ_ONESHOT))
454 desc->status |= IRQ_MASKED;
455 else if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
386 desc->chip->unmask(irq); 456 desc->chip->unmask(irq);
387out_unlock: 457out_unlock:
388 spin_unlock(&desc->lock); 458 spin_unlock(&desc->lock);
@@ -572,6 +642,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
572 desc->chip = &dummy_irq_chip; 642 desc->chip = &dummy_irq_chip;
573 } 643 }
574 644
645 chip_bus_lock(irq, desc);
575 spin_lock_irqsave(&desc->lock, flags); 646 spin_lock_irqsave(&desc->lock, flags);
576 647
577 /* Uninstall? */ 648 /* Uninstall? */
@@ -591,6 +662,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
591 desc->chip->startup(irq); 662 desc->chip->startup(irq);
592 } 663 }
593 spin_unlock_irqrestore(&desc->lock, flags); 664 spin_unlock_irqrestore(&desc->lock, flags);
665 chip_bus_sync_unlock(irq, desc);
594} 666}
595EXPORT_SYMBOL_GPL(__set_irq_handler); 667EXPORT_SYMBOL_GPL(__set_irq_handler);
596 668
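set_irq_nested_thread() and handle_nested_irq() above target demultiplexing interrupt chips, typically GPIO expanders behind I2C or SPI, whose per-pin interrupts must be handled from the parent interrupt's thread because querying the chip may sleep. A sketch of the expected driver-side usage (the expander structure and register access are hypothetical):

#include <linux/interrupt.h>
#include <linux/irq.h>

#define DEMO_NR_PINS	8

struct demo_expander {
	unsigned int irq_base;		/* first virtual irq of the expander */
};

/* Threaded handler of the parent irq: read the (sleeping) status register
 * and invoke the nested child handlers; handle_nested_irq() calls their
 * thread functions directly, bypassing the flow handler. */
static irqreturn_t demo_expander_irq(int irq, void *data)
{
	struct demo_expander *exp = data;
	unsigned long pending = 0;	/* read over I2C in a real driver */
	int pin;

	for (pin = 0; pin < DEMO_NR_PINS; pin++)
		if (pending & (1UL << pin))
			handle_nested_irq(exp->irq_base + pin);

	return IRQ_HANDLED;
}

static int demo_expander_setup(struct demo_expander *exp, unsigned int parent_irq)
{
	int pin;

	for (pin = 0; pin < DEMO_NR_PINS; pin++) {
		set_irq_chip_and_handler(exp->irq_base + pin, &dummy_irq_chip,
					 handle_simple_irq);
		/* child irqs must be requested with a thread_fn */
		set_irq_nested_thread(exp->irq_base + pin, 1);
	}

	return request_threaded_irq(parent_irq, NULL, demo_expander_irq,
				    IRQF_ONESHOT, "demo-expander", exp);
}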
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 065205bdd920..a81cf80554db 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -161,7 +161,7 @@ int __init early_irq_init(void)
161 161
162 desc = irq_desc_legacy; 162 desc = irq_desc_legacy;
163 legacy_count = ARRAY_SIZE(irq_desc_legacy); 163 legacy_count = ARRAY_SIZE(irq_desc_legacy);
164 node = first_online_node; 164 node = first_online_node;
165 165
166 /* allocate irq_desc_ptrs array based on nr_irqs */ 166 /* allocate irq_desc_ptrs array based on nr_irqs */
167 irq_desc_ptrs = kcalloc(nr_irqs, sizeof(void *), GFP_NOWAIT); 167 irq_desc_ptrs = kcalloc(nr_irqs, sizeof(void *), GFP_NOWAIT);
@@ -172,6 +172,9 @@ int __init early_irq_init(void)
172 172
173 for (i = 0; i < legacy_count; i++) { 173 for (i = 0; i < legacy_count; i++) {
174 desc[i].irq = i; 174 desc[i].irq = i;
175#ifdef CONFIG_SMP
176 desc[i].node = node;
177#endif
175 desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids; 178 desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
176 lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); 179 lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
177 alloc_desc_masks(&desc[i], node, true); 180 alloc_desc_masks(&desc[i], node, true);
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index e70ed5592eb9..1b5d742c6a77 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -44,6 +44,19 @@ extern int irq_select_affinity_usr(unsigned int irq);
44 44
45extern void irq_set_thread_affinity(struct irq_desc *desc); 45extern void irq_set_thread_affinity(struct irq_desc *desc);
46 46
47/* Inline functions for support of irq chips on slow busses */
48static inline void chip_bus_lock(unsigned int irq, struct irq_desc *desc)
49{
50 if (unlikely(desc->chip->bus_lock))
51 desc->chip->bus_lock(irq);
52}
53
54static inline void chip_bus_sync_unlock(unsigned int irq, struct irq_desc *desc)
55{
56 if (unlikely(desc->chip->bus_sync_unlock))
57 desc->chip->bus_sync_unlock(irq);
58}
59
47/* 60/*
48 * Debugging printout: 61 * Debugging printout:
49 */ 62 */
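chip_bus_lock() and chip_bus_sync_unlock() are no-ops unless the irq_chip supplies bus_lock/bus_sync_unlock callbacks; those callbacks exist for chips behind a sleeping bus, where mask/unmask can only cache register updates and the write-back has to happen outside desc->lock. A sketch of such a chip (all names and the single-instance data are hypothetical):

#include <linux/irq.h>
#include <linux/mutex.h>
#include <linux/types.h>

static struct {
	struct mutex	lock;		/* serializes slow-bus access */
	u8		irq_mask;	/* cached mask register */
	bool		dirty;
} demo_slow;

static void demo_bus_lock(unsigned int irq)
{
	mutex_lock(&demo_slow.lock);
}

static void demo_bus_sync_unlock(unsigned int irq)
{
	if (demo_slow.dirty) {
		/* write demo_slow.irq_mask to the chip over I2C/SPI here */
		demo_slow.dirty = false;
	}
	mutex_unlock(&demo_slow.lock);
}

static void demo_mask(unsigned int irq)
{
	demo_slow.irq_mask |= 1 << (irq & 7);	/* cache only, no bus access */
	demo_slow.dirty = true;
}

static void demo_unmask(unsigned int irq)
{
	demo_slow.irq_mask &= ~(1 << (irq & 7));
	demo_slow.dirty = true;
}

static struct irq_chip demo_slow_chip = {
	.name			= "demo-slow",
	.mask			= demo_mask,
	.unmask			= demo_unmask,
	.bus_lock		= demo_bus_lock,
	.bus_sync_unlock	= demo_bus_sync_unlock,
};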
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 0ec9ed831737..bde4c667d24d 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -230,9 +230,11 @@ void disable_irq_nosync(unsigned int irq)
230 if (!desc) 230 if (!desc)
231 return; 231 return;
232 232
233 chip_bus_lock(irq, desc);
233 spin_lock_irqsave(&desc->lock, flags); 234 spin_lock_irqsave(&desc->lock, flags);
234 __disable_irq(desc, irq, false); 235 __disable_irq(desc, irq, false);
235 spin_unlock_irqrestore(&desc->lock, flags); 236 spin_unlock_irqrestore(&desc->lock, flags);
237 chip_bus_sync_unlock(irq, desc);
236} 238}
237EXPORT_SYMBOL(disable_irq_nosync); 239EXPORT_SYMBOL(disable_irq_nosync);
238 240
@@ -294,7 +296,8 @@ void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
294 * matches the last disable, processing of interrupts on this 296 * matches the last disable, processing of interrupts on this
295 * IRQ line is re-enabled. 297 * IRQ line is re-enabled.
296 * 298 *
297 * This function may be called from IRQ context. 299 * This function may be called from IRQ context only when
300 * desc->chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
298 */ 301 */
299void enable_irq(unsigned int irq) 302void enable_irq(unsigned int irq)
300{ 303{
@@ -304,9 +307,11 @@ void enable_irq(unsigned int irq)
304 if (!desc) 307 if (!desc)
305 return; 308 return;
306 309
310 chip_bus_lock(irq, desc);
307 spin_lock_irqsave(&desc->lock, flags); 311 spin_lock_irqsave(&desc->lock, flags);
308 __enable_irq(desc, irq, false); 312 __enable_irq(desc, irq, false);
309 spin_unlock_irqrestore(&desc->lock, flags); 313 spin_unlock_irqrestore(&desc->lock, flags);
314 chip_bus_sync_unlock(irq, desc);
310} 315}
311EXPORT_SYMBOL(enable_irq); 316EXPORT_SYMBOL(enable_irq);
312 317
@@ -436,6 +441,26 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
436 return ret; 441 return ret;
437} 442}
438 443
444/*
445 * Default primary interrupt handler for threaded interrupts. Is
446 * assigned as primary handler when request_threaded_irq is called
447 * with handler == NULL. Useful for oneshot interrupts.
448 */
449static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
450{
451 return IRQ_WAKE_THREAD;
452}
453
454/*
455 * Primary handler for nested threaded interrupts. Should never be
456 * called.
457 */
458static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
459{
460 WARN(1, "Primary handler called for nested irq %d\n", irq);
461 return IRQ_NONE;
462}
463
439static int irq_wait_for_interrupt(struct irqaction *action) 464static int irq_wait_for_interrupt(struct irqaction *action)
440{ 465{
441 while (!kthread_should_stop()) { 466 while (!kthread_should_stop()) {
@@ -451,6 +476,23 @@ static int irq_wait_for_interrupt(struct irqaction *action)
451 return -1; 476 return -1;
452} 477}
453 478
479/*
480 * Oneshot interrupts keep the irq line masked until the threaded
481 * handler has finished. Unmask if the interrupt has not been disabled and
482 * is marked MASKED.
483 */
484static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc)
485{
486 chip_bus_lock(irq, desc);
487 spin_lock_irq(&desc->lock);
488 if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) {
489 desc->status &= ~IRQ_MASKED;
490 desc->chip->unmask(irq);
491 }
492 spin_unlock_irq(&desc->lock);
493 chip_bus_sync_unlock(irq, desc);
494}
495
454#ifdef CONFIG_SMP 496#ifdef CONFIG_SMP
455/* 497/*
456 * Check whether we need to change the affinity of the interrupt thread. 498 * Check whether we need to change the affinity of the interrupt thread.
@@ -492,7 +534,7 @@ static int irq_thread(void *data)
492 struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO/2, }; 534 struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO/2, };
493 struct irqaction *action = data; 535 struct irqaction *action = data;
494 struct irq_desc *desc = irq_to_desc(action->irq); 536 struct irq_desc *desc = irq_to_desc(action->irq);
495 int wake; 537 int wake, oneshot = desc->status & IRQ_ONESHOT;
496 538
497 sched_setscheduler(current, SCHED_FIFO, &param); 539 sched_setscheduler(current, SCHED_FIFO, &param);
498 current->irqaction = action; 540 current->irqaction = action;
@@ -518,6 +560,9 @@ static int irq_thread(void *data)
518 spin_unlock_irq(&desc->lock); 560 spin_unlock_irq(&desc->lock);
519 561
520 action->thread_fn(action->irq, action->dev_id); 562 action->thread_fn(action->irq, action->dev_id);
563
564 if (oneshot)
565 irq_finalize_oneshot(action->irq, desc);
521 } 566 }
522 567
523 wake = atomic_dec_and_test(&desc->threads_active); 568 wake = atomic_dec_and_test(&desc->threads_active);
@@ -565,7 +610,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
565 struct irqaction *old, **old_ptr; 610 struct irqaction *old, **old_ptr;
566 const char *old_name = NULL; 611 const char *old_name = NULL;
567 unsigned long flags; 612 unsigned long flags;
568 int shared = 0; 613 int nested, shared = 0;
569 int ret; 614 int ret;
570 615
571 if (!desc) 616 if (!desc)
@@ -590,10 +635,32 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
590 rand_initialize_irq(irq); 635 rand_initialize_irq(irq);
591 } 636 }
592 637
638 /* Oneshot interrupts are not allowed with shared */
639 if ((new->flags & IRQF_ONESHOT) && (new->flags & IRQF_SHARED))
640 return -EINVAL;
641
642 /*
643 * Check whether the interrupt nests into another interrupt
644 * thread.
645 */
646 nested = desc->status & IRQ_NESTED_THREAD;
647 if (nested) {
648 if (!new->thread_fn)
649 return -EINVAL;
650 /*
651 * Replace the primary handler which was provided from
652 * the driver for non nested interrupt handling by the
653 * dummy function which warns when called.
654 */
655 new->handler = irq_nested_primary_handler;
656 }
657
593 /* 658 /*
594 * Threaded handler ? 659 * Create a handler thread when a thread function is supplied
660 * and the interrupt does not nest into another interrupt
661 * thread.
595 */ 662 */
596 if (new->thread_fn) { 663 if (new->thread_fn && !nested) {
597 struct task_struct *t; 664 struct task_struct *t;
598 665
599 t = kthread_create(irq_thread, new, "irq/%d-%s", irq, 666 t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
@@ -662,9 +729,12 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
662 desc->status |= IRQ_PER_CPU; 729 desc->status |= IRQ_PER_CPU;
663#endif 730#endif
664 731
665 desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING | 732 desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING | IRQ_ONESHOT |
666 IRQ_INPROGRESS | IRQ_SPURIOUS_DISABLED); 733 IRQ_INPROGRESS | IRQ_SPURIOUS_DISABLED);
667 734
735 if (new->flags & IRQF_ONESHOT)
736 desc->status |= IRQ_ONESHOT;
737
668 if (!(desc->status & IRQ_NOAUTOEN)) { 738 if (!(desc->status & IRQ_NOAUTOEN)) {
669 desc->depth = 0; 739 desc->depth = 0;
670 desc->status &= ~IRQ_DISABLED; 740 desc->status &= ~IRQ_DISABLED;
@@ -875,7 +945,14 @@ EXPORT_SYMBOL_GPL(remove_irq);
875 */ 945 */
876void free_irq(unsigned int irq, void *dev_id) 946void free_irq(unsigned int irq, void *dev_id)
877{ 947{
948 struct irq_desc *desc = irq_to_desc(irq);
949
950 if (!desc)
951 return;
952
953 chip_bus_lock(irq, desc);
878 kfree(__free_irq(irq, dev_id)); 954 kfree(__free_irq(irq, dev_id));
955 chip_bus_sync_unlock(irq, desc);
879} 956}
880EXPORT_SYMBOL(free_irq); 957EXPORT_SYMBOL(free_irq);
881 958
@@ -884,6 +961,8 @@ EXPORT_SYMBOL(free_irq);
884 * @irq: Interrupt line to allocate 961 * @irq: Interrupt line to allocate
885 * @handler: Function to be called when the IRQ occurs. 962 * @handler: Function to be called when the IRQ occurs.
886 * Primary handler for threaded interrupts 963 * Primary handler for threaded interrupts
964 * If NULL and thread_fn != NULL the default
965 * primary handler is installed
887 * @thread_fn: Function called from the irq handler thread 966 * @thread_fn: Function called from the irq handler thread
888 * If NULL, no irq thread is created 967 * If NULL, no irq thread is created
889 * @irqflags: Interrupt type flags 968 * @irqflags: Interrupt type flags
@@ -963,8 +1042,12 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
963 1042
964 if (desc->status & IRQ_NOREQUEST) 1043 if (desc->status & IRQ_NOREQUEST)
965 return -EINVAL; 1044 return -EINVAL;
966 if (!handler) 1045
967 return -EINVAL; 1046 if (!handler) {
1047 if (!thread_fn)
1048 return -EINVAL;
1049 handler = irq_default_primary_handler;
1050 }
968 1051
969 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); 1052 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
970 if (!action) 1053 if (!action)
@@ -976,7 +1059,10 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
976 action->name = devname; 1059 action->name = devname;
977 action->dev_id = dev_id; 1060 action->dev_id = dev_id;
978 1061
1062 chip_bus_lock(irq, desc);
979 retval = __setup_irq(irq, desc, action); 1063 retval = __setup_irq(irq, desc, action);
1064 chip_bus_sync_unlock(irq, desc);
1065
980 if (retval) 1066 if (retval)
981 kfree(action); 1067 kfree(action);
982 1068
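With the changes above, request_threaded_irq() accepts handler == NULL and installs irq_default_primary_handler(), which only returns IRQ_WAKE_THREAD; combined with IRQF_ONESHOT the line stays masked until the threaded handler has run, which is what level-triggered interrupts from slow-bus devices require. The resulting calling convention, sketched with hypothetical names:

#include <linux/interrupt.h>

struct demo_dev {
	int irq;
};

/* Runs in the irq thread; may sleep, e.g. to clear the interrupt source
 * over I2C before the core unmasks the line again. */
static irqreturn_t demo_thread_fn(int irq, void *dev_id)
{
	struct demo_dev *dev = dev_id;

	/* acknowledge/clear the interrupt condition in the device here */
	(void)dev;
	return IRQ_HANDLED;
}

static int demo_request_irq(struct demo_dev *dev)
{
	return request_threaded_irq(dev->irq, NULL, demo_thread_fn,
				    IRQF_ONESHOT, "demo-dev", dev);
}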
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c
index 638d8bedec14..a0bb09e79867 100644
--- a/kernel/irq/pm.c
+++ b/kernel/irq/pm.c
@@ -15,10 +15,10 @@
15/** 15/**
16 * suspend_device_irqs - disable all currently enabled interrupt lines 16 * suspend_device_irqs - disable all currently enabled interrupt lines
17 * 17 *
18 * During system-wide suspend or hibernation device interrupts need to be 18 * During system-wide suspend or hibernation device drivers need to be prevented
19 * disabled at the chip level and this function is provided for this purpose. 19 * from receiving interrupts and this function is provided for this purpose.
20 * It disables all interrupt lines that are enabled at the moment and sets the 20 * It marks all interrupt lines in use, except for the timer ones, as disabled
21 * IRQ_SUSPENDED flag for them. 21 * and sets the IRQ_SUSPENDED flag for each of them.
22 */ 22 */
23void suspend_device_irqs(void) 23void suspend_device_irqs(void)
24{ 24{
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c
index 89c7117acf2b..090c3763f3a2 100644
--- a/kernel/irq/resend.c
+++ b/kernel/irq/resend.c
@@ -70,8 +70,7 @@ void check_irq_resend(struct irq_desc *desc, unsigned int irq)
70 if ((status & (IRQ_LEVEL | IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) { 70 if ((status & (IRQ_LEVEL | IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
71 desc->status = (status & ~IRQ_PENDING) | IRQ_REPLAY; 71 desc->status = (status & ~IRQ_PENDING) | IRQ_REPLAY;
72 72
73 if (!desc->chip || !desc->chip->retrigger || 73 if (!desc->chip->retrigger || !desc->chip->retrigger(irq)) {
74 !desc->chip->retrigger(irq)) {
75#ifdef CONFIG_HARDIRQS_SW_RESEND 74#ifdef CONFIG_HARDIRQS_SW_RESEND
76 /* Set it pending and activate the softirq: */ 75 /* Set it pending and activate the softirq: */
77 set_bit(irq, irqs_resend); 76 set_bit(irq, irqs_resend);
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index 4d568294de3e..114e704760fe 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -297,7 +297,6 @@ static int __init irqfixup_setup(char *str)
297 297
298__setup("irqfixup", irqfixup_setup); 298__setup("irqfixup", irqfixup_setup);
299module_param(irqfixup, int, 0644); 299module_param(irqfixup, int, 0644);
300MODULE_PARM_DESC("irqfixup", "0: No fixup, 1: irqfixup mode, 2: irqpoll mode");
301 300
302static int __init irqpoll_setup(char *str) 301static int __init irqpoll_setup(char *str)
303{ 302{
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 385c31a1bdbf..9fcb53a11f87 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -37,6 +37,8 @@
37#include <linux/suspend.h> 37#include <linux/suspend.h>
38#include <asm/uaccess.h> 38#include <asm/uaccess.h>
39 39
40#include <trace/events/module.h>
41
40extern int max_threads; 42extern int max_threads;
41 43
42static struct workqueue_struct *khelper_wq; 44static struct workqueue_struct *khelper_wq;
@@ -78,6 +80,10 @@ int __request_module(bool wait, const char *fmt, ...)
78#define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */ 80#define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
79 static int kmod_loop_msg; 81 static int kmod_loop_msg;
80 82
83 ret = security_kernel_module_request();
84 if (ret)
85 return ret;
86
81 va_start(args, fmt); 87 va_start(args, fmt);
82 ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args); 88 ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
83 va_end(args); 89 va_end(args);
@@ -108,6 +114,8 @@ int __request_module(bool wait, const char *fmt, ...)
108 return -ENOMEM; 114 return -ENOMEM;
109 } 115 }
110 116
117 trace_module_request(module_name, wait, _RET_IP_);
118
111 ret = call_usermodehelper(modprobe_path, argv, envp, 119 ret = call_usermodehelper(modprobe_path, argv, envp,
112 wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC); 120 wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
113 atomic_dec(&kmod_concurrent); 121 atomic_dec(&kmod_concurrent);
@@ -462,6 +470,7 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info,
462 int retval = 0; 470 int retval = 0;
463 471
464 BUG_ON(atomic_read(&sub_info->cred->usage) != 1); 472 BUG_ON(atomic_read(&sub_info->cred->usage) != 1);
473 validate_creds(sub_info->cred);
465 474
466 helper_lock(); 475 helper_lock();
467 if (sub_info->path[0] == '\0') 476 if (sub_info->path[0] == '\0')
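The kmod.c hunks gate every module autoload through security_kernel_module_request() and emit a module_request trace event before the usermode helper runs. The call-site shape on the driver side is unchanged; a hypothetical sketch for reference:

#include <linux/kmod.h>

/* Hypothetical: autoload a protocol module by alias. After this patch the
 * request can be denied by the LSM before modprobe is ever invoked. */
static int demo_load_protocol(const char *name)
{
	return request_module("demo-proto-%s", name);
}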
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 0540948e29ab..ef177d653b2c 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -103,7 +103,7 @@ static struct kprobe_blackpoint kprobe_blacklist[] = {
103#define INSNS_PER_PAGE (PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t))) 103#define INSNS_PER_PAGE (PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
104 104
105struct kprobe_insn_page { 105struct kprobe_insn_page {
106 struct hlist_node hlist; 106 struct list_head list;
107 kprobe_opcode_t *insns; /* Page of instruction slots */ 107 kprobe_opcode_t *insns; /* Page of instruction slots */
108 char slot_used[INSNS_PER_PAGE]; 108 char slot_used[INSNS_PER_PAGE];
109 int nused; 109 int nused;
@@ -117,7 +117,7 @@ enum kprobe_slot_state {
117}; 117};
118 118
119static DEFINE_MUTEX(kprobe_insn_mutex); /* Protects kprobe_insn_pages */ 119static DEFINE_MUTEX(kprobe_insn_mutex); /* Protects kprobe_insn_pages */
120static struct hlist_head kprobe_insn_pages; 120static LIST_HEAD(kprobe_insn_pages);
121static int kprobe_garbage_slots; 121static int kprobe_garbage_slots;
122static int collect_garbage_slots(void); 122static int collect_garbage_slots(void);
123 123
@@ -152,10 +152,9 @@ loop_end:
152static kprobe_opcode_t __kprobes *__get_insn_slot(void) 152static kprobe_opcode_t __kprobes *__get_insn_slot(void)
153{ 153{
154 struct kprobe_insn_page *kip; 154 struct kprobe_insn_page *kip;
155 struct hlist_node *pos;
156 155
157 retry: 156 retry:
158 hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) { 157 list_for_each_entry(kip, &kprobe_insn_pages, list) {
159 if (kip->nused < INSNS_PER_PAGE) { 158 if (kip->nused < INSNS_PER_PAGE) {
160 int i; 159 int i;
161 for (i = 0; i < INSNS_PER_PAGE; i++) { 160 for (i = 0; i < INSNS_PER_PAGE; i++) {
@@ -189,8 +188,8 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(void)
189 kfree(kip); 188 kfree(kip);
190 return NULL; 189 return NULL;
191 } 190 }
192 INIT_HLIST_NODE(&kip->hlist); 191 INIT_LIST_HEAD(&kip->list);
193 hlist_add_head(&kip->hlist, &kprobe_insn_pages); 192 list_add(&kip->list, &kprobe_insn_pages);
194 memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE); 193 memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE);
195 kip->slot_used[0] = SLOT_USED; 194 kip->slot_used[0] = SLOT_USED;
196 kip->nused = 1; 195 kip->nused = 1;
@@ -219,12 +218,8 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
219 * so as not to have to set it up again the 218 * so as not to have to set it up again the
220 * next time somebody inserts a probe. 219 * next time somebody inserts a probe.
221 */ 220 */
222 hlist_del(&kip->hlist); 221 if (!list_is_singular(&kprobe_insn_pages)) {
223 if (hlist_empty(&kprobe_insn_pages)) { 222 list_del(&kip->list);
224 INIT_HLIST_NODE(&kip->hlist);
225 hlist_add_head(&kip->hlist,
226 &kprobe_insn_pages);
227 } else {
228 module_free(NULL, kip->insns); 223 module_free(NULL, kip->insns);
229 kfree(kip); 224 kfree(kip);
230 } 225 }
@@ -235,14 +230,13 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
235 230
236static int __kprobes collect_garbage_slots(void) 231static int __kprobes collect_garbage_slots(void)
237{ 232{
238 struct kprobe_insn_page *kip; 233 struct kprobe_insn_page *kip, *next;
239 struct hlist_node *pos, *next;
240 234
241 /* Ensure no-one is preepmted on the garbages */ 235 /* Ensure no-one is preepmted on the garbages */
242 if (check_safety()) 236 if (check_safety())
243 return -EAGAIN; 237 return -EAGAIN;
244 238
245 hlist_for_each_entry_safe(kip, pos, next, &kprobe_insn_pages, hlist) { 239 list_for_each_entry_safe(kip, next, &kprobe_insn_pages, list) {
246 int i; 240 int i;
247 if (kip->ngarbage == 0) 241 if (kip->ngarbage == 0)
248 continue; 242 continue;
@@ -260,19 +254,17 @@ static int __kprobes collect_garbage_slots(void)
260void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty) 254void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty)
261{ 255{
262 struct kprobe_insn_page *kip; 256 struct kprobe_insn_page *kip;
263 struct hlist_node *pos;
264 257
265 mutex_lock(&kprobe_insn_mutex); 258 mutex_lock(&kprobe_insn_mutex);
266 hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) { 259 list_for_each_entry(kip, &kprobe_insn_pages, list) {
267 if (kip->insns <= slot && 260 if (kip->insns <= slot &&
268 slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) { 261 slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
269 int i = (slot - kip->insns) / MAX_INSN_SIZE; 262 int i = (slot - kip->insns) / MAX_INSN_SIZE;
270 if (dirty) { 263 if (dirty) {
271 kip->slot_used[i] = SLOT_DIRTY; 264 kip->slot_used[i] = SLOT_DIRTY;
272 kip->ngarbage++; 265 kip->ngarbage++;
273 } else { 266 } else
274 collect_one_slot(kip, i); 267 collect_one_slot(kip, i);
275 }
276 break; 268 break;
277 } 269 }
278 } 270 }
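The kprobes conversion above drops the hlist in favour of a plain list_head, since kprobe_insn_pages never benefited from hlist's compact head and the list API avoids the extra struct hlist_node cursor. The same idiom in isolation, with hypothetical types:

#include <linux/list.h>
#include <linux/slab.h>

struct demo_page {
	struct list_head list;
	int nused;
};

static LIST_HEAD(demo_pages);

static struct demo_page *demo_add_page(void)
{
	struct demo_page *p = kzalloc(sizeof(*p), GFP_KERNEL);

	if (!p)
		return NULL;
	INIT_LIST_HEAD(&p->list);
	list_add(&p->list, &demo_pages);
	return p;
}

static void demo_collect_garbage(void)
{
	struct demo_page *p, *next;

	list_for_each_entry_safe(p, next, &demo_pages, list) {
		if (p->nused)
			continue;
		/* like collect_one_slot(): keep the last page cached */
		if (list_is_singular(&demo_pages))
			continue;
		list_del(&p->list);
		kfree(p);
	}
}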
diff --git a/kernel/kthread.c b/kernel/kthread.c
index eb8751aa0418..5fe709982caa 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -16,8 +16,6 @@
16#include <linux/mutex.h> 16#include <linux/mutex.h>
17#include <trace/events/sched.h> 17#include <trace/events/sched.h>
18 18
19#define KTHREAD_NICE_LEVEL (-5)
20
21static DEFINE_SPINLOCK(kthread_create_lock); 19static DEFINE_SPINLOCK(kthread_create_lock);
22static LIST_HEAD(kthread_create_list); 20static LIST_HEAD(kthread_create_list);
23struct task_struct *kthreadd_task; 21struct task_struct *kthreadd_task;
@@ -145,7 +143,6 @@ struct task_struct *kthread_create(int (*threadfn)(void *data),
145 * The kernel thread should not inherit these properties. 143 * The kernel thread should not inherit these properties.
146 */ 144 */
147 sched_setscheduler_nocheck(create.result, SCHED_NORMAL, &param); 145 sched_setscheduler_nocheck(create.result, SCHED_NORMAL, &param);
148 set_user_nice(create.result, KTHREAD_NICE_LEVEL);
149 set_cpus_allowed_ptr(create.result, cpu_all_mask); 146 set_cpus_allowed_ptr(create.result, cpu_all_mask);
150 } 147 }
151 return create.result; 148 return create.result;
@@ -221,7 +218,6 @@ int kthreadd(void *unused)
221 /* Setup a clean context for our children to inherit. */ 218 /* Setup a clean context for our children to inherit. */
222 set_task_comm(tsk, "kthreadd"); 219 set_task_comm(tsk, "kthreadd");
223 ignore_signals(tsk); 220 ignore_signals(tsk);
224 set_user_nice(tsk, KTHREAD_NICE_LEVEL);
225 set_cpus_allowed_ptr(tsk, cpu_all_mask); 221 set_cpus_allowed_ptr(tsk, cpu_all_mask);
226 set_mems_allowed(node_possible_map); 222 set_mems_allowed(node_possible_map);
227 223
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 8bbeef996c76..f74d2d7aa605 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -42,6 +42,7 @@
42#include <linux/hash.h> 42#include <linux/hash.h>
43#include <linux/ftrace.h> 43#include <linux/ftrace.h>
44#include <linux/stringify.h> 44#include <linux/stringify.h>
45#include <linux/bitops.h>
45 46
46#include <asm/sections.h> 47#include <asm/sections.h>
47 48
@@ -366,11 +367,21 @@ static int save_trace(struct stack_trace *trace)
366 367
367 save_stack_trace(trace); 368 save_stack_trace(trace);
368 369
370 /*
 371	 * Some daft arches put -1 at the end to indicate it's a full trace.
372 *
373 * <rant> this is buggy anyway, since it takes a whole extra entry so a
374 * complete trace that maxes out the entries provided will be reported
375 * as incomplete, friggin useless </rant>
376 */
377 if (trace->entries[trace->nr_entries-1] == ULONG_MAX)
378 trace->nr_entries--;
379
369 trace->max_entries = trace->nr_entries; 380 trace->max_entries = trace->nr_entries;
370 381
371 nr_stack_trace_entries += trace->nr_entries; 382 nr_stack_trace_entries += trace->nr_entries;
372 383
373 if (nr_stack_trace_entries == MAX_STACK_TRACE_ENTRIES) { 384 if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
374 if (!debug_locks_off_graph_unlock()) 385 if (!debug_locks_off_graph_unlock())
375 return 0; 386 return 0;
376 387
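
The save_trace() change above strips the trailing ULONG_MAX terminator that some architectures append to a saved trace, so a maxed-out trace is no longer reported one entry short. A minimal standalone sketch of the same trim, assuming a ULONG_MAX-terminated buffer (names invented for illustration):

#include <limits.h>
#include <stdio.h>

/* Drop a trailing ULONG_MAX terminator so it doesn't count as an entry. */
static unsigned int trim_sentinel(unsigned long *entries, unsigned int nr)
{
	if (nr && entries[nr - 1] == ULONG_MAX)
		nr--;
	return nr;
}

int main(void)
{
	unsigned long trace[] = { 0x1000, 0x2000, 0x3000, ULONG_MAX };
	unsigned int nr = trim_sentinel(trace, 4);

	printf("%u usable entries\n", nr);	/* 3 usable entries */
	return 0;
}
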
@@ -388,20 +399,6 @@ unsigned int nr_hardirq_chains;
388unsigned int nr_softirq_chains; 399unsigned int nr_softirq_chains;
389unsigned int nr_process_chains; 400unsigned int nr_process_chains;
390unsigned int max_lockdep_depth; 401unsigned int max_lockdep_depth;
391unsigned int max_recursion_depth;
392
393static unsigned int lockdep_dependency_gen_id;
394
395static bool lockdep_dependency_visit(struct lock_class *source,
396 unsigned int depth)
397{
398 if (!depth)
399 lockdep_dependency_gen_id++;
400 if (source->dep_gen_id == lockdep_dependency_gen_id)
401 return true;
402 source->dep_gen_id = lockdep_dependency_gen_id;
403 return false;
404}
405 402
406#ifdef CONFIG_DEBUG_LOCKDEP 403#ifdef CONFIG_DEBUG_LOCKDEP
407/* 404/*
@@ -431,11 +428,8 @@ atomic_t redundant_softirqs_on;
431atomic_t redundant_softirqs_off; 428atomic_t redundant_softirqs_off;
432atomic_t nr_unused_locks; 429atomic_t nr_unused_locks;
433atomic_t nr_cyclic_checks; 430atomic_t nr_cyclic_checks;
434atomic_t nr_cyclic_check_recursions;
435atomic_t nr_find_usage_forwards_checks; 431atomic_t nr_find_usage_forwards_checks;
436atomic_t nr_find_usage_forwards_recursions;
437atomic_t nr_find_usage_backwards_checks; 432atomic_t nr_find_usage_backwards_checks;
438atomic_t nr_find_usage_backwards_recursions;
439#endif 433#endif
440 434
441/* 435/*
@@ -551,58 +545,6 @@ static void lockdep_print_held_locks(struct task_struct *curr)
551 } 545 }
552} 546}
553 547
554static void print_lock_class_header(struct lock_class *class, int depth)
555{
556 int bit;
557
558 printk("%*s->", depth, "");
559 print_lock_name(class);
560 printk(" ops: %lu", class->ops);
561 printk(" {\n");
562
563 for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
564 if (class->usage_mask & (1 << bit)) {
565 int len = depth;
566
567 len += printk("%*s %s", depth, "", usage_str[bit]);
568 len += printk(" at:\n");
569 print_stack_trace(class->usage_traces + bit, len);
570 }
571 }
572 printk("%*s }\n", depth, "");
573
574 printk("%*s ... key at: ",depth,"");
575 print_ip_sym((unsigned long)class->key);
576}
577
578/*
579 * printk all lock dependencies starting at <entry>:
580 */
581static void __used
582print_lock_dependencies(struct lock_class *class, int depth)
583{
584 struct lock_list *entry;
585
586 if (lockdep_dependency_visit(class, depth))
587 return;
588
589 if (DEBUG_LOCKS_WARN_ON(depth >= 20))
590 return;
591
592 print_lock_class_header(class, depth);
593
594 list_for_each_entry(entry, &class->locks_after, entry) {
595 if (DEBUG_LOCKS_WARN_ON(!entry->class))
596 return;
597
598 print_lock_dependencies(entry->class, depth + 1);
599
600 printk("%*s ... acquired at:\n",depth,"");
601 print_stack_trace(&entry->trace, 2);
602 printk("\n");
603 }
604}
605
606static void print_kernel_version(void) 548static void print_kernel_version(void)
607{ 549{
608 printk("%s %.*s\n", init_utsname()->release, 550 printk("%s %.*s\n", init_utsname()->release,
@@ -898,22 +840,203 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
898} 840}
899 841
900/* 842/*
 843	 * For efficiency of the modulo operation, we use a power of 2
844 */
845#define MAX_CIRCULAR_QUEUE_SIZE 4096UL
846#define CQ_MASK (MAX_CIRCULAR_QUEUE_SIZE-1)
847
848/*
 849	 * The circular_queue and helpers are used to implement the
 850	 * breadth-first search (BFS) algorithm, by which we can build
 851	 * the shortest path from the next lock to be acquired back to a
 852	 * previously held lock if there is a circular dependency between them.
853 */
854struct circular_queue {
855 unsigned long element[MAX_CIRCULAR_QUEUE_SIZE];
856 unsigned int front, rear;
857};
858
859static struct circular_queue lock_cq;
860
861unsigned int max_bfs_queue_depth;
862
863static unsigned int lockdep_dependency_gen_id;
864
865static inline void __cq_init(struct circular_queue *cq)
866{
867 cq->front = cq->rear = 0;
868 lockdep_dependency_gen_id++;
869}
870
871static inline int __cq_empty(struct circular_queue *cq)
872{
873 return (cq->front == cq->rear);
874}
875
876static inline int __cq_full(struct circular_queue *cq)
877{
878 return ((cq->rear + 1) & CQ_MASK) == cq->front;
879}
880
881static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
882{
883 if (__cq_full(cq))
884 return -1;
885
886 cq->element[cq->rear] = elem;
887 cq->rear = (cq->rear + 1) & CQ_MASK;
888 return 0;
889}
890
891static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
892{
893 if (__cq_empty(cq))
894 return -1;
895
896 *elem = cq->element[cq->front];
897 cq->front = (cq->front + 1) & CQ_MASK;
898 return 0;
899}
900
901static inline unsigned int __cq_get_elem_count(struct circular_queue *cq)
902{
903 return (cq->rear - cq->front) & CQ_MASK;
904}
905
906static inline void mark_lock_accessed(struct lock_list *lock,
907 struct lock_list *parent)
908{
909 unsigned long nr;
910
911 nr = lock - list_entries;
912 WARN_ON(nr >= nr_list_entries);
913 lock->parent = parent;
914 lock->class->dep_gen_id = lockdep_dependency_gen_id;
915}
916
917static inline unsigned long lock_accessed(struct lock_list *lock)
918{
919 unsigned long nr;
920
921 nr = lock - list_entries;
922 WARN_ON(nr >= nr_list_entries);
923 return lock->class->dep_gen_id == lockdep_dependency_gen_id;
924}
925
926static inline struct lock_list *get_lock_parent(struct lock_list *child)
927{
928 return child->parent;
929}
930
931static inline int get_lock_depth(struct lock_list *child)
932{
933 int depth = 0;
934 struct lock_list *parent;
935
936 while ((parent = get_lock_parent(child))) {
937 child = parent;
938 depth++;
939 }
940 return depth;
941}
942
943static int __bfs(struct lock_list *source_entry,
944 void *data,
945 int (*match)(struct lock_list *entry, void *data),
946 struct lock_list **target_entry,
947 int forward)
948{
949 struct lock_list *entry;
950 struct list_head *head;
951 struct circular_queue *cq = &lock_cq;
952 int ret = 1;
953
954 if (match(source_entry, data)) {
955 *target_entry = source_entry;
956 ret = 0;
957 goto exit;
958 }
959
960 if (forward)
961 head = &source_entry->class->locks_after;
962 else
963 head = &source_entry->class->locks_before;
964
965 if (list_empty(head))
966 goto exit;
967
968 __cq_init(cq);
969 __cq_enqueue(cq, (unsigned long)source_entry);
970
971 while (!__cq_empty(cq)) {
972 struct lock_list *lock;
973
974 __cq_dequeue(cq, (unsigned long *)&lock);
975
976 if (!lock->class) {
977 ret = -2;
978 goto exit;
979 }
980
981 if (forward)
982 head = &lock->class->locks_after;
983 else
984 head = &lock->class->locks_before;
985
986 list_for_each_entry(entry, head, entry) {
987 if (!lock_accessed(entry)) {
988 unsigned int cq_depth;
989 mark_lock_accessed(entry, lock);
990 if (match(entry, data)) {
991 *target_entry = entry;
992 ret = 0;
993 goto exit;
994 }
995
996 if (__cq_enqueue(cq, (unsigned long)entry)) {
997 ret = -1;
998 goto exit;
999 }
1000 cq_depth = __cq_get_elem_count(cq);
1001 if (max_bfs_queue_depth < cq_depth)
1002 max_bfs_queue_depth = cq_depth;
1003 }
1004 }
1005 }
1006exit:
1007 return ret;
1008}
1009
1010static inline int __bfs_forwards(struct lock_list *src_entry,
1011 void *data,
1012 int (*match)(struct lock_list *entry, void *data),
1013 struct lock_list **target_entry)
1014{
1015 return __bfs(src_entry, data, match, target_entry, 1);
1016
1017}
1018
1019static inline int __bfs_backwards(struct lock_list *src_entry,
1020 void *data,
1021 int (*match)(struct lock_list *entry, void *data),
1022 struct lock_list **target_entry)
1023{
1024 return __bfs(src_entry, data, match, target_entry, 0);
1025
1026}
1027
1028/*
901 * Recursive, forwards-direction lock-dependency checking, used for 1029 * Recursive, forwards-direction lock-dependency checking, used for
902 * both noncyclic checking and for hardirq-unsafe/softirq-unsafe 1030 * both noncyclic checking and for hardirq-unsafe/softirq-unsafe
903 * checking. 1031 * checking.
904 *
905 * (to keep the stackframe of the recursive functions small we
906 * use these global variables, and we also mark various helper
907 * functions as noinline.)
908 */ 1032 */
909static struct held_lock *check_source, *check_target;
910 1033
911/* 1034/*
912 * Print a dependency chain entry (this is only done when a deadlock 1035 * Print a dependency chain entry (this is only done when a deadlock
913 * has been detected): 1036 * has been detected):
914 */ 1037 */
915static noinline int 1038static noinline int
916print_circular_bug_entry(struct lock_list *target, unsigned int depth) 1039print_circular_bug_entry(struct lock_list *target, int depth)
917{ 1040{
918 if (debug_locks_silent) 1041 if (debug_locks_silent)
919 return 0; 1042 return 0;
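
Taken together, this hunk replaces the old depth-limited recursion with an iterative breadth-first search: a fixed power-of-two ring buffer (lock_cq) holds pointers to lock_list entries, a per-search generation counter (lockdep_dependency_gen_id) marks classes as visited, and __bfs() records each entry's parent so the shortest offending path can be rebuilt afterwards. The return convention is 0 for "match found" (with *target_entry set), 1 for "no match", and negative values for internal errors such as queue overflow or a corrupt entry. Below is a hedged, standalone sketch of the same idea -- a power-of-two ring queue driving a BFS with parent links over a small adjacency-list graph; the graph and all names are made up for illustration and are not kernel code.

#include <stdio.h>

#define QSZ   16u			/* power of two, so wrap-around is a mask */
#define QMASK (QSZ - 1)
#define NNODES 6

struct queue {
	int elem[QSZ];
	unsigned int front, rear;
};

static int q_empty(struct queue *q) { return q->front == q->rear; }
static int q_full(struct queue *q)  { return ((q->rear + 1) & QMASK) == q->front; }

static int q_push(struct queue *q, int v)
{
	if (q_full(q))
		return -1;
	q->elem[q->rear] = v;
	q->rear = (q->rear + 1) & QMASK;
	return 0;
}

static int q_pop(struct queue *q, int *v)
{
	if (q_empty(q))
		return -1;
	*v = q->elem[q->front];
	q->front = (q->front + 1) & QMASK;
	return 0;
}

/* adjacency[i][j] != 0 means an edge i -> j */
static const int adjacency[NNODES][NNODES] = {
	[0] = { [1] = 1, [2] = 1 },
	[1] = { [3] = 1 },
	[2] = { [3] = 1, [4] = 1 },
	[4] = { [5] = 1 },
};

/* BFS from 'src' to 'dst'; 0 = found, 1 = not found, -1 = queue overflow. */
static int bfs(int src, int dst, int parent[NNODES])
{
	struct queue q = { .front = 0, .rear = 0 };
	int seen[NNODES] = { 0 };
	int cur;

	for (int i = 0; i < NNODES; i++)
		parent[i] = -1;

	seen[src] = 1;
	if (q_push(&q, src))
		return -1;

	while (!q_pop(&q, &cur)) {
		if (cur == dst)
			return 0;
		for (int next = 0; next < NNODES; next++) {
			if (adjacency[cur][next] && !seen[next]) {
				seen[next] = 1;
				parent[next] = cur;	/* remember the path */
				if (q_push(&q, next))
					return -1;
			}
		}
	}
	return 1;
}

int main(void)
{
	int parent[NNODES];

	if (bfs(0, 5, parent) == 0)
		for (int n = 5; n != -1; n = parent[n])
			printf("%d\n", n);	/* 5 4 2 0: shortest path, reversed */
	return 0;
}
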
@@ -930,11 +1053,13 @@ print_circular_bug_entry(struct lock_list *target, unsigned int depth)
930 * header first: 1053 * header first:
931 */ 1054 */
932static noinline int 1055static noinline int
933print_circular_bug_header(struct lock_list *entry, unsigned int depth) 1056print_circular_bug_header(struct lock_list *entry, unsigned int depth,
1057 struct held_lock *check_src,
1058 struct held_lock *check_tgt)
934{ 1059{
935 struct task_struct *curr = current; 1060 struct task_struct *curr = current;
936 1061
937 if (!debug_locks_off_graph_unlock() || debug_locks_silent) 1062 if (debug_locks_silent)
938 return 0; 1063 return 0;
939 1064
940 printk("\n=======================================================\n"); 1065 printk("\n=======================================================\n");
@@ -943,9 +1068,9 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth)
943 printk( "-------------------------------------------------------\n"); 1068 printk( "-------------------------------------------------------\n");
944 printk("%s/%d is trying to acquire lock:\n", 1069 printk("%s/%d is trying to acquire lock:\n",
945 curr->comm, task_pid_nr(curr)); 1070 curr->comm, task_pid_nr(curr));
946 print_lock(check_source); 1071 print_lock(check_src);
947 printk("\nbut task is already holding lock:\n"); 1072 printk("\nbut task is already holding lock:\n");
948 print_lock(check_target); 1073 print_lock(check_tgt);
949 printk("\nwhich lock already depends on the new lock.\n\n"); 1074 printk("\nwhich lock already depends on the new lock.\n\n");
950 printk("\nthe existing dependency chain (in reverse order) is:\n"); 1075 printk("\nthe existing dependency chain (in reverse order) is:\n");
951 1076
@@ -954,19 +1079,36 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth)
954 return 0; 1079 return 0;
955} 1080}
956 1081
957static noinline int print_circular_bug_tail(void) 1082static inline int class_equal(struct lock_list *entry, void *data)
1083{
1084 return entry->class == data;
1085}
1086
1087static noinline int print_circular_bug(struct lock_list *this,
1088 struct lock_list *target,
1089 struct held_lock *check_src,
1090 struct held_lock *check_tgt)
958{ 1091{
959 struct task_struct *curr = current; 1092 struct task_struct *curr = current;
960 struct lock_list this; 1093 struct lock_list *parent;
1094 int depth;
961 1095
962 if (debug_locks_silent) 1096 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
963 return 0; 1097 return 0;
964 1098
965 this.class = hlock_class(check_source); 1099 if (!save_trace(&this->trace))
966 if (!save_trace(&this.trace))
967 return 0; 1100 return 0;
968 1101
969 print_circular_bug_entry(&this, 0); 1102 depth = get_lock_depth(target);
1103
1104 print_circular_bug_header(target, depth, check_src, check_tgt);
1105
1106 parent = get_lock_parent(target);
1107
1108 while (parent) {
1109 print_circular_bug_entry(parent, --depth);
1110 parent = get_lock_parent(parent);
1111 }
970 1112
971 printk("\nother info that might help us debug this:\n\n"); 1113 printk("\nother info that might help us debug this:\n\n");
972 lockdep_print_held_locks(curr); 1114 lockdep_print_held_locks(curr);
@@ -977,73 +1119,69 @@ static noinline int print_circular_bug_tail(void)
977 return 0; 1119 return 0;
978} 1120}
979 1121
980#define RECURSION_LIMIT 40 1122static noinline int print_bfs_bug(int ret)
981
982static int noinline print_infinite_recursion_bug(void)
983{ 1123{
984 if (!debug_locks_off_graph_unlock()) 1124 if (!debug_locks_off_graph_unlock())
985 return 0; 1125 return 0;
986 1126
987 WARN_ON(1); 1127 WARN(1, "lockdep bfs error:%d\n", ret);
988 1128
989 return 0; 1129 return 0;
990} 1130}
991 1131
992unsigned long __lockdep_count_forward_deps(struct lock_class *class, 1132static int noop_count(struct lock_list *entry, void *data)
993 unsigned int depth)
994{ 1133{
995 struct lock_list *entry; 1134 (*(unsigned long *)data)++;
996 unsigned long ret = 1; 1135 return 0;
1136}
997 1137
998 if (lockdep_dependency_visit(class, depth)) 1138unsigned long __lockdep_count_forward_deps(struct lock_list *this)
999 return 0; 1139{
1140 unsigned long count = 0;
1141 struct lock_list *uninitialized_var(target_entry);
1000 1142
1001 /* 1143 __bfs_forwards(this, (void *)&count, noop_count, &target_entry);
1002 * Recurse this class's dependency list:
1003 */
1004 list_for_each_entry(entry, &class->locks_after, entry)
1005 ret += __lockdep_count_forward_deps(entry->class, depth + 1);
1006 1144
1007 return ret; 1145 return count;
1008} 1146}
1009
1010unsigned long lockdep_count_forward_deps(struct lock_class *class) 1147unsigned long lockdep_count_forward_deps(struct lock_class *class)
1011{ 1148{
1012 unsigned long ret, flags; 1149 unsigned long ret, flags;
1150 struct lock_list this;
1151
1152 this.parent = NULL;
1153 this.class = class;
1013 1154
1014 local_irq_save(flags); 1155 local_irq_save(flags);
1015 __raw_spin_lock(&lockdep_lock); 1156 __raw_spin_lock(&lockdep_lock);
1016 ret = __lockdep_count_forward_deps(class, 0); 1157 ret = __lockdep_count_forward_deps(&this);
1017 __raw_spin_unlock(&lockdep_lock); 1158 __raw_spin_unlock(&lockdep_lock);
1018 local_irq_restore(flags); 1159 local_irq_restore(flags);
1019 1160
1020 return ret; 1161 return ret;
1021} 1162}
1022 1163
1023unsigned long __lockdep_count_backward_deps(struct lock_class *class, 1164unsigned long __lockdep_count_backward_deps(struct lock_list *this)
1024 unsigned int depth)
1025{ 1165{
1026 struct lock_list *entry; 1166 unsigned long count = 0;
1027 unsigned long ret = 1; 1167 struct lock_list *uninitialized_var(target_entry);
1028 1168
1029 if (lockdep_dependency_visit(class, depth)) 1169 __bfs_backwards(this, (void *)&count, noop_count, &target_entry);
1030 return 0;
1031 /*
1032 * Recurse this class's dependency list:
1033 */
1034 list_for_each_entry(entry, &class->locks_before, entry)
1035 ret += __lockdep_count_backward_deps(entry->class, depth + 1);
1036 1170
1037 return ret; 1171 return count;
1038} 1172}
1039 1173
1040unsigned long lockdep_count_backward_deps(struct lock_class *class) 1174unsigned long lockdep_count_backward_deps(struct lock_class *class)
1041{ 1175{
1042 unsigned long ret, flags; 1176 unsigned long ret, flags;
1177 struct lock_list this;
1178
1179 this.parent = NULL;
1180 this.class = class;
1043 1181
1044 local_irq_save(flags); 1182 local_irq_save(flags);
1045 __raw_spin_lock(&lockdep_lock); 1183 __raw_spin_lock(&lockdep_lock);
1046 ret = __lockdep_count_backward_deps(class, 0); 1184 ret = __lockdep_count_backward_deps(&this);
1047 __raw_spin_unlock(&lockdep_lock); 1185 __raw_spin_unlock(&lockdep_lock);
1048 local_irq_restore(flags); 1186 local_irq_restore(flags);
1049 1187
@@ -1055,29 +1193,16 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
1055 * lead to <target>. Print an error and return 0 if it does. 1193 * lead to <target>. Print an error and return 0 if it does.
1056 */ 1194 */
1057static noinline int 1195static noinline int
1058check_noncircular(struct lock_class *source, unsigned int depth) 1196check_noncircular(struct lock_list *root, struct lock_class *target,
1197 struct lock_list **target_entry)
1059{ 1198{
1060 struct lock_list *entry; 1199 int result;
1061 1200
1062 if (lockdep_dependency_visit(source, depth)) 1201 debug_atomic_inc(&nr_cyclic_checks);
1063 return 1;
1064 1202
1065 debug_atomic_inc(&nr_cyclic_check_recursions); 1203 result = __bfs_forwards(root, target, class_equal, target_entry);
1066 if (depth > max_recursion_depth) 1204
1067 max_recursion_depth = depth; 1205 return result;
1068 if (depth >= RECURSION_LIMIT)
1069 return print_infinite_recursion_bug();
1070 /*
1071 * Check this lock's dependency list:
1072 */
1073 list_for_each_entry(entry, &source->locks_after, entry) {
1074 if (entry->class == hlock_class(check_target))
1075 return print_circular_bug_header(entry, depth+1);
1076 debug_atomic_inc(&nr_cyclic_checks);
1077 if (!check_noncircular(entry->class, depth+1))
1078 return print_circular_bug_entry(entry, depth+1);
1079 }
1080 return 1;
1081} 1206}
1082 1207
1083#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) 1208#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
@@ -1086,103 +1211,121 @@ check_noncircular(struct lock_class *source, unsigned int depth)
1086 * proving that two subgraphs can be connected by a new dependency 1211 * proving that two subgraphs can be connected by a new dependency
1087 * without creating any illegal irq-safe -> irq-unsafe lock dependency. 1212 * without creating any illegal irq-safe -> irq-unsafe lock dependency.
1088 */ 1213 */
1089static enum lock_usage_bit find_usage_bit; 1214
1090static struct lock_class *forwards_match, *backwards_match; 1215static inline int usage_match(struct lock_list *entry, void *bit)
1216{
1217 return entry->class->usage_mask & (1 << (enum lock_usage_bit)bit);
1218}
1219
1220
1091 1221
1092/* 1222/*
1093 * Find a node in the forwards-direction dependency sub-graph starting 1223 * Find a node in the forwards-direction dependency sub-graph starting
1094 * at <source> that matches <find_usage_bit>. 1224 * at @root->class that matches @bit.
1095 * 1225 *
1096 * Return 2 if such a node exists in the subgraph, and put that node 1226 * Return 0 if such a node exists in the subgraph, and put that node
1097 * into <forwards_match>. 1227 * into *@target_entry.
1098 * 1228 *
1099 * Return 1 otherwise and keep <forwards_match> unchanged. 1229 * Return 1 otherwise and keep *@target_entry unchanged.
1100 * Return 0 on error. 1230 * Return <0 on error.
1101 */ 1231 */
1102static noinline int 1232static int
1103find_usage_forwards(struct lock_class *source, unsigned int depth) 1233find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
1234 struct lock_list **target_entry)
1104{ 1235{
1105 struct lock_list *entry; 1236 int result;
1106 int ret;
1107
1108 if (lockdep_dependency_visit(source, depth))
1109 return 1;
1110
1111 if (depth > max_recursion_depth)
1112 max_recursion_depth = depth;
1113 if (depth >= RECURSION_LIMIT)
1114 return print_infinite_recursion_bug();
1115 1237
1116 debug_atomic_inc(&nr_find_usage_forwards_checks); 1238 debug_atomic_inc(&nr_find_usage_forwards_checks);
1117 if (source->usage_mask & (1 << find_usage_bit)) {
1118 forwards_match = source;
1119 return 2;
1120 }
1121 1239
1122 /* 1240 result = __bfs_forwards(root, (void *)bit, usage_match, target_entry);
1123 * Check this lock's dependency list: 1241
1124 */ 1242 return result;
1125 list_for_each_entry(entry, &source->locks_after, entry) {
1126 debug_atomic_inc(&nr_find_usage_forwards_recursions);
1127 ret = find_usage_forwards(entry->class, depth+1);
1128 if (ret == 2 || ret == 0)
1129 return ret;
1130 }
1131 return 1;
1132} 1243}
1133 1244
1134/* 1245/*
1135 * Find a node in the backwards-direction dependency sub-graph starting 1246 * Find a node in the backwards-direction dependency sub-graph starting
1136 * at <source> that matches <find_usage_bit>. 1247 * at @root->class that matches @bit.
1137 * 1248 *
1138 * Return 2 if such a node exists in the subgraph, and put that node 1249 * Return 0 if such a node exists in the subgraph, and put that node
1139 * into <backwards_match>. 1250 * into *@target_entry.
1140 * 1251 *
1141 * Return 1 otherwise and keep <backwards_match> unchanged. 1252 * Return 1 otherwise and keep *@target_entry unchanged.
1142 * Return 0 on error. 1253 * Return <0 on error.
1143 */ 1254 */
1144static noinline int 1255static int
1145find_usage_backwards(struct lock_class *source, unsigned int depth) 1256find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
1257 struct lock_list **target_entry)
1146{ 1258{
1147 struct lock_list *entry; 1259 int result;
1148 int ret;
1149 1260
1150 if (lockdep_dependency_visit(source, depth)) 1261 debug_atomic_inc(&nr_find_usage_backwards_checks);
1151 return 1;
1152 1262
1153 if (!__raw_spin_is_locked(&lockdep_lock)) 1263 result = __bfs_backwards(root, (void *)bit, usage_match, target_entry);
1154 return DEBUG_LOCKS_WARN_ON(1);
1155 1264
1156 if (depth > max_recursion_depth) 1265 return result;
1157 max_recursion_depth = depth; 1266}
1158 if (depth >= RECURSION_LIMIT)
1159 return print_infinite_recursion_bug();
1160 1267
1161 debug_atomic_inc(&nr_find_usage_backwards_checks); 1268static void print_lock_class_header(struct lock_class *class, int depth)
1162 if (source->usage_mask & (1 << find_usage_bit)) { 1269{
1163 backwards_match = source; 1270 int bit;
1164 return 2;
1165 }
1166 1271
1167 if (!source && debug_locks_off_graph_unlock()) { 1272 printk("%*s->", depth, "");
1168 WARN_ON(1); 1273 print_lock_name(class);
1169 return 0; 1274 printk(" ops: %lu", class->ops);
1170 } 1275 printk(" {\n");
1171 1276
1172 /* 1277 for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
1173 * Check this lock's dependency list: 1278 if (class->usage_mask & (1 << bit)) {
1174 */ 1279 int len = depth;
1175 list_for_each_entry(entry, &source->locks_before, entry) { 1280
1176 debug_atomic_inc(&nr_find_usage_backwards_recursions); 1281 len += printk("%*s %s", depth, "", usage_str[bit]);
1177 ret = find_usage_backwards(entry->class, depth+1); 1282 len += printk(" at:\n");
1178 if (ret == 2 || ret == 0) 1283 print_stack_trace(class->usage_traces + bit, len);
1179 return ret; 1284 }
1180 } 1285 }
1181 return 1; 1286 printk("%*s }\n", depth, "");
1287
1288 printk("%*s ... key at: ",depth,"");
1289 print_ip_sym((unsigned long)class->key);
1290}
1291
1292/*
 1293 * printk the shortest lock dependencies from @leaf to @root in reverse order:
1294 */
1295static void __used
1296print_shortest_lock_dependencies(struct lock_list *leaf,
1297 struct lock_list *root)
1298{
1299 struct lock_list *entry = leaf;
1300 int depth;
1301
 1302	/* compute depth from the tree generated by BFS */
1303 depth = get_lock_depth(leaf);
1304
1305 do {
1306 print_lock_class_header(entry->class, depth);
1307 printk("%*s ... acquired at:\n", depth, "");
1308 print_stack_trace(&entry->trace, 2);
1309 printk("\n");
1310
1311 if (depth == 0 && (entry != root)) {
1312 printk("lockdep:%s bad BFS generated tree\n", __func__);
1313 break;
1314 }
1315
1316 entry = get_lock_parent(entry);
1317 depth--;
1318 } while (entry && (depth >= 0));
1319
1320 return;
1182} 1321}
1183 1322
1184static int 1323static int
1185print_bad_irq_dependency(struct task_struct *curr, 1324print_bad_irq_dependency(struct task_struct *curr,
1325 struct lock_list *prev_root,
1326 struct lock_list *next_root,
1327 struct lock_list *backwards_entry,
1328 struct lock_list *forwards_entry,
1186 struct held_lock *prev, 1329 struct held_lock *prev,
1187 struct held_lock *next, 1330 struct held_lock *next,
1188 enum lock_usage_bit bit1, 1331 enum lock_usage_bit bit1,
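
A detail worth noting in this hunk: find_usage_forwards() and find_usage_backwards() are now thin wrappers that hand a predicate (usage_match) and an opaque argument (the usage bit, smuggled through a void *) to the same __bfs() engine that check_noncircular() drives with class_equal. The standalone sketch below shows the shape of that callback-plus-cookie pattern with a plain linear search; all names are illustrative only.

#include <stdio.h>

/* Generic linear search: one engine, behaviour injected via match()/data. */
static int find(const int *arr, int n,
		int (*match)(int elem, void *data), void *data,
		int *out)
{
	for (int i = 0; i < n; i++) {
		if (match(arr[i], data)) {
			*out = arr[i];
			return 0;	/* found */
		}
	}
	return 1;			/* not found */
}

static int equals(int elem, void *data)
{
	return elem == *(int *)data;
}

static int has_bits(int elem, void *data)
{
	return (elem & *(int *)data) != 0;
}

int main(void)
{
	int vals[] = { 0x01, 0x06, 0x18 };
	int target = 0x06, mask = 0x10, hit;

	if (!find(vals, 3, equals, &target, &hit))
		printf("equal match: 0x%02x\n", hit);	/* 0x06 */
	if (!find(vals, 3, has_bits, &mask, &hit))
		printf("mask match:  0x%02x\n", hit);	/* 0x18 */
	return 0;
}
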
@@ -1215,26 +1358,32 @@ print_bad_irq_dependency(struct task_struct *curr,
1215 1358
1216 printk("\nbut this new dependency connects a %s-irq-safe lock:\n", 1359 printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
1217 irqclass); 1360 irqclass);
1218 print_lock_name(backwards_match); 1361 print_lock_name(backwards_entry->class);
1219 printk("\n... which became %s-irq-safe at:\n", irqclass); 1362 printk("\n... which became %s-irq-safe at:\n", irqclass);
1220 1363
1221 print_stack_trace(backwards_match->usage_traces + bit1, 1); 1364 print_stack_trace(backwards_entry->class->usage_traces + bit1, 1);
1222 1365
1223 printk("\nto a %s-irq-unsafe lock:\n", irqclass); 1366 printk("\nto a %s-irq-unsafe lock:\n", irqclass);
1224 print_lock_name(forwards_match); 1367 print_lock_name(forwards_entry->class);
1225 printk("\n... which became %s-irq-unsafe at:\n", irqclass); 1368 printk("\n... which became %s-irq-unsafe at:\n", irqclass);
1226 printk("..."); 1369 printk("...");
1227 1370
1228 print_stack_trace(forwards_match->usage_traces + bit2, 1); 1371 print_stack_trace(forwards_entry->class->usage_traces + bit2, 1);
1229 1372
1230 printk("\nother info that might help us debug this:\n\n"); 1373 printk("\nother info that might help us debug this:\n\n");
1231 lockdep_print_held_locks(curr); 1374 lockdep_print_held_locks(curr);
1232 1375
1233 printk("\nthe %s-irq-safe lock's dependencies:\n", irqclass); 1376 printk("\nthe dependencies between %s-irq-safe lock", irqclass);
1234 print_lock_dependencies(backwards_match, 0); 1377 printk(" and the holding lock:\n");
1378 if (!save_trace(&prev_root->trace))
1379 return 0;
1380 print_shortest_lock_dependencies(backwards_entry, prev_root);
1235 1381
1236 printk("\nthe %s-irq-unsafe lock's dependencies:\n", irqclass); 1382 printk("\nthe dependencies between the lock to be acquired");
1237 print_lock_dependencies(forwards_match, 0); 1383 printk(" and %s-irq-unsafe lock:\n", irqclass);
1384 if (!save_trace(&next_root->trace))
1385 return 0;
1386 print_shortest_lock_dependencies(forwards_entry, next_root);
1238 1387
1239 printk("\nstack backtrace:\n"); 1388 printk("\nstack backtrace:\n");
1240 dump_stack(); 1389 dump_stack();
@@ -1248,19 +1397,30 @@ check_usage(struct task_struct *curr, struct held_lock *prev,
1248 enum lock_usage_bit bit_forwards, const char *irqclass) 1397 enum lock_usage_bit bit_forwards, const char *irqclass)
1249{ 1398{
1250 int ret; 1399 int ret;
1400 struct lock_list this, that;
1401 struct lock_list *uninitialized_var(target_entry);
1402 struct lock_list *uninitialized_var(target_entry1);
1251 1403
1252 find_usage_bit = bit_backwards; 1404 this.parent = NULL;
1253 /* fills in <backwards_match> */ 1405
1254 ret = find_usage_backwards(hlock_class(prev), 0); 1406 this.class = hlock_class(prev);
1255 if (!ret || ret == 1) 1407 ret = find_usage_backwards(&this, bit_backwards, &target_entry);
1408 if (ret < 0)
1409 return print_bfs_bug(ret);
1410 if (ret == 1)
1256 return ret; 1411 return ret;
1257 1412
1258 find_usage_bit = bit_forwards; 1413 that.parent = NULL;
1259 ret = find_usage_forwards(hlock_class(next), 0); 1414 that.class = hlock_class(next);
1260 if (!ret || ret == 1) 1415 ret = find_usage_forwards(&that, bit_forwards, &target_entry1);
1416 if (ret < 0)
1417 return print_bfs_bug(ret);
1418 if (ret == 1)
1261 return ret; 1419 return ret;
1262 /* ret == 2 */ 1420
1263 return print_bad_irq_dependency(curr, prev, next, 1421 return print_bad_irq_dependency(curr, &this, &that,
1422 target_entry, target_entry1,
1423 prev, next,
1264 bit_backwards, bit_forwards, irqclass); 1424 bit_backwards, bit_forwards, irqclass);
1265} 1425}
1266 1426
@@ -1472,6 +1632,8 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
1472{ 1632{
1473 struct lock_list *entry; 1633 struct lock_list *entry;
1474 int ret; 1634 int ret;
1635 struct lock_list this;
1636 struct lock_list *uninitialized_var(target_entry);
1475 1637
1476 /* 1638 /*
1477 * Prove that the new <prev> -> <next> dependency would not 1639 * Prove that the new <prev> -> <next> dependency would not
@@ -1482,10 +1644,13 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
1482 * We are using global variables to control the recursion, to 1644 * We are using global variables to control the recursion, to
1483 * keep the stackframe size of the recursive functions low: 1645 * keep the stackframe size of the recursive functions low:
1484 */ 1646 */
1485 check_source = next; 1647 this.class = hlock_class(next);
1486 check_target = prev; 1648 this.parent = NULL;
1487 if (!(check_noncircular(hlock_class(next), 0))) 1649 ret = check_noncircular(&this, hlock_class(prev), &target_entry);
1488 return print_circular_bug_tail(); 1650 if (unlikely(!ret))
1651 return print_circular_bug(&this, target_entry, next, prev);
1652 else if (unlikely(ret < 0))
1653 return print_bfs_bug(ret);
1489 1654
1490 if (!check_prev_add_irq(curr, prev, next)) 1655 if (!check_prev_add_irq(curr, prev, next))
1491 return 0; 1656 return 0;
@@ -1884,7 +2049,8 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
1884 * print irq inversion bug: 2049 * print irq inversion bug:
1885 */ 2050 */
1886static int 2051static int
1887print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other, 2052print_irq_inversion_bug(struct task_struct *curr,
2053 struct lock_list *root, struct lock_list *other,
1888 struct held_lock *this, int forwards, 2054 struct held_lock *this, int forwards,
1889 const char *irqclass) 2055 const char *irqclass)
1890{ 2056{
@@ -1902,17 +2068,16 @@ print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
1902 printk("but this lock took another, %s-unsafe lock in the past:\n", irqclass); 2068 printk("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
1903 else 2069 else
1904 printk("but this lock was taken by another, %s-safe lock in the past:\n", irqclass); 2070 printk("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
1905 print_lock_name(other); 2071 print_lock_name(other->class);
1906 printk("\n\nand interrupts could create inverse lock ordering between them.\n\n"); 2072 printk("\n\nand interrupts could create inverse lock ordering between them.\n\n");
1907 2073
1908 printk("\nother info that might help us debug this:\n"); 2074 printk("\nother info that might help us debug this:\n");
1909 lockdep_print_held_locks(curr); 2075 lockdep_print_held_locks(curr);
1910 2076
1911 printk("\nthe first lock's dependencies:\n"); 2077 printk("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
1912 print_lock_dependencies(hlock_class(this), 0); 2078 if (!save_trace(&root->trace))
1913 2079 return 0;
1914 printk("\nthe second lock's dependencies:\n"); 2080 print_shortest_lock_dependencies(other, root);
1915 print_lock_dependencies(other, 0);
1916 2081
1917 printk("\nstack backtrace:\n"); 2082 printk("\nstack backtrace:\n");
1918 dump_stack(); 2083 dump_stack();
@@ -1929,14 +2094,19 @@ check_usage_forwards(struct task_struct *curr, struct held_lock *this,
1929 enum lock_usage_bit bit, const char *irqclass) 2094 enum lock_usage_bit bit, const char *irqclass)
1930{ 2095{
1931 int ret; 2096 int ret;
1932 2097 struct lock_list root;
1933 find_usage_bit = bit; 2098 struct lock_list *uninitialized_var(target_entry);
1934 /* fills in <forwards_match> */ 2099
1935 ret = find_usage_forwards(hlock_class(this), 0); 2100 root.parent = NULL;
1936 if (!ret || ret == 1) 2101 root.class = hlock_class(this);
2102 ret = find_usage_forwards(&root, bit, &target_entry);
2103 if (ret < 0)
2104 return print_bfs_bug(ret);
2105 if (ret == 1)
1937 return ret; 2106 return ret;
1938 2107
1939 return print_irq_inversion_bug(curr, forwards_match, this, 1, irqclass); 2108 return print_irq_inversion_bug(curr, &root, target_entry,
2109 this, 1, irqclass);
1940} 2110}
1941 2111
1942/* 2112/*
@@ -1948,14 +2118,19 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,
1948 enum lock_usage_bit bit, const char *irqclass) 2118 enum lock_usage_bit bit, const char *irqclass)
1949{ 2119{
1950 int ret; 2120 int ret;
1951 2121 struct lock_list root;
1952 find_usage_bit = bit; 2122 struct lock_list *uninitialized_var(target_entry);
1953 /* fills in <backwards_match> */ 2123
1954 ret = find_usage_backwards(hlock_class(this), 0); 2124 root.parent = NULL;
1955 if (!ret || ret == 1) 2125 root.class = hlock_class(this);
2126 ret = find_usage_backwards(&root, bit, &target_entry);
2127 if (ret < 0)
2128 return print_bfs_bug(ret);
2129 if (ret == 1)
1956 return ret; 2130 return ret;
1957 2131
1958 return print_irq_inversion_bug(curr, backwards_match, this, 0, irqclass); 2132 return print_irq_inversion_bug(curr, &root, target_entry,
 2133			this, 0, irqclass);
1959} 2134}
1960 2135
1961void print_irqtrace_events(struct task_struct *curr) 2136void print_irqtrace_events(struct task_struct *curr)
@@ -2530,13 +2705,15 @@ EXPORT_SYMBOL_GPL(lockdep_init_map);
2530 */ 2705 */
2531static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, 2706static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
2532 int trylock, int read, int check, int hardirqs_off, 2707 int trylock, int read, int check, int hardirqs_off,
2533 struct lockdep_map *nest_lock, unsigned long ip) 2708 struct lockdep_map *nest_lock, unsigned long ip,
2709 int references)
2534{ 2710{
2535 struct task_struct *curr = current; 2711 struct task_struct *curr = current;
2536 struct lock_class *class = NULL; 2712 struct lock_class *class = NULL;
2537 struct held_lock *hlock; 2713 struct held_lock *hlock;
2538 unsigned int depth, id; 2714 unsigned int depth, id;
2539 int chain_head = 0; 2715 int chain_head = 0;
2716 int class_idx;
2540 u64 chain_key; 2717 u64 chain_key;
2541 2718
2542 if (!prove_locking) 2719 if (!prove_locking)
@@ -2584,10 +2761,24 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
2584 if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH)) 2761 if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
2585 return 0; 2762 return 0;
2586 2763
2764 class_idx = class - lock_classes + 1;
2765
2766 if (depth) {
2767 hlock = curr->held_locks + depth - 1;
2768 if (hlock->class_idx == class_idx && nest_lock) {
2769 if (hlock->references)
2770 hlock->references++;
2771 else
2772 hlock->references = 2;
2773
2774 return 1;
2775 }
2776 }
2777
2587 hlock = curr->held_locks + depth; 2778 hlock = curr->held_locks + depth;
2588 if (DEBUG_LOCKS_WARN_ON(!class)) 2779 if (DEBUG_LOCKS_WARN_ON(!class))
2589 return 0; 2780 return 0;
2590 hlock->class_idx = class - lock_classes + 1; 2781 hlock->class_idx = class_idx;
2591 hlock->acquire_ip = ip; 2782 hlock->acquire_ip = ip;
2592 hlock->instance = lock; 2783 hlock->instance = lock;
2593 hlock->nest_lock = nest_lock; 2784 hlock->nest_lock = nest_lock;
@@ -2595,6 +2786,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
2595 hlock->read = read; 2786 hlock->read = read;
2596 hlock->check = check; 2787 hlock->check = check;
2597 hlock->hardirqs_off = !!hardirqs_off; 2788 hlock->hardirqs_off = !!hardirqs_off;
2789 hlock->references = references;
2598#ifdef CONFIG_LOCK_STAT 2790#ifdef CONFIG_LOCK_STAT
2599 hlock->waittime_stamp = 0; 2791 hlock->waittime_stamp = 0;
2600 hlock->holdtime_stamp = sched_clock(); 2792 hlock->holdtime_stamp = sched_clock();
@@ -2703,6 +2895,30 @@ static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
2703 return 1; 2895 return 1;
2704} 2896}
2705 2897
2898static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
2899{
2900 if (hlock->instance == lock)
2901 return 1;
2902
2903 if (hlock->references) {
2904 struct lock_class *class = lock->class_cache;
2905
2906 if (!class)
2907 class = look_up_lock_class(lock, 0);
2908
2909 if (DEBUG_LOCKS_WARN_ON(!class))
2910 return 0;
2911
2912 if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock))
2913 return 0;
2914
2915 if (hlock->class_idx == class - lock_classes + 1)
2916 return 1;
2917 }
2918
2919 return 0;
2920}
2921
2706static int 2922static int
2707__lock_set_class(struct lockdep_map *lock, const char *name, 2923__lock_set_class(struct lockdep_map *lock, const char *name,
2708 struct lock_class_key *key, unsigned int subclass, 2924 struct lock_class_key *key, unsigned int subclass,
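
The references field introduced here lets a second acquisition of an already-held lockdep_map (under a nest_lock) simply bump a counter instead of pushing another held_lock entry, and lock_release_non_nested() later decrements that counter before actually popping the stack. A rough standalone sketch of that counting scheme follows, with invented names, no kernel types, and only the top-of-stack case modelled.

#include <stdio.h>
#include <string.h>

#define MAX_DEPTH 8

struct held {
	const char *name;	/* stands in for the lockdep_map instance */
	int refs;		/* 0 = plain entry, >1 = re-acquired that many times */
};

static struct held stack[MAX_DEPTH];
static int depth;

static void acquire(const char *name)
{
	/* Re-acquisition of the top entry only bumps the reference count. */
	if (depth && strcmp(stack[depth - 1].name, name) == 0) {
		if (stack[depth - 1].refs)
			stack[depth - 1].refs++;
		else
			stack[depth - 1].refs = 2;
		return;
	}
	stack[depth].name = name;
	stack[depth].refs = 0;
	depth++;
}

static void release(const char *name)
{
	struct held *top;

	if (!depth)
		return;
	top = &stack[depth - 1];
	if (strcmp(top->name, name) != 0)
		return;			/* non-nested release: not sketched here */
	if (top->refs && --top->refs)
		return;			/* still referenced, keep the entry */
	depth--;
}

int main(void)
{
	acquire("A");
	acquire("A");		/* second acquisition: refs = 2, depth stays 1 */
	printf("depth=%d refs=%d\n", depth, stack[0].refs);	/* depth=1 refs=2 */
	release("A");
	release("A");
	printf("depth=%d\n", depth);				/* depth=0 */
	return 0;
}
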
@@ -2726,7 +2942,7 @@ __lock_set_class(struct lockdep_map *lock, const char *name,
2726 */ 2942 */
2727 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context) 2943 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
2728 break; 2944 break;
2729 if (hlock->instance == lock) 2945 if (match_held_lock(hlock, lock))
2730 goto found_it; 2946 goto found_it;
2731 prev_hlock = hlock; 2947 prev_hlock = hlock;
2732 } 2948 }
@@ -2745,7 +2961,8 @@ found_it:
2745 if (!__lock_acquire(hlock->instance, 2961 if (!__lock_acquire(hlock->instance,
2746 hlock_class(hlock)->subclass, hlock->trylock, 2962 hlock_class(hlock)->subclass, hlock->trylock,
2747 hlock->read, hlock->check, hlock->hardirqs_off, 2963 hlock->read, hlock->check, hlock->hardirqs_off,
2748 hlock->nest_lock, hlock->acquire_ip)) 2964 hlock->nest_lock, hlock->acquire_ip,
2965 hlock->references))
2749 return 0; 2966 return 0;
2750 } 2967 }
2751 2968
@@ -2784,20 +3001,34 @@ lock_release_non_nested(struct task_struct *curr,
2784 */ 3001 */
2785 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context) 3002 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
2786 break; 3003 break;
2787 if (hlock->instance == lock) 3004 if (match_held_lock(hlock, lock))
2788 goto found_it; 3005 goto found_it;
2789 prev_hlock = hlock; 3006 prev_hlock = hlock;
2790 } 3007 }
2791 return print_unlock_inbalance_bug(curr, lock, ip); 3008 return print_unlock_inbalance_bug(curr, lock, ip);
2792 3009
2793found_it: 3010found_it:
2794 lock_release_holdtime(hlock); 3011 if (hlock->instance == lock)
3012 lock_release_holdtime(hlock);
3013
3014 if (hlock->references) {
3015 hlock->references--;
3016 if (hlock->references) {
3017 /*
3018 * We had, and after removing one, still have
3019 * references, the current lock stack is still
3020 * valid. We're done!
3021 */
3022 return 1;
3023 }
3024 }
2795 3025
2796 /* 3026 /*
2797 * We have the right lock to unlock, 'hlock' points to it. 3027 * We have the right lock to unlock, 'hlock' points to it.
2798 * Now we remove it from the stack, and add back the other 3028 * Now we remove it from the stack, and add back the other
2799 * entries (if any), recalculating the hash along the way: 3029 * entries (if any), recalculating the hash along the way:
2800 */ 3030 */
3031
2801 curr->lockdep_depth = i; 3032 curr->lockdep_depth = i;
2802 curr->curr_chain_key = hlock->prev_chain_key; 3033 curr->curr_chain_key = hlock->prev_chain_key;
2803 3034
@@ -2806,7 +3037,8 @@ found_it:
2806 if (!__lock_acquire(hlock->instance, 3037 if (!__lock_acquire(hlock->instance,
2807 hlock_class(hlock)->subclass, hlock->trylock, 3038 hlock_class(hlock)->subclass, hlock->trylock,
2808 hlock->read, hlock->check, hlock->hardirqs_off, 3039 hlock->read, hlock->check, hlock->hardirqs_off,
2809 hlock->nest_lock, hlock->acquire_ip)) 3040 hlock->nest_lock, hlock->acquire_ip,
3041 hlock->references))
2810 return 0; 3042 return 0;
2811 } 3043 }
2812 3044
@@ -2836,7 +3068,7 @@ static int lock_release_nested(struct task_struct *curr,
2836 /* 3068 /*
2837 * Is the unlock non-nested: 3069 * Is the unlock non-nested:
2838 */ 3070 */
2839 if (hlock->instance != lock) 3071 if (hlock->instance != lock || hlock->references)
2840 return lock_release_non_nested(curr, lock, ip); 3072 return lock_release_non_nested(curr, lock, ip);
2841 curr->lockdep_depth--; 3073 curr->lockdep_depth--;
2842 3074
@@ -2881,6 +3113,21 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
2881 check_chain_key(curr); 3113 check_chain_key(curr);
2882} 3114}
2883 3115
3116static int __lock_is_held(struct lockdep_map *lock)
3117{
3118 struct task_struct *curr = current;
3119 int i;
3120
3121 for (i = 0; i < curr->lockdep_depth; i++) {
3122 struct held_lock *hlock = curr->held_locks + i;
3123
3124 if (match_held_lock(hlock, lock))
3125 return 1;
3126 }
3127
3128 return 0;
3129}
3130
2884/* 3131/*
2885 * Check whether we follow the irq-flags state precisely: 3132 * Check whether we follow the irq-flags state precisely:
2886 */ 3133 */
@@ -2957,7 +3204,7 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
2957 3204
2958 current->lockdep_recursion = 1; 3205 current->lockdep_recursion = 1;
2959 __lock_acquire(lock, subclass, trylock, read, check, 3206 __lock_acquire(lock, subclass, trylock, read, check,
2960 irqs_disabled_flags(flags), nest_lock, ip); 3207 irqs_disabled_flags(flags), nest_lock, ip, 0);
2961 current->lockdep_recursion = 0; 3208 current->lockdep_recursion = 0;
2962 raw_local_irq_restore(flags); 3209 raw_local_irq_restore(flags);
2963} 3210}
@@ -2982,6 +3229,26 @@ void lock_release(struct lockdep_map *lock, int nested,
2982} 3229}
2983EXPORT_SYMBOL_GPL(lock_release); 3230EXPORT_SYMBOL_GPL(lock_release);
2984 3231
3232int lock_is_held(struct lockdep_map *lock)
3233{
3234 unsigned long flags;
3235 int ret = 0;
3236
3237 if (unlikely(current->lockdep_recursion))
3238 return ret;
3239
3240 raw_local_irq_save(flags);
3241 check_flags(flags);
3242
3243 current->lockdep_recursion = 1;
3244 ret = __lock_is_held(lock);
3245 current->lockdep_recursion = 0;
3246 raw_local_irq_restore(flags);
3247
3248 return ret;
3249}
3250EXPORT_SYMBOL_GPL(lock_is_held);
3251
2985void lockdep_set_current_reclaim_state(gfp_t gfp_mask) 3252void lockdep_set_current_reclaim_state(gfp_t gfp_mask)
2986{ 3253{
2987 current->lockdep_reclaim_gfp = gfp_mask; 3254 current->lockdep_reclaim_gfp = gfp_mask;
@@ -3041,7 +3308,7 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip)
3041 */ 3308 */
3042 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context) 3309 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
3043 break; 3310 break;
3044 if (hlock->instance == lock) 3311 if (match_held_lock(hlock, lock))
3045 goto found_it; 3312 goto found_it;
3046 prev_hlock = hlock; 3313 prev_hlock = hlock;
3047 } 3314 }
@@ -3049,6 +3316,9 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip)
3049 return; 3316 return;
3050 3317
3051found_it: 3318found_it:
3319 if (hlock->instance != lock)
3320 return;
3321
3052 hlock->waittime_stamp = sched_clock(); 3322 hlock->waittime_stamp = sched_clock();
3053 3323
3054 contention_point = lock_point(hlock_class(hlock)->contention_point, ip); 3324 contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
@@ -3088,7 +3358,7 @@ __lock_acquired(struct lockdep_map *lock, unsigned long ip)
3088 */ 3358 */
3089 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context) 3359 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
3090 break; 3360 break;
3091 if (hlock->instance == lock) 3361 if (match_held_lock(hlock, lock))
3092 goto found_it; 3362 goto found_it;
3093 prev_hlock = hlock; 3363 prev_hlock = hlock;
3094 } 3364 }
@@ -3096,6 +3366,9 @@ __lock_acquired(struct lockdep_map *lock, unsigned long ip)
3096 return; 3366 return;
3097 3367
3098found_it: 3368found_it:
3369 if (hlock->instance != lock)
3370 return;
3371
3099 cpu = smp_processor_id(); 3372 cpu = smp_processor_id();
3100 if (hlock->waittime_stamp) { 3373 if (hlock->waittime_stamp) {
3101 now = sched_clock(); 3374 now = sched_clock();
@@ -3326,7 +3599,12 @@ void __init lockdep_info(void)
3326 sizeof(struct list_head) * CLASSHASH_SIZE + 3599 sizeof(struct list_head) * CLASSHASH_SIZE +
3327 sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES + 3600 sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES +
3328 sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS + 3601 sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS +
3329 sizeof(struct list_head) * CHAINHASH_SIZE) / 1024); 3602 sizeof(struct list_head) * CHAINHASH_SIZE
3603#ifdef CONFIG_PROVE_LOCKING
3604 + sizeof(struct circular_queue)
3605#endif
3606 ) / 1024
3607 );
3330 3608
3331 printk(" per task-struct memory footprint: %lu bytes\n", 3609 printk(" per task-struct memory footprint: %lu bytes\n",
3332 sizeof(struct held_lock) * MAX_LOCK_DEPTH); 3610 sizeof(struct held_lock) * MAX_LOCK_DEPTH);
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
index 699a2ac3a0d7..a2ee95ad1313 100644
--- a/kernel/lockdep_internals.h
+++ b/kernel/lockdep_internals.h
@@ -91,6 +91,8 @@ extern unsigned int nr_process_chains;
91extern unsigned int max_lockdep_depth; 91extern unsigned int max_lockdep_depth;
92extern unsigned int max_recursion_depth; 92extern unsigned int max_recursion_depth;
93 93
94extern unsigned int max_bfs_queue_depth;
95
94#ifdef CONFIG_PROVE_LOCKING 96#ifdef CONFIG_PROVE_LOCKING
95extern unsigned long lockdep_count_forward_deps(struct lock_class *); 97extern unsigned long lockdep_count_forward_deps(struct lock_class *);
96extern unsigned long lockdep_count_backward_deps(struct lock_class *); 98extern unsigned long lockdep_count_backward_deps(struct lock_class *);
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
index e94caa666dba..d4b3dbc79fdb 100644
--- a/kernel/lockdep_proc.c
+++ b/kernel/lockdep_proc.c
@@ -25,38 +25,12 @@
25 25
26static void *l_next(struct seq_file *m, void *v, loff_t *pos) 26static void *l_next(struct seq_file *m, void *v, loff_t *pos)
27{ 27{
28 struct lock_class *class; 28 return seq_list_next(v, &all_lock_classes, pos);
29
30 (*pos)++;
31
32 if (v == SEQ_START_TOKEN)
33 class = m->private;
34 else {
35 class = v;
36
37 if (class->lock_entry.next != &all_lock_classes)
38 class = list_entry(class->lock_entry.next,
39 struct lock_class, lock_entry);
40 else
41 class = NULL;
42 }
43
44 return class;
45} 29}
46 30
47static void *l_start(struct seq_file *m, loff_t *pos) 31static void *l_start(struct seq_file *m, loff_t *pos)
48{ 32{
49 struct lock_class *class; 33 return seq_list_start_head(&all_lock_classes, *pos);
50 loff_t i = 0;
51
52 if (*pos == 0)
53 return SEQ_START_TOKEN;
54
55 list_for_each_entry(class, &all_lock_classes, lock_entry) {
56 if (++i == *pos)
57 return class;
58 }
59 return NULL;
60} 34}
61 35
62static void l_stop(struct seq_file *m, void *v) 36static void l_stop(struct seq_file *m, void *v)
@@ -82,11 +56,11 @@ static void print_name(struct seq_file *m, struct lock_class *class)
82 56
83static int l_show(struct seq_file *m, void *v) 57static int l_show(struct seq_file *m, void *v)
84{ 58{
85 struct lock_class *class = v; 59 struct lock_class *class = list_entry(v, struct lock_class, lock_entry);
86 struct lock_list *entry; 60 struct lock_list *entry;
87 char usage[LOCK_USAGE_CHARS]; 61 char usage[LOCK_USAGE_CHARS];
88 62
89 if (v == SEQ_START_TOKEN) { 63 if (v == &all_lock_classes) {
90 seq_printf(m, "all lock classes:\n"); 64 seq_printf(m, "all lock classes:\n");
91 return 0; 65 return 0;
92 } 66 }
@@ -128,17 +102,7 @@ static const struct seq_operations lockdep_ops = {
128 102
129static int lockdep_open(struct inode *inode, struct file *file) 103static int lockdep_open(struct inode *inode, struct file *file)
130{ 104{
131 int res = seq_open(file, &lockdep_ops); 105 return seq_open(file, &lockdep_ops);
132 if (!res) {
133 struct seq_file *m = file->private_data;
134
135 if (!list_empty(&all_lock_classes))
136 m->private = list_entry(all_lock_classes.next,
137 struct lock_class, lock_entry);
138 else
139 m->private = NULL;
140 }
141 return res;
142} 106}
143 107
144static const struct file_operations proc_lockdep_operations = { 108static const struct file_operations proc_lockdep_operations = {
@@ -149,37 +113,23 @@ static const struct file_operations proc_lockdep_operations = {
149}; 113};
150 114
151#ifdef CONFIG_PROVE_LOCKING 115#ifdef CONFIG_PROVE_LOCKING
152static void *lc_next(struct seq_file *m, void *v, loff_t *pos)
153{
154 struct lock_chain *chain;
155
156 (*pos)++;
157
158 if (v == SEQ_START_TOKEN)
159 chain = m->private;
160 else {
161 chain = v;
162
163 if (*pos < nr_lock_chains)
164 chain = lock_chains + *pos;
165 else
166 chain = NULL;
167 }
168
169 return chain;
170}
171
172static void *lc_start(struct seq_file *m, loff_t *pos) 116static void *lc_start(struct seq_file *m, loff_t *pos)
173{ 117{
174 if (*pos == 0) 118 if (*pos == 0)
175 return SEQ_START_TOKEN; 119 return SEQ_START_TOKEN;
176 120
177 if (*pos < nr_lock_chains) 121 if (*pos - 1 < nr_lock_chains)
178 return lock_chains + *pos; 122 return lock_chains + (*pos - 1);
179 123
180 return NULL; 124 return NULL;
181} 125}
182 126
127static void *lc_next(struct seq_file *m, void *v, loff_t *pos)
128{
129 (*pos)++;
130 return lc_start(m, pos);
131}
132
183static void lc_stop(struct seq_file *m, void *v) 133static void lc_stop(struct seq_file *m, void *v)
184{ 134{
185} 135}
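
The lockdep_proc.c changes collapse each ->next callback into "bump *pos, then call ->start again", and l_start/l_next go through the seq_list_* helpers, so the per-file private cursor (m->private) is no longer needed. A standalone sketch of that iterator shape over a plain array, assuming the SEQ_START_TOKEN convention of a header row at *pos == 0 (names invented):

#include <stdio.h>

#define SEQ_START_TOKEN ((void *)1)

static int chains[] = { 11, 22, 33 };
static const long nr_chains = 3;

/* start(): map a position to an element; position 0 is the header token. */
static void *lc_start(long *pos)
{
	if (*pos == 0)
		return SEQ_START_TOKEN;
	if (*pos - 1 < nr_chains)
		return &chains[*pos - 1];
	return NULL;
}

/* next(): no private cursor needed -- just advance and restart. */
static void *lc_next(void *v, long *pos)
{
	(void)v;
	(*pos)++;
	return lc_start(pos);
}

static void lc_show(void *v)
{
	if (v == SEQ_START_TOKEN)
		printf("all chains:\n");
	else
		printf("  %d\n", *(int *)v);
}

int main(void)
{
	long pos = 0;
	void *v;

	for (v = lc_start(&pos); v; v = lc_next(v, &pos))
		lc_show(v);
	return 0;
}
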
@@ -220,16 +170,7 @@ static const struct seq_operations lockdep_chains_ops = {
220 170
221static int lockdep_chains_open(struct inode *inode, struct file *file) 171static int lockdep_chains_open(struct inode *inode, struct file *file)
222{ 172{
223 int res = seq_open(file, &lockdep_chains_ops); 173 return seq_open(file, &lockdep_chains_ops);
224 if (!res) {
225 struct seq_file *m = file->private_data;
226
227 if (nr_lock_chains)
228 m->private = lock_chains;
229 else
230 m->private = NULL;
231 }
232 return res;
233} 174}
234 175
235static const struct file_operations proc_lockdep_chains_operations = { 176static const struct file_operations proc_lockdep_chains_operations = {
@@ -258,16 +199,10 @@ static void lockdep_stats_debug_show(struct seq_file *m)
258 debug_atomic_read(&chain_lookup_hits)); 199 debug_atomic_read(&chain_lookup_hits));
259 seq_printf(m, " cyclic checks: %11u\n", 200 seq_printf(m, " cyclic checks: %11u\n",
260 debug_atomic_read(&nr_cyclic_checks)); 201 debug_atomic_read(&nr_cyclic_checks));
261 seq_printf(m, " cyclic-check recursions: %11u\n",
262 debug_atomic_read(&nr_cyclic_check_recursions));
263 seq_printf(m, " find-mask forwards checks: %11u\n", 202 seq_printf(m, " find-mask forwards checks: %11u\n",
264 debug_atomic_read(&nr_find_usage_forwards_checks)); 203 debug_atomic_read(&nr_find_usage_forwards_checks));
265 seq_printf(m, " find-mask forwards recursions: %11u\n",
266 debug_atomic_read(&nr_find_usage_forwards_recursions));
267 seq_printf(m, " find-mask backwards checks: %11u\n", 204 seq_printf(m, " find-mask backwards checks: %11u\n",
268 debug_atomic_read(&nr_find_usage_backwards_checks)); 205 debug_atomic_read(&nr_find_usage_backwards_checks));
269 seq_printf(m, " find-mask backwards recursions:%11u\n",
270 debug_atomic_read(&nr_find_usage_backwards_recursions));
271 206
272 seq_printf(m, " hardirq on events: %11u\n", hi1); 207 seq_printf(m, " hardirq on events: %11u\n", hi1);
273 seq_printf(m, " hardirq off events: %11u\n", hi2); 208 seq_printf(m, " hardirq off events: %11u\n", hi2);
@@ -409,8 +344,10 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
409 nr_unused); 344 nr_unused);
410 seq_printf(m, " max locking depth: %11u\n", 345 seq_printf(m, " max locking depth: %11u\n",
411 max_lockdep_depth); 346 max_lockdep_depth);
412 seq_printf(m, " max recursion depth: %11u\n", 347#ifdef CONFIG_PROVE_LOCKING
413 max_recursion_depth); 348 seq_printf(m, " max bfs queue depth: %11u\n",
349 max_bfs_queue_depth);
350#endif
414 lockdep_stats_debug_show(m); 351 lockdep_stats_debug_show(m);
415 seq_printf(m, " debug_locks: %11u\n", 352 seq_printf(m, " debug_locks: %11u\n",
416 debug_locks); 353 debug_locks);
@@ -438,7 +375,6 @@ struct lock_stat_data {
438}; 375};
439 376
440struct lock_stat_seq { 377struct lock_stat_seq {
441 struct lock_stat_data *iter;
442 struct lock_stat_data *iter_end; 378 struct lock_stat_data *iter_end;
443 struct lock_stat_data stats[MAX_LOCKDEP_KEYS]; 379 struct lock_stat_data stats[MAX_LOCKDEP_KEYS];
444}; 380};
@@ -626,34 +562,22 @@ static void seq_header(struct seq_file *m)
626static void *ls_start(struct seq_file *m, loff_t *pos) 562static void *ls_start(struct seq_file *m, loff_t *pos)
627{ 563{
628 struct lock_stat_seq *data = m->private; 564 struct lock_stat_seq *data = m->private;
565 struct lock_stat_data *iter;
629 566
630 if (*pos == 0) 567 if (*pos == 0)
631 return SEQ_START_TOKEN; 568 return SEQ_START_TOKEN;
632 569
633 data->iter = data->stats + *pos; 570 iter = data->stats + (*pos - 1);
634 if (data->iter >= data->iter_end) 571 if (iter >= data->iter_end)
635 data->iter = NULL; 572 iter = NULL;
636 573
637 return data->iter; 574 return iter;
638} 575}
639 576
640static void *ls_next(struct seq_file *m, void *v, loff_t *pos) 577static void *ls_next(struct seq_file *m, void *v, loff_t *pos)
641{ 578{
642 struct lock_stat_seq *data = m->private;
643
644 (*pos)++; 579 (*pos)++;
645 580 return ls_start(m, pos);
646 if (v == SEQ_START_TOKEN)
647 data->iter = data->stats;
648 else {
649 data->iter = v;
650 data->iter++;
651 }
652
653 if (data->iter == data->iter_end)
654 data->iter = NULL;
655
656 return data->iter;
657} 581}
658 582
659static void ls_stop(struct seq_file *m, void *v) 583static void ls_stop(struct seq_file *m, void *v)
@@ -691,7 +615,6 @@ static int lock_stat_open(struct inode *inode, struct file *file)
691 struct lock_stat_data *iter = data->stats; 615 struct lock_stat_data *iter = data->stats;
692 struct seq_file *m = file->private_data; 616 struct seq_file *m = file->private_data;
693 617
694 data->iter = iter;
695 list_for_each_entry(class, &all_lock_classes, lock_entry) { 618 list_for_each_entry(class, &all_lock_classes, lock_entry) {
696 iter->class = class; 619 iter->class = class;
697 iter->stats = lock_stats(class); 620 iter->stats = lock_stats(class);
@@ -699,7 +622,7 @@ static int lock_stat_open(struct inode *inode, struct file *file)
699 } 622 }
700 data->iter_end = iter; 623 data->iter_end = iter;
701 624
702 sort(data->stats, data->iter_end - data->iter, 625 sort(data->stats, data->iter_end - data->stats,
703 sizeof(struct lock_stat_data), 626 sizeof(struct lock_stat_data),
704 lock_stat_cmp, NULL); 627 lock_stat_cmp, NULL);
705 628
@@ -734,7 +657,6 @@ static int lock_stat_release(struct inode *inode, struct file *file)
734 struct seq_file *seq = file->private_data; 657 struct seq_file *seq = file->private_data;
735 658
736 vfree(seq->private); 659 vfree(seq->private);
737 seq->private = NULL;
738 return seq_release(inode, file); 660 return seq_release(inode, file);
739} 661}
740 662
diff --git a/kernel/module.c b/kernel/module.c
index 2d537186191f..46580edff0cb 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -55,6 +55,11 @@
55#include <linux/percpu.h> 55#include <linux/percpu.h>
56#include <linux/kmemleak.h> 56#include <linux/kmemleak.h>
57 57
58#define CREATE_TRACE_POINTS
59#include <trace/events/module.h>
60
61EXPORT_TRACEPOINT_SYMBOL(module_get);
62
58#if 0 63#if 0
59#define DEBUGP printk 64#define DEBUGP printk
60#else 65#else
@@ -942,6 +947,8 @@ void module_put(struct module *module)
942 if (module) { 947 if (module) {
943 unsigned int cpu = get_cpu(); 948 unsigned int cpu = get_cpu();
944 local_dec(__module_ref_addr(module, cpu)); 949 local_dec(__module_ref_addr(module, cpu));
950 trace_module_put(module, _RET_IP_,
951 local_read(__module_ref_addr(module, cpu)));
945 /* Maybe they're waiting for us to drop reference? */ 952 /* Maybe they're waiting for us to drop reference? */
946 if (unlikely(!module_is_live(module))) 953 if (unlikely(!module_is_live(module)))
947 wake_up_process(module->waiter); 954 wake_up_process(module->waiter);
@@ -1497,6 +1504,8 @@ static int __unlink_module(void *_mod)
1497/* Free a module, remove from lists, etc (must hold module_mutex). */ 1504/* Free a module, remove from lists, etc (must hold module_mutex). */
1498static void free_module(struct module *mod) 1505static void free_module(struct module *mod)
1499{ 1506{
1507 trace_module_free(mod);
1508
1500 /* Delete from various lists */ 1509 /* Delete from various lists */
1501 stop_machine(__unlink_module, mod, NULL); 1510 stop_machine(__unlink_module, mod, NULL);
1502 remove_notes_attrs(mod); 1511 remove_notes_attrs(mod);
@@ -2364,6 +2373,8 @@ static noinline struct module *load_module(void __user *umod,
2364 /* Get rid of temporary copy */ 2373 /* Get rid of temporary copy */
2365 vfree(hdr); 2374 vfree(hdr);
2366 2375
2376 trace_module_load(mod);
2377
2367 /* Done! */ 2378 /* Done! */
2368 return mod; 2379 return mod;
2369 2380
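The three new tracepoints (module_load, module_free, module_put) can be hooked like any other TRACE_EVENT. A minimal in-kernel probe might look like the sketch below; the register_trace_module_load()/unregister_trace_module_load() helpers are assumed to be the ones generated from the definitions in trace/events/module.h.

	#include <linux/module.h>
	#include <trace/events/module.h>

	/* Probe invoked whenever a module finishes loading. */
	static void my_load_probe(struct module *mod)
	{
		pr_info("observed module load: %s\n", mod->name);
	}

	/* In the consumer's init/exit paths (error handling omitted): */
	/*	register_trace_module_load(my_load_probe);              */
	/*	unregister_trace_module_load(my_load_probe);            */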
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index f274e1959885..e0d91fdf0c3c 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -46,11 +46,17 @@ static atomic_t nr_task_counters __read_mostly;
46 46
47/* 47/*
48 * perf counter paranoia level: 48 * perf counter paranoia level:
49 * 0 - not paranoid 49 * -1 - not paranoid at all
50 * 1 - disallow cpu counters to unpriv 50 * 0 - disallow raw tracepoint access for unpriv
51 * 2 - disallow kernel profiling to unpriv 51 * 1 - disallow cpu counters for unpriv
52 * 2 - disallow kernel profiling for unpriv
52 */ 53 */
53int sysctl_perf_counter_paranoid __read_mostly; 54int sysctl_perf_counter_paranoid __read_mostly = 1;
55
56static inline bool perf_paranoid_tracepoint_raw(void)
57{
58 return sysctl_perf_counter_paranoid > -1;
59}
54 60
55static inline bool perf_paranoid_cpu(void) 61static inline bool perf_paranoid_cpu(void)
56{ 62{
@@ -469,7 +475,8 @@ static void update_counter_times(struct perf_counter *counter)
469 struct perf_counter_context *ctx = counter->ctx; 475 struct perf_counter_context *ctx = counter->ctx;
470 u64 run_end; 476 u64 run_end;
471 477
472 if (counter->state < PERF_COUNTER_STATE_INACTIVE) 478 if (counter->state < PERF_COUNTER_STATE_INACTIVE ||
479 counter->group_leader->state < PERF_COUNTER_STATE_INACTIVE)
473 return; 480 return;
474 481
475 counter->total_time_enabled = ctx->time - counter->tstamp_enabled; 482 counter->total_time_enabled = ctx->time - counter->tstamp_enabled;
@@ -518,7 +525,7 @@ static void __perf_counter_disable(void *info)
518 */ 525 */
519 if (counter->state >= PERF_COUNTER_STATE_INACTIVE) { 526 if (counter->state >= PERF_COUNTER_STATE_INACTIVE) {
520 update_context_time(ctx); 527 update_context_time(ctx);
521 update_counter_times(counter); 528 update_group_times(counter);
522 if (counter == counter->group_leader) 529 if (counter == counter->group_leader)
523 group_sched_out(counter, cpuctx, ctx); 530 group_sched_out(counter, cpuctx, ctx);
524 else 531 else
@@ -573,7 +580,7 @@ static void perf_counter_disable(struct perf_counter *counter)
573 * in, so we can change the state safely. 580 * in, so we can change the state safely.
574 */ 581 */
575 if (counter->state == PERF_COUNTER_STATE_INACTIVE) { 582 if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
576 update_counter_times(counter); 583 update_group_times(counter);
577 counter->state = PERF_COUNTER_STATE_OFF; 584 counter->state = PERF_COUNTER_STATE_OFF;
578 } 585 }
579 586
@@ -851,6 +858,27 @@ retry:
851} 858}
852 859
853/* 860/*
861 * Put a counter into inactive state and update time fields.
862 * Enabling the leader of a group effectively enables all
863 * the group members that aren't explicitly disabled, so we
864 * have to update their ->tstamp_enabled also.
865 * Note: this works for group members as well as group leaders
866 * since the non-leader members' sibling_lists will be empty.
867 */
868static void __perf_counter_mark_enabled(struct perf_counter *counter,
869 struct perf_counter_context *ctx)
870{
871 struct perf_counter *sub;
872
873 counter->state = PERF_COUNTER_STATE_INACTIVE;
874 counter->tstamp_enabled = ctx->time - counter->total_time_enabled;
875 list_for_each_entry(sub, &counter->sibling_list, list_entry)
876 if (sub->state >= PERF_COUNTER_STATE_INACTIVE)
877 sub->tstamp_enabled =
878 ctx->time - sub->total_time_enabled;
879}
880
881/*
854 * Cross CPU call to enable a performance counter 882 * Cross CPU call to enable a performance counter
855 */ 883 */
856static void __perf_counter_enable(void *info) 884static void __perf_counter_enable(void *info)
@@ -877,8 +905,7 @@ static void __perf_counter_enable(void *info)
877 905
878 if (counter->state >= PERF_COUNTER_STATE_INACTIVE) 906 if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
879 goto unlock; 907 goto unlock;
880 counter->state = PERF_COUNTER_STATE_INACTIVE; 908 __perf_counter_mark_enabled(counter, ctx);
881 counter->tstamp_enabled = ctx->time - counter->total_time_enabled;
882 909
883 /* 910 /*
884 * If the counter is in a group and isn't the group leader, 911 * If the counter is in a group and isn't the group leader,
@@ -971,11 +998,9 @@ static void perf_counter_enable(struct perf_counter *counter)
971 * Since we have the lock this context can't be scheduled 998 * Since we have the lock this context can't be scheduled
972 * in, so we can change the state safely. 999 * in, so we can change the state safely.
973 */ 1000 */
974 if (counter->state == PERF_COUNTER_STATE_OFF) { 1001 if (counter->state == PERF_COUNTER_STATE_OFF)
975 counter->state = PERF_COUNTER_STATE_INACTIVE; 1002 __perf_counter_mark_enabled(counter, ctx);
976 counter->tstamp_enabled = 1003
977 ctx->time - counter->total_time_enabled;
978 }
979 out: 1004 out:
980 spin_unlock_irq(&ctx->lock); 1005 spin_unlock_irq(&ctx->lock);
981} 1006}
@@ -1479,9 +1504,7 @@ static void perf_counter_enable_on_exec(struct task_struct *task)
1479 counter->attr.enable_on_exec = 0; 1504 counter->attr.enable_on_exec = 0;
1480 if (counter->state >= PERF_COUNTER_STATE_INACTIVE) 1505 if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
1481 continue; 1506 continue;
1482 counter->state = PERF_COUNTER_STATE_INACTIVE; 1507 __perf_counter_mark_enabled(counter, ctx);
1483 counter->tstamp_enabled =
1484 ctx->time - counter->total_time_enabled;
1485 enabled = 1; 1508 enabled = 1;
1486 } 1509 }
1487 1510
@@ -1675,6 +1698,11 @@ static void free_counter(struct perf_counter *counter)
1675 atomic_dec(&nr_task_counters); 1698 atomic_dec(&nr_task_counters);
1676 } 1699 }
1677 1700
1701 if (counter->output) {
1702 fput(counter->output->filp);
1703 counter->output = NULL;
1704 }
1705
1678 if (counter->destroy) 1706 if (counter->destroy)
1679 counter->destroy(counter); 1707 counter->destroy(counter);
1680 1708
@@ -1960,6 +1988,8 @@ unlock:
1960 return ret; 1988 return ret;
1961} 1989}
1962 1990
1991int perf_counter_set_output(struct perf_counter *counter, int output_fd);
1992
1963static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 1993static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1964{ 1994{
1965 struct perf_counter *counter = file->private_data; 1995 struct perf_counter *counter = file->private_data;
@@ -1983,6 +2013,9 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1983 case PERF_COUNTER_IOC_PERIOD: 2013 case PERF_COUNTER_IOC_PERIOD:
1984 return perf_counter_period(counter, (u64 __user *)arg); 2014 return perf_counter_period(counter, (u64 __user *)arg);
1985 2015
2016 case PERF_COUNTER_IOC_SET_OUTPUT:
2017 return perf_counter_set_output(counter, arg);
2018
1986 default: 2019 default:
1987 return -ENOTTY; 2020 return -ENOTTY;
1988 } 2021 }
@@ -2253,6 +2286,11 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
2253 2286
2254 WARN_ON_ONCE(counter->ctx->parent_ctx); 2287 WARN_ON_ONCE(counter->ctx->parent_ctx);
2255 mutex_lock(&counter->mmap_mutex); 2288 mutex_lock(&counter->mmap_mutex);
2289 if (counter->output) {
2290 ret = -EINVAL;
2291 goto unlock;
2292 }
2293
2256 if (atomic_inc_not_zero(&counter->mmap_count)) { 2294 if (atomic_inc_not_zero(&counter->mmap_count)) {
2257 if (nr_pages != counter->data->nr_pages) 2295 if (nr_pages != counter->data->nr_pages)
2258 ret = -EINVAL; 2296 ret = -EINVAL;
@@ -2638,6 +2676,7 @@ static int perf_output_begin(struct perf_output_handle *handle,
2638 struct perf_counter *counter, unsigned int size, 2676 struct perf_counter *counter, unsigned int size,
2639 int nmi, int sample) 2677 int nmi, int sample)
2640{ 2678{
2679 struct perf_counter *output_counter;
2641 struct perf_mmap_data *data; 2680 struct perf_mmap_data *data;
2642 unsigned int offset, head; 2681 unsigned int offset, head;
2643 int have_lost; 2682 int have_lost;
@@ -2647,13 +2686,17 @@ static int perf_output_begin(struct perf_output_handle *handle,
2647 u64 lost; 2686 u64 lost;
2648 } lost_event; 2687 } lost_event;
2649 2688
2689 rcu_read_lock();
2650 /* 2690 /*
2651 * For inherited counters we send all the output towards the parent. 2691 * For inherited counters we send all the output towards the parent.
2652 */ 2692 */
2653 if (counter->parent) 2693 if (counter->parent)
2654 counter = counter->parent; 2694 counter = counter->parent;
2655 2695
2656 rcu_read_lock(); 2696 output_counter = rcu_dereference(counter->output);
2697 if (output_counter)
2698 counter = output_counter;
2699
2657 data = rcu_dereference(counter->data); 2700 data = rcu_dereference(counter->data);
2658 if (!data) 2701 if (!data)
2659 goto out; 2702 goto out;
@@ -3934,6 +3977,7 @@ static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
3934 * have these. 3977 * have these.
3935 */ 3978 */
3936 if ((counter->attr.sample_type & PERF_SAMPLE_RAW) && 3979 if ((counter->attr.sample_type & PERF_SAMPLE_RAW) &&
3980 perf_paranoid_tracepoint_raw() &&
3937 !capable(CAP_SYS_ADMIN)) 3981 !capable(CAP_SYS_ADMIN))
3938 return ERR_PTR(-EPERM); 3982 return ERR_PTR(-EPERM);
3939 3983
@@ -4066,6 +4110,7 @@ perf_counter_alloc(struct perf_counter_attr *attr,
4066 hwc->sample_period = attr->sample_period; 4110 hwc->sample_period = attr->sample_period;
4067 if (attr->freq && attr->sample_freq) 4111 if (attr->freq && attr->sample_freq)
4068 hwc->sample_period = 1; 4112 hwc->sample_period = 1;
4113 hwc->last_period = hwc->sample_period;
4069 4114
4070 atomic64_set(&hwc->period_left, hwc->sample_period); 4115 atomic64_set(&hwc->period_left, hwc->sample_period);
4071 4116
@@ -4201,6 +4246,57 @@ err_size:
4201 goto out; 4246 goto out;
4202} 4247}
4203 4248
4249int perf_counter_set_output(struct perf_counter *counter, int output_fd)
4250{
4251 struct perf_counter *output_counter = NULL;
4252 struct file *output_file = NULL;
4253 struct perf_counter *old_output;
4254 int fput_needed = 0;
4255 int ret = -EINVAL;
4256
4257 if (!output_fd)
4258 goto set;
4259
4260 output_file = fget_light(output_fd, &fput_needed);
4261 if (!output_file)
4262 return -EBADF;
4263
4264 if (output_file->f_op != &perf_fops)
4265 goto out;
4266
4267 output_counter = output_file->private_data;
4268
4269 /* Don't chain output fds */
4270 if (output_counter->output)
4271 goto out;
4272
4273 /* Don't set an output fd when we already have an output channel */
4274 if (counter->data)
4275 goto out;
4276
4277 atomic_long_inc(&output_file->f_count);
4278
4279set:
4280 mutex_lock(&counter->mmap_mutex);
4281 old_output = counter->output;
4282 rcu_assign_pointer(counter->output, output_counter);
4283 mutex_unlock(&counter->mmap_mutex);
4284
4285 if (old_output) {
4286 /*
4287 * we need to make sure no existing perf_output_*()
4288 * is still referencing this counter.
4289 */
4290 synchronize_rcu();
4291 fput(old_output->filp);
4292 }
4293
4294 ret = 0;
4295out:
4296 fput_light(output_file, fput_needed);
4297 return ret;
4298}
4299
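From user space the redirection implemented above is requested with the new PERF_COUNTER_IOC_SET_OUTPUT ioctl on an already-open counter fd. A hedged sketch, assuming the updated <linux/perf_counter.h> header from this series; error handling trimmed:

	#include <sys/ioctl.h>
	#include <linux/perf_counter.h>

	/* Send counter_fd's samples into target_fd's mmap buffer.
	 * target_fd must itself be a perf counter fd that owns a buffer. */
	static int redirect_output(int counter_fd, int target_fd)
	{
		return ioctl(counter_fd, PERF_COUNTER_IOC_SET_OUTPUT, target_fd);
	}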
4204/** 4300/**
4205 * sys_perf_counter_open - open a performance counter, associate it to a task/cpu 4301 * sys_perf_counter_open - open a performance counter, associate it to a task/cpu
4206 * 4302 *
@@ -4220,15 +4316,15 @@ SYSCALL_DEFINE5(perf_counter_open,
4220 struct file *group_file = NULL; 4316 struct file *group_file = NULL;
4221 int fput_needed = 0; 4317 int fput_needed = 0;
4222 int fput_needed2 = 0; 4318 int fput_needed2 = 0;
4223 int ret; 4319 int err;
4224 4320
4225 /* for future expandability... */ 4321 /* for future expandability... */
4226 if (flags) 4322 if (flags & ~(PERF_FLAG_FD_NO_GROUP | PERF_FLAG_FD_OUTPUT))
4227 return -EINVAL; 4323 return -EINVAL;
4228 4324
4229 ret = perf_copy_attr(attr_uptr, &attr); 4325 err = perf_copy_attr(attr_uptr, &attr);
4230 if (ret) 4326 if (err)
4231 return ret; 4327 return err;
4232 4328
4233 if (!attr.exclude_kernel) { 4329 if (!attr.exclude_kernel) {
4234 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN)) 4330 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
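With the relaxed flags check, PERF_FLAG_FD_OUTPUT can also be passed at open time so the new counter starts out redirected into group_fd's buffer. A hedged user-space sketch; the __NR_perf_counter_open syscall number and the flag definition are assumed to come from this kernel's headers:

	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/perf_counter.h>

	static int counter_open_redirected(struct perf_counter_attr *attr,
					   pid_t pid, int cpu, int group_fd)
	{
		/* group_fd doubles as the output fd when PERF_FLAG_FD_OUTPUT is set */
		return syscall(__NR_perf_counter_open, attr, pid, cpu,
			       group_fd, PERF_FLAG_FD_OUTPUT);
	}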
@@ -4251,8 +4347,8 @@ SYSCALL_DEFINE5(perf_counter_open,
4251 * Look up the group leader (we will attach this counter to it): 4347 * Look up the group leader (we will attach this counter to it):
4252 */ 4348 */
4253 group_leader = NULL; 4349 group_leader = NULL;
4254 if (group_fd != -1) { 4350 if (group_fd != -1 && !(flags & PERF_FLAG_FD_NO_GROUP)) {
4255 ret = -EINVAL; 4351 err = -EINVAL;
4256 group_file = fget_light(group_fd, &fput_needed); 4352 group_file = fget_light(group_fd, &fput_needed);
4257 if (!group_file) 4353 if (!group_file)
4258 goto err_put_context; 4354 goto err_put_context;
@@ -4281,18 +4377,24 @@ SYSCALL_DEFINE5(perf_counter_open,
4281 4377
4282 counter = perf_counter_alloc(&attr, cpu, ctx, group_leader, 4378 counter = perf_counter_alloc(&attr, cpu, ctx, group_leader,
4283 NULL, GFP_KERNEL); 4379 NULL, GFP_KERNEL);
4284 ret = PTR_ERR(counter); 4380 err = PTR_ERR(counter);
4285 if (IS_ERR(counter)) 4381 if (IS_ERR(counter))
4286 goto err_put_context; 4382 goto err_put_context;
4287 4383
4288 ret = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0); 4384 err = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0);
4289 if (ret < 0) 4385 if (err < 0)
4290 goto err_free_put_context; 4386 goto err_free_put_context;
4291 4387
4292 counter_file = fget_light(ret, &fput_needed2); 4388 counter_file = fget_light(err, &fput_needed2);
4293 if (!counter_file) 4389 if (!counter_file)
4294 goto err_free_put_context; 4390 goto err_free_put_context;
4295 4391
4392 if (flags & PERF_FLAG_FD_OUTPUT) {
4393 err = perf_counter_set_output(counter, group_fd);
4394 if (err)
4395 goto err_fput_free_put_context;
4396 }
4397
4296 counter->filp = counter_file; 4398 counter->filp = counter_file;
4297 WARN_ON_ONCE(ctx->parent_ctx); 4399 WARN_ON_ONCE(ctx->parent_ctx);
4298 mutex_lock(&ctx->mutex); 4400 mutex_lock(&ctx->mutex);
@@ -4306,20 +4408,20 @@ SYSCALL_DEFINE5(perf_counter_open,
4306 list_add_tail(&counter->owner_entry, &current->perf_counter_list); 4408 list_add_tail(&counter->owner_entry, &current->perf_counter_list);
4307 mutex_unlock(&current->perf_counter_mutex); 4409 mutex_unlock(&current->perf_counter_mutex);
4308 4410
4411err_fput_free_put_context:
4309 fput_light(counter_file, fput_needed2); 4412 fput_light(counter_file, fput_needed2);
4310 4413
4311out_fput:
4312 fput_light(group_file, fput_needed);
4313
4314 return ret;
4315
4316err_free_put_context: 4414err_free_put_context:
4317 kfree(counter); 4415 if (err < 0)
4416 kfree(counter);
4318 4417
4319err_put_context: 4418err_put_context:
4320 put_ctx(ctx); 4419 if (err < 0)
4420 put_ctx(ctx);
4421
4422 fput_light(group_file, fput_needed);
4321 4423
4322 goto out_fput; 4424 return err;
4323} 4425}
4324 4426
4325/* 4427/*
diff --git a/kernel/printk.c b/kernel/printk.c
index b4d97b54c1ec..e10d193a833a 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -37,6 +37,12 @@
37#include <asm/uaccess.h> 37#include <asm/uaccess.h>
38 38
39/* 39/*
40 * for_each_console() allows you to iterate on each console
41 */
42#define for_each_console(con) \
43 for (con = console_drivers; con != NULL; con = con->next)
44
45/*
40 * Architectures can override it: 46 * Architectures can override it:
41 */ 47 */
42void asmlinkage __attribute__((weak)) early_printk(const char *fmt, ...) 48void asmlinkage __attribute__((weak)) early_printk(const char *fmt, ...)
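for_each_console() is the usual iteration idiom over a NULL-terminated singly linked list; a self-contained sketch of the same pattern (illustration only, hypothetical types):

	#define for_each_node(pos, head) \
		for (pos = (head); pos != NULL; pos = pos->next)

	struct node {
		int val;
		struct node *next;
	};

	static int count_nodes(struct node *head)
	{
		struct node *n;
		int count = 0;

		for_each_node(n, head)
			count++;
		return count;
	}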
@@ -61,6 +67,8 @@ int console_printk[4] = {
61 DEFAULT_CONSOLE_LOGLEVEL, /* default_console_loglevel */ 67 DEFAULT_CONSOLE_LOGLEVEL, /* default_console_loglevel */
62}; 68};
63 69
70static int saved_console_loglevel = -1;
71
64/* 72/*
65 * Low level drivers may need that to know if they can schedule in 73 * Low level drivers may need that to know if they can schedule in
66 * their unblank() callback or not. So let's export it. 74 * their unblank() callback or not. So let's export it.
@@ -372,10 +380,15 @@ int do_syslog(int type, char __user *buf, int len)
372 logged_chars = 0; 380 logged_chars = 0;
373 break; 381 break;
374 case 6: /* Disable logging to console */ 382 case 6: /* Disable logging to console */
383 if (saved_console_loglevel == -1)
384 saved_console_loglevel = console_loglevel;
375 console_loglevel = minimum_console_loglevel; 385 console_loglevel = minimum_console_loglevel;
376 break; 386 break;
377 case 7: /* Enable logging to console */ 387 case 7: /* Enable logging to console */
378 console_loglevel = default_console_loglevel; 388 if (saved_console_loglevel != -1) {
389 console_loglevel = saved_console_loglevel;
390 saved_console_loglevel = -1;
391 }
379 break; 392 break;
380 case 8: /* Set level of messages printed to console */ 393 case 8: /* Set level of messages printed to console */
381 error = -EINVAL; 394 error = -EINVAL;
@@ -384,6 +397,8 @@ int do_syslog(int type, char __user *buf, int len)
384 if (len < minimum_console_loglevel) 397 if (len < minimum_console_loglevel)
385 len = minimum_console_loglevel; 398 len = minimum_console_loglevel;
386 console_loglevel = len; 399 console_loglevel = len;
400 /* Implicitly re-enable logging to console */
401 saved_console_loglevel = -1;
387 error = 0; 402 error = 0;
388 break; 403 break;
389 case 9: /* Number of chars in the log buffer */ 404 case 9: /* Number of chars in the log buffer */
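The saved_console_loglevel handling above is a plain save/restore idiom with -1 as the "nothing saved" sentinel: case 6 saves the current level once and drops to the minimum, case 7 restores only if something was saved, and case 8 discards any saved value because an explicit level was set. A minimal stand-alone sketch of the idiom (hypothetical values):

	static int level = 7;		/* hypothetical current loglevel */
	static int saved_level = -1;	/* -1 means "nothing saved"      */

	static void quiet_console(void)
	{
		if (saved_level == -1)
			saved_level = level;
		level = 1;		/* hypothetical minimum          */
	}

	static void restore_console(void)
	{
		if (saved_level != -1) {
			level = saved_level;
			saved_level = -1;
		}
	}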
@@ -412,7 +427,7 @@ static void __call_console_drivers(unsigned start, unsigned end)
412{ 427{
413 struct console *con; 428 struct console *con;
414 429
415 for (con = console_drivers; con; con = con->next) { 430 for_each_console(con) {
416 if ((con->flags & CON_ENABLED) && con->write && 431 if ((con->flags & CON_ENABLED) && con->write &&
417 (cpu_online(smp_processor_id()) || 432 (cpu_online(smp_processor_id()) ||
418 (con->flags & CON_ANYTIME))) 433 (con->flags & CON_ANYTIME)))
@@ -544,7 +559,7 @@ static int have_callable_console(void)
544{ 559{
545 struct console *con; 560 struct console *con;
546 561
547 for (con = console_drivers; con; con = con->next) 562 for_each_console(con)
548 if (con->flags & CON_ANYTIME) 563 if (con->flags & CON_ANYTIME)
549 return 1; 564 return 1;
550 565
@@ -1082,7 +1097,7 @@ void console_unblank(void)
1082 1097
1083 console_locked = 1; 1098 console_locked = 1;
1084 console_may_schedule = 0; 1099 console_may_schedule = 0;
1085 for (c = console_drivers; c != NULL; c = c->next) 1100 for_each_console(c)
1086 if ((c->flags & CON_ENABLED) && c->unblank) 1101 if ((c->flags & CON_ENABLED) && c->unblank)
1087 c->unblank(); 1102 c->unblank();
1088 release_console_sem(); 1103 release_console_sem();
@@ -1097,7 +1112,7 @@ struct tty_driver *console_device(int *index)
1097 struct tty_driver *driver = NULL; 1112 struct tty_driver *driver = NULL;
1098 1113
1099 acquire_console_sem(); 1114 acquire_console_sem();
1100 for (c = console_drivers; c != NULL; c = c->next) { 1115 for_each_console(c) {
1101 if (!c->device) 1116 if (!c->device)
1102 continue; 1117 continue;
1103 driver = c->device(c, index); 1118 driver = c->device(c, index);
@@ -1134,25 +1149,49 @@ EXPORT_SYMBOL(console_start);
1134 * to register the console printing procedure with printk() and to 1149 * to register the console printing procedure with printk() and to
1135 * print any messages that were printed by the kernel before the 1150 * print any messages that were printed by the kernel before the
1136 * console driver was initialized. 1151 * console driver was initialized.
1152 *
1153 * This can happen pretty early during the boot process (because of
1154 * early_printk) - sometimes before setup_arch() completes - be careful
1155 * of what kernel features are used - they may not be initialised yet.
1156 *
1157 * There are two types of consoles - bootconsoles (early_printk) and
1158 * "real" consoles (everything which is not a bootconsole) which are
1159 * handled differently.
1160 * - Any number of bootconsoles can be registered at any time.
1161 * - As soon as a "real" console is registered, all bootconsoles
1162 * will be unregistered automatically.
1163 * - Once a "real" console is registered, any attempt to register a
 1164 *   bootconsole will be rejected.
1137 */ 1165 */
1138void register_console(struct console *console) 1166void register_console(struct console *newcon)
1139{ 1167{
1140 int i; 1168 int i;
1141 unsigned long flags; 1169 unsigned long flags;
1142 struct console *bootconsole = NULL; 1170 struct console *bcon = NULL;
1143 1171
1144 if (console_drivers) { 1172 /*
1145 if (console->flags & CON_BOOT) 1173 * before we register a new CON_BOOT console, make sure we don't
1146 return; 1174 * already have a valid console
1147 if (console_drivers->flags & CON_BOOT) 1175 */
1148 bootconsole = console_drivers; 1176 if (console_drivers && newcon->flags & CON_BOOT) {
1177 /* find the last or real console */
1178 for_each_console(bcon) {
1179 if (!(bcon->flags & CON_BOOT)) {
1180 printk(KERN_INFO "Too late to register bootconsole %s%d\n",
1181 newcon->name, newcon->index);
1182 return;
1183 }
1184 }
1149 } 1185 }
1150 1186
1151 if (preferred_console < 0 || bootconsole || !console_drivers) 1187 if (console_drivers && console_drivers->flags & CON_BOOT)
1188 bcon = console_drivers;
1189
1190 if (preferred_console < 0 || bcon || !console_drivers)
1152 preferred_console = selected_console; 1191 preferred_console = selected_console;
1153 1192
1154 if (console->early_setup) 1193 if (newcon->early_setup)
1155 console->early_setup(); 1194 newcon->early_setup();
1156 1195
1157 /* 1196 /*
1158 * See if we want to use this console driver. If we 1197 * See if we want to use this console driver. If we
@@ -1160,13 +1199,13 @@ void register_console(struct console *console)
1160 * that registers here. 1199 * that registers here.
1161 */ 1200 */
1162 if (preferred_console < 0) { 1201 if (preferred_console < 0) {
1163 if (console->index < 0) 1202 if (newcon->index < 0)
1164 console->index = 0; 1203 newcon->index = 0;
1165 if (console->setup == NULL || 1204 if (newcon->setup == NULL ||
1166 console->setup(console, NULL) == 0) { 1205 newcon->setup(newcon, NULL) == 0) {
1167 console->flags |= CON_ENABLED; 1206 newcon->flags |= CON_ENABLED;
1168 if (console->device) { 1207 if (newcon->device) {
1169 console->flags |= CON_CONSDEV; 1208 newcon->flags |= CON_CONSDEV;
1170 preferred_console = 0; 1209 preferred_console = 0;
1171 } 1210 }
1172 } 1211 }
@@ -1178,64 +1217,62 @@ void register_console(struct console *console)
1178 */ 1217 */
1179 for (i = 0; i < MAX_CMDLINECONSOLES && console_cmdline[i].name[0]; 1218 for (i = 0; i < MAX_CMDLINECONSOLES && console_cmdline[i].name[0];
1180 i++) { 1219 i++) {
1181 if (strcmp(console_cmdline[i].name, console->name) != 0) 1220 if (strcmp(console_cmdline[i].name, newcon->name) != 0)
1182 continue; 1221 continue;
1183 if (console->index >= 0 && 1222 if (newcon->index >= 0 &&
1184 console->index != console_cmdline[i].index) 1223 newcon->index != console_cmdline[i].index)
1185 continue; 1224 continue;
1186 if (console->index < 0) 1225 if (newcon->index < 0)
1187 console->index = console_cmdline[i].index; 1226 newcon->index = console_cmdline[i].index;
1188#ifdef CONFIG_A11Y_BRAILLE_CONSOLE 1227#ifdef CONFIG_A11Y_BRAILLE_CONSOLE
1189 if (console_cmdline[i].brl_options) { 1228 if (console_cmdline[i].brl_options) {
1190 console->flags |= CON_BRL; 1229 newcon->flags |= CON_BRL;
1191 braille_register_console(console, 1230 braille_register_console(newcon,
1192 console_cmdline[i].index, 1231 console_cmdline[i].index,
1193 console_cmdline[i].options, 1232 console_cmdline[i].options,
1194 console_cmdline[i].brl_options); 1233 console_cmdline[i].brl_options);
1195 return; 1234 return;
1196 } 1235 }
1197#endif 1236#endif
1198 if (console->setup && 1237 if (newcon->setup &&
1199 console->setup(console, console_cmdline[i].options) != 0) 1238 newcon->setup(newcon, console_cmdline[i].options) != 0)
1200 break; 1239 break;
1201 console->flags |= CON_ENABLED; 1240 newcon->flags |= CON_ENABLED;
1202 console->index = console_cmdline[i].index; 1241 newcon->index = console_cmdline[i].index;
1203 if (i == selected_console) { 1242 if (i == selected_console) {
1204 console->flags |= CON_CONSDEV; 1243 newcon->flags |= CON_CONSDEV;
1205 preferred_console = selected_console; 1244 preferred_console = selected_console;
1206 } 1245 }
1207 break; 1246 break;
1208 } 1247 }
1209 1248
1210 if (!(console->flags & CON_ENABLED)) 1249 if (!(newcon->flags & CON_ENABLED))
1211 return; 1250 return;
1212 1251
1213 if (bootconsole && (console->flags & CON_CONSDEV)) { 1252 /*
1214 printk(KERN_INFO "console handover: boot [%s%d] -> real [%s%d]\n", 1253 * If we have a bootconsole, and are switching to a real console,
1215 bootconsole->name, bootconsole->index, 1254 * don't print everything out again, since when the boot console, and
1216 console->name, console->index); 1255 * the real console are the same physical device, it's annoying to
1217 unregister_console(bootconsole); 1256 * see the beginning boot messages twice
1218 console->flags &= ~CON_PRINTBUFFER; 1257 */
1219 } else { 1258 if (bcon && ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV))
1220 printk(KERN_INFO "console [%s%d] enabled\n", 1259 newcon->flags &= ~CON_PRINTBUFFER;
1221 console->name, console->index);
1222 }
1223 1260
1224 /* 1261 /*
1225 * Put this console in the list - keep the 1262 * Put this console in the list - keep the
1226 * preferred driver at the head of the list. 1263 * preferred driver at the head of the list.
1227 */ 1264 */
1228 acquire_console_sem(); 1265 acquire_console_sem();
1229 if ((console->flags & CON_CONSDEV) || console_drivers == NULL) { 1266 if ((newcon->flags & CON_CONSDEV) || console_drivers == NULL) {
1230 console->next = console_drivers; 1267 newcon->next = console_drivers;
1231 console_drivers = console; 1268 console_drivers = newcon;
1232 if (console->next) 1269 if (newcon->next)
1233 console->next->flags &= ~CON_CONSDEV; 1270 newcon->next->flags &= ~CON_CONSDEV;
1234 } else { 1271 } else {
1235 console->next = console_drivers->next; 1272 newcon->next = console_drivers->next;
1236 console_drivers->next = console; 1273 console_drivers->next = newcon;
1237 } 1274 }
1238 if (console->flags & CON_PRINTBUFFER) { 1275 if (newcon->flags & CON_PRINTBUFFER) {
1239 /* 1276 /*
1240 * release_console_sem() will print out the buffered messages 1277 * release_console_sem() will print out the buffered messages
1241 * for us. 1278 * for us.
@@ -1245,6 +1282,28 @@ void register_console(struct console *console)
1245 spin_unlock_irqrestore(&logbuf_lock, flags); 1282 spin_unlock_irqrestore(&logbuf_lock, flags);
1246 } 1283 }
1247 release_console_sem(); 1284 release_console_sem();
1285
1286 /*
1287 * By unregistering the bootconsoles after we enable the real console
1288 * we get the "console xxx enabled" message on all the consoles -
1289 * boot consoles, real consoles, etc - this is to ensure that end
1290 * users know there might be something in the kernel's log buffer that
1291 * went to the bootconsole (that they do not see on the real console)
1292 */
1293 if (bcon && ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV)) {
1294 /* we need to iterate through twice, to make sure we print
1295 * everything out, before we unregister the console(s)
1296 */
1297 printk(KERN_INFO "console [%s%d] enabled, bootconsole disabled\n",
1298 newcon->name, newcon->index);
1299 for_each_console(bcon)
1300 if (bcon->flags & CON_BOOT)
1301 unregister_console(bcon);
1302 } else {
1303 printk(KERN_INFO "%sconsole [%s%d] enabled\n",
1304 (newcon->flags & CON_BOOT) ? "boot" : "" ,
1305 newcon->name, newcon->index);
1306 }
1248} 1307}
1249EXPORT_SYMBOL(register_console); 1308EXPORT_SYMBOL(register_console);
1250 1309
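For drivers, nothing changes in how a console is registered; the rewrite only affects how register_console() treats bootconsoles versus real consoles. A minimal registration sketch, with a hypothetical device behind the write callback:

	static void mycon_write(struct console *con, const char *s, unsigned int n)
	{
		/* push 'n' characters to the (hypothetical) hardware */
	}

	static struct console mycon = {
		.name	= "mycon",
		.write	= mycon_write,
		.flags	= CON_PRINTBUFFER,	/* add CON_BOOT for an early console */
		.index	= -1,
	};

	/* register_console(&mycon);  -- typically from the driver's init path */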
@@ -1287,11 +1346,13 @@ EXPORT_SYMBOL(unregister_console);
1287 1346
1288static int __init disable_boot_consoles(void) 1347static int __init disable_boot_consoles(void)
1289{ 1348{
1290 if (console_drivers != NULL) { 1349 struct console *con;
1291 if (console_drivers->flags & CON_BOOT) { 1350
1351 for_each_console(con) {
1352 if (con->flags & CON_BOOT) {
1292 printk(KERN_INFO "turn off boot console %s%d\n", 1353 printk(KERN_INFO "turn off boot console %s%d\n",
1293 console_drivers->name, console_drivers->index); 1354 con->name, con->index);
1294 return unregister_console(console_drivers); 1355 unregister_console(con);
1295 } 1356 }
1296 } 1357 }
1297 return 0; 1358 return 0;
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 082c320e4dbf..307c285af59e 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -152,7 +152,7 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
152 if (!dumpable && !capable(CAP_SYS_PTRACE)) 152 if (!dumpable && !capable(CAP_SYS_PTRACE))
153 return -EPERM; 153 return -EPERM;
154 154
155 return security_ptrace_may_access(task, mode); 155 return security_ptrace_access_check(task, mode);
156} 156}
157 157
158bool ptrace_may_access(struct task_struct *task, unsigned int mode) 158bool ptrace_may_access(struct task_struct *task, unsigned int mode)
diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c
deleted file mode 100644
index 0f2b0b311304..000000000000
--- a/kernel/rcuclassic.c
+++ /dev/null
@@ -1,807 +0,0 @@
1/*
2 * Read-Copy Update mechanism for mutual exclusion
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright IBM Corporation, 2001
19 *
20 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
21 * Manfred Spraul <manfred@colorfullife.com>
22 *
23 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
24 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
25 * Papers:
26 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
27 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
28 *
29 * For detailed explanation of Read-Copy Update mechanism see -
30 * Documentation/RCU
31 *
32 */
33#include <linux/types.h>
34#include <linux/kernel.h>
35#include <linux/init.h>
36#include <linux/spinlock.h>
37#include <linux/smp.h>
38#include <linux/rcupdate.h>
39#include <linux/interrupt.h>
40#include <linux/sched.h>
41#include <asm/atomic.h>
42#include <linux/bitops.h>
43#include <linux/module.h>
44#include <linux/completion.h>
45#include <linux/moduleparam.h>
46#include <linux/percpu.h>
47#include <linux/notifier.h>
48#include <linux/cpu.h>
49#include <linux/mutex.h>
50#include <linux/time.h>
51
52#ifdef CONFIG_DEBUG_LOCK_ALLOC
53static struct lock_class_key rcu_lock_key;
54struct lockdep_map rcu_lock_map =
55 STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
56EXPORT_SYMBOL_GPL(rcu_lock_map);
57#endif
58
59
60/* Definition for rcupdate control block. */
61static struct rcu_ctrlblk rcu_ctrlblk = {
62 .cur = -300,
63 .completed = -300,
64 .pending = -300,
65 .lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock),
66 .cpumask = CPU_BITS_NONE,
67};
68
69static struct rcu_ctrlblk rcu_bh_ctrlblk = {
70 .cur = -300,
71 .completed = -300,
72 .pending = -300,
73 .lock = __SPIN_LOCK_UNLOCKED(&rcu_bh_ctrlblk.lock),
74 .cpumask = CPU_BITS_NONE,
75};
76
77static DEFINE_PER_CPU(struct rcu_data, rcu_data);
78static DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
79
80/*
81 * Increment the quiescent state counter.
82 * The counter is a bit degenerated: We do not need to know
83 * how many quiescent states passed, just if there was at least
84 * one since the start of the grace period. Thus just a flag.
85 */
86void rcu_qsctr_inc(int cpu)
87{
88 struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
89 rdp->passed_quiesc = 1;
90}
91
92void rcu_bh_qsctr_inc(int cpu)
93{
94 struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
95 rdp->passed_quiesc = 1;
96}
97
98static int blimit = 10;
99static int qhimark = 10000;
100static int qlowmark = 100;
101
102#ifdef CONFIG_SMP
103static void force_quiescent_state(struct rcu_data *rdp,
104 struct rcu_ctrlblk *rcp)
105{
106 int cpu;
107 unsigned long flags;
108
109 set_need_resched();
110 spin_lock_irqsave(&rcp->lock, flags);
111 if (unlikely(!rcp->signaled)) {
112 rcp->signaled = 1;
113 /*
114 * Don't send IPI to itself. With irqs disabled,
115 * rdp->cpu is the current cpu.
116 *
117 * cpu_online_mask is updated by the _cpu_down()
118 * using __stop_machine(). Since we're in irqs disabled
 119 * section, __stop_machine() is not executing, hence
120 * the cpu_online_mask is stable.
121 *
122 * However, a cpu might have been offlined _just_ before
123 * we disabled irqs while entering here.
124 * And rcu subsystem might not yet have handled the CPU_DEAD
125 * notification, leading to the offlined cpu's bit
126 * being set in the rcp->cpumask.
127 *
128 * Hence cpumask = (rcp->cpumask & cpu_online_mask) to prevent
129 * sending smp_reschedule() to an offlined CPU.
130 */
131 for_each_cpu_and(cpu,
132 to_cpumask(rcp->cpumask), cpu_online_mask) {
133 if (cpu != rdp->cpu)
134 smp_send_reschedule(cpu);
135 }
136 }
137 spin_unlock_irqrestore(&rcp->lock, flags);
138}
139#else
140static inline void force_quiescent_state(struct rcu_data *rdp,
141 struct rcu_ctrlblk *rcp)
142{
143 set_need_resched();
144}
145#endif
146
147static void __call_rcu(struct rcu_head *head, struct rcu_ctrlblk *rcp,
148 struct rcu_data *rdp)
149{
150 long batch;
151
152 head->next = NULL;
153 smp_mb(); /* Read of rcu->cur must happen after any change by caller. */
154
155 /*
156 * Determine the batch number of this callback.
157 *
158 * Using ACCESS_ONCE to avoid the following error when gcc eliminates
 159 * local variable "batch" and emits code like this:
160 * 1) rdp->batch = rcp->cur + 1 # gets old value
161 * ......
162 * 2)rcu_batch_after(rcp->cur + 1, rdp->batch) # gets new value
163 * then [*nxttail[0], *nxttail[1]) may contain callbacks
164 * that batch# = rdp->batch, see the comment of struct rcu_data.
165 */
166 batch = ACCESS_ONCE(rcp->cur) + 1;
167
168 if (rdp->nxtlist && rcu_batch_after(batch, rdp->batch)) {
169 /* process callbacks */
170 rdp->nxttail[0] = rdp->nxttail[1];
171 rdp->nxttail[1] = rdp->nxttail[2];
172 if (rcu_batch_after(batch - 1, rdp->batch))
173 rdp->nxttail[0] = rdp->nxttail[2];
174 }
175
176 rdp->batch = batch;
177 *rdp->nxttail[2] = head;
178 rdp->nxttail[2] = &head->next;
179
180 if (unlikely(++rdp->qlen > qhimark)) {
181 rdp->blimit = INT_MAX;
182 force_quiescent_state(rdp, &rcu_ctrlblk);
183 }
184}
185
186#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
187
188static void record_gp_stall_check_time(struct rcu_ctrlblk *rcp)
189{
190 rcp->gp_start = jiffies;
191 rcp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_CHECK;
192}
193
194static void print_other_cpu_stall(struct rcu_ctrlblk *rcp)
195{
196 int cpu;
197 long delta;
198 unsigned long flags;
199
200 /* Only let one CPU complain about others per time interval. */
201
202 spin_lock_irqsave(&rcp->lock, flags);
203 delta = jiffies - rcp->jiffies_stall;
204 if (delta < 2 || rcp->cur != rcp->completed) {
205 spin_unlock_irqrestore(&rcp->lock, flags);
206 return;
207 }
208 rcp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
209 spin_unlock_irqrestore(&rcp->lock, flags);
210
211 /* OK, time to rat on our buddy... */
212
213 printk(KERN_ERR "INFO: RCU detected CPU stalls:");
214 for_each_possible_cpu(cpu) {
215 if (cpumask_test_cpu(cpu, to_cpumask(rcp->cpumask)))
216 printk(" %d", cpu);
217 }
218 printk(" (detected by %d, t=%ld jiffies)\n",
219 smp_processor_id(), (long)(jiffies - rcp->gp_start));
220}
221
222static void print_cpu_stall(struct rcu_ctrlblk *rcp)
223{
224 unsigned long flags;
225
226 printk(KERN_ERR "INFO: RCU detected CPU %d stall (t=%lu/%lu jiffies)\n",
227 smp_processor_id(), jiffies,
228 jiffies - rcp->gp_start);
229 dump_stack();
230 spin_lock_irqsave(&rcp->lock, flags);
231 if ((long)(jiffies - rcp->jiffies_stall) >= 0)
232 rcp->jiffies_stall =
233 jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
234 spin_unlock_irqrestore(&rcp->lock, flags);
235 set_need_resched(); /* kick ourselves to get things going. */
236}
237
238static void check_cpu_stall(struct rcu_ctrlblk *rcp)
239{
240 long delta;
241
242 delta = jiffies - rcp->jiffies_stall;
243 if (cpumask_test_cpu(smp_processor_id(), to_cpumask(rcp->cpumask)) &&
244 delta >= 0) {
245
246 /* We haven't checked in, so go dump stack. */
247 print_cpu_stall(rcp);
248
249 } else if (rcp->cur != rcp->completed && delta >= 2) {
250
251 /* They had two seconds to dump stack, so complain. */
252 print_other_cpu_stall(rcp);
253 }
254}
255
256#else /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
257
258static void record_gp_stall_check_time(struct rcu_ctrlblk *rcp)
259{
260}
261
262static inline void check_cpu_stall(struct rcu_ctrlblk *rcp)
263{
264}
265
266#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
267
268/**
269 * call_rcu - Queue an RCU callback for invocation after a grace period.
270 * @head: structure to be used for queueing the RCU updates.
271 * @func: actual update function to be invoked after the grace period
272 *
273 * The update function will be invoked some time after a full grace
274 * period elapses, in other words after all currently executing RCU
275 * read-side critical sections have completed. RCU read-side critical
276 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
277 * and may be nested.
278 */
279void call_rcu(struct rcu_head *head,
280 void (*func)(struct rcu_head *rcu))
281{
282 unsigned long flags;
283
284 head->func = func;
285 local_irq_save(flags);
286 __call_rcu(head, &rcu_ctrlblk, &__get_cpu_var(rcu_data));
287 local_irq_restore(flags);
288}
289EXPORT_SYMBOL_GPL(call_rcu);
290
291/**
292 * call_rcu_bh - Queue an RCU for invocation after a quicker grace period.
293 * @head: structure to be used for queueing the RCU updates.
294 * @func: actual update function to be invoked after the grace period
295 *
296 * The update function will be invoked some time after a full grace
297 * period elapses, in other words after all currently executing RCU
298 * read-side critical sections have completed. call_rcu_bh() assumes
299 * that the read-side critical sections end on completion of a softirq
300 * handler. This means that read-side critical sections in process
301 * context must not be interrupted by softirqs. This interface is to be
302 * used when most of the read-side critical sections are in softirq context.
303 * RCU read-side critical sections are delimited by rcu_read_lock() and
 304 * rcu_read_unlock() if in interrupt context, or rcu_read_lock_bh()
 305 * and rcu_read_unlock_bh() if in process context. These may be nested.
306 */
307void call_rcu_bh(struct rcu_head *head,
308 void (*func)(struct rcu_head *rcu))
309{
310 unsigned long flags;
311
312 head->func = func;
313 local_irq_save(flags);
314 __call_rcu(head, &rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data));
315 local_irq_restore(flags);
316}
317EXPORT_SYMBOL_GPL(call_rcu_bh);
318
319/*
320 * Return the number of RCU batches processed thus far. Useful
321 * for debug and statistics.
322 */
323long rcu_batches_completed(void)
324{
325 return rcu_ctrlblk.completed;
326}
327EXPORT_SYMBOL_GPL(rcu_batches_completed);
328
329/*
330 * Return the number of RCU batches processed thus far. Useful
331 * for debug and statistics.
332 */
333long rcu_batches_completed_bh(void)
334{
335 return rcu_bh_ctrlblk.completed;
336}
337EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
338
339/* Raises the softirq for processing rcu_callbacks. */
340static inline void raise_rcu_softirq(void)
341{
342 raise_softirq(RCU_SOFTIRQ);
343}
344
345/*
346 * Invoke the completed RCU callbacks. They are expected to be in
347 * a per-cpu list.
348 */
349static void rcu_do_batch(struct rcu_data *rdp)
350{
351 unsigned long flags;
352 struct rcu_head *next, *list;
353 int count = 0;
354
355 list = rdp->donelist;
356 while (list) {
357 next = list->next;
358 prefetch(next);
359 list->func(list);
360 list = next;
361 if (++count >= rdp->blimit)
362 break;
363 }
364 rdp->donelist = list;
365
366 local_irq_save(flags);
367 rdp->qlen -= count;
368 local_irq_restore(flags);
369 if (rdp->blimit == INT_MAX && rdp->qlen <= qlowmark)
370 rdp->blimit = blimit;
371
372 if (!rdp->donelist)
373 rdp->donetail = &rdp->donelist;
374 else
375 raise_rcu_softirq();
376}
377
378/*
379 * Grace period handling:
 380 * The grace period handling consists of two steps:
381 * - A new grace period is started.
382 * This is done by rcu_start_batch. The start is not broadcasted to
383 * all cpus, they must pick this up by comparing rcp->cur with
384 * rdp->quiescbatch. All cpus are recorded in the
385 * rcu_ctrlblk.cpumask bitmap.
386 * - All cpus must go through a quiescent state.
387 * Since the start of the grace period is not broadcasted, at least two
388 * calls to rcu_check_quiescent_state are required:
389 * The first call just notices that a new grace period is running. The
390 * following calls check if there was a quiescent state since the beginning
391 * of the grace period. If so, it updates rcu_ctrlblk.cpumask. If
392 * the bitmap is empty, then the grace period is completed.
393 * rcu_check_quiescent_state calls rcu_start_batch(0) to start the next grace
394 * period (if necessary).
395 */
396
397/*
398 * Register a new batch of callbacks, and start it up if there is currently no
399 * active batch and the batch to be registered has not already occurred.
400 * Caller must hold rcu_ctrlblk.lock.
401 */
402static void rcu_start_batch(struct rcu_ctrlblk *rcp)
403{
404 if (rcp->cur != rcp->pending &&
405 rcp->completed == rcp->cur) {
406 rcp->cur++;
407 record_gp_stall_check_time(rcp);
408
409 /*
410 * Accessing nohz_cpu_mask before incrementing rcp->cur needs a
411 * Barrier Otherwise it can cause tickless idle CPUs to be
412 * included in rcp->cpumask, which will extend graceperiods
413 * unnecessarily.
414 */
415 smp_mb();
416 cpumask_andnot(to_cpumask(rcp->cpumask),
417 cpu_online_mask, nohz_cpu_mask);
418
419 rcp->signaled = 0;
420 }
421}
422
423/*
424 * cpu went through a quiescent state since the beginning of the grace period.
425 * Clear it from the cpu mask and complete the grace period if it was the last
426 * cpu. Start another grace period if someone has further entries pending
427 */
428static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp)
429{
430 cpumask_clear_cpu(cpu, to_cpumask(rcp->cpumask));
431 if (cpumask_empty(to_cpumask(rcp->cpumask))) {
432 /* batch completed ! */
433 rcp->completed = rcp->cur;
434 rcu_start_batch(rcp);
435 }
436}
437
438/*
439 * Check if the cpu has gone through a quiescent state (say context
440 * switch). If so and if it already hasn't done so in this RCU
441 * quiescent cycle, then indicate that it has done so.
442 */
443static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
444 struct rcu_data *rdp)
445{
446 unsigned long flags;
447
448 if (rdp->quiescbatch != rcp->cur) {
449 /* start new grace period: */
450 rdp->qs_pending = 1;
451 rdp->passed_quiesc = 0;
452 rdp->quiescbatch = rcp->cur;
453 return;
454 }
455
456 /* Grace period already completed for this cpu?
457 * qs_pending is checked instead of the actual bitmap to avoid
 458 * cacheline thrashing.
459 */
460 if (!rdp->qs_pending)
461 return;
462
463 /*
464 * Was there a quiescent state since the beginning of the grace
465 * period? If no, then exit and wait for the next call.
466 */
467 if (!rdp->passed_quiesc)
468 return;
469 rdp->qs_pending = 0;
470
471 spin_lock_irqsave(&rcp->lock, flags);
472 /*
473 * rdp->quiescbatch/rcp->cur and the cpu bitmap can come out of sync
474 * during cpu startup. Ignore the quiescent state.
475 */
476 if (likely(rdp->quiescbatch == rcp->cur))
477 cpu_quiet(rdp->cpu, rcp);
478
479 spin_unlock_irqrestore(&rcp->lock, flags);
480}
481
482
483#ifdef CONFIG_HOTPLUG_CPU
484
485/* warning! helper for rcu_offline_cpu. do not use elsewhere without reviewing
486 * locking requirements, the list it's pulling from has to belong to a cpu
487 * which is dead and hence not processing interrupts.
488 */
489static void rcu_move_batch(struct rcu_data *this_rdp, struct rcu_head *list,
490 struct rcu_head **tail, long batch)
491{
492 unsigned long flags;
493
494 if (list) {
495 local_irq_save(flags);
496 this_rdp->batch = batch;
497 *this_rdp->nxttail[2] = list;
498 this_rdp->nxttail[2] = tail;
499 local_irq_restore(flags);
500 }
501}
502
503static void __rcu_offline_cpu(struct rcu_data *this_rdp,
504 struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
505{
506 unsigned long flags;
507
508 /*
509 * if the cpu going offline owns the grace period
510 * we can block indefinitely waiting for it, so flush
511 * it here
512 */
513 spin_lock_irqsave(&rcp->lock, flags);
514 if (rcp->cur != rcp->completed)
515 cpu_quiet(rdp->cpu, rcp);
516 rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail, rcp->cur + 1);
517 rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail[2], rcp->cur + 1);
518 spin_unlock(&rcp->lock);
519
520 this_rdp->qlen += rdp->qlen;
521 local_irq_restore(flags);
522}
523
524static void rcu_offline_cpu(int cpu)
525{
526 struct rcu_data *this_rdp = &get_cpu_var(rcu_data);
527 struct rcu_data *this_bh_rdp = &get_cpu_var(rcu_bh_data);
528
529 __rcu_offline_cpu(this_rdp, &rcu_ctrlblk,
530 &per_cpu(rcu_data, cpu));
531 __rcu_offline_cpu(this_bh_rdp, &rcu_bh_ctrlblk,
532 &per_cpu(rcu_bh_data, cpu));
533 put_cpu_var(rcu_data);
534 put_cpu_var(rcu_bh_data);
535}
536
537#else
538
539static void rcu_offline_cpu(int cpu)
540{
541}
542
543#endif
544
545/*
546 * This does the RCU processing work from softirq context.
547 */
548static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp,
549 struct rcu_data *rdp)
550{
551 unsigned long flags;
552 long completed_snap;
553
554 if (rdp->nxtlist) {
555 local_irq_save(flags);
556 completed_snap = ACCESS_ONCE(rcp->completed);
557
558 /*
559 * move the other grace-period-completed entries to
560 * [rdp->nxtlist, *rdp->nxttail[0]) temporarily
561 */
562 if (!rcu_batch_before(completed_snap, rdp->batch))
563 rdp->nxttail[0] = rdp->nxttail[1] = rdp->nxttail[2];
564 else if (!rcu_batch_before(completed_snap, rdp->batch - 1))
565 rdp->nxttail[0] = rdp->nxttail[1];
566
567 /*
568 * the grace period for entries in
569 * [rdp->nxtlist, *rdp->nxttail[0]) has completed and
570 * move these entries to donelist
571 */
572 if (rdp->nxttail[0] != &rdp->nxtlist) {
573 *rdp->donetail = rdp->nxtlist;
574 rdp->donetail = rdp->nxttail[0];
575 rdp->nxtlist = *rdp->nxttail[0];
576 *rdp->donetail = NULL;
577
578 if (rdp->nxttail[1] == rdp->nxttail[0])
579 rdp->nxttail[1] = &rdp->nxtlist;
580 if (rdp->nxttail[2] == rdp->nxttail[0])
581 rdp->nxttail[2] = &rdp->nxtlist;
582 rdp->nxttail[0] = &rdp->nxtlist;
583 }
584
585 local_irq_restore(flags);
586
587 if (rcu_batch_after(rdp->batch, rcp->pending)) {
588 unsigned long flags2;
589
590 /* and start it/schedule start if it's a new batch */
591 spin_lock_irqsave(&rcp->lock, flags2);
592 if (rcu_batch_after(rdp->batch, rcp->pending)) {
593 rcp->pending = rdp->batch;
594 rcu_start_batch(rcp);
595 }
596 spin_unlock_irqrestore(&rcp->lock, flags2);
597 }
598 }
599
600 rcu_check_quiescent_state(rcp, rdp);
601 if (rdp->donelist)
602 rcu_do_batch(rdp);
603}
604
605static void rcu_process_callbacks(struct softirq_action *unused)
606{
607 /*
608 * Memory references from any prior RCU read-side critical sections
 609 * executed by the interrupted code must be seen before any RCU
 610 * grace-period manipulations below.
611 */
612
613 smp_mb(); /* See above block comment. */
614
615 __rcu_process_callbacks(&rcu_ctrlblk, &__get_cpu_var(rcu_data));
616 __rcu_process_callbacks(&rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data));
617
618 /*
619 * Memory references from any later RCU read-side critical sections
 620 * executed by the interrupted code must be seen after any RCU
 621 * grace-period manipulations above.
622 */
623
624 smp_mb(); /* See above block comment. */
625}
626
627static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
628{
629 /* Check for CPU stalls, if enabled. */
630 check_cpu_stall(rcp);
631
632 if (rdp->nxtlist) {
633 long completed_snap = ACCESS_ONCE(rcp->completed);
634
635 /*
636 * This cpu has pending rcu entries and the grace period
637 * for them has completed.
638 */
639 if (!rcu_batch_before(completed_snap, rdp->batch))
640 return 1;
641 if (!rcu_batch_before(completed_snap, rdp->batch - 1) &&
642 rdp->nxttail[0] != rdp->nxttail[1])
643 return 1;
644 if (rdp->nxttail[0] != &rdp->nxtlist)
645 return 1;
646
647 /*
648 * This cpu has pending rcu entries and the new batch
 649 * for them hasn't been started, nor has its start been scheduled
650 */
651 if (rcu_batch_after(rdp->batch, rcp->pending))
652 return 1;
653 }
654
655 /* This cpu has finished callbacks to invoke */
656 if (rdp->donelist)
657 return 1;
658
659 /* The rcu core waits for a quiescent state from the cpu */
660 if (rdp->quiescbatch != rcp->cur || rdp->qs_pending)
661 return 1;
662
663 /* nothing to do */
664 return 0;
665}
666
667/*
668 * Check to see if there is any immediate RCU-related work to be done
669 * by the current CPU, returning 1 if so. This function is part of the
670 * RCU implementation; it is -not- an exported member of the RCU API.
671 */
672int rcu_pending(int cpu)
673{
674 return __rcu_pending(&rcu_ctrlblk, &per_cpu(rcu_data, cpu)) ||
675 __rcu_pending(&rcu_bh_ctrlblk, &per_cpu(rcu_bh_data, cpu));
676}
677
678/*
679 * Check to see if any future RCU-related work will need to be done
680 * by the current CPU, even if none need be done immediately, returning
681 * 1 if so. This function is part of the RCU implementation; it is -not-
682 * an exported member of the RCU API.
683 */
684int rcu_needs_cpu(int cpu)
685{
686 struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
687 struct rcu_data *rdp_bh = &per_cpu(rcu_bh_data, cpu);
688
689 return !!rdp->nxtlist || !!rdp_bh->nxtlist || rcu_pending(cpu);
690}
691
692/*
693 * Top-level function driving RCU grace-period detection, normally
694 * invoked from the scheduler-clock interrupt. This function simply
695 * increments counters that are read only from softirq by this same
696 * CPU, so there are no memory barriers required.
697 */
698void rcu_check_callbacks(int cpu, int user)
699{
700 if (user ||
701 (idle_cpu(cpu) && rcu_scheduler_active &&
702 !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
703
704 /*
705 * Get here if this CPU took its interrupt from user
706 * mode or from the idle loop, and if this is not a
707 * nested interrupt. In this case, the CPU is in
708 * a quiescent state, so count it.
709 *
710 * Also do a memory barrier. This is needed to handle
711 * the case where writes from a preempt-disable section
712 * of code get reordered into schedule() by this CPU's
713 * write buffer. The memory barrier makes sure that
 714 * the rcu_qsctr_inc() and rcu_bh_qsctr_inc() are seen
715 * by other CPUs to happen after any such write.
716 */
717
718 smp_mb(); /* See above block comment. */
719 rcu_qsctr_inc(cpu);
720 rcu_bh_qsctr_inc(cpu);
721
722 } else if (!in_softirq()) {
723
724 /*
725 * Get here if this CPU did not take its interrupt from
726 * softirq, in other words, if it is not interrupting
727 * a rcu_bh read-side critical section. This is an _bh
728 * critical section, so count it. The memory barrier
729 * is needed for the same reason as is the above one.
730 */
731
732 smp_mb(); /* See above block comment. */
733 rcu_bh_qsctr_inc(cpu);
734 }
735 raise_rcu_softirq();
736}
737
738static void __cpuinit rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp,
739 struct rcu_data *rdp)
740{
741 unsigned long flags;
742
743 spin_lock_irqsave(&rcp->lock, flags);
744 memset(rdp, 0, sizeof(*rdp));
745 rdp->nxttail[0] = rdp->nxttail[1] = rdp->nxttail[2] = &rdp->nxtlist;
746 rdp->donetail = &rdp->donelist;
747 rdp->quiescbatch = rcp->completed;
748 rdp->qs_pending = 0;
749 rdp->cpu = cpu;
750 rdp->blimit = blimit;
751 spin_unlock_irqrestore(&rcp->lock, flags);
752}
753
754static void __cpuinit rcu_online_cpu(int cpu)
755{
756 struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
757 struct rcu_data *bh_rdp = &per_cpu(rcu_bh_data, cpu);
758
759 rcu_init_percpu_data(cpu, &rcu_ctrlblk, rdp);
760 rcu_init_percpu_data(cpu, &rcu_bh_ctrlblk, bh_rdp);
761 open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
762}
763
764static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
765 unsigned long action, void *hcpu)
766{
767 long cpu = (long)hcpu;
768
769 switch (action) {
770 case CPU_UP_PREPARE:
771 case CPU_UP_PREPARE_FROZEN:
772 rcu_online_cpu(cpu);
773 break;
774 case CPU_DEAD:
775 case CPU_DEAD_FROZEN:
776 rcu_offline_cpu(cpu);
777 break;
778 default:
779 break;
780 }
781 return NOTIFY_OK;
782}
783
784static struct notifier_block __cpuinitdata rcu_nb = {
785 .notifier_call = rcu_cpu_notify,
786};
787
788/*
789 * Initializes rcu mechanism. Assumed to be called early.
 790 * That is, before the local timer (SMP) or jiffies timer (uniproc) is set up.
791 * Note that rcu_qsctr and friends are implicitly
792 * initialized due to the choice of ``0'' for RCU_CTR_INVALID.
793 */
794void __init __rcu_init(void)
795{
796#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
797 printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n");
798#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
799 rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE,
800 (void *)(long)smp_processor_id());
801 /* Register notifier for non-boot CPUs */
802 register_cpu_notifier(&rcu_nb);
803}
804
805module_param(blimit, int, 0);
806module_param(qhimark, int, 0);
807module_param(qlowmark, int, 0);
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index a967c9feb90a..bd5d5c8e5140 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -98,6 +98,30 @@ void synchronize_rcu(void)
98} 98}
99EXPORT_SYMBOL_GPL(synchronize_rcu); 99EXPORT_SYMBOL_GPL(synchronize_rcu);
100 100
101/**
102 * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
103 *
104 * Control will return to the caller some time after a full rcu_bh grace
105 * period has elapsed, in other words after all currently executing rcu_bh
106 * read-side critical sections have completed. RCU read-side critical
107 * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
108 * and may be nested.
109 */
110void synchronize_rcu_bh(void)
111{
112 struct rcu_synchronize rcu;
113
114 if (rcu_blocking_is_gp())
115 return;
116
117 init_completion(&rcu.completion);
118 /* Will wake me after RCU finished. */
119 call_rcu_bh(&rcu.head, wakeme_after_rcu);
120 /* Wait for it. */
121 wait_for_completion(&rcu.completion);
122}
123EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
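A typical writer-side use of the newly exported synchronize_rcu_bh(): unlink the element under the writer lock, wait for all softirq-context readers (which run under rcu_read_lock_bh()), then free it. Sketch with hypothetical types and lock:

	struct my_entry {
		struct list_head list;
		int key;
	};

	static DEFINE_SPINLOCK(table_lock);	/* hypothetical writer lock */

	static void remove_entry(struct my_entry *e)
	{
		spin_lock(&table_lock);
		list_del_rcu(&e->list);
		spin_unlock(&table_lock);

		synchronize_rcu_bh();	/* all rcu_read_lock_bh() readers done */
		kfree(e);
	}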
124
101static void rcu_barrier_callback(struct rcu_head *notused) 125static void rcu_barrier_callback(struct rcu_head *notused)
102{ 126{
103 if (atomic_dec_and_test(&rcu_barrier_cpu_count)) 127 if (atomic_dec_and_test(&rcu_barrier_cpu_count))
@@ -129,6 +153,7 @@ static void rcu_barrier_func(void *type)
129static inline void wait_migrated_callbacks(void) 153static inline void wait_migrated_callbacks(void)
130{ 154{
131 wait_event(rcu_migrate_wq, !atomic_read(&rcu_migrate_type_count)); 155 wait_event(rcu_migrate_wq, !atomic_read(&rcu_migrate_type_count));
156 smp_mb(); /* In case we didn't sleep. */
132} 157}
133 158
134/* 159/*
@@ -192,9 +217,13 @@ static void rcu_migrate_callback(struct rcu_head *notused)
192 wake_up(&rcu_migrate_wq); 217 wake_up(&rcu_migrate_wq);
193} 218}
194 219
220extern int rcu_cpu_notify(struct notifier_block *self,
221 unsigned long action, void *hcpu);
222
195static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self, 223static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self,
196 unsigned long action, void *hcpu) 224 unsigned long action, void *hcpu)
197{ 225{
226 rcu_cpu_notify(self, action, hcpu);
198 if (action == CPU_DYING) { 227 if (action == CPU_DYING) {
199 /* 228 /*
200 * preempt_disable() in on_each_cpu() prevents stop_machine(), 229 * preempt_disable() in on_each_cpu() prevents stop_machine(),
@@ -209,7 +238,8 @@ static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self,
209 call_rcu_bh(rcu_migrate_head, rcu_migrate_callback); 238 call_rcu_bh(rcu_migrate_head, rcu_migrate_callback);
210 call_rcu_sched(rcu_migrate_head + 1, rcu_migrate_callback); 239 call_rcu_sched(rcu_migrate_head + 1, rcu_migrate_callback);
211 call_rcu(rcu_migrate_head + 2, rcu_migrate_callback); 240 call_rcu(rcu_migrate_head + 2, rcu_migrate_callback);
212 } else if (action == CPU_POST_DEAD) { 241 } else if (action == CPU_DOWN_PREPARE) {
242 /* Don't need to wait until next removal operation. */
213 /* rcu_migrate_head is protected by cpu_add_remove_lock */ 243 /* rcu_migrate_head is protected by cpu_add_remove_lock */
214 wait_migrated_callbacks(); 244 wait_migrated_callbacks();
215 } 245 }
@@ -219,8 +249,18 @@ static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self,
219 249
220void __init rcu_init(void) 250void __init rcu_init(void)
221{ 251{
252 int i;
253
222 __rcu_init(); 254 __rcu_init();
223 hotcpu_notifier(rcu_barrier_cpu_hotplug, 0); 255 cpu_notifier(rcu_barrier_cpu_hotplug, 0);
256
257 /*
258 * We don't need protection against CPU-hotplug here because
259 * this is called early in boot, before either interrupts
260 * or the scheduler are operational.
261 */
262 for_each_online_cpu(i)
263 rcu_barrier_cpu_hotplug(NULL, CPU_UP_PREPARE, (void *)(long)i);
224} 264}
225 265
226void rcu_scheduler_starting(void) 266void rcu_scheduler_starting(void)
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
deleted file mode 100644
index beb0e659adcc..000000000000
--- a/kernel/rcupreempt.c
+++ /dev/null
@@ -1,1539 +0,0 @@
1/*
2 * Read-Copy Update mechanism for mutual exclusion, realtime implementation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright IBM Corporation, 2006
19 *
20 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
21 * With thanks to Esben Nielsen, Bill Huey, and Ingo Molnar
22 * for pushing me away from locks and towards counters, and
23 * to Suparna Bhattacharya for pushing me completely away
24 * from atomic instructions on the read side.
25 *
26 * - Added handling of Dynamic Ticks
27 * Copyright 2007 - Paul E. Mckenney <paulmck@us.ibm.com>
28 * - Steven Rostedt <srostedt@redhat.com>
29 *
30 * Papers: http://www.rdrop.com/users/paulmck/RCU
31 *
32 * Design Document: http://lwn.net/Articles/253651/
33 *
34 * For detailed explanation of Read-Copy Update mechanism see -
35 * Documentation/RCU/ *.txt
36 *
37 */
38#include <linux/types.h>
39#include <linux/kernel.h>
40#include <linux/init.h>
41#include <linux/spinlock.h>
42#include <linux/smp.h>
43#include <linux/rcupdate.h>
44#include <linux/interrupt.h>
45#include <linux/sched.h>
46#include <asm/atomic.h>
47#include <linux/bitops.h>
48#include <linux/module.h>
49#include <linux/kthread.h>
50#include <linux/completion.h>
51#include <linux/moduleparam.h>
52#include <linux/percpu.h>
53#include <linux/notifier.h>
54#include <linux/cpu.h>
55#include <linux/random.h>
56#include <linux/delay.h>
57#include <linux/cpumask.h>
58#include <linux/rcupreempt_trace.h>
59#include <asm/byteorder.h>
60
61/*
62 * PREEMPT_RCU data structures.
63 */
64
65/*
66 * GP_STAGES specifies the number of times the state machine has
 67 * to go through all the rcu_try_flip_states (see below)
68 * in a single Grace Period.
69 *
70 * GP in GP_STAGES stands for Grace Period ;)
71 */
72#define GP_STAGES 2
73struct rcu_data {
74 spinlock_t lock; /* Protect rcu_data fields. */
75 long completed; /* Number of last completed batch. */
76 int waitlistcount;
77 struct rcu_head *nextlist;
78 struct rcu_head **nexttail;
79 struct rcu_head *waitlist[GP_STAGES];
80 struct rcu_head **waittail[GP_STAGES];
81 struct rcu_head *donelist; /* from waitlist & waitschedlist */
82 struct rcu_head **donetail;
83 long rcu_flipctr[2];
84 struct rcu_head *nextschedlist;
85 struct rcu_head **nextschedtail;
86 struct rcu_head *waitschedlist;
87 struct rcu_head **waitschedtail;
88 int rcu_sched_sleeping;
89#ifdef CONFIG_RCU_TRACE
90 struct rcupreempt_trace trace;
91#endif /* #ifdef CONFIG_RCU_TRACE */
92};
93
94/*
95 * States for rcu_try_flip() and friends.
96 */
97
98enum rcu_try_flip_states {
99
100 /*
 101 * Stay here if nothing is happening. Flip the counter if something
102 * starts happening. Denoted by "I"
103 */
104 rcu_try_flip_idle_state,
105
106 /*
107 * Wait here for all CPUs to notice that the counter has flipped. This
108 * prevents the old set of counters from ever being incremented once
109 * we leave this state, which in turn is necessary because we cannot
110 * test any individual counter for zero -- we can only check the sum.
111 * Denoted by "A".
112 */
113 rcu_try_flip_waitack_state,
114
115 /*
116 * Wait here for the sum of the old per-CPU counters to reach zero.
117 * Denoted by "Z".
118 */
119 rcu_try_flip_waitzero_state,
120
121 /*
122 * Wait here for each of the other CPUs to execute a memory barrier.
123 * This is necessary to ensure that these other CPUs really have
124 * completed executing their RCU read-side critical sections, despite
125 * their CPUs wildly reordering memory. Denoted by "M".
126 */
127 rcu_try_flip_waitmb_state,
128};
129
130/*
131 * States for rcu_ctrlblk.rcu_sched_sleep.
132 */
133
134enum rcu_sched_sleep_states {
135 rcu_sched_not_sleeping, /* Not sleeping, callbacks need GP. */
136 rcu_sched_sleep_prep, /* Thinking of sleeping, rechecking. */
137 rcu_sched_sleeping, /* Sleeping, awaken if GP needed. */
138};
139
140struct rcu_ctrlblk {
141 spinlock_t fliplock; /* Protect state-machine transitions. */
142 long completed; /* Number of last completed batch. */
143 enum rcu_try_flip_states rcu_try_flip_state; /* The current state of
144 the rcu state machine */
145 spinlock_t schedlock; /* Protect rcu_sched sleep state. */
146 enum rcu_sched_sleep_states sched_sleep; /* rcu_sched state. */
147 wait_queue_head_t sched_wq; /* Place for rcu_sched to sleep. */
148};
149
150struct rcu_dyntick_sched {
151 int dynticks;
152 int dynticks_snap;
153 int sched_qs;
154 int sched_qs_snap;
155 int sched_dynticks_snap;
156};
157
158static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_dyntick_sched, rcu_dyntick_sched) = {
159 .dynticks = 1,
160};
161
162void rcu_qsctr_inc(int cpu)
163{
164 struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
165
166 rdssp->sched_qs++;
167}
168
169#ifdef CONFIG_NO_HZ
170
171void rcu_enter_nohz(void)
172{
173 static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1);
174
175 smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
176 __get_cpu_var(rcu_dyntick_sched).dynticks++;
177 WARN_ON_RATELIMIT(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1, &rs);
178}
179
180void rcu_exit_nohz(void)
181{
182 static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1);
183
184 __get_cpu_var(rcu_dyntick_sched).dynticks++;
185 smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
186 WARN_ON_RATELIMIT(!(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1),
187 &rs);
188}
189
190#endif /* CONFIG_NO_HZ */
191
192
193static DEFINE_PER_CPU(struct rcu_data, rcu_data);
194
195static struct rcu_ctrlblk rcu_ctrlblk = {
196 .fliplock = __SPIN_LOCK_UNLOCKED(rcu_ctrlblk.fliplock),
197 .completed = 0,
198 .rcu_try_flip_state = rcu_try_flip_idle_state,
199 .schedlock = __SPIN_LOCK_UNLOCKED(rcu_ctrlblk.schedlock),
200 .sched_sleep = rcu_sched_not_sleeping,
201 .sched_wq = __WAIT_QUEUE_HEAD_INITIALIZER(rcu_ctrlblk.sched_wq),
202};
203
204static struct task_struct *rcu_sched_grace_period_task;
205
206#ifdef CONFIG_RCU_TRACE
207static char *rcu_try_flip_state_names[] =
208 { "idle", "waitack", "waitzero", "waitmb" };
209#endif /* #ifdef CONFIG_RCU_TRACE */
210
211static DECLARE_BITMAP(rcu_cpu_online_map, NR_CPUS) __read_mostly
212 = CPU_BITS_NONE;
213
214/*
215 * Enum and per-CPU flag to determine when each CPU has seen
216 * the most recent counter flip.
217 */
218
219enum rcu_flip_flag_values {
220 rcu_flip_seen, /* Steady/initial state, last flip seen. */
221 /* Only GP detector can update. */
222 rcu_flipped /* Flip just completed, need confirmation. */
223 /* Only corresponding CPU can update. */
224};
225static DEFINE_PER_CPU_SHARED_ALIGNED(enum rcu_flip_flag_values, rcu_flip_flag)
226 = rcu_flip_seen;
227
228/*
229 * Enum and per-CPU flag to determine when each CPU has executed the
230 * needed memory barrier to fence in memory references from its last RCU
231 * read-side critical section in the just-completed grace period.
232 */
233
234enum rcu_mb_flag_values {
235 rcu_mb_done, /* Steady/initial state, no mb()s required. */
236 /* Only GP detector can update. */
237 rcu_mb_needed /* Flip just completed, need an mb(). */
238 /* Only corresponding CPU can update. */
239};
240static DEFINE_PER_CPU_SHARED_ALIGNED(enum rcu_mb_flag_values, rcu_mb_flag)
241 = rcu_mb_done;
242
243/*
244 * RCU_DATA_ME: find the current CPU's rcu_data structure.
245 * RCU_DATA_CPU: find the specified CPU's rcu_data structure.
246 */
247#define RCU_DATA_ME() (&__get_cpu_var(rcu_data))
248#define RCU_DATA_CPU(cpu) (&per_cpu(rcu_data, cpu))
249
250/*
251 * Helper macro for tracing when the appropriate rcu_data is not
252 * cached in a local variable, but where the CPU number is so cached.
253 */
254#define RCU_TRACE_CPU(f, cpu) RCU_TRACE(f, &(RCU_DATA_CPU(cpu)->trace));
255
256/*
257 * Helper macro for tracing when the appropriate rcu_data is not
258 * cached in a local variable.
259 */
260#define RCU_TRACE_ME(f) RCU_TRACE(f, &(RCU_DATA_ME()->trace));
261
262/*
263 * Helper macro for tracing when the appropriate rcu_data is pointed
264 * to by a local variable.
265 */
266#define RCU_TRACE_RDP(f, rdp) RCU_TRACE(f, &((rdp)->trace));
267
268#define RCU_SCHED_BATCH_TIME (HZ / 50)
269
270/*
271 * Return the number of RCU batches processed thus far. Useful
272 * for debug and statistics.
273 */
274long rcu_batches_completed(void)
275{
276 return rcu_ctrlblk.completed;
277}
278EXPORT_SYMBOL_GPL(rcu_batches_completed);
279
280void __rcu_read_lock(void)
281{
282 int idx;
283 struct task_struct *t = current;
284 int nesting;
285
286 nesting = ACCESS_ONCE(t->rcu_read_lock_nesting);
287 if (nesting != 0) {
288
289 /* An earlier rcu_read_lock() covers us, just count it. */
290
291 t->rcu_read_lock_nesting = nesting + 1;
292
293 } else {
294 unsigned long flags;
295
296 /*
297 * We disable interrupts for the following reasons:
298 * - If we get scheduling clock interrupt here, and we
299 * end up acking the counter flip, it's like a promise
300 * that we will never increment the old counter again.
301 * Thus we will break that promise if that
302 * scheduling clock interrupt happens between the time
303 * we pick the .completed field and the time that we
304 * increment our counter.
305 *
306 * - We don't want to be preempted out here.
307 *
308 * NMIs can still occur, of course, and might themselves
309 * contain rcu_read_lock().
310 */
311
312 local_irq_save(flags);
313
314 /*
315 * Outermost nesting of rcu_read_lock(), so increment
316 * the current counter for the current CPU. Use volatile
317 * casts to prevent the compiler from reordering.
318 */
319
320 idx = ACCESS_ONCE(rcu_ctrlblk.completed) & 0x1;
321 ACCESS_ONCE(RCU_DATA_ME()->rcu_flipctr[idx])++;
322
323 /*
324 * Now that the per-CPU counter has been incremented, we
325 * are protected from races with rcu_read_lock() invoked
326 * from NMI handlers on this CPU. We can therefore safely
327 * increment the nesting counter, relieving further NMIs
328 * of the need to increment the per-CPU counter.
329 */
330
331 ACCESS_ONCE(t->rcu_read_lock_nesting) = nesting + 1;
332
333 /*
 334 * Now that we have prevented any NMIs from storing
335 * to the ->rcu_flipctr_idx, we can safely use it to
336 * remember which counter to decrement in the matching
337 * rcu_read_unlock().
338 */
339
340 ACCESS_ONCE(t->rcu_flipctr_idx) = idx;
341 local_irq_restore(flags);
342 }
343}
344EXPORT_SYMBOL_GPL(__rcu_read_lock);
345
346void __rcu_read_unlock(void)
347{
348 int idx;
349 struct task_struct *t = current;
350 int nesting;
351
352 nesting = ACCESS_ONCE(t->rcu_read_lock_nesting);
353 if (nesting > 1) {
354
355 /*
356 * We are still protected by the enclosing rcu_read_lock(),
357 * so simply decrement the counter.
358 */
359
360 t->rcu_read_lock_nesting = nesting - 1;
361
362 } else {
363 unsigned long flags;
364
365 /*
366 * Disable local interrupts to prevent the grace-period
367 * detection state machine from seeing us half-done.
368 * NMIs can still occur, of course, and might themselves
369 * contain rcu_read_lock() and rcu_read_unlock().
370 */
371
372 local_irq_save(flags);
373
374 /*
375 * Outermost nesting of rcu_read_unlock(), so we must
376 * decrement the current counter for the current CPU.
377 * This must be done carefully, because NMIs can
378 * occur at any point in this code, and any rcu_read_lock()
379 * and rcu_read_unlock() pairs in the NMI handlers
380 * must interact non-destructively with this code.
381 * Lots of volatile casts, and -very- careful ordering.
382 *
383 * Changes to this code, including this one, must be
384 * inspected, validated, and tested extremely carefully!!!
385 */
386
387 /*
388 * First, pick up the index.
389 */
390
391 idx = ACCESS_ONCE(t->rcu_flipctr_idx);
392
393 /*
394 * Now that we have fetched the counter index, it is
395 * safe to decrement the per-task RCU nesting counter.
396 * After this, any interrupts or NMIs will increment and
397 * decrement the per-CPU counters.
398 */
399 ACCESS_ONCE(t->rcu_read_lock_nesting) = nesting - 1;
400
401 /*
402 * It is now safe to decrement this task's nesting count.
403 * NMIs that occur after this statement will route their
404 * rcu_read_lock() calls through this "else" clause, and
405 * will thus start incrementing the per-CPU counter on
406 * their own. They will also clobber ->rcu_flipctr_idx,
407 * but that is OK, since we have already fetched it.
408 */
409
410 ACCESS_ONCE(RCU_DATA_ME()->rcu_flipctr[idx])--;
411 local_irq_restore(flags);
412 }
413}
414EXPORT_SYMBOL_GPL(__rcu_read_unlock);
415
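For reference, a reader-side usage sketch (hypothetical list and function names) of the rcu_read_lock()/rcu_read_unlock() pair that wraps __rcu_read_lock() and __rcu_read_unlock() above; nesting is permitted, and only the outermost level touches the per-CPU rcu_flipctr.

#include <linux/rcupdate.h>

struct my_node {
	struct my_node *next;
	int key;
};

static struct my_node *my_list_head;	/* RCU-protected, hypothetical */

int my_list_contains(int key)
{
	struct my_node *p;
	int found = 0;

	rcu_read_lock();		/* outermost: bumps rcu_flipctr[idx] */
	for (p = rcu_dereference(my_list_head); p != NULL;
	     p = rcu_dereference(p->next)) {
		if (p->key == key) {
			found = 1;
			break;
		}
	}
	rcu_read_unlock();		/* decrements the counter chosen above */
	return found;
}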
416/*
417 * If a global counter flip has occurred since the last time that we
418 * advanced callbacks, advance them. Hardware interrupts must be
419 * disabled when calling this function.
420 */
421static void __rcu_advance_callbacks(struct rcu_data *rdp)
422{
423 int cpu;
424 int i;
425 int wlc = 0;
426
427 if (rdp->completed != rcu_ctrlblk.completed) {
428 if (rdp->waitlist[GP_STAGES - 1] != NULL) {
429 *rdp->donetail = rdp->waitlist[GP_STAGES - 1];
430 rdp->donetail = rdp->waittail[GP_STAGES - 1];
431 RCU_TRACE_RDP(rcupreempt_trace_move2done, rdp);
432 }
433 for (i = GP_STAGES - 2; i >= 0; i--) {
434 if (rdp->waitlist[i] != NULL) {
435 rdp->waitlist[i + 1] = rdp->waitlist[i];
436 rdp->waittail[i + 1] = rdp->waittail[i];
437 wlc++;
438 } else {
439 rdp->waitlist[i + 1] = NULL;
440 rdp->waittail[i + 1] =
441 &rdp->waitlist[i + 1];
442 }
443 }
444 if (rdp->nextlist != NULL) {
445 rdp->waitlist[0] = rdp->nextlist;
446 rdp->waittail[0] = rdp->nexttail;
447 wlc++;
448 rdp->nextlist = NULL;
449 rdp->nexttail = &rdp->nextlist;
450 RCU_TRACE_RDP(rcupreempt_trace_move2wait, rdp);
451 } else {
452 rdp->waitlist[0] = NULL;
453 rdp->waittail[0] = &rdp->waitlist[0];
454 }
455 rdp->waitlistcount = wlc;
456 rdp->completed = rcu_ctrlblk.completed;
457 }
458
459 /*
460 * Check to see if this CPU needs to report that it has seen
461 * the most recent counter flip, thereby declaring that all
462 * subsequent rcu_read_lock() invocations will respect this flip.
463 */
464
465 cpu = raw_smp_processor_id();
466 if (per_cpu(rcu_flip_flag, cpu) == rcu_flipped) {
467 smp_mb(); /* Subsequent counter accesses must see new value */
468 per_cpu(rcu_flip_flag, cpu) = rcu_flip_seen;
469 smp_mb(); /* Subsequent RCU read-side critical sections */
470 /* seen -after- acknowledgement. */
471 }
472}
473
474#ifdef CONFIG_NO_HZ
475static DEFINE_PER_CPU(int, rcu_update_flag);
476
477/**
478 * rcu_irq_enter - Called from Hard irq handlers and NMI/SMI.
479 *
480 * If the CPU was idle with dynamic ticks active, this updates the
481 * rcu_dyntick_sched.dynticks to let the RCU handling know that the
482 * CPU is active.
483 */
484void rcu_irq_enter(void)
485{
486 int cpu = smp_processor_id();
487 struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
488
489 if (per_cpu(rcu_update_flag, cpu))
490 per_cpu(rcu_update_flag, cpu)++;
491
492 /*
493 * Only update if we are coming from a stopped ticks mode
494 * (rcu_dyntick_sched.dynticks is even).
495 */
496 if (!in_interrupt() &&
497 (rdssp->dynticks & 0x1) == 0) {
498 /*
499 * The following might seem like we could have a race
500 * with NMI/SMIs. But this really isn't a problem.
501 * Here we do a read/modify/write, and the race happens
502 * when an NMI/SMI comes in after the read and before
503 * the write. But NMI/SMIs will increment this counter
504 * twice before returning, so the zero bit will not
505 * be corrupted by the NMI/SMI which is the most important
506 * part.
507 *
508 * The only thing is that we would bring back the counter
 509 * to a position that it was in during the NMI/SMI.
510 * But the zero bit would be set, so the rest of the
511 * counter would again be ignored.
512 *
513 * On return from the IRQ, the counter may have the zero
514 * bit be 0 and the counter the same as the return from
515 * the NMI/SMI. If the state machine was so unlucky to
516 * see that, it still doesn't matter, since all
517 * RCU read-side critical sections on this CPU would
518 * have already completed.
519 */
520 rdssp->dynticks++;
521 /*
522 * The following memory barrier ensures that any
523 * rcu_read_lock() primitives in the irq handler
524 * are seen by other CPUs to follow the above
525 * increment to rcu_dyntick_sched.dynticks. This is
526 * required in order for other CPUs to correctly
527 * determine when it is safe to advance the RCU
528 * grace-period state machine.
529 */
530 smp_mb(); /* see above block comment. */
531 /*
532 * Since we can't determine the dynamic tick mode from
533 * the rcu_dyntick_sched.dynticks after this routine,
534 * we use a second flag to acknowledge that we came
535 * from an idle state with ticks stopped.
536 */
537 per_cpu(rcu_update_flag, cpu)++;
538 /*
539 * If we take an NMI/SMI now, they will also increment
540 * the rcu_update_flag, and will not update the
541 * rcu_dyntick_sched.dynticks on exit. That is for
542 * this IRQ to do.
543 */
544 }
545}
546
547/**
548 * rcu_irq_exit - Called from exiting Hard irq context.
549 *
550 * If the CPU was idle with dynamic ticks active, update the
 551 * rcu_dyntick_sched.dynticks to let the RCU handling be
552 * aware that the CPU is going back to idle with no ticks.
553 */
554void rcu_irq_exit(void)
555{
556 int cpu = smp_processor_id();
557 struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
558
559 /*
560 * rcu_update_flag is set if we interrupted the CPU
561 * when it was idle with ticks stopped.
562 * Once this occurs, we keep track of interrupt nesting
563 * because a NMI/SMI could also come in, and we still
564 * only want the IRQ that started the increment of the
565 * rcu_dyntick_sched.dynticks to be the one that modifies
566 * it on exit.
567 */
568 if (per_cpu(rcu_update_flag, cpu)) {
569 if (--per_cpu(rcu_update_flag, cpu))
570 return;
571
572 /* This must match the interrupt nesting */
573 WARN_ON(in_interrupt());
574
575 /*
576 * If an NMI/SMI happens now we are still
577 * protected by the rcu_dyntick_sched.dynticks being odd.
578 */
579
580 /*
581 * The following memory barrier ensures that any
582 * rcu_read_unlock() primitives in the irq handler
 583 * are seen by other CPUs to precede the following
584 * increment to rcu_dyntick_sched.dynticks. This
585 * is required in order for other CPUs to determine
586 * when it is safe to advance the RCU grace-period
587 * state machine.
588 */
589 smp_mb(); /* see above block comment. */
590 rdssp->dynticks++;
591 WARN_ON(rdssp->dynticks & 0x1);
592 }
593}
594
595void rcu_nmi_enter(void)
596{
597 rcu_irq_enter();
598}
599
600void rcu_nmi_exit(void)
601{
602 rcu_irq_exit();
603}
604
605static void dyntick_save_progress_counter(int cpu)
606{
607 struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
608
609 rdssp->dynticks_snap = rdssp->dynticks;
610}
611
612static inline int
613rcu_try_flip_waitack_needed(int cpu)
614{
615 long curr;
616 long snap;
617 struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
618
619 curr = rdssp->dynticks;
620 snap = rdssp->dynticks_snap;
621 smp_mb(); /* force ordering with cpu entering/leaving dynticks. */
622
623 /*
624 * If the CPU remained in dynticks mode for the entire time
625 * and didn't take any interrupts, NMIs, SMIs, or whatever,
626 * then it cannot be in the middle of an rcu_read_lock(), so
627 * the next rcu_read_lock() it executes must use the new value
628 * of the counter. So we can safely pretend that this CPU
629 * already acknowledged the counter.
630 */
631
632 if ((curr == snap) && ((curr & 0x1) == 0))
633 return 0;
634
635 /*
636 * If the CPU passed through or entered a dynticks idle phase with
637 * no active irq handlers, then, as above, we can safely pretend
638 * that this CPU already acknowledged the counter.
639 */
640
641 if ((curr - snap) > 2 || (curr & 0x1) == 0)
642 return 0;
643
644 /* We need this CPU to explicitly acknowledge the counter flip. */
645
646 return 1;
647}
648
649static inline int
650rcu_try_flip_waitmb_needed(int cpu)
651{
652 long curr;
653 long snap;
654 struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
655
656 curr = rdssp->dynticks;
657 snap = rdssp->dynticks_snap;
658 smp_mb(); /* force ordering with cpu entering/leaving dynticks. */
659
660 /*
661 * If the CPU remained in dynticks mode for the entire time
662 * and didn't take any interrupts, NMIs, SMIs, or whatever,
663 * then it cannot have executed an RCU read-side critical section
664 * during that time, so there is no need for it to execute a
665 * memory barrier.
666 */
667
668 if ((curr == snap) && ((curr & 0x1) == 0))
669 return 0;
670
671 /*
672 * If the CPU either entered or exited an outermost interrupt,
673 * SMI, NMI, or whatever handler, then we know that it executed
674 * a memory barrier when doing so. So we don't need another one.
675 */
676 if (curr != snap)
677 return 0;
678
679 /* We need the CPU to execute a memory barrier. */
680
681 return 1;
682}
683
684static void dyntick_save_progress_counter_sched(int cpu)
685{
686 struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
687
688 rdssp->sched_dynticks_snap = rdssp->dynticks;
689}
690
691static int rcu_qsctr_inc_needed_dyntick(int cpu)
692{
693 long curr;
694 long snap;
695 struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
696
697 curr = rdssp->dynticks;
698 snap = rdssp->sched_dynticks_snap;
699 smp_mb(); /* force ordering with cpu entering/leaving dynticks. */
700
701 /*
702 * If the CPU remained in dynticks mode for the entire time
703 * and didn't take any interrupts, NMIs, SMIs, or whatever,
704 * then it cannot be in the middle of an rcu_read_lock(), so
705 * the next rcu_read_lock() it executes must use the new value
706 * of the counter. Therefore, this CPU has been in a quiescent
707 * state the entire time, and we don't need to wait for it.
708 */
709
710 if ((curr == snap) && ((curr & 0x1) == 0))
711 return 0;
712
713 /*
714 * If the CPU passed through or entered a dynticks idle phase with
715 * no active irq handlers, then, as above, this CPU has already
716 * passed through a quiescent state.
717 */
718
719 if ((curr - snap) > 2 || (snap & 0x1) == 0)
720 return 0;
721
722 /* We need this CPU to go through a quiescent state. */
723
724 return 1;
725}
726
727#else /* !CONFIG_NO_HZ */
728
729# define dyntick_save_progress_counter(cpu) do { } while (0)
730# define rcu_try_flip_waitack_needed(cpu) (1)
731# define rcu_try_flip_waitmb_needed(cpu) (1)
732
733# define dyntick_save_progress_counter_sched(cpu) do { } while (0)
734# define rcu_qsctr_inc_needed_dyntick(cpu) (1)
735
736#endif /* CONFIG_NO_HZ */
737
738static void save_qsctr_sched(int cpu)
739{
740 struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
741
742 rdssp->sched_qs_snap = rdssp->sched_qs;
743}
744
745static inline int rcu_qsctr_inc_needed(int cpu)
746{
747 struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
748
749 /*
750 * If there has been a quiescent state, no more need to wait
751 * on this CPU.
752 */
753
754 if (rdssp->sched_qs != rdssp->sched_qs_snap) {
755 smp_mb(); /* force ordering with cpu entering schedule(). */
756 return 0;
757 }
758
759 /* We need this CPU to go through a quiescent state. */
760
761 return 1;
762}
763
764/*
765 * Get here when RCU is idle. Decide whether we need to
766 * move out of idle state, and return non-zero if so.
767 * "Straightforward" approach for the moment, might later
768 * use callback-list lengths, grace-period duration, or
769 * some such to determine when to exit idle state.
770 * Might also need a pre-idle test that does not acquire
771 * the lock, but let's get the simple case working first...
772 */
773
774static int
775rcu_try_flip_idle(void)
776{
777 int cpu;
778
779 RCU_TRACE_ME(rcupreempt_trace_try_flip_i1);
780 if (!rcu_pending(smp_processor_id())) {
781 RCU_TRACE_ME(rcupreempt_trace_try_flip_ie1);
782 return 0;
783 }
784
785 /*
786 * Do the flip.
787 */
788
789 RCU_TRACE_ME(rcupreempt_trace_try_flip_g1);
790 rcu_ctrlblk.completed++; /* stands in for rcu_try_flip_g2 */
791
792 /*
793 * Need a memory barrier so that other CPUs see the new
794 * counter value before they see the subsequent change of all
795 * the rcu_flip_flag instances to rcu_flipped.
796 */
797
798 smp_mb(); /* see above block comment. */
799
800 /* Now ask each CPU for acknowledgement of the flip. */
801
802 for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map)) {
803 per_cpu(rcu_flip_flag, cpu) = rcu_flipped;
804 dyntick_save_progress_counter(cpu);
805 }
806
807 return 1;
808}
809
810/*
811 * Wait for CPUs to acknowledge the flip.
812 */
813
814static int
815rcu_try_flip_waitack(void)
816{
817 int cpu;
818
819 RCU_TRACE_ME(rcupreempt_trace_try_flip_a1);
820 for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map))
821 if (rcu_try_flip_waitack_needed(cpu) &&
822 per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) {
823 RCU_TRACE_ME(rcupreempt_trace_try_flip_ae1);
824 return 0;
825 }
826
827 /*
828 * Make sure our checks above don't bleed into subsequent
829 * waiting for the sum of the counters to reach zero.
830 */
831
832 smp_mb(); /* see above block comment. */
833 RCU_TRACE_ME(rcupreempt_trace_try_flip_a2);
834 return 1;
835}
836
837/*
838 * Wait for collective ``last'' counter to reach zero,
839 * then tell all CPUs to do an end-of-grace-period memory barrier.
840 */
841
842static int
843rcu_try_flip_waitzero(void)
844{
845 int cpu;
846 int lastidx = !(rcu_ctrlblk.completed & 0x1);
847 int sum = 0;
848
849 /* Check to see if the sum of the "last" counters is zero. */
850
851 RCU_TRACE_ME(rcupreempt_trace_try_flip_z1);
852 for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map))
853 sum += RCU_DATA_CPU(cpu)->rcu_flipctr[lastidx];
854 if (sum != 0) {
855 RCU_TRACE_ME(rcupreempt_trace_try_flip_ze1);
856 return 0;
857 }
858
859 /*
860 * This ensures that the other CPUs see the call for
861 * memory barriers -after- the sum to zero has been
862 * detected here
863 */
864 smp_mb(); /* ^^^^^^^^^^^^ */
865
866 /* Call for a memory barrier from each CPU. */
867 for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map)) {
868 per_cpu(rcu_mb_flag, cpu) = rcu_mb_needed;
869 dyntick_save_progress_counter(cpu);
870 }
871
872 RCU_TRACE_ME(rcupreempt_trace_try_flip_z2);
873 return 1;
874}
875
876/*
877 * Wait for all CPUs to do their end-of-grace-period memory barrier.
878 * Return 0 once all CPUs have done so.
879 */
880
881static int
882rcu_try_flip_waitmb(void)
883{
884 int cpu;
885
886 RCU_TRACE_ME(rcupreempt_trace_try_flip_m1);
887 for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map))
888 if (rcu_try_flip_waitmb_needed(cpu) &&
889 per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) {
890 RCU_TRACE_ME(rcupreempt_trace_try_flip_me1);
891 return 0;
892 }
893
894 smp_mb(); /* Ensure that the above checks precede any following flip. */
895 RCU_TRACE_ME(rcupreempt_trace_try_flip_m2);
896 return 1;
897}
898
899/*
900 * Attempt a single flip of the counters. Remember, a single flip does
901 * -not- constitute a grace period. Instead, the interval between
902 * at least GP_STAGES consecutive flips is a grace period.
903 *
904 * If anyone is nuts enough to run this CONFIG_PREEMPT_RCU implementation
905 * on a large SMP, they might want to use a hierarchical organization of
906 * the per-CPU-counter pairs.
907 */
908static void rcu_try_flip(void)
909{
910 unsigned long flags;
911
912 RCU_TRACE_ME(rcupreempt_trace_try_flip_1);
913 if (unlikely(!spin_trylock_irqsave(&rcu_ctrlblk.fliplock, flags))) {
914 RCU_TRACE_ME(rcupreempt_trace_try_flip_e1);
915 return;
916 }
917
918 /*
919 * Take the next transition(s) through the RCU grace-period
920 * flip-counter state machine.
921 */
922
923 switch (rcu_ctrlblk.rcu_try_flip_state) {
924 case rcu_try_flip_idle_state:
925 if (rcu_try_flip_idle())
926 rcu_ctrlblk.rcu_try_flip_state =
927 rcu_try_flip_waitack_state;
928 break;
929 case rcu_try_flip_waitack_state:
930 if (rcu_try_flip_waitack())
931 rcu_ctrlblk.rcu_try_flip_state =
932 rcu_try_flip_waitzero_state;
933 break;
934 case rcu_try_flip_waitzero_state:
935 if (rcu_try_flip_waitzero())
936 rcu_ctrlblk.rcu_try_flip_state =
937 rcu_try_flip_waitmb_state;
938 break;
939 case rcu_try_flip_waitmb_state:
940 if (rcu_try_flip_waitmb())
941 rcu_ctrlblk.rcu_try_flip_state =
942 rcu_try_flip_idle_state;
943 }
944 spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags);
945}
946
947/*
948 * Check to see if this CPU needs to do a memory barrier in order to
949 * ensure that any prior RCU read-side critical sections have committed
950 * their counter manipulations and critical-section memory references
951 * before declaring the grace period to be completed.
952 */
953static void rcu_check_mb(int cpu)
954{
955 if (per_cpu(rcu_mb_flag, cpu) == rcu_mb_needed) {
956 smp_mb(); /* Ensure RCU read-side accesses are visible. */
957 per_cpu(rcu_mb_flag, cpu) = rcu_mb_done;
958 }
959}
960
961void rcu_check_callbacks(int cpu, int user)
962{
963 unsigned long flags;
964 struct rcu_data *rdp = RCU_DATA_CPU(cpu);
965
966 /*
967 * If this CPU took its interrupt from user mode or from the
968 * idle loop, and this is not a nested interrupt, then
 969 * this CPU has to have exited all prior preempt-disable
970 * sections of code. So increment the counter to note this.
971 *
972 * The memory barrier is needed to handle the case where
973 * writes from a preempt-disable section of code get reordered
974 * into schedule() by this CPU's write buffer. So the memory
975 * barrier makes sure that the rcu_qsctr_inc() is seen by other
976 * CPUs to happen after any such write.
977 */
978
979 if (user ||
980 (idle_cpu(cpu) && !in_softirq() &&
981 hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
982 smp_mb(); /* Guard against aggressive schedule(). */
983 rcu_qsctr_inc(cpu);
984 }
985
986 rcu_check_mb(cpu);
987 if (rcu_ctrlblk.completed == rdp->completed)
988 rcu_try_flip();
989 spin_lock_irqsave(&rdp->lock, flags);
990 RCU_TRACE_RDP(rcupreempt_trace_check_callbacks, rdp);
991 __rcu_advance_callbacks(rdp);
992 if (rdp->donelist == NULL) {
993 spin_unlock_irqrestore(&rdp->lock, flags);
994 } else {
995 spin_unlock_irqrestore(&rdp->lock, flags);
996 raise_softirq(RCU_SOFTIRQ);
997 }
998}
999
1000/*
1001 * Needed by dynticks, to make sure all RCU processing has finished
1002 * when we go idle:
1003 */
1004void rcu_advance_callbacks(int cpu, int user)
1005{
1006 unsigned long flags;
1007 struct rcu_data *rdp = RCU_DATA_CPU(cpu);
1008
1009 if (rcu_ctrlblk.completed == rdp->completed) {
1010 rcu_try_flip();
1011 if (rcu_ctrlblk.completed == rdp->completed)
1012 return;
1013 }
1014 spin_lock_irqsave(&rdp->lock, flags);
1015 RCU_TRACE_RDP(rcupreempt_trace_check_callbacks, rdp);
1016 __rcu_advance_callbacks(rdp);
1017 spin_unlock_irqrestore(&rdp->lock, flags);
1018}
1019
1020#ifdef CONFIG_HOTPLUG_CPU
1021#define rcu_offline_cpu_enqueue(srclist, srctail, dstlist, dsttail) do { \
1022 *dsttail = srclist; \
1023 if (srclist != NULL) { \
1024 dsttail = srctail; \
1025 srclist = NULL; \
1026 srctail = &srclist;\
1027 } \
1028 } while (0)
1029
1030void rcu_offline_cpu(int cpu)
1031{
1032 int i;
1033 struct rcu_head *list = NULL;
1034 unsigned long flags;
1035 struct rcu_data *rdp = RCU_DATA_CPU(cpu);
1036 struct rcu_head *schedlist = NULL;
1037 struct rcu_head **schedtail = &schedlist;
1038 struct rcu_head **tail = &list;
1039
1040 /*
1041 * Remove all callbacks from the newly dead CPU, retaining order.
1042 * Otherwise rcu_barrier() will fail
1043 */
1044
1045 spin_lock_irqsave(&rdp->lock, flags);
1046 rcu_offline_cpu_enqueue(rdp->donelist, rdp->donetail, list, tail);
1047 for (i = GP_STAGES - 1; i >= 0; i--)
1048 rcu_offline_cpu_enqueue(rdp->waitlist[i], rdp->waittail[i],
1049 list, tail);
1050 rcu_offline_cpu_enqueue(rdp->nextlist, rdp->nexttail, list, tail);
1051 rcu_offline_cpu_enqueue(rdp->waitschedlist, rdp->waitschedtail,
1052 schedlist, schedtail);
1053 rcu_offline_cpu_enqueue(rdp->nextschedlist, rdp->nextschedtail,
1054 schedlist, schedtail);
1055 rdp->rcu_sched_sleeping = 0;
1056 spin_unlock_irqrestore(&rdp->lock, flags);
1057 rdp->waitlistcount = 0;
1058
1059 /* Disengage the newly dead CPU from the grace-period computation. */
1060
1061 spin_lock_irqsave(&rcu_ctrlblk.fliplock, flags);
1062 rcu_check_mb(cpu);
1063 if (per_cpu(rcu_flip_flag, cpu) == rcu_flipped) {
1064 smp_mb(); /* Subsequent counter accesses must see new value */
1065 per_cpu(rcu_flip_flag, cpu) = rcu_flip_seen;
1066 smp_mb(); /* Subsequent RCU read-side critical sections */
1067 /* seen -after- acknowledgement. */
1068 }
1069
1070 RCU_DATA_ME()->rcu_flipctr[0] += RCU_DATA_CPU(cpu)->rcu_flipctr[0];
1071 RCU_DATA_ME()->rcu_flipctr[1] += RCU_DATA_CPU(cpu)->rcu_flipctr[1];
1072
1073 RCU_DATA_CPU(cpu)->rcu_flipctr[0] = 0;
1074 RCU_DATA_CPU(cpu)->rcu_flipctr[1] = 0;
1075
1076 cpumask_clear_cpu(cpu, to_cpumask(rcu_cpu_online_map));
1077
1078 spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags);
1079
1080 /*
1081 * Place the removed callbacks on the current CPU's queue.
1082 * Make them all start a new grace period: simple approach,
1083 * in theory could starve a given set of callbacks, but
1084 * you would need to be doing some serious CPU hotplugging
1085 * to make this happen. If this becomes a problem, adding
1086 * a synchronize_rcu() to the hotplug path would be a simple
1087 * fix.
1088 */
1089
1090 local_irq_save(flags); /* disable preempt till we know what lock. */
1091 rdp = RCU_DATA_ME();
1092 spin_lock(&rdp->lock);
1093 *rdp->nexttail = list;
1094 if (list)
1095 rdp->nexttail = tail;
1096 *rdp->nextschedtail = schedlist;
1097 if (schedlist)
1098 rdp->nextschedtail = schedtail;
1099 spin_unlock_irqrestore(&rdp->lock, flags);
1100}
1101
1102#else /* #ifdef CONFIG_HOTPLUG_CPU */
1103
1104void rcu_offline_cpu(int cpu)
1105{
1106}
1107
1108#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
1109
1110void __cpuinit rcu_online_cpu(int cpu)
1111{
1112 unsigned long flags;
1113 struct rcu_data *rdp;
1114
1115 spin_lock_irqsave(&rcu_ctrlblk.fliplock, flags);
1116 cpumask_set_cpu(cpu, to_cpumask(rcu_cpu_online_map));
1117 spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags);
1118
1119 /*
1120 * The rcu_sched grace-period processing might have bypassed
1121 * this CPU, given that it was not in the rcu_cpu_online_map
1122 * when the grace-period scan started. This means that the
1123 * grace-period task might sleep. So make sure that if this
1124 * should happen, the first callback posted to this CPU will
1125 * wake up the grace-period task if need be.
1126 */
1127
1128 rdp = RCU_DATA_CPU(cpu);
1129 spin_lock_irqsave(&rdp->lock, flags);
1130 rdp->rcu_sched_sleeping = 1;
1131 spin_unlock_irqrestore(&rdp->lock, flags);
1132}
1133
1134static void rcu_process_callbacks(struct softirq_action *unused)
1135{
1136 unsigned long flags;
1137 struct rcu_head *next, *list;
1138 struct rcu_data *rdp;
1139
1140 local_irq_save(flags);
1141 rdp = RCU_DATA_ME();
1142 spin_lock(&rdp->lock);
1143 list = rdp->donelist;
1144 if (list == NULL) {
1145 spin_unlock_irqrestore(&rdp->lock, flags);
1146 return;
1147 }
1148 rdp->donelist = NULL;
1149 rdp->donetail = &rdp->donelist;
1150 RCU_TRACE_RDP(rcupreempt_trace_done_remove, rdp);
1151 spin_unlock_irqrestore(&rdp->lock, flags);
1152 while (list) {
1153 next = list->next;
1154 list->func(list);
1155 list = next;
1156 RCU_TRACE_ME(rcupreempt_trace_invoke);
1157 }
1158}
1159
1160void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
1161{
1162 unsigned long flags;
1163 struct rcu_data *rdp;
1164
1165 head->func = func;
1166 head->next = NULL;
1167 local_irq_save(flags);
1168 rdp = RCU_DATA_ME();
1169 spin_lock(&rdp->lock);
1170 __rcu_advance_callbacks(rdp);
1171 *rdp->nexttail = head;
1172 rdp->nexttail = &head->next;
1173 RCU_TRACE_RDP(rcupreempt_trace_next_add, rdp);
1174 spin_unlock_irqrestore(&rdp->lock, flags);
1175}
1176EXPORT_SYMBOL_GPL(call_rcu);
1177
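A usage sketch for call_rcu() (hypothetical structure and helper names): the rcu_head is embedded in the caller's own structure, and the callback recovers it with container_of() and frees it after a grace period has elapsed.

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_entry {
	struct rcu_head rcu;
	int payload;
};

static void my_entry_free_rcu(struct rcu_head *head)
{
	struct my_entry *e = container_of(head, struct my_entry, rcu);

	kfree(e);
}

/* Call only after 'e' has been unlinked from all RCU-visible structures. */
static void my_entry_defer_free(struct my_entry *e)
{
	call_rcu(&e->rcu, my_entry_free_rcu);
}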
1178void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
1179{
1180 unsigned long flags;
1181 struct rcu_data *rdp;
1182 int wake_gp = 0;
1183
1184 head->func = func;
1185 head->next = NULL;
1186 local_irq_save(flags);
1187 rdp = RCU_DATA_ME();
1188 spin_lock(&rdp->lock);
1189 *rdp->nextschedtail = head;
1190 rdp->nextschedtail = &head->next;
1191 if (rdp->rcu_sched_sleeping) {
1192
1193 /* Grace-period processing might be sleeping... */
1194
1195 rdp->rcu_sched_sleeping = 0;
1196 wake_gp = 1;
1197 }
1198 spin_unlock_irqrestore(&rdp->lock, flags);
1199 if (wake_gp) {
1200
1201 /* Wake up grace-period processing, unless someone beat us. */
1202
1203 spin_lock_irqsave(&rcu_ctrlblk.schedlock, flags);
1204 if (rcu_ctrlblk.sched_sleep != rcu_sched_sleeping)
1205 wake_gp = 0;
1206 rcu_ctrlblk.sched_sleep = rcu_sched_not_sleeping;
1207 spin_unlock_irqrestore(&rcu_ctrlblk.schedlock, flags);
1208 if (wake_gp)
1209 wake_up_interruptible(&rcu_ctrlblk.sched_wq);
1210 }
1211}
1212EXPORT_SYMBOL_GPL(call_rcu_sched);
1213
1214/*
1215 * Wait until all currently running preempt_disable() code segments
1216 * (including hardware-irq-disable segments) complete. Note that
1217 * in -rt this does -not- necessarily result in all currently executing
1218 * interrupt -handlers- having completed.
1219 */
1220void __synchronize_sched(void)
1221{
1222 struct rcu_synchronize rcu;
1223
1224 if (num_online_cpus() == 1)
1225 return; /* blocking is gp if only one CPU! */
1226
1227 init_completion(&rcu.completion);
1228 /* Will wake me after RCU finished. */
1229 call_rcu_sched(&rcu.head, wakeme_after_rcu);
1230 /* Wait for it. */
1231 wait_for_completion(&rcu.completion);
1232}
1233EXPORT_SYMBOL_GPL(__synchronize_sched);
1234
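A brief sketch, with hypothetical per-CPU data, of the reader/updater pairing that __synchronize_sched() is meant for: sched-RCU readers rely on preempt_disable() (or disabled irqs) rather than rcu_read_lock(), and the updater waits for every such section to finish before reclaiming.

#include <linux/percpu.h>
#include <linux/rcupdate.h>

static DEFINE_PER_CPU(int, my_counter);		/* hypothetical */

void my_sched_reader(void)
{
	preempt_disable();		/* sched-RCU read-side critical section */
	__get_cpu_var(my_counter)++;
	preempt_enable();
}

void my_sched_updater(void)
{
	/* ... make the old state unreachable to new readers here ... */
	__synchronize_sched();		/* wait for all preempt-disabled readers */
	/* ... the old state may now be freed or reused ... */
}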
1235/*
1236 * kthread function that manages call_rcu_sched grace periods.
1237 */
1238static int rcu_sched_grace_period(void *arg)
1239{
1240 int couldsleep; /* might sleep after current pass. */
1241 int couldsleepnext = 0; /* might sleep after next pass. */
1242 int cpu;
1243 unsigned long flags;
1244 struct rcu_data *rdp;
1245 int ret;
1246
1247 /*
1248 * Each pass through the following loop handles one
1249 * rcu_sched grace period cycle.
1250 */
1251 do {
1252 /* Save each CPU's current state. */
1253
1254 for_each_online_cpu(cpu) {
1255 dyntick_save_progress_counter_sched(cpu);
1256 save_qsctr_sched(cpu);
1257 }
1258
1259 /*
1260 * Sleep for about an RCU grace-period's worth to
1261 * allow better batching and to consume less CPU.
1262 */
1263 schedule_timeout_interruptible(RCU_SCHED_BATCH_TIME);
1264
1265 /*
1266 * If there was nothing to do last time, prepare to
1267 * sleep at the end of the current grace period cycle.
1268 */
1269 couldsleep = couldsleepnext;
1270 couldsleepnext = 1;
1271 if (couldsleep) {
1272 spin_lock_irqsave(&rcu_ctrlblk.schedlock, flags);
1273 rcu_ctrlblk.sched_sleep = rcu_sched_sleep_prep;
1274 spin_unlock_irqrestore(&rcu_ctrlblk.schedlock, flags);
1275 }
1276
1277 /*
1278 * Wait on each CPU in turn to have either visited
1279 * a quiescent state or been in dynticks-idle mode.
1280 */
1281 for_each_online_cpu(cpu) {
1282 while (rcu_qsctr_inc_needed(cpu) &&
1283 rcu_qsctr_inc_needed_dyntick(cpu)) {
1284 /* resched_cpu(cpu); @@@ */
1285 schedule_timeout_interruptible(1);
1286 }
1287 }
1288
1289 /* Advance callbacks for each CPU. */
1290
1291 for_each_online_cpu(cpu) {
1292
1293 rdp = RCU_DATA_CPU(cpu);
1294 spin_lock_irqsave(&rdp->lock, flags);
1295
1296 /*
1297 * We are running on this CPU irq-disabled, so no
1298 * CPU can go offline until we re-enable irqs.
1299 * The current CPU might have already gone
 1300 * offline (between the for_each_online_cpu and
1301 * the spin_lock_irqsave), but in that case all its
1302 * callback lists will be empty, so no harm done.
1303 *
1304 * Advance the callbacks! We share normal RCU's
1305 * donelist, since callbacks are invoked the
1306 * same way in either case.
1307 */
1308 if (rdp->waitschedlist != NULL) {
1309 *rdp->donetail = rdp->waitschedlist;
1310 rdp->donetail = rdp->waitschedtail;
1311
1312 /*
1313 * Next rcu_check_callbacks() will
1314 * do the required raise_softirq().
1315 */
1316 }
1317 if (rdp->nextschedlist != NULL) {
1318 rdp->waitschedlist = rdp->nextschedlist;
1319 rdp->waitschedtail = rdp->nextschedtail;
1320 couldsleep = 0;
1321 couldsleepnext = 0;
1322 } else {
1323 rdp->waitschedlist = NULL;
1324 rdp->waitschedtail = &rdp->waitschedlist;
1325 }
1326 rdp->nextschedlist = NULL;
1327 rdp->nextschedtail = &rdp->nextschedlist;
1328
1329 /* Mark sleep intention. */
1330
1331 rdp->rcu_sched_sleeping = couldsleep;
1332
1333 spin_unlock_irqrestore(&rdp->lock, flags);
1334 }
1335
1336 /* If we saw callbacks on the last scan, go deal with them. */
1337
1338 if (!couldsleep)
1339 continue;
1340
1341 /* Attempt to block... */
1342
1343 spin_lock_irqsave(&rcu_ctrlblk.schedlock, flags);
1344 if (rcu_ctrlblk.sched_sleep != rcu_sched_sleep_prep) {
1345
1346 /*
1347 * Someone posted a callback after we scanned.
1348 * Go take care of it.
1349 */
1350 spin_unlock_irqrestore(&rcu_ctrlblk.schedlock, flags);
1351 couldsleepnext = 0;
1352 continue;
1353 }
1354
1355 /* Block until the next person posts a callback. */
1356
1357 rcu_ctrlblk.sched_sleep = rcu_sched_sleeping;
1358 spin_unlock_irqrestore(&rcu_ctrlblk.schedlock, flags);
1359 ret = 0; /* unused */
1360 __wait_event_interruptible(rcu_ctrlblk.sched_wq,
1361 rcu_ctrlblk.sched_sleep != rcu_sched_sleeping,
1362 ret);
1363
1364 couldsleepnext = 0;
1365
1366 } while (!kthread_should_stop());
1367
1368 return (0);
1369}
1370
1371/*
1372 * Check to see if any future RCU-related work will need to be done
1373 * by the current CPU, even if none need be done immediately, returning
1374 * 1 if so. Assumes that notifiers would take care of handling any
1375 * outstanding requests from the RCU core.
1376 *
1377 * This function is part of the RCU implementation; it is -not-
1378 * an exported member of the RCU API.
1379 */
1380int rcu_needs_cpu(int cpu)
1381{
1382 struct rcu_data *rdp = RCU_DATA_CPU(cpu);
1383
1384 return (rdp->donelist != NULL ||
1385 !!rdp->waitlistcount ||
1386 rdp->nextlist != NULL ||
1387 rdp->nextschedlist != NULL ||
1388 rdp->waitschedlist != NULL);
1389}
1390
1391int rcu_pending(int cpu)
1392{
1393 struct rcu_data *rdp = RCU_DATA_CPU(cpu);
1394
1395 /* The CPU has at least one callback queued somewhere. */
1396
1397 if (rdp->donelist != NULL ||
1398 !!rdp->waitlistcount ||
1399 rdp->nextlist != NULL ||
1400 rdp->nextschedlist != NULL ||
1401 rdp->waitschedlist != NULL)
1402 return 1;
1403
1404 /* The RCU core needs an acknowledgement from this CPU. */
1405
1406 if ((per_cpu(rcu_flip_flag, cpu) == rcu_flipped) ||
1407 (per_cpu(rcu_mb_flag, cpu) == rcu_mb_needed))
1408 return 1;
1409
1410 /* This CPU has fallen behind the global grace-period number. */
1411
1412 if (rdp->completed != rcu_ctrlblk.completed)
1413 return 1;
1414
1415 /* Nothing needed from this CPU. */
1416
1417 return 0;
1418}
1419
1420static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
1421 unsigned long action, void *hcpu)
1422{
1423 long cpu = (long)hcpu;
1424
1425 switch (action) {
1426 case CPU_UP_PREPARE:
1427 case CPU_UP_PREPARE_FROZEN:
1428 rcu_online_cpu(cpu);
1429 break;
1430 case CPU_UP_CANCELED:
1431 case CPU_UP_CANCELED_FROZEN:
1432 case CPU_DEAD:
1433 case CPU_DEAD_FROZEN:
1434 rcu_offline_cpu(cpu);
1435 break;
1436 default:
1437 break;
1438 }
1439 return NOTIFY_OK;
1440}
1441
1442static struct notifier_block __cpuinitdata rcu_nb = {
1443 .notifier_call = rcu_cpu_notify,
1444};
1445
1446void __init __rcu_init(void)
1447{
1448 int cpu;
1449 int i;
1450 struct rcu_data *rdp;
1451
1452 printk(KERN_NOTICE "Preemptible RCU implementation.\n");
1453 for_each_possible_cpu(cpu) {
1454 rdp = RCU_DATA_CPU(cpu);
1455 spin_lock_init(&rdp->lock);
1456 rdp->completed = 0;
1457 rdp->waitlistcount = 0;
1458 rdp->nextlist = NULL;
1459 rdp->nexttail = &rdp->nextlist;
1460 for (i = 0; i < GP_STAGES; i++) {
1461 rdp->waitlist[i] = NULL;
1462 rdp->waittail[i] = &rdp->waitlist[i];
1463 }
1464 rdp->donelist = NULL;
1465 rdp->donetail = &rdp->donelist;
1466 rdp->rcu_flipctr[0] = 0;
1467 rdp->rcu_flipctr[1] = 0;
1468 rdp->nextschedlist = NULL;
1469 rdp->nextschedtail = &rdp->nextschedlist;
1470 rdp->waitschedlist = NULL;
1471 rdp->waitschedtail = &rdp->waitschedlist;
1472 rdp->rcu_sched_sleeping = 0;
1473 }
1474 register_cpu_notifier(&rcu_nb);
1475
1476 /*
1477 * We don't need protection against CPU-Hotplug here
1478 * since
1479 * a) If a CPU comes online while we are iterating over the
1480 * cpu_online_mask below, we would only end up making a
1481 * duplicate call to rcu_online_cpu() which sets the corresponding
1482 * CPU's mask in the rcu_cpu_online_map.
1483 *
1484 * b) A CPU cannot go offline at this point in time since the user
1485 * does not have access to the sysfs interface, nor do we
1486 * suspend the system.
1487 */
1488 for_each_online_cpu(cpu)
1489 rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE, (void *)(long) cpu);
1490
1491 open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
1492}
1493
1494/*
1495 * Late-boot-time RCU initialization that must wait until after scheduler
1496 * has been initialized.
1497 */
1498void __init rcu_init_sched(void)
1499{
1500 rcu_sched_grace_period_task = kthread_run(rcu_sched_grace_period,
1501 NULL,
1502 "rcu_sched_grace_period");
1503 WARN_ON(IS_ERR(rcu_sched_grace_period_task));
1504}
1505
1506#ifdef CONFIG_RCU_TRACE
1507long *rcupreempt_flipctr(int cpu)
1508{
1509 return &RCU_DATA_CPU(cpu)->rcu_flipctr[0];
1510}
1511EXPORT_SYMBOL_GPL(rcupreempt_flipctr);
1512
1513int rcupreempt_flip_flag(int cpu)
1514{
1515 return per_cpu(rcu_flip_flag, cpu);
1516}
1517EXPORT_SYMBOL_GPL(rcupreempt_flip_flag);
1518
1519int rcupreempt_mb_flag(int cpu)
1520{
1521 return per_cpu(rcu_mb_flag, cpu);
1522}
1523EXPORT_SYMBOL_GPL(rcupreempt_mb_flag);
1524
1525char *rcupreempt_try_flip_state_name(void)
1526{
1527 return rcu_try_flip_state_names[rcu_ctrlblk.rcu_try_flip_state];
1528}
1529EXPORT_SYMBOL_GPL(rcupreempt_try_flip_state_name);
1530
1531struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu)
1532{
1533 struct rcu_data *rdp = RCU_DATA_CPU(cpu);
1534
1535 return &rdp->trace;
1536}
1537EXPORT_SYMBOL_GPL(rcupreempt_trace_cpu);
1538
 1539#endif /* #ifdef CONFIG_RCU_TRACE */
diff --git a/kernel/rcupreempt_trace.c b/kernel/rcupreempt_trace.c
deleted file mode 100644
index 7c2665cac172..000000000000
--- a/kernel/rcupreempt_trace.c
+++ /dev/null
@@ -1,334 +0,0 @@
1/*
2 * Read-Copy Update tracing for realtime implementation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright IBM Corporation, 2006
19 *
20 * Papers: http://www.rdrop.com/users/paulmck/RCU
21 *
22 * For detailed explanation of Read-Copy Update mechanism see -
23 * Documentation/RCU/ *.txt
24 *
25 */
26#include <linux/types.h>
27#include <linux/kernel.h>
28#include <linux/init.h>
29#include <linux/spinlock.h>
30#include <linux/smp.h>
31#include <linux/rcupdate.h>
32#include <linux/interrupt.h>
33#include <linux/sched.h>
34#include <asm/atomic.h>
35#include <linux/bitops.h>
36#include <linux/module.h>
37#include <linux/completion.h>
38#include <linux/moduleparam.h>
39#include <linux/percpu.h>
40#include <linux/notifier.h>
41#include <linux/cpu.h>
42#include <linux/mutex.h>
43#include <linux/rcupreempt_trace.h>
44#include <linux/debugfs.h>
45
46static struct mutex rcupreempt_trace_mutex;
47static char *rcupreempt_trace_buf;
48#define RCUPREEMPT_TRACE_BUF_SIZE 4096
49
50void rcupreempt_trace_move2done(struct rcupreempt_trace *trace)
51{
52 trace->done_length += trace->wait_length;
53 trace->done_add += trace->wait_length;
54 trace->wait_length = 0;
55}
56void rcupreempt_trace_move2wait(struct rcupreempt_trace *trace)
57{
58 trace->wait_length += trace->next_length;
59 trace->wait_add += trace->next_length;
60 trace->next_length = 0;
61}
62void rcupreempt_trace_try_flip_1(struct rcupreempt_trace *trace)
63{
64 atomic_inc(&trace->rcu_try_flip_1);
65}
66void rcupreempt_trace_try_flip_e1(struct rcupreempt_trace *trace)
67{
68 atomic_inc(&trace->rcu_try_flip_e1);
69}
70void rcupreempt_trace_try_flip_i1(struct rcupreempt_trace *trace)
71{
72 trace->rcu_try_flip_i1++;
73}
74void rcupreempt_trace_try_flip_ie1(struct rcupreempt_trace *trace)
75{
76 trace->rcu_try_flip_ie1++;
77}
78void rcupreempt_trace_try_flip_g1(struct rcupreempt_trace *trace)
79{
80 trace->rcu_try_flip_g1++;
81}
82void rcupreempt_trace_try_flip_a1(struct rcupreempt_trace *trace)
83{
84 trace->rcu_try_flip_a1++;
85}
86void rcupreempt_trace_try_flip_ae1(struct rcupreempt_trace *trace)
87{
88 trace->rcu_try_flip_ae1++;
89}
90void rcupreempt_trace_try_flip_a2(struct rcupreempt_trace *trace)
91{
92 trace->rcu_try_flip_a2++;
93}
94void rcupreempt_trace_try_flip_z1(struct rcupreempt_trace *trace)
95{
96 trace->rcu_try_flip_z1++;
97}
98void rcupreempt_trace_try_flip_ze1(struct rcupreempt_trace *trace)
99{
100 trace->rcu_try_flip_ze1++;
101}
102void rcupreempt_trace_try_flip_z2(struct rcupreempt_trace *trace)
103{
104 trace->rcu_try_flip_z2++;
105}
106void rcupreempt_trace_try_flip_m1(struct rcupreempt_trace *trace)
107{
108 trace->rcu_try_flip_m1++;
109}
110void rcupreempt_trace_try_flip_me1(struct rcupreempt_trace *trace)
111{
112 trace->rcu_try_flip_me1++;
113}
114void rcupreempt_trace_try_flip_m2(struct rcupreempt_trace *trace)
115{
116 trace->rcu_try_flip_m2++;
117}
118void rcupreempt_trace_check_callbacks(struct rcupreempt_trace *trace)
119{
120 trace->rcu_check_callbacks++;
121}
122void rcupreempt_trace_done_remove(struct rcupreempt_trace *trace)
123{
124 trace->done_remove += trace->done_length;
125 trace->done_length = 0;
126}
127void rcupreempt_trace_invoke(struct rcupreempt_trace *trace)
128{
129 atomic_inc(&trace->done_invoked);
130}
131void rcupreempt_trace_next_add(struct rcupreempt_trace *trace)
132{
133 trace->next_add++;
134 trace->next_length++;
135}
136
137static void rcupreempt_trace_sum(struct rcupreempt_trace *sp)
138{
139 struct rcupreempt_trace *cp;
140 int cpu;
141
142 memset(sp, 0, sizeof(*sp));
143 for_each_possible_cpu(cpu) {
144 cp = rcupreempt_trace_cpu(cpu);
145 sp->next_length += cp->next_length;
146 sp->next_add += cp->next_add;
147 sp->wait_length += cp->wait_length;
148 sp->wait_add += cp->wait_add;
149 sp->done_length += cp->done_length;
150 sp->done_add += cp->done_add;
151 sp->done_remove += cp->done_remove;
152 atomic_add(atomic_read(&cp->done_invoked), &sp->done_invoked);
153 sp->rcu_check_callbacks += cp->rcu_check_callbacks;
154 atomic_add(atomic_read(&cp->rcu_try_flip_1),
155 &sp->rcu_try_flip_1);
156 atomic_add(atomic_read(&cp->rcu_try_flip_e1),
157 &sp->rcu_try_flip_e1);
158 sp->rcu_try_flip_i1 += cp->rcu_try_flip_i1;
159 sp->rcu_try_flip_ie1 += cp->rcu_try_flip_ie1;
160 sp->rcu_try_flip_g1 += cp->rcu_try_flip_g1;
161 sp->rcu_try_flip_a1 += cp->rcu_try_flip_a1;
162 sp->rcu_try_flip_ae1 += cp->rcu_try_flip_ae1;
163 sp->rcu_try_flip_a2 += cp->rcu_try_flip_a2;
164 sp->rcu_try_flip_z1 += cp->rcu_try_flip_z1;
165 sp->rcu_try_flip_ze1 += cp->rcu_try_flip_ze1;
166 sp->rcu_try_flip_z2 += cp->rcu_try_flip_z2;
167 sp->rcu_try_flip_m1 += cp->rcu_try_flip_m1;
168 sp->rcu_try_flip_me1 += cp->rcu_try_flip_me1;
169 sp->rcu_try_flip_m2 += cp->rcu_try_flip_m2;
170 }
171}
172
173static ssize_t rcustats_read(struct file *filp, char __user *buffer,
174 size_t count, loff_t *ppos)
175{
176 struct rcupreempt_trace trace;
177 ssize_t bcount;
178 int cnt = 0;
179
180 rcupreempt_trace_sum(&trace);
181 mutex_lock(&rcupreempt_trace_mutex);
182 snprintf(&rcupreempt_trace_buf[cnt], RCUPREEMPT_TRACE_BUF_SIZE - cnt,
183 "ggp=%ld rcc=%ld\n",
184 rcu_batches_completed(),
185 trace.rcu_check_callbacks);
186 snprintf(&rcupreempt_trace_buf[cnt], RCUPREEMPT_TRACE_BUF_SIZE - cnt,
187 "na=%ld nl=%ld wa=%ld wl=%ld da=%ld dl=%ld dr=%ld di=%d\n"
188 "1=%d e1=%d i1=%ld ie1=%ld g1=%ld a1=%ld ae1=%ld a2=%ld\n"
189 "z1=%ld ze1=%ld z2=%ld m1=%ld me1=%ld m2=%ld\n",
190
191 trace.next_add, trace.next_length,
192 trace.wait_add, trace.wait_length,
193 trace.done_add, trace.done_length,
194 trace.done_remove, atomic_read(&trace.done_invoked),
195 atomic_read(&trace.rcu_try_flip_1),
196 atomic_read(&trace.rcu_try_flip_e1),
197 trace.rcu_try_flip_i1, trace.rcu_try_flip_ie1,
198 trace.rcu_try_flip_g1,
199 trace.rcu_try_flip_a1, trace.rcu_try_flip_ae1,
200 trace.rcu_try_flip_a2,
201 trace.rcu_try_flip_z1, trace.rcu_try_flip_ze1,
202 trace.rcu_try_flip_z2,
203 trace.rcu_try_flip_m1, trace.rcu_try_flip_me1,
204 trace.rcu_try_flip_m2);
205 bcount = simple_read_from_buffer(buffer, count, ppos,
206 rcupreempt_trace_buf, strlen(rcupreempt_trace_buf));
207 mutex_unlock(&rcupreempt_trace_mutex);
208 return bcount;
209}
210
211static ssize_t rcugp_read(struct file *filp, char __user *buffer,
212 size_t count, loff_t *ppos)
213{
214 long oldgp = rcu_batches_completed();
215 ssize_t bcount;
216
217 mutex_lock(&rcupreempt_trace_mutex);
218 synchronize_rcu();
219 snprintf(rcupreempt_trace_buf, RCUPREEMPT_TRACE_BUF_SIZE,
220 "oldggp=%ld newggp=%ld\n", oldgp, rcu_batches_completed());
221 bcount = simple_read_from_buffer(buffer, count, ppos,
222 rcupreempt_trace_buf, strlen(rcupreempt_trace_buf));
223 mutex_unlock(&rcupreempt_trace_mutex);
224 return bcount;
225}
226
227static ssize_t rcuctrs_read(struct file *filp, char __user *buffer,
228 size_t count, loff_t *ppos)
229{
230 int cnt = 0;
231 int cpu;
232 int f = rcu_batches_completed() & 0x1;
233 ssize_t bcount;
234
235 mutex_lock(&rcupreempt_trace_mutex);
236
237 cnt += snprintf(&rcupreempt_trace_buf[cnt], RCUPREEMPT_TRACE_BUF_SIZE,
238 "CPU last cur F M\n");
239 for_each_online_cpu(cpu) {
240 long *flipctr = rcupreempt_flipctr(cpu);
241 cnt += snprintf(&rcupreempt_trace_buf[cnt],
242 RCUPREEMPT_TRACE_BUF_SIZE - cnt,
243 "%3d %4ld %3ld %d %d\n",
244 cpu,
245 flipctr[!f],
246 flipctr[f],
247 rcupreempt_flip_flag(cpu),
248 rcupreempt_mb_flag(cpu));
249 }
250 cnt += snprintf(&rcupreempt_trace_buf[cnt],
251 RCUPREEMPT_TRACE_BUF_SIZE - cnt,
252 "ggp = %ld, state = %s\n",
253 rcu_batches_completed(),
254 rcupreempt_try_flip_state_name());
255 cnt += snprintf(&rcupreempt_trace_buf[cnt],
256 RCUPREEMPT_TRACE_BUF_SIZE - cnt,
257 "\n");
258 bcount = simple_read_from_buffer(buffer, count, ppos,
259 rcupreempt_trace_buf, strlen(rcupreempt_trace_buf));
260 mutex_unlock(&rcupreempt_trace_mutex);
261 return bcount;
262}
263
264static struct file_operations rcustats_fops = {
265 .owner = THIS_MODULE,
266 .read = rcustats_read,
267};
268
269static struct file_operations rcugp_fops = {
270 .owner = THIS_MODULE,
271 .read = rcugp_read,
272};
273
274static struct file_operations rcuctrs_fops = {
275 .owner = THIS_MODULE,
276 .read = rcuctrs_read,
277};
278
279static struct dentry *rcudir, *statdir, *ctrsdir, *gpdir;
280static int rcupreempt_debugfs_init(void)
281{
282 rcudir = debugfs_create_dir("rcu", NULL);
283 if (!rcudir)
284 goto out;
285 statdir = debugfs_create_file("rcustats", 0444, rcudir,
286 NULL, &rcustats_fops);
287 if (!statdir)
288 goto free_out;
289
290 gpdir = debugfs_create_file("rcugp", 0444, rcudir, NULL, &rcugp_fops);
291 if (!gpdir)
292 goto free_out;
293
294 ctrsdir = debugfs_create_file("rcuctrs", 0444, rcudir,
295 NULL, &rcuctrs_fops);
296 if (!ctrsdir)
297 goto free_out;
298 return 0;
299free_out:
300 if (statdir)
301 debugfs_remove(statdir);
302 if (gpdir)
303 debugfs_remove(gpdir);
304 debugfs_remove(rcudir);
305out:
306 return 1;
307}
308
309static int __init rcupreempt_trace_init(void)
310{
311 int ret;
312
313 mutex_init(&rcupreempt_trace_mutex);
314 rcupreempt_trace_buf = kmalloc(RCUPREEMPT_TRACE_BUF_SIZE, GFP_KERNEL);
315 if (!rcupreempt_trace_buf)
316 return 1;
317 ret = rcupreempt_debugfs_init();
318 if (ret)
319 kfree(rcupreempt_trace_buf);
320 return ret;
321}
322
323static void __exit rcupreempt_trace_cleanup(void)
324{
325 debugfs_remove(statdir);
326 debugfs_remove(gpdir);
327 debugfs_remove(ctrsdir);
328 debugfs_remove(rcudir);
329 kfree(rcupreempt_trace_buf);
330}
331
332
333module_init(rcupreempt_trace_init);
334module_exit(rcupreempt_trace_cleanup);
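
[Note] The three debugfs read handlers above share one pattern: format the counters into a single preallocated buffer under rcupreempt_trace_mutex, then let simple_read_from_buffer() copy whatever slice the caller asked for, starting at *ppos. A minimal user-space sketch of that pattern follows; the buffer size, stats_read() and the counter values are illustrative, not kernel symbols.

#include <stdio.h>
#include <string.h>

#define BUF_SIZE 256

static char trace_buf[BUF_SIZE];

/* Model of a debugfs-style read: format the state once, then serve a
 * window of it starting at *ppos, advancing *ppos by what was copied. */
static long stats_read(char *out, long count, long *ppos, long ggp, long rcc)
{
	long len, n;

	snprintf(trace_buf, BUF_SIZE, "ggp=%ld rcc=%ld\n", ggp, rcc);
	len = strlen(trace_buf);
	if (*ppos >= len)
		return 0;			/* EOF */
	n = len - *ppos;
	if (n > count)
		n = count;
	memcpy(out, trace_buf + *ppos, n);
	*ppos += n;
	return n;
}

int main(void)
{
	char out[8];
	long pos = 0, n;

	/* Read the formatted stats a few bytes at a time, as a pager would. */
	while ((n = stats_read(out, sizeof(out) - 1, &pos, 42, 7)) > 0) {
		out[n] = '\0';
		printf("chunk: \"%s\"\n", out);
	}
	return 0;
}
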
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 9b4a975a4b4a..b33db539a8ad 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -257,14 +257,14 @@ struct rcu_torture_ops {
257 void (*init)(void); 257 void (*init)(void);
258 void (*cleanup)(void); 258 void (*cleanup)(void);
259 int (*readlock)(void); 259 int (*readlock)(void);
260 void (*readdelay)(struct rcu_random_state *rrsp); 260 void (*read_delay)(struct rcu_random_state *rrsp);
261 void (*readunlock)(int idx); 261 void (*readunlock)(int idx);
262 int (*completed)(void); 262 int (*completed)(void);
263 void (*deferredfree)(struct rcu_torture *p); 263 void (*deferred_free)(struct rcu_torture *p);
264 void (*sync)(void); 264 void (*sync)(void);
265 void (*cb_barrier)(void); 265 void (*cb_barrier)(void);
266 int (*stats)(char *page); 266 int (*stats)(char *page);
267 int irqcapable; 267 int irq_capable;
268 char *name; 268 char *name;
269}; 269};
270static struct rcu_torture_ops *cur_ops = NULL; 270static struct rcu_torture_ops *cur_ops = NULL;
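
[Note] The field renames above (read_delay, deferred_free, irq_capable) touch the per-flavor vtable that the torture test calls through: the reader and writer kthreads only ever use cur_ops, so adding an RCU flavor means supplying one more ops table. A small user-space sketch of the read-side calling convention; struct torture_ops and the dummy_* functions here are illustrative stand-ins, not the kernel definitions.

#include <stdio.h>

/* Illustrative stand-in for the per-flavor ops vtable. */
struct torture_ops {
	int  (*readlock)(void);
	void (*read_delay)(void);
	void (*readunlock)(int idx);
	int  (*completed)(void);
	const char *name;
};

static int completions;

static int  dummy_readlock(void)      { return 0; }
static void dummy_read_delay(void)    { /* would spin or sleep briefly */ }
static void dummy_readunlock(int idx) { (void)idx; }
static int  dummy_completed(void)     { return completions; }

static struct torture_ops dummy_ops = {
	.readlock    = dummy_readlock,
	.read_delay  = dummy_read_delay,
	.readunlock  = dummy_readunlock,
	.completed   = dummy_completed,
	.name        = "dummy",
};

static struct torture_ops *cur_ops = &dummy_ops;

int main(void)
{
	/* The reader path only ever sees the vtable. */
	int before = cur_ops->completed();
	int idx = cur_ops->readlock();

	cur_ops->read_delay();
	cur_ops->readunlock(idx);
	printf("%s: %d grace periods elapsed during the read\n",
	       cur_ops->name, cur_ops->completed() - before);
	return 0;
}
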
@@ -320,7 +320,7 @@ rcu_torture_cb(struct rcu_head *p)
320 rp->rtort_mbtest = 0; 320 rp->rtort_mbtest = 0;
321 rcu_torture_free(rp); 321 rcu_torture_free(rp);
322 } else 322 } else
323 cur_ops->deferredfree(rp); 323 cur_ops->deferred_free(rp);
324} 324}
325 325
326static void rcu_torture_deferred_free(struct rcu_torture *p) 326static void rcu_torture_deferred_free(struct rcu_torture *p)
@@ -329,18 +329,18 @@ static void rcu_torture_deferred_free(struct rcu_torture *p)
329} 329}
330 330
331static struct rcu_torture_ops rcu_ops = { 331static struct rcu_torture_ops rcu_ops = {
332 .init = NULL, 332 .init = NULL,
333 .cleanup = NULL, 333 .cleanup = NULL,
334 .readlock = rcu_torture_read_lock, 334 .readlock = rcu_torture_read_lock,
335 .readdelay = rcu_read_delay, 335 .read_delay = rcu_read_delay,
336 .readunlock = rcu_torture_read_unlock, 336 .readunlock = rcu_torture_read_unlock,
337 .completed = rcu_torture_completed, 337 .completed = rcu_torture_completed,
338 .deferredfree = rcu_torture_deferred_free, 338 .deferred_free = rcu_torture_deferred_free,
339 .sync = synchronize_rcu, 339 .sync = synchronize_rcu,
340 .cb_barrier = rcu_barrier, 340 .cb_barrier = rcu_barrier,
341 .stats = NULL, 341 .stats = NULL,
342 .irqcapable = 1, 342 .irq_capable = 1,
343 .name = "rcu" 343 .name = "rcu"
344}; 344};
345 345
346static void rcu_sync_torture_deferred_free(struct rcu_torture *p) 346static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
@@ -370,18 +370,18 @@ static void rcu_sync_torture_init(void)
370} 370}
371 371
372static struct rcu_torture_ops rcu_sync_ops = { 372static struct rcu_torture_ops rcu_sync_ops = {
373 .init = rcu_sync_torture_init, 373 .init = rcu_sync_torture_init,
374 .cleanup = NULL, 374 .cleanup = NULL,
375 .readlock = rcu_torture_read_lock, 375 .readlock = rcu_torture_read_lock,
376 .readdelay = rcu_read_delay, 376 .read_delay = rcu_read_delay,
377 .readunlock = rcu_torture_read_unlock, 377 .readunlock = rcu_torture_read_unlock,
378 .completed = rcu_torture_completed, 378 .completed = rcu_torture_completed,
379 .deferredfree = rcu_sync_torture_deferred_free, 379 .deferred_free = rcu_sync_torture_deferred_free,
380 .sync = synchronize_rcu, 380 .sync = synchronize_rcu,
381 .cb_barrier = NULL, 381 .cb_barrier = NULL,
382 .stats = NULL, 382 .stats = NULL,
383 .irqcapable = 1, 383 .irq_capable = 1,
384 .name = "rcu_sync" 384 .name = "rcu_sync"
385}; 385};
386 386
387/* 387/*
@@ -432,33 +432,33 @@ static void rcu_bh_torture_synchronize(void)
432} 432}
433 433
434static struct rcu_torture_ops rcu_bh_ops = { 434static struct rcu_torture_ops rcu_bh_ops = {
435 .init = NULL, 435 .init = NULL,
436 .cleanup = NULL, 436 .cleanup = NULL,
437 .readlock = rcu_bh_torture_read_lock, 437 .readlock = rcu_bh_torture_read_lock,
438 .readdelay = rcu_read_delay, /* just reuse rcu's version. */ 438 .read_delay = rcu_read_delay, /* just reuse rcu's version. */
439 .readunlock = rcu_bh_torture_read_unlock, 439 .readunlock = rcu_bh_torture_read_unlock,
440 .completed = rcu_bh_torture_completed, 440 .completed = rcu_bh_torture_completed,
441 .deferredfree = rcu_bh_torture_deferred_free, 441 .deferred_free = rcu_bh_torture_deferred_free,
442 .sync = rcu_bh_torture_synchronize, 442 .sync = rcu_bh_torture_synchronize,
443 .cb_barrier = rcu_barrier_bh, 443 .cb_barrier = rcu_barrier_bh,
444 .stats = NULL, 444 .stats = NULL,
445 .irqcapable = 1, 445 .irq_capable = 1,
446 .name = "rcu_bh" 446 .name = "rcu_bh"
447}; 447};
448 448
449static struct rcu_torture_ops rcu_bh_sync_ops = { 449static struct rcu_torture_ops rcu_bh_sync_ops = {
450 .init = rcu_sync_torture_init, 450 .init = rcu_sync_torture_init,
451 .cleanup = NULL, 451 .cleanup = NULL,
452 .readlock = rcu_bh_torture_read_lock, 452 .readlock = rcu_bh_torture_read_lock,
453 .readdelay = rcu_read_delay, /* just reuse rcu's version. */ 453 .read_delay = rcu_read_delay, /* just reuse rcu's version. */
454 .readunlock = rcu_bh_torture_read_unlock, 454 .readunlock = rcu_bh_torture_read_unlock,
455 .completed = rcu_bh_torture_completed, 455 .completed = rcu_bh_torture_completed,
456 .deferredfree = rcu_sync_torture_deferred_free, 456 .deferred_free = rcu_sync_torture_deferred_free,
457 .sync = rcu_bh_torture_synchronize, 457 .sync = rcu_bh_torture_synchronize,
458 .cb_barrier = NULL, 458 .cb_barrier = NULL,
459 .stats = NULL, 459 .stats = NULL,
460 .irqcapable = 1, 460 .irq_capable = 1,
461 .name = "rcu_bh_sync" 461 .name = "rcu_bh_sync"
462}; 462};
463 463
464/* 464/*
@@ -530,17 +530,17 @@ static int srcu_torture_stats(char *page)
530} 530}
531 531
532static struct rcu_torture_ops srcu_ops = { 532static struct rcu_torture_ops srcu_ops = {
533 .init = srcu_torture_init, 533 .init = srcu_torture_init,
534 .cleanup = srcu_torture_cleanup, 534 .cleanup = srcu_torture_cleanup,
535 .readlock = srcu_torture_read_lock, 535 .readlock = srcu_torture_read_lock,
536 .readdelay = srcu_read_delay, 536 .read_delay = srcu_read_delay,
537 .readunlock = srcu_torture_read_unlock, 537 .readunlock = srcu_torture_read_unlock,
538 .completed = srcu_torture_completed, 538 .completed = srcu_torture_completed,
539 .deferredfree = rcu_sync_torture_deferred_free, 539 .deferred_free = rcu_sync_torture_deferred_free,
540 .sync = srcu_torture_synchronize, 540 .sync = srcu_torture_synchronize,
541 .cb_barrier = NULL, 541 .cb_barrier = NULL,
542 .stats = srcu_torture_stats, 542 .stats = srcu_torture_stats,
543 .name = "srcu" 543 .name = "srcu"
544}; 544};
545 545
546/* 546/*
@@ -574,32 +574,49 @@ static void sched_torture_synchronize(void)
574} 574}
575 575
576static struct rcu_torture_ops sched_ops = { 576static struct rcu_torture_ops sched_ops = {
577 .init = rcu_sync_torture_init, 577 .init = rcu_sync_torture_init,
578 .cleanup = NULL, 578 .cleanup = NULL,
579 .readlock = sched_torture_read_lock, 579 .readlock = sched_torture_read_lock,
580 .readdelay = rcu_read_delay, /* just reuse rcu's version. */ 580 .read_delay = rcu_read_delay, /* just reuse rcu's version. */
581 .readunlock = sched_torture_read_unlock, 581 .readunlock = sched_torture_read_unlock,
582 .completed = sched_torture_completed, 582 .completed = sched_torture_completed,
583 .deferredfree = rcu_sched_torture_deferred_free, 583 .deferred_free = rcu_sched_torture_deferred_free,
584 .sync = sched_torture_synchronize, 584 .sync = sched_torture_synchronize,
585 .cb_barrier = rcu_barrier_sched, 585 .cb_barrier = rcu_barrier_sched,
586 .stats = NULL, 586 .stats = NULL,
587 .irqcapable = 1, 587 .irq_capable = 1,
588 .name = "sched" 588 .name = "sched"
589}; 589};
590 590
591static struct rcu_torture_ops sched_ops_sync = { 591static struct rcu_torture_ops sched_ops_sync = {
592 .init = rcu_sync_torture_init, 592 .init = rcu_sync_torture_init,
593 .cleanup = NULL, 593 .cleanup = NULL,
594 .readlock = sched_torture_read_lock, 594 .readlock = sched_torture_read_lock,
595 .readdelay = rcu_read_delay, /* just reuse rcu's version. */ 595 .read_delay = rcu_read_delay, /* just reuse rcu's version. */
596 .readunlock = sched_torture_read_unlock, 596 .readunlock = sched_torture_read_unlock,
597 .completed = sched_torture_completed, 597 .completed = sched_torture_completed,
598 .deferredfree = rcu_sync_torture_deferred_free, 598 .deferred_free = rcu_sync_torture_deferred_free,
599 .sync = sched_torture_synchronize, 599 .sync = sched_torture_synchronize,
600 .cb_barrier = NULL, 600 .cb_barrier = NULL,
601 .stats = NULL, 601 .stats = NULL,
602 .name = "sched_sync" 602 .name = "sched_sync"
603};
604
605extern int rcu_expedited_torture_stats(char *page);
606
607static struct rcu_torture_ops sched_expedited_ops = {
608 .init = rcu_sync_torture_init,
609 .cleanup = NULL,
610 .readlock = sched_torture_read_lock,
611 .read_delay = rcu_read_delay, /* just reuse rcu's version. */
612 .readunlock = sched_torture_read_unlock,
613 .completed = sched_torture_completed,
614 .deferred_free = rcu_sync_torture_deferred_free,
615 .sync = synchronize_sched_expedited,
616 .cb_barrier = NULL,
617 .stats = rcu_expedited_torture_stats,
618 .irq_capable = 1,
619 .name = "sched_expedited"
603}; 620};
604 621
605/* 622/*
@@ -635,7 +652,7 @@ rcu_torture_writer(void *arg)
635 i = RCU_TORTURE_PIPE_LEN; 652 i = RCU_TORTURE_PIPE_LEN;
636 atomic_inc(&rcu_torture_wcount[i]); 653 atomic_inc(&rcu_torture_wcount[i]);
637 old_rp->rtort_pipe_count++; 654 old_rp->rtort_pipe_count++;
638 cur_ops->deferredfree(old_rp); 655 cur_ops->deferred_free(old_rp);
639 } 656 }
640 rcu_torture_current_version++; 657 rcu_torture_current_version++;
641 oldbatch = cur_ops->completed(); 658 oldbatch = cur_ops->completed();
@@ -700,7 +717,7 @@ static void rcu_torture_timer(unsigned long unused)
700 if (p->rtort_mbtest == 0) 717 if (p->rtort_mbtest == 0)
701 atomic_inc(&n_rcu_torture_mberror); 718 atomic_inc(&n_rcu_torture_mberror);
702 spin_lock(&rand_lock); 719 spin_lock(&rand_lock);
703 cur_ops->readdelay(&rand); 720 cur_ops->read_delay(&rand);
704 n_rcu_torture_timers++; 721 n_rcu_torture_timers++;
705 spin_unlock(&rand_lock); 722 spin_unlock(&rand_lock);
706 preempt_disable(); 723 preempt_disable();
@@ -738,11 +755,11 @@ rcu_torture_reader(void *arg)
738 755
739 VERBOSE_PRINTK_STRING("rcu_torture_reader task started"); 756 VERBOSE_PRINTK_STRING("rcu_torture_reader task started");
740 set_user_nice(current, 19); 757 set_user_nice(current, 19);
741 if (irqreader && cur_ops->irqcapable) 758 if (irqreader && cur_ops->irq_capable)
742 setup_timer_on_stack(&t, rcu_torture_timer, 0); 759 setup_timer_on_stack(&t, rcu_torture_timer, 0);
743 760
744 do { 761 do {
745 if (irqreader && cur_ops->irqcapable) { 762 if (irqreader && cur_ops->irq_capable) {
746 if (!timer_pending(&t)) 763 if (!timer_pending(&t))
747 mod_timer(&t, 1); 764 mod_timer(&t, 1);
748 } 765 }
@@ -757,7 +774,7 @@ rcu_torture_reader(void *arg)
757 } 774 }
758 if (p->rtort_mbtest == 0) 775 if (p->rtort_mbtest == 0)
759 atomic_inc(&n_rcu_torture_mberror); 776 atomic_inc(&n_rcu_torture_mberror);
760 cur_ops->readdelay(&rand); 777 cur_ops->read_delay(&rand);
761 preempt_disable(); 778 preempt_disable();
762 pipe_count = p->rtort_pipe_count; 779 pipe_count = p->rtort_pipe_count;
763 if (pipe_count > RCU_TORTURE_PIPE_LEN) { 780 if (pipe_count > RCU_TORTURE_PIPE_LEN) {
@@ -778,7 +795,7 @@ rcu_torture_reader(void *arg)
778 } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP); 795 } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
779 VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping"); 796 VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
780 rcutorture_shutdown_absorb("rcu_torture_reader"); 797 rcutorture_shutdown_absorb("rcu_torture_reader");
781 if (irqreader && cur_ops->irqcapable) 798 if (irqreader && cur_ops->irq_capable)
782 del_timer_sync(&t); 799 del_timer_sync(&t);
783 while (!kthread_should_stop()) 800 while (!kthread_should_stop())
784 schedule_timeout_uninterruptible(1); 801 schedule_timeout_uninterruptible(1);
@@ -1078,6 +1095,7 @@ rcu_torture_init(void)
1078 int firsterr = 0; 1095 int firsterr = 0;
1079 static struct rcu_torture_ops *torture_ops[] = 1096 static struct rcu_torture_ops *torture_ops[] =
1080 { &rcu_ops, &rcu_sync_ops, &rcu_bh_ops, &rcu_bh_sync_ops, 1097 { &rcu_ops, &rcu_sync_ops, &rcu_bh_ops, &rcu_bh_sync_ops,
1098 &sched_expedited_ops,
1081 &srcu_ops, &sched_ops, &sched_ops_sync, }; 1099 &srcu_ops, &sched_ops, &sched_ops_sync, };
1082 1100
1083 mutex_lock(&fullstop_mutex); 1101 mutex_lock(&fullstop_mutex);
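
[Note] The new sched_expedited_ops table is made selectable simply by appending it to torture_ops[]; rcu_torture_init() then matches the module's torture_type string against each entry's ->name to set cur_ops. A sketch of that selection step, with a made-up two-entry table:

#include <stdio.h>
#include <string.h>

/* Illustrative model of selecting an ops table by name, as
 * rcu_torture_init() does with its torture_type parameter. */
struct ops { const char *name; };

static struct ops rcu_ops = { "rcu" };
static struct ops sched_expedited_ops = { "sched_expedited" };

static struct ops *table[] = { &rcu_ops, &sched_expedited_ops };

static struct ops *pick(const char *type)
{
	unsigned int i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (strcmp(type, table[i]->name) == 0)
			return table[i];
	return NULL;	/* unknown type: the real code reports an error */
}

int main(void)
{
	struct ops *cur = pick("sched_expedited");

	printf("selected: %s\n", cur ? cur->name : "(none)");
	return 0;
}
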
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 7717b95c2027..6b11b07cfe7f 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -35,6 +35,7 @@
35#include <linux/rcupdate.h> 35#include <linux/rcupdate.h>
36#include <linux/interrupt.h> 36#include <linux/interrupt.h>
37#include <linux/sched.h> 37#include <linux/sched.h>
38#include <linux/nmi.h>
38#include <asm/atomic.h> 39#include <asm/atomic.h>
39#include <linux/bitops.h> 40#include <linux/bitops.h>
40#include <linux/module.h> 41#include <linux/module.h>
@@ -46,6 +47,8 @@
46#include <linux/mutex.h> 47#include <linux/mutex.h>
47#include <linux/time.h> 48#include <linux/time.h>
48 49
50#include "rcutree.h"
51
49#ifdef CONFIG_DEBUG_LOCK_ALLOC 52#ifdef CONFIG_DEBUG_LOCK_ALLOC
50static struct lock_class_key rcu_lock_key; 53static struct lock_class_key rcu_lock_key;
51struct lockdep_map rcu_lock_map = 54struct lockdep_map rcu_lock_map =
@@ -72,30 +75,59 @@ EXPORT_SYMBOL_GPL(rcu_lock_map);
72 .n_force_qs_ngp = 0, \ 75 .n_force_qs_ngp = 0, \
73} 76}
74 77
75struct rcu_state rcu_state = RCU_STATE_INITIALIZER(rcu_state); 78struct rcu_state rcu_sched_state = RCU_STATE_INITIALIZER(rcu_sched_state);
76DEFINE_PER_CPU(struct rcu_data, rcu_data); 79DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);
77 80
78struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state); 81struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state);
79DEFINE_PER_CPU(struct rcu_data, rcu_bh_data); 82DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
80 83
84extern long rcu_batches_completed_sched(void);
85static struct rcu_node *rcu_get_root(struct rcu_state *rsp);
86static void cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp,
87 struct rcu_node *rnp, unsigned long flags);
88static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags);
89#ifdef CONFIG_HOTPLUG_CPU
90static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp);
91#endif /* #ifdef CONFIG_HOTPLUG_CPU */
92static void __rcu_process_callbacks(struct rcu_state *rsp,
93 struct rcu_data *rdp);
94static void __call_rcu(struct rcu_head *head,
95 void (*func)(struct rcu_head *rcu),
96 struct rcu_state *rsp);
97static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp);
98static void __cpuinit rcu_init_percpu_data(int cpu, struct rcu_state *rsp,
99 int preemptable);
100
101#include "rcutree_plugin.h"
102
81/* 103/*
82 * Increment the quiescent state counter. 104 * Note a quiescent state. Because we do not need to know
83 * The counter is a bit degenerated: We do not need to know
84 * how many quiescent states passed, just if there was at least 105 * how many quiescent states passed, just if there was at least
85 * one since the start of the grace period. Thus just a flag. 106 * one since the start of the grace period, this just sets a flag.
86 */ 107 */
87void rcu_qsctr_inc(int cpu) 108void rcu_sched_qs(int cpu)
88{ 109{
89 struct rcu_data *rdp = &per_cpu(rcu_data, cpu); 110 unsigned long flags;
111 struct rcu_data *rdp;
112
113 local_irq_save(flags);
114 rdp = &per_cpu(rcu_sched_data, cpu);
90 rdp->passed_quiesc = 1; 115 rdp->passed_quiesc = 1;
91 rdp->passed_quiesc_completed = rdp->completed; 116 rdp->passed_quiesc_completed = rdp->completed;
117 rcu_preempt_qs(cpu);
118 local_irq_restore(flags);
92} 119}
93 120
94void rcu_bh_qsctr_inc(int cpu) 121void rcu_bh_qs(int cpu)
95{ 122{
96 struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu); 123 unsigned long flags;
124 struct rcu_data *rdp;
125
126 local_irq_save(flags);
127 rdp = &per_cpu(rcu_bh_data, cpu);
97 rdp->passed_quiesc = 1; 128 rdp->passed_quiesc = 1;
98 rdp->passed_quiesc_completed = rdp->completed; 129 rdp->passed_quiesc_completed = rdp->completed;
130 local_irq_restore(flags);
99} 131}
100 132
101#ifdef CONFIG_NO_HZ 133#ifdef CONFIG_NO_HZ
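
[Note] As the reworded comment says, rcu_sched_qs() and rcu_bh_qs() only need to record that at least one quiescent state occurred since the grace period began, so a per-CPU flag tagged with the grace period it belongs to is enough; no counter is required. A small model of that idea (the kernel tags the flag with ->completed and the exact comparison differs, so treat this as illustrative):

#include <stdio.h>

/* Per-CPU model: one flag plus the grace-period number it was set in,
 * so a flag left over from an earlier grace period is ignored. */
struct cpu_state {
	long qs_gpnum;		/* grace period the flag below belongs to */
	int  passed_quiesc;
};

static void note_qs(struct cpu_state *c, long gpnum)
{
	c->passed_quiesc = 1;	/* at least one QS; how many is irrelevant */
	c->qs_gpnum = gpnum;
}

static int qs_counts_for(struct cpu_state *c, long gpnum)
{
	return c->passed_quiesc && c->qs_gpnum == gpnum;
}

int main(void)
{
	struct cpu_state c = { 0, 0 };

	note_qs(&c, 7);		/* e.g. a tick taken from user mode */
	note_qs(&c, 7);		/* noting it twice changes nothing  */
	printf("counts for GP 7? %d\n", qs_counts_for(&c, 7));
	printf("counts for GP 8? %d\n", qs_counts_for(&c, 8));
	return 0;
}
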
@@ -110,15 +142,16 @@ static int qhimark = 10000; /* If this many pending, ignore blimit. */
110static int qlowmark = 100; /* Once only this many pending, use blimit. */ 142static int qlowmark = 100; /* Once only this many pending, use blimit. */
111 143
112static void force_quiescent_state(struct rcu_state *rsp, int relaxed); 144static void force_quiescent_state(struct rcu_state *rsp, int relaxed);
145static int rcu_pending(int cpu);
113 146
114/* 147/*
115 * Return the number of RCU batches processed thus far for debug & stats. 148 * Return the number of RCU-sched batches processed thus far for debug & stats.
116 */ 149 */
117long rcu_batches_completed(void) 150long rcu_batches_completed_sched(void)
118{ 151{
119 return rcu_state.completed; 152 return rcu_sched_state.completed;
120} 153}
121EXPORT_SYMBOL_GPL(rcu_batches_completed); 154EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
122 155
123/* 156/*
124 * Return the number of RCU BH batches processed thus far for debug & stats. 157 * Return the number of RCU BH batches processed thus far for debug & stats.
@@ -181,6 +214,10 @@ static int rcu_implicit_offline_qs(struct rcu_data *rdp)
181 return 1; 214 return 1;
182 } 215 }
183 216
217 /* If preemptable RCU, no point in sending reschedule IPI. */
218 if (rdp->preemptable)
219 return 0;
220
184 /* The CPU is online, so send it a reschedule IPI. */ 221 /* The CPU is online, so send it a reschedule IPI. */
185 if (rdp->cpu != smp_processor_id()) 222 if (rdp->cpu != smp_processor_id())
186 smp_send_reschedule(rdp->cpu); 223 smp_send_reschedule(rdp->cpu);
@@ -193,7 +230,6 @@ static int rcu_implicit_offline_qs(struct rcu_data *rdp)
193#endif /* #ifdef CONFIG_SMP */ 230#endif /* #ifdef CONFIG_SMP */
194 231
195#ifdef CONFIG_NO_HZ 232#ifdef CONFIG_NO_HZ
196static DEFINE_RATELIMIT_STATE(rcu_rs, 10 * HZ, 5);
197 233
198/** 234/**
199 * rcu_enter_nohz - inform RCU that current CPU is entering nohz 235 * rcu_enter_nohz - inform RCU that current CPU is entering nohz
@@ -213,7 +249,7 @@ void rcu_enter_nohz(void)
213 rdtp = &__get_cpu_var(rcu_dynticks); 249 rdtp = &__get_cpu_var(rcu_dynticks);
214 rdtp->dynticks++; 250 rdtp->dynticks++;
215 rdtp->dynticks_nesting--; 251 rdtp->dynticks_nesting--;
216 WARN_ON_RATELIMIT(rdtp->dynticks & 0x1, &rcu_rs); 252 WARN_ON_ONCE(rdtp->dynticks & 0x1);
217 local_irq_restore(flags); 253 local_irq_restore(flags);
218} 254}
219 255
@@ -232,7 +268,7 @@ void rcu_exit_nohz(void)
232 rdtp = &__get_cpu_var(rcu_dynticks); 268 rdtp = &__get_cpu_var(rcu_dynticks);
233 rdtp->dynticks++; 269 rdtp->dynticks++;
234 rdtp->dynticks_nesting++; 270 rdtp->dynticks_nesting++;
235 WARN_ON_RATELIMIT(!(rdtp->dynticks & 0x1), &rcu_rs); 271 WARN_ON_ONCE(!(rdtp->dynticks & 0x1));
236 local_irq_restore(flags); 272 local_irq_restore(flags);
237 smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */ 273 smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
238} 274}
@@ -251,7 +287,7 @@ void rcu_nmi_enter(void)
251 if (rdtp->dynticks & 0x1) 287 if (rdtp->dynticks & 0x1)
252 return; 288 return;
253 rdtp->dynticks_nmi++; 289 rdtp->dynticks_nmi++;
254 WARN_ON_RATELIMIT(!(rdtp->dynticks_nmi & 0x1), &rcu_rs); 290 WARN_ON_ONCE(!(rdtp->dynticks_nmi & 0x1));
255 smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */ 291 smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
256} 292}
257 293
@@ -270,7 +306,7 @@ void rcu_nmi_exit(void)
270 return; 306 return;
271 smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */ 307 smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
272 rdtp->dynticks_nmi++; 308 rdtp->dynticks_nmi++;
273 WARN_ON_RATELIMIT(rdtp->dynticks_nmi & 0x1, &rcu_rs); 309 WARN_ON_ONCE(rdtp->dynticks_nmi & 0x1);
274} 310}
275 311
276/** 312/**
@@ -286,7 +322,7 @@ void rcu_irq_enter(void)
286 if (rdtp->dynticks_nesting++) 322 if (rdtp->dynticks_nesting++)
287 return; 323 return;
288 rdtp->dynticks++; 324 rdtp->dynticks++;
289 WARN_ON_RATELIMIT(!(rdtp->dynticks & 0x1), &rcu_rs); 325 WARN_ON_ONCE(!(rdtp->dynticks & 0x1));
290 smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */ 326 smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
291} 327}
292 328
@@ -305,10 +341,10 @@ void rcu_irq_exit(void)
305 return; 341 return;
306 smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */ 342 smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
307 rdtp->dynticks++; 343 rdtp->dynticks++;
308 WARN_ON_RATELIMIT(rdtp->dynticks & 0x1, &rcu_rs); 344 WARN_ON_ONCE(rdtp->dynticks & 0x1);
309 345
310 /* If the interrupt queued a callback, get out of dyntick mode. */ 346 /* If the interrupt queued a callback, get out of dyntick mode. */
311 if (__get_cpu_var(rcu_data).nxtlist || 347 if (__get_cpu_var(rcu_sched_data).nxtlist ||
312 __get_cpu_var(rcu_bh_data).nxtlist) 348 __get_cpu_var(rcu_bh_data).nxtlist)
313 set_need_resched(); 349 set_need_resched();
314} 350}
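
[Note] The WARN_ON_RATELIMIT to WARN_ON_ONCE conversions above all check the same invariant: ->dynticks is incremented on every idle/non-idle transition, so it is even while the CPU is in dynticks-idle and odd otherwise. A user-space model of that invariant, with assert() standing in for WARN_ON_ONCE:

#include <assert.h>
#include <stdio.h>

/* Even value: CPU is in dynticks-idle.  Odd value: CPU is active.
 * Every transition increments the counter, so a grace period can tell
 * "was idle the whole time" (same even value seen twice) from "was
 * active at some point" (value changed or is odd). */
static int dynticks;		/* starts even: "idle" in this model */

static void enter_nohz(void)
{
	dynticks++;
	assert(!(dynticks & 0x1));	/* must now be even */
}

static void exit_nohz(void)
{
	dynticks++;
	assert(dynticks & 0x1);		/* must now be odd */
}

int main(void)
{
	exit_nohz();			/* CPU starts running */
	printf("active: dynticks=%d (odd)\n", dynticks);
	enter_nohz();			/* CPU goes idle */
	printf("idle:   dynticks=%d (even)\n", dynticks);
	exit_nohz();
	printf("active: dynticks=%d (odd)\n", dynticks);
	return 0;
}
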
@@ -461,6 +497,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
461 497
462 printk(KERN_ERR "INFO: RCU detected CPU stalls:"); 498 printk(KERN_ERR "INFO: RCU detected CPU stalls:");
463 for (; rnp_cur < rnp_end; rnp_cur++) { 499 for (; rnp_cur < rnp_end; rnp_cur++) {
500 rcu_print_task_stall(rnp);
464 if (rnp_cur->qsmask == 0) 501 if (rnp_cur->qsmask == 0)
465 continue; 502 continue;
466 for (cpu = 0; cpu <= rnp_cur->grphi - rnp_cur->grplo; cpu++) 503 for (cpu = 0; cpu <= rnp_cur->grphi - rnp_cur->grplo; cpu++)
@@ -469,6 +506,8 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
469 } 506 }
470 printk(" (detected by %d, t=%ld jiffies)\n", 507 printk(" (detected by %d, t=%ld jiffies)\n",
471 smp_processor_id(), (long)(jiffies - rsp->gp_start)); 508 smp_processor_id(), (long)(jiffies - rsp->gp_start));
509 trigger_all_cpu_backtrace();
510
472 force_quiescent_state(rsp, 0); /* Kick them all. */ 511 force_quiescent_state(rsp, 0); /* Kick them all. */
473} 512}
474 513
@@ -479,12 +518,14 @@ static void print_cpu_stall(struct rcu_state *rsp)
479 518
480 printk(KERN_ERR "INFO: RCU detected CPU %d stall (t=%lu jiffies)\n", 519 printk(KERN_ERR "INFO: RCU detected CPU %d stall (t=%lu jiffies)\n",
481 smp_processor_id(), jiffies - rsp->gp_start); 520 smp_processor_id(), jiffies - rsp->gp_start);
482 dump_stack(); 521 trigger_all_cpu_backtrace();
522
483 spin_lock_irqsave(&rnp->lock, flags); 523 spin_lock_irqsave(&rnp->lock, flags);
484 if ((long)(jiffies - rsp->jiffies_stall) >= 0) 524 if ((long)(jiffies - rsp->jiffies_stall) >= 0)
485 rsp->jiffies_stall = 525 rsp->jiffies_stall =
486 jiffies + RCU_SECONDS_TILL_STALL_RECHECK; 526 jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
487 spin_unlock_irqrestore(&rnp->lock, flags); 527 spin_unlock_irqrestore(&rnp->lock, flags);
528
488 set_need_resched(); /* kick ourselves to get things going. */ 529 set_need_resched(); /* kick ourselves to get things going. */
489} 530}
490 531
@@ -674,6 +715,19 @@ rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp)
674} 715}
675 716
676/* 717/*
718 * Clean up after the prior grace period and let rcu_start_gp() start up
719 * the next grace period if one is needed. Note that the caller must
720 * hold rnp->lock, as required by rcu_start_gp(), which will release it.
721 */
722static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags)
723 __releases(rnp->lock)
724{
725 rsp->completed = rsp->gpnum;
726 rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]);
727 rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */
728}
729
730/*
677 * Similar to cpu_quiet(), for which it is a helper function. Allows 731 * Similar to cpu_quiet(), for which it is a helper function. Allows
678 * a group of CPUs to be quieted at one go, though all the CPUs in the 732 * a group of CPUs to be quieted at one go, though all the CPUs in the
679 * group must be represented by the same leaf rcu_node structure. 733 * group must be represented by the same leaf rcu_node structure.
@@ -694,7 +748,7 @@ cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp,
694 return; 748 return;
695 } 749 }
696 rnp->qsmask &= ~mask; 750 rnp->qsmask &= ~mask;
697 if (rnp->qsmask != 0) { 751 if (rnp->qsmask != 0 || rcu_preempted_readers(rnp)) {
698 752
699 /* Other bits still set at this level, so done. */ 753 /* Other bits still set at this level, so done. */
700 spin_unlock_irqrestore(&rnp->lock, flags); 754 spin_unlock_irqrestore(&rnp->lock, flags);
@@ -714,14 +768,10 @@ cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp,
714 768
715 /* 769 /*
716 * Get here if we are the last CPU to pass through a quiescent 770 * Get here if we are the last CPU to pass through a quiescent
717 * state for this grace period. Clean up and let rcu_start_gp() 771 * state for this grace period. Invoke cpu_quiet_msk_finish()
718 * start up the next grace period if one is needed. Note that 772 * to clean up and start the next grace period if one is needed.
719 * we still hold rnp->lock, as required by rcu_start_gp(), which
720 * will release it.
721 */ 773 */
722 rsp->completed = rsp->gpnum; 774 cpu_quiet_msk_finish(rsp, flags); /* releases rnp->lock. */
723 rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]);
724 rcu_start_gp(rsp, flags); /* releases rnp->lock. */
725} 775}
726 776
727/* 777/*
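
[Note] The new cpu_quiet_msk_finish() helper factors out the end-of-grace-period step: bits are cleared from ->qsmask as CPUs (and now also preempted readers) report in, and only the caller that clears the last bit advances ->completed and lets the next grace period start. A single-level sketch of that bit-clearing logic, without locking or the node hierarchy; the function and field names mirror the patch, the rest is illustrative:

#include <stdio.h>

/* One-level model: qsmask has one bit per CPU that still owes a
 * quiescent state for the current grace period. */
static unsigned long qsmask;
static long gpnum, completed;

static void start_gp(unsigned long online_mask)
{
	gpnum++;
	qsmask = online_mask;
}

static void cpu_quiet_msk(unsigned long mask)
{
	qsmask &= ~mask;
	if (qsmask != 0)
		return;			/* others still pending */
	completed = gpnum;		/* last reporter finishes the GP */
	printf("grace period %ld complete\n", completed);
}

int main(void)
{
	start_gp(0x7);			/* CPUs 0-2 online */
	cpu_quiet_msk(1UL << 0);
	cpu_quiet_msk(1UL << 2);
	cpu_quiet_msk(1UL << 1);	/* last bit: completes the GP */
	return 0;
}
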
@@ -828,11 +878,12 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
828 spin_lock(&rnp->lock); /* irqs already disabled. */ 878 spin_lock(&rnp->lock); /* irqs already disabled. */
829 rnp->qsmaskinit &= ~mask; 879 rnp->qsmaskinit &= ~mask;
830 if (rnp->qsmaskinit != 0) { 880 if (rnp->qsmaskinit != 0) {
831 spin_unlock(&rnp->lock); /* irqs already disabled. */ 881 spin_unlock(&rnp->lock); /* irqs remain disabled. */
832 break; 882 break;
833 } 883 }
884 rcu_preempt_offline_tasks(rsp, rnp);
834 mask = rnp->grpmask; 885 mask = rnp->grpmask;
835 spin_unlock(&rnp->lock); /* irqs already disabled. */ 886 spin_unlock(&rnp->lock); /* irqs remain disabled. */
836 rnp = rnp->parent; 887 rnp = rnp->parent;
837 } while (rnp != NULL); 888 } while (rnp != NULL);
838 lastcomp = rsp->completed; 889 lastcomp = rsp->completed;
@@ -845,7 +896,7 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
845 /* 896 /*
846 * Move callbacks from the outgoing CPU to the running CPU. 897 * Move callbacks from the outgoing CPU to the running CPU.
847 * Note that the outgoing CPU is now quiscent, so it is now 898 * Note that the outgoing CPU is now quiscent, so it is now
848 * (uncharacteristically) safe to access it rcu_data structure. 899 * (uncharacteristically) safe to access its rcu_data structure.
849 * Note also that we must carefully retain the order of the 900 * Note also that we must carefully retain the order of the
850 * outgoing CPU's callbacks in order for rcu_barrier() to work 901 * outgoing CPU's callbacks in order for rcu_barrier() to work
851 * correctly. Finally, note that we start all the callbacks 902 * correctly. Finally, note that we start all the callbacks
@@ -876,8 +927,9 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
876 */ 927 */
877static void rcu_offline_cpu(int cpu) 928static void rcu_offline_cpu(int cpu)
878{ 929{
879 __rcu_offline_cpu(cpu, &rcu_state); 930 __rcu_offline_cpu(cpu, &rcu_sched_state);
880 __rcu_offline_cpu(cpu, &rcu_bh_state); 931 __rcu_offline_cpu(cpu, &rcu_bh_state);
932 rcu_preempt_offline_cpu(cpu);
881} 933}
882 934
883#else /* #ifdef CONFIG_HOTPLUG_CPU */ 935#else /* #ifdef CONFIG_HOTPLUG_CPU */
@@ -963,6 +1015,8 @@ static void rcu_do_batch(struct rcu_data *rdp)
963 */ 1015 */
964void rcu_check_callbacks(int cpu, int user) 1016void rcu_check_callbacks(int cpu, int user)
965{ 1017{
1018 if (!rcu_pending(cpu))
1019 return; /* if nothing for RCU to do. */
966 if (user || 1020 if (user ||
967 (idle_cpu(cpu) && rcu_scheduler_active && 1021 (idle_cpu(cpu) && rcu_scheduler_active &&
968 !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) { 1022 !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
@@ -971,17 +1025,16 @@ void rcu_check_callbacks(int cpu, int user)
971 * Get here if this CPU took its interrupt from user 1025 * Get here if this CPU took its interrupt from user
972 * mode or from the idle loop, and if this is not a 1026 * mode or from the idle loop, and if this is not a
973 * nested interrupt. In this case, the CPU is in 1027 * nested interrupt. In this case, the CPU is in
974 * a quiescent state, so count it. 1028 * a quiescent state, so note it.
975 * 1029 *
976 * No memory barrier is required here because both 1030 * No memory barrier is required here because both
977 * rcu_qsctr_inc() and rcu_bh_qsctr_inc() reference 1031 * rcu_sched_qs() and rcu_bh_qs() reference only CPU-local
978 * only CPU-local variables that other CPUs neither 1032 * variables that other CPUs neither access nor modify,
979 * access nor modify, at least not while the corresponding 1033 * at least not while the corresponding CPU is online.
980 * CPU is online.
981 */ 1034 */
982 1035
983 rcu_qsctr_inc(cpu); 1036 rcu_sched_qs(cpu);
984 rcu_bh_qsctr_inc(cpu); 1037 rcu_bh_qs(cpu);
985 1038
986 } else if (!in_softirq()) { 1039 } else if (!in_softirq()) {
987 1040
@@ -989,11 +1042,12 @@ void rcu_check_callbacks(int cpu, int user)
989 * Get here if this CPU did not take its interrupt from 1042 * Get here if this CPU did not take its interrupt from
990 * softirq, in other words, if it is not interrupting 1043 * softirq, in other words, if it is not interrupting
991 * a rcu_bh read-side critical section. This is an _bh 1044 * a rcu_bh read-side critical section. This is an _bh
992 * critical section, so count it. 1045 * critical section, so note it.
993 */ 1046 */
994 1047
995 rcu_bh_qsctr_inc(cpu); 1048 rcu_bh_qs(cpu);
996 } 1049 }
1050 rcu_preempt_check_callbacks(cpu);
997 raise_softirq(RCU_SOFTIRQ); 1051 raise_softirq(RCU_SOFTIRQ);
998} 1052}
999 1053
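
[Note] rcu_check_callbacks() classifies the interrupted context: a tick taken from user mode or the idle loop is a quiescent state for both RCU-sched and RCU-bh, a tick that merely did not interrupt softirq is a quiescent state for RCU-bh only, and the new rcu_preempt_check_callbacks() hook runs in either case. A compact model of that decision (illustrative; it ignores the hardirq-nesting test in the real condition):

#include <stdio.h>

static int sched_qs, bh_qs;

/* Decide which flavors saw a quiescent state on this tick. */
static void check_tick(int from_user, int from_idle, int in_softirq)
{
	sched_qs = bh_qs = 0;
	if (from_user || from_idle) {
		sched_qs = 1;	/* neither flavor can have a reader here */
		bh_qs = 1;
	} else if (!in_softirq) {
		bh_qs = 1;	/* not inside a bh read-side section */
	}
}

int main(void)
{
	check_tick(1, 0, 0);
	printf("user tick:    sched=%d bh=%d\n", sched_qs, bh_qs);
	check_tick(0, 0, 0);
	printf("kernel tick:  sched=%d bh=%d\n", sched_qs, bh_qs);
	check_tick(0, 0, 1);
	printf("softirq tick: sched=%d bh=%d\n", sched_qs, bh_qs);
	return 0;
}
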
@@ -1132,6 +1186,8 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
1132{ 1186{
1133 unsigned long flags; 1187 unsigned long flags;
1134 1188
1189 WARN_ON_ONCE(rdp->beenonline == 0);
1190
1135 /* 1191 /*
1136 * If an RCU GP has gone long enough, go check for dyntick 1192 * If an RCU GP has gone long enough, go check for dyntick
1137 * idle CPUs and, if needed, send resched IPIs. 1193 * idle CPUs and, if needed, send resched IPIs.
@@ -1170,8 +1226,10 @@ static void rcu_process_callbacks(struct softirq_action *unused)
1170 */ 1226 */
1171 smp_mb(); /* See above block comment. */ 1227 smp_mb(); /* See above block comment. */
1172 1228
1173 __rcu_process_callbacks(&rcu_state, &__get_cpu_var(rcu_data)); 1229 __rcu_process_callbacks(&rcu_sched_state,
1230 &__get_cpu_var(rcu_sched_data));
1174 __rcu_process_callbacks(&rcu_bh_state, &__get_cpu_var(rcu_bh_data)); 1231 __rcu_process_callbacks(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
1232 rcu_preempt_process_callbacks();
1175 1233
1176 /* 1234 /*
1177 * Memory references from any later RCU read-side critical sections 1235 * Memory references from any later RCU read-side critical sections
@@ -1227,13 +1285,13 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
1227} 1285}
1228 1286
1229/* 1287/*
1230 * Queue an RCU callback for invocation after a grace period. 1288 * Queue an RCU-sched callback for invocation after a grace period.
1231 */ 1289 */
1232void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) 1290void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
1233{ 1291{
1234 __call_rcu(head, func, &rcu_state); 1292 __call_rcu(head, func, &rcu_sched_state);
1235} 1293}
1236EXPORT_SYMBOL_GPL(call_rcu); 1294EXPORT_SYMBOL_GPL(call_rcu_sched);
1237 1295
1238/* 1296/*
1239 * Queue an RCU for invocation after a quicker grace period. 1297 * Queue an RCU for invocation after a quicker grace period.
@@ -1305,10 +1363,11 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
1305 * by the current CPU, returning 1 if so. This function is part of the 1363 * by the current CPU, returning 1 if so. This function is part of the
1306 * RCU implementation; it is -not- an exported member of the RCU API. 1364 * RCU implementation; it is -not- an exported member of the RCU API.
1307 */ 1365 */
1308int rcu_pending(int cpu) 1366static int rcu_pending(int cpu)
1309{ 1367{
1310 return __rcu_pending(&rcu_state, &per_cpu(rcu_data, cpu)) || 1368 return __rcu_pending(&rcu_sched_state, &per_cpu(rcu_sched_data, cpu)) ||
1311 __rcu_pending(&rcu_bh_state, &per_cpu(rcu_bh_data, cpu)); 1369 __rcu_pending(&rcu_bh_state, &per_cpu(rcu_bh_data, cpu)) ||
1370 rcu_preempt_pending(cpu);
1312} 1371}
1313 1372
1314/* 1373/*
@@ -1320,27 +1379,46 @@ int rcu_pending(int cpu)
1320int rcu_needs_cpu(int cpu) 1379int rcu_needs_cpu(int cpu)
1321{ 1380{
1322 /* RCU callbacks either ready or pending? */ 1381 /* RCU callbacks either ready or pending? */
1323 return per_cpu(rcu_data, cpu).nxtlist || 1382 return per_cpu(rcu_sched_data, cpu).nxtlist ||
1324 per_cpu(rcu_bh_data, cpu).nxtlist; 1383 per_cpu(rcu_bh_data, cpu).nxtlist ||
1384 rcu_preempt_needs_cpu(cpu);
1325} 1385}
1326 1386
1327/* 1387/*
1328 * Initialize a CPU's per-CPU RCU data. We take this "scorched earth" 1388 * Do boot-time initialization of a CPU's per-CPU RCU data.
1329 * approach so that we don't have to worry about how long the CPU has
1330 * been gone, or whether it ever was online previously. We do trust the
1331 * ->mynode field, as it is constant for a given struct rcu_data and
1332 * initialized during early boot.
1333 *
1334 * Note that only one online or offline event can be happening at a given
1335 * time. Note also that we can accept some slop in the rsp->completed
1336 * access due to the fact that this CPU cannot possibly have any RCU
1337 * callbacks in flight yet.
1338 */ 1389 */
1339static void __cpuinit 1390static void __init
1340rcu_init_percpu_data(int cpu, struct rcu_state *rsp) 1391rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
1341{ 1392{
1342 unsigned long flags; 1393 unsigned long flags;
1343 int i; 1394 int i;
1395 struct rcu_data *rdp = rsp->rda[cpu];
1396 struct rcu_node *rnp = rcu_get_root(rsp);
1397
1398 /* Set up local state, ensuring consistent view of global state. */
1399 spin_lock_irqsave(&rnp->lock, flags);
1400 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
1401 rdp->nxtlist = NULL;
1402 for (i = 0; i < RCU_NEXT_SIZE; i++)
1403 rdp->nxttail[i] = &rdp->nxtlist;
1404 rdp->qlen = 0;
1405#ifdef CONFIG_NO_HZ
1406 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
1407#endif /* #ifdef CONFIG_NO_HZ */
1408 rdp->cpu = cpu;
1409 spin_unlock_irqrestore(&rnp->lock, flags);
1410}
1411
1412/*
1413 * Initialize a CPU's per-CPU RCU data. Note that only one online or
1414 * offline event can be happening at a given time. Note also that we
1415 * can accept some slop in the rsp->completed access due to the fact
1416 * that this CPU cannot possibly have any RCU callbacks in flight yet.
1417 */
1418static void __cpuinit
1419rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
1420{
1421 unsigned long flags;
1344 long lastcomp; 1422 long lastcomp;
1345 unsigned long mask; 1423 unsigned long mask;
1346 struct rcu_data *rdp = rsp->rda[cpu]; 1424 struct rcu_data *rdp = rsp->rda[cpu];
@@ -1354,17 +1432,9 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
1354 rdp->passed_quiesc = 0; /* We could be racing with new GP, */ 1432 rdp->passed_quiesc = 0; /* We could be racing with new GP, */
1355 rdp->qs_pending = 1; /* so set up to respond to current GP. */ 1433 rdp->qs_pending = 1; /* so set up to respond to current GP. */
1356 rdp->beenonline = 1; /* We have now been online. */ 1434 rdp->beenonline = 1; /* We have now been online. */
1435 rdp->preemptable = preemptable;
1357 rdp->passed_quiesc_completed = lastcomp - 1; 1436 rdp->passed_quiesc_completed = lastcomp - 1;
1358 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
1359 rdp->nxtlist = NULL;
1360 for (i = 0; i < RCU_NEXT_SIZE; i++)
1361 rdp->nxttail[i] = &rdp->nxtlist;
1362 rdp->qlen = 0;
1363 rdp->blimit = blimit; 1437 rdp->blimit = blimit;
1364#ifdef CONFIG_NO_HZ
1365 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
1366#endif /* #ifdef CONFIG_NO_HZ */
1367 rdp->cpu = cpu;
1368 spin_unlock(&rnp->lock); /* irqs remain disabled. */ 1438 spin_unlock(&rnp->lock); /* irqs remain disabled. */
1369 1439
1370 /* 1440 /*
@@ -1405,16 +1475,16 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
1405 1475
1406static void __cpuinit rcu_online_cpu(int cpu) 1476static void __cpuinit rcu_online_cpu(int cpu)
1407{ 1477{
1408 rcu_init_percpu_data(cpu, &rcu_state); 1478 rcu_init_percpu_data(cpu, &rcu_sched_state, 0);
1409 rcu_init_percpu_data(cpu, &rcu_bh_state); 1479 rcu_init_percpu_data(cpu, &rcu_bh_state, 0);
1410 open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); 1480 rcu_preempt_init_percpu_data(cpu);
1411} 1481}
1412 1482
1413/* 1483/*
1414 * Handle CPU online/offline notifcation events. 1484 * Handle CPU online/offline notification events.
1415 */ 1485 */
1416static int __cpuinit rcu_cpu_notify(struct notifier_block *self, 1486int __cpuinit rcu_cpu_notify(struct notifier_block *self,
1417 unsigned long action, void *hcpu) 1487 unsigned long action, void *hcpu)
1418{ 1488{
1419 long cpu = (long)hcpu; 1489 long cpu = (long)hcpu;
1420 1490
@@ -1486,6 +1556,7 @@ static void __init rcu_init_one(struct rcu_state *rsp)
1486 rnp = rsp->level[i]; 1556 rnp = rsp->level[i];
1487 for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) { 1557 for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) {
1488 spin_lock_init(&rnp->lock); 1558 spin_lock_init(&rnp->lock);
1559 rnp->gpnum = 0;
1489 rnp->qsmask = 0; 1560 rnp->qsmask = 0;
1490 rnp->qsmaskinit = 0; 1561 rnp->qsmaskinit = 0;
1491 rnp->grplo = j * cpustride; 1562 rnp->grplo = j * cpustride;
@@ -1503,16 +1574,20 @@ static void __init rcu_init_one(struct rcu_state *rsp)
1503 j / rsp->levelspread[i - 1]; 1574 j / rsp->levelspread[i - 1];
1504 } 1575 }
1505 rnp->level = i; 1576 rnp->level = i;
1577 INIT_LIST_HEAD(&rnp->blocked_tasks[0]);
1578 INIT_LIST_HEAD(&rnp->blocked_tasks[1]);
1506 } 1579 }
1507 } 1580 }
1508} 1581}
1509 1582
1510/* 1583/*
1511 * Helper macro for __rcu_init(). To be used nowhere else! 1584 * Helper macro for __rcu_init() and __rcu_init_preempt(). To be used
1512 * Assigns leaf node pointers into each CPU's rcu_data structure. 1585 * nowhere else! Assigns leaf node pointers into each CPU's rcu_data
1586 * structure.
1513 */ 1587 */
1514#define RCU_DATA_PTR_INIT(rsp, rcu_data) \ 1588#define RCU_INIT_FLAVOR(rsp, rcu_data) \
1515do { \ 1589do { \
1590 rcu_init_one(rsp); \
1516 rnp = (rsp)->level[NUM_RCU_LVLS - 1]; \ 1591 rnp = (rsp)->level[NUM_RCU_LVLS - 1]; \
1517 j = 0; \ 1592 j = 0; \
1518 for_each_possible_cpu(i) { \ 1593 for_each_possible_cpu(i) { \
@@ -1520,32 +1595,43 @@ do { \
1520 j++; \ 1595 j++; \
1521 per_cpu(rcu_data, i).mynode = &rnp[j]; \ 1596 per_cpu(rcu_data, i).mynode = &rnp[j]; \
1522 (rsp)->rda[i] = &per_cpu(rcu_data, i); \ 1597 (rsp)->rda[i] = &per_cpu(rcu_data, i); \
1598 rcu_boot_init_percpu_data(i, rsp); \
1523 } \ 1599 } \
1524} while (0) 1600} while (0)
1525 1601
1526static struct notifier_block __cpuinitdata rcu_nb = { 1602#ifdef CONFIG_TREE_PREEMPT_RCU
1527 .notifier_call = rcu_cpu_notify, 1603
1528}; 1604void __init __rcu_init_preempt(void)
1605{
1606 int i; /* All used by RCU_INIT_FLAVOR(). */
1607 int j;
1608 struct rcu_node *rnp;
1609
1610 RCU_INIT_FLAVOR(&rcu_preempt_state, rcu_preempt_data);
1611}
1612
1613#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
1614
1615void __init __rcu_init_preempt(void)
1616{
1617}
1618
1619#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
1529 1620
1530void __init __rcu_init(void) 1621void __init __rcu_init(void)
1531{ 1622{
1532 int i; /* All used by RCU_DATA_PTR_INIT(). */ 1623 int i; /* All used by RCU_INIT_FLAVOR(). */
1533 int j; 1624 int j;
1534 struct rcu_node *rnp; 1625 struct rcu_node *rnp;
1535 1626
1536 printk(KERN_INFO "Hierarchical RCU implementation.\n"); 1627 rcu_bootup_announce();
1537#ifdef CONFIG_RCU_CPU_STALL_DETECTOR 1628#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
1538 printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n"); 1629 printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n");
1539#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ 1630#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
1540 rcu_init_one(&rcu_state); 1631 RCU_INIT_FLAVOR(&rcu_sched_state, rcu_sched_data);
1541 RCU_DATA_PTR_INIT(&rcu_state, rcu_data); 1632 RCU_INIT_FLAVOR(&rcu_bh_state, rcu_bh_data);
1542 rcu_init_one(&rcu_bh_state); 1633 __rcu_init_preempt();
1543 RCU_DATA_PTR_INIT(&rcu_bh_state, rcu_bh_data); 1634 open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
1544
1545 for_each_online_cpu(i)
1546 rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE, (void *)(long)i);
1547 /* Register notifier for non-boot CPUs */
1548 register_cpu_notifier(&rcu_nb);
1549} 1635}
1550 1636
1551module_param(blimit, int, 0); 1637module_param(blimit, int, 0);
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 5e872bbf07f5..bf8a6f9f134d 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -1,10 +1,259 @@
1/*
2 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
3 * Internal non-public definitions.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 *
19 * Copyright IBM Corporation, 2008
20 *
21 * Author: Ingo Molnar <mingo@elte.hu>
22 * Paul E. McKenney <paulmck@linux.vnet.ibm.com>
23 */
24
25#include <linux/cache.h>
26#include <linux/spinlock.h>
27#include <linux/threads.h>
28#include <linux/cpumask.h>
29#include <linux/seqlock.h>
30
31/*
32 * Define shape of hierarchy based on NR_CPUS and CONFIG_RCU_FANOUT.
33 * In theory, it should be possible to add more levels straightforwardly.
34 * In practice, this has not been tested, so there is probably some
35 * bug somewhere.
36 */
37#define MAX_RCU_LVLS 3
38#define RCU_FANOUT (CONFIG_RCU_FANOUT)
39#define RCU_FANOUT_SQ (RCU_FANOUT * RCU_FANOUT)
40#define RCU_FANOUT_CUBE (RCU_FANOUT_SQ * RCU_FANOUT)
41
42#if NR_CPUS <= RCU_FANOUT
43# define NUM_RCU_LVLS 1
44# define NUM_RCU_LVL_0 1
45# define NUM_RCU_LVL_1 (NR_CPUS)
46# define NUM_RCU_LVL_2 0
47# define NUM_RCU_LVL_3 0
48#elif NR_CPUS <= RCU_FANOUT_SQ
49# define NUM_RCU_LVLS 2
50# define NUM_RCU_LVL_0 1
51# define NUM_RCU_LVL_1 (((NR_CPUS) + RCU_FANOUT - 1) / RCU_FANOUT)
52# define NUM_RCU_LVL_2 (NR_CPUS)
53# define NUM_RCU_LVL_3 0
54#elif NR_CPUS <= RCU_FANOUT_CUBE
55# define NUM_RCU_LVLS 3
56# define NUM_RCU_LVL_0 1
57# define NUM_RCU_LVL_1 (((NR_CPUS) + RCU_FANOUT_SQ - 1) / RCU_FANOUT_SQ)
58# define NUM_RCU_LVL_2 (((NR_CPUS) + (RCU_FANOUT) - 1) / (RCU_FANOUT))
59# define NUM_RCU_LVL_3 NR_CPUS
60#else
61# error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
62#endif /* #if (NR_CPUS) <= RCU_FANOUT */
63
64#define RCU_SUM (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3)
65#define NUM_RCU_NODES (RCU_SUM - NR_CPUS)
66
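
[Note] The NUM_RCU_LVL_* arithmetic above sizes each level of the tree from NR_CPUS and CONFIG_RCU_FANOUT. For example, a fanout of 16 with 64 possible CPUs takes the two-level branch: one root plus (64 + 15) / 16 = 4 leaf nodes, so NUM_RCU_NODES = (1 + 4 + 64) - 64 = 5. A small program that evaluates the same formulas at run time (a model of the preprocessor logic, not kernel code):

#include <stdio.h>

/* Recompute the sizes that the NUM_RCU_LVL_* macros would produce for
 * a given CPU count and fanout, and report how many rcu_node
 * structures the tree ends up with. */
static void shape(int nr_cpus, int fanout)
{
	int sq = fanout * fanout;
	int levels, nodes;

	if (nr_cpus <= fanout) {
		levels = 1;
		nodes  = 1;			/* root covers every CPU */
	} else if (nr_cpus <= sq) {
		levels = 2;
		nodes  = 1 + (nr_cpus + fanout - 1) / fanout;
	} else {
		levels = 3;
		nodes  = 1 + (nr_cpus + sq - 1) / sq
			   + (nr_cpus + fanout - 1) / fanout;
	}
	printf("cpus=%4d fanout=%2d levels=%d rcu_node structures=%d\n",
	       nr_cpus, fanout, levels, nodes);
}

int main(void)
{
	shape(16, 16);		/* 1 node:  the root is also the only leaf */
	shape(64, 16);		/* 5 nodes: 1 root + 4 leaves */
	shape(4096, 64);	/* 65 nodes: 1 root + 64 leaves */
	return 0;
}
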
67/*
68 * Dynticks per-CPU state.
69 */
70struct rcu_dynticks {
71 int dynticks_nesting; /* Track nesting level, sort of. */
72 int dynticks; /* Even value for dynticks-idle, else odd. */
73 int dynticks_nmi; /* Even value for either dynticks-idle or */
74 /* not in nmi handler, else odd. So this */
75 /* remains even for nmi from irq handler. */
76};
77
78/*
79 * Definition for node within the RCU grace-period-detection hierarchy.
80 */
81struct rcu_node {
82 spinlock_t lock;
83 long gpnum; /* Current grace period for this node. */
84 /* This will either be equal to or one */
85 /* behind the root rcu_node's gpnum. */
86 unsigned long qsmask; /* CPUs or groups that need to switch in */
87 /* order for current grace period to proceed.*/
88 unsigned long qsmaskinit;
89 /* Per-GP initialization for qsmask. */
90 unsigned long grpmask; /* Mask to apply to parent qsmask. */
91 int grplo; /* lowest-numbered CPU or group here. */
92 int grphi; /* highest-numbered CPU or group here. */
93 u8 grpnum; /* CPU/group number for next level up. */
94 u8 level; /* root is at level 0. */
95 struct rcu_node *parent;
96 struct list_head blocked_tasks[2];
97 /* Tasks blocked in RCU read-side critsect. */
98} ____cacheline_internodealigned_in_smp;
99
100/* Index values for nxttail array in struct rcu_data. */
101#define RCU_DONE_TAIL 0 /* Also RCU_WAIT head. */
102#define RCU_WAIT_TAIL 1 /* Also RCU_NEXT_READY head. */
103#define RCU_NEXT_READY_TAIL 2 /* Also RCU_NEXT head. */
104#define RCU_NEXT_TAIL 3
105#define RCU_NEXT_SIZE 4
106
107/* Per-CPU data for read-copy update. */
108struct rcu_data {
109 /* 1) quiescent-state and grace-period handling : */
110 long completed; /* Track rsp->completed gp number */
111 /* in order to detect GP end. */
112 long gpnum; /* Highest gp number that this CPU */
113 /* is aware of having started. */
114 long passed_quiesc_completed;
115 /* Value of completed at time of qs. */
116 bool passed_quiesc; /* User-mode/idle loop etc. */
117 bool qs_pending; /* Core waits for quiesc state. */
118 bool beenonline; /* CPU online at least once. */
119 bool preemptable; /* Preemptable RCU? */
120 struct rcu_node *mynode; /* This CPU's leaf of hierarchy */
121 unsigned long grpmask; /* Mask to apply to leaf qsmask. */
122
123 /* 2) batch handling */
124 /*
125 * If nxtlist is not NULL, it is partitioned as follows.
126 * Any of the partitions might be empty, in which case the
127 * pointer to that partition will be equal to the pointer for
128 * the following partition. When the list is empty, all of
129 * the nxttail elements point to nxtlist, which is NULL.
130 *
131 * [*nxttail[RCU_NEXT_READY_TAIL], NULL = *nxttail[RCU_NEXT_TAIL]):
132 * Entries that might have arrived after current GP ended
133 * [*nxttail[RCU_WAIT_TAIL], *nxttail[RCU_NEXT_READY_TAIL]):
134 * Entries known to have arrived before current GP ended
135 * [*nxttail[RCU_DONE_TAIL], *nxttail[RCU_WAIT_TAIL]):
136 * Entries that batch # <= ->completed - 1: waiting for current GP
137 * [nxtlist, *nxttail[RCU_DONE_TAIL]):
138 * Entries that batch # <= ->completed
139 * The grace period for these entries has completed, and
140 * the other grace-period-completed entries may be moved
141 * here temporarily in rcu_process_callbacks().
142 */
143 struct rcu_head *nxtlist;
144 struct rcu_head **nxttail[RCU_NEXT_SIZE];
145 long qlen; /* # of queued callbacks */
146 long blimit; /* Upper limit on a processed batch */
147
148#ifdef CONFIG_NO_HZ
149 /* 3) dynticks interface. */
150 struct rcu_dynticks *dynticks; /* Shared per-CPU dynticks state. */
151 int dynticks_snap; /* Per-GP tracking for dynticks. */
152 int dynticks_nmi_snap; /* Per-GP tracking for dynticks_nmi. */
153#endif /* #ifdef CONFIG_NO_HZ */
154
155 /* 4) reasons this CPU needed to be kicked by force_quiescent_state */
156#ifdef CONFIG_NO_HZ
157 unsigned long dynticks_fqs; /* Kicked due to dynticks idle. */
158#endif /* #ifdef CONFIG_NO_HZ */
159 unsigned long offline_fqs; /* Kicked due to being offline. */
160 unsigned long resched_ipi; /* Sent a resched IPI. */
161
162 /* 5) __rcu_pending() statistics. */
163 long n_rcu_pending; /* rcu_pending() calls since boot. */
164 long n_rp_qs_pending;
165 long n_rp_cb_ready;
166 long n_rp_cpu_needs_gp;
167 long n_rp_gp_completed;
168 long n_rp_gp_started;
169 long n_rp_need_fqs;
170 long n_rp_need_nothing;
171
172 int cpu;
173};
174
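
[Note] The ->nxtlist/->nxttail comment above describes one linked list carved into four segments purely by tail pointers, so whole segments can be promoted by copying pointers rather than by walking callbacks. A user-space model of that layout; it only shows the shift performed when a grace period completes (the kernel also advances callbacks when one starts), and all names besides the segment names are illustrative:

#include <stdio.h>

enum { DONE, WAIT, NEXT_READY, NEXT, NSEG };
static const char *segname[] = { "DONE", "WAIT", "NEXT_READY", "NEXT" };

struct cb { struct cb *next; int id; };

static struct cb pool[8];
static int npool;
static struct cb *nxtlist;
static struct cb **nxttail[NSEG];

static void init(void)
{
	int i;

	for (i = 0; i < NSEG; i++)
		nxttail[i] = &nxtlist;
}

static void enqueue(int id)
{
	struct cb *cb = &pool[npool++];

	cb->next = NULL;
	cb->id = id;
	*nxttail[NEXT] = cb;		/* append at the very end */
	nxttail[NEXT] = &cb->next;
}

/* A grace period completed: every segment moves one step closer to
 * being invokable, purely by copying tail pointers. */
static void gp_completed(void)
{
	nxttail[DONE] = nxttail[WAIT];
	nxttail[WAIT] = nxttail[NEXT_READY];
	nxttail[NEXT_READY] = nxttail[NEXT];
}

static void dump(const char *when)
{
	struct cb **p = &nxtlist;
	int seg = 0;

	printf("%s:\n", when);
	while (*p) {
		while (seg < NSEG - 1 && nxttail[seg] == p)
			seg++;		/* segment seg ends before *p */
		printf("  cb%d: %s\n", (*p)->id, segname[seg]);
		p = &(*p)->next;
	}
}

int main(void)
{
	init();
	enqueue(1);
	enqueue(2);
	dump("after queueing cb1 and cb2");
	gp_completed();
	gp_completed();
	gp_completed();
	enqueue(3);
	dump("after three completed grace periods and queueing cb3");
	return 0;
}
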
175/* Values for signaled field in struct rcu_state. */
176#define RCU_GP_INIT 0 /* Grace period being initialized. */
177#define RCU_SAVE_DYNTICK 1 /* Need to scan dyntick state. */
178#define RCU_FORCE_QS 2 /* Need to force quiescent state. */
179#ifdef CONFIG_NO_HZ
180#define RCU_SIGNAL_INIT RCU_SAVE_DYNTICK
181#else /* #ifdef CONFIG_NO_HZ */
182#define RCU_SIGNAL_INIT RCU_FORCE_QS
183#endif /* #else #ifdef CONFIG_NO_HZ */
184
185#define RCU_JIFFIES_TILL_FORCE_QS 3 /* for rsp->jiffies_force_qs */
186#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
187#define RCU_SECONDS_TILL_STALL_CHECK (10 * HZ) /* for rsp->jiffies_stall */
188#define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ) /* for rsp->jiffies_stall */
189#define RCU_STALL_RAT_DELAY 2 /* Allow other CPUs time */
190 /* to take at least one */
191 /* scheduling clock irq */
192 /* before ratting on them. */
193
194#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
195
196/*
197 * RCU global state, including node hierarchy. This hierarchy is
198 * represented in "heap" form in a dense array. The root (first level)
199 * of the hierarchy is in ->node[0] (referenced by ->level[0]), the second
200 * level in ->node[1] through ->node[m] (->node[1] referenced by ->level[1]),
201 * and the third level in ->node[m+1] and following (->node[m+1] referenced
202 * by ->level[2]). The number of levels is determined by the number of
203 * CPUs and by CONFIG_RCU_FANOUT. Small systems will have a "hierarchy"
204 * consisting of a single rcu_node.
205 */
206struct rcu_state {
207 struct rcu_node node[NUM_RCU_NODES]; /* Hierarchy. */
208 struct rcu_node *level[NUM_RCU_LVLS]; /* Hierarchy levels. */
209 u32 levelcnt[MAX_RCU_LVLS + 1]; /* # nodes in each level. */
210 u8 levelspread[NUM_RCU_LVLS]; /* kids/node in each level. */
211 struct rcu_data *rda[NR_CPUS]; /* array of rdp pointers. */
212
213 /* The following fields are guarded by the root rcu_node's lock. */
214
215 u8 signaled ____cacheline_internodealigned_in_smp;
216 /* Force QS state. */
217 long gpnum; /* Current gp number. */
218 long completed; /* # of last completed gp. */
219 spinlock_t onofflock; /* exclude on/offline and */
220 /* starting new GP. */
221 spinlock_t fqslock; /* Only one task forcing */
222 /* quiescent states. */
223 unsigned long jiffies_force_qs; /* Time at which to invoke */
224 /* force_quiescent_state(). */
225 unsigned long n_force_qs; /* Number of calls to */
226 /* force_quiescent_state(). */
227 unsigned long n_force_qs_lh; /* ~Number of calls leaving */
228 /* due to lock unavailable. */
229 unsigned long n_force_qs_ngp; /* Number of calls leaving */
230 /* due to no GP active. */
231#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
232 unsigned long gp_start; /* Time at which GP started, */
233 /* but in jiffies. */
234 unsigned long jiffies_stall; /* Time at which to check */
235 /* for CPU stalls. */
236#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
237#ifdef CONFIG_NO_HZ
238 long dynticks_completed; /* Value of completed @ snap. */
239#endif /* #ifdef CONFIG_NO_HZ */
240};
241
242#ifdef RCU_TREE_NONCORE
1 243
2/* 244/*
3 * RCU implementation internal declarations: 245 * RCU implementation internal declarations:
4 */ 246 */
5extern struct rcu_state rcu_state; 247extern struct rcu_state rcu_sched_state;
6DECLARE_PER_CPU(struct rcu_data, rcu_data); 248DECLARE_PER_CPU(struct rcu_data, rcu_sched_data);
7 249
8extern struct rcu_state rcu_bh_state; 250extern struct rcu_state rcu_bh_state;
9DECLARE_PER_CPU(struct rcu_data, rcu_bh_data); 251DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);
10 252
253#ifdef CONFIG_TREE_PREEMPT_RCU
254extern struct rcu_state rcu_preempt_state;
255DECLARE_PER_CPU(struct rcu_data, rcu_preempt_data);
256#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
257
258#endif /* #ifdef RCU_TREE_NONCORE */
259
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
new file mode 100644
index 000000000000..47789369ea59
--- /dev/null
+++ b/kernel/rcutree_plugin.h
@@ -0,0 +1,532 @@
1/*
2 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
3 * Internal non-public definitions that provide either classic
4 * or preemptable semantics.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 *
20 * Copyright Red Hat, 2009
21 * Copyright IBM Corporation, 2009
22 *
23 * Author: Ingo Molnar <mingo@elte.hu>
24 * Paul E. McKenney <paulmck@linux.vnet.ibm.com>
25 */
26
27
28#ifdef CONFIG_TREE_PREEMPT_RCU
29
30struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state);
31DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
32
33/*
34 * Tell them what RCU they are running.
35 */
36static inline void rcu_bootup_announce(void)
37{
38 printk(KERN_INFO
39 "Experimental preemptable hierarchical RCU implementation.\n");
40}
41
42/*
43 * Return the number of RCU-preempt batches processed thus far
44 * for debug and statistics.
45 */
46long rcu_batches_completed_preempt(void)
47{
48 return rcu_preempt_state.completed;
49}
50EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);
51
52/*
53 * Return the number of RCU batches processed thus far for debug & stats.
54 */
55long rcu_batches_completed(void)
56{
57 return rcu_batches_completed_preempt();
58}
59EXPORT_SYMBOL_GPL(rcu_batches_completed);
60
61/*
62 * Record a preemptable-RCU quiescent state for the specified CPU. Note
63 * that this just means that the task currently running on the CPU is
64 * not in a quiescent state. There might be any number of tasks blocked
65 * while in an RCU read-side critical section.
66 */
67static void rcu_preempt_qs_record(int cpu)
68{
69 struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);
70 rdp->passed_quiesc = 1;
71 rdp->passed_quiesc_completed = rdp->completed;
72}
73
74/*
75 * We have entered the scheduler or are between softirqs in ksoftirqd.
76 * If we are in an RCU read-side critical section, we need to reflect
77 * that in the state of the rcu_node structure corresponding to this CPU.
78 * Caller must disable hardirqs.
79 */
80static void rcu_preempt_qs(int cpu)
81{
82 struct task_struct *t = current;
83 int phase;
84 struct rcu_data *rdp;
85 struct rcu_node *rnp;
86
87 if (t->rcu_read_lock_nesting &&
88 (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
89
90 /* Possibly blocking in an RCU read-side critical section. */
91 rdp = rcu_preempt_state.rda[cpu];
92 rnp = rdp->mynode;
93 spin_lock(&rnp->lock);
94 t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
95 t->rcu_blocked_node = rnp;
96
97 /*
98 * If this CPU has already checked in, then this task
99 * will hold up the next grace period rather than the
100 * current grace period. Queue the task accordingly.
101 * If the task is queued for the current grace period
102 * (i.e., this CPU has not yet passed through a quiescent
103 * state for the current grace period), then as long
104 * as that task remains queued, the current grace period
105 * cannot end.
106 */
107 phase = !(rnp->qsmask & rdp->grpmask) ^ (rnp->gpnum & 0x1);
108 list_add(&t->rcu_node_entry, &rnp->blocked_tasks[phase]);
109 smp_mb(); /* Ensure later ctxt swtch seen after above. */
110 spin_unlock(&rnp->lock);
111 }
112
113 /*
114 * Either we were not in an RCU read-side critical section to
115 * begin with, or we have now recorded that critical section
116 * globally. Either way, we can now note a quiescent state
117 * for this CPU. Again, if we were in an RCU read-side critical
118 * section, and if that critical section was blocking the current
119 * grace period, then the fact that the task has been enqueued
120 * means that we continue to block the current grace period.
121 */
122 rcu_preempt_qs_record(cpu);
123 t->rcu_read_unlock_special &= ~(RCU_READ_UNLOCK_NEED_QS |
124 RCU_READ_UNLOCK_GOT_QS);
125}
126
127/*
128 * Tree-preemptable RCU implementation for rcu_read_lock().
129 * Just increment ->rcu_read_lock_nesting; shared state will be updated
130 * if we block.
131 */
132void __rcu_read_lock(void)
133{
134 ACCESS_ONCE(current->rcu_read_lock_nesting)++;
135 barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
136}
137EXPORT_SYMBOL_GPL(__rcu_read_lock);
138
139static void rcu_read_unlock_special(struct task_struct *t)
140{
141 int empty;
142 unsigned long flags;
143 unsigned long mask;
144 struct rcu_node *rnp;
145 int special;
146
147 /* NMI handlers cannot block and cannot safely manipulate state. */
148 if (in_nmi())
149 return;
150
151 local_irq_save(flags);
152
153 /*
154 * If RCU core is waiting for this CPU to exit critical section,
155 * let it know that we have done so.
156 */
157 special = t->rcu_read_unlock_special;
158 if (special & RCU_READ_UNLOCK_NEED_QS) {
159 t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
160 t->rcu_read_unlock_special |= RCU_READ_UNLOCK_GOT_QS;
161 }
162
163 /* Hardware IRQ handlers cannot block. */
164 if (in_irq()) {
165 local_irq_restore(flags);
166 return;
167 }
168
169 /* Clean up if blocked during RCU read-side critical section. */
170 if (special & RCU_READ_UNLOCK_BLOCKED) {
171 t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;
172
173 /*
174 * Remove this task from the list it blocked on. The
175 * task can migrate while we acquire the lock, but at
176 * most one time. So at most two passes through the loop.
177 */
178 for (;;) {
179 rnp = t->rcu_blocked_node;
180 spin_lock(&rnp->lock);
181 if (rnp == t->rcu_blocked_node)
182 break;
183 spin_unlock(&rnp->lock);
184 }
185 empty = list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]);
186 list_del_init(&t->rcu_node_entry);
187 t->rcu_blocked_node = NULL;
188
189 /*
190 * If this was the last task on the current list, and if
191 * we aren't waiting on any CPUs, report the quiescent state.
192 * Note that both cpu_quiet_msk_finish() and cpu_quiet_msk()
193 * drop rnp->lock and restore irq.
194 */
195 if (!empty && rnp->qsmask == 0 &&
196 list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1])) {
197 t->rcu_read_unlock_special &=
198 ~(RCU_READ_UNLOCK_NEED_QS |
199 RCU_READ_UNLOCK_GOT_QS);
200 if (rnp->parent == NULL) {
201 /* Only one rcu_node in the tree. */
202 cpu_quiet_msk_finish(&rcu_preempt_state, flags);
203 return;
204 }
205 /* Report up the rest of the hierarchy. */
206 mask = rnp->grpmask;
207 spin_unlock_irqrestore(&rnp->lock, flags);
208 rnp = rnp->parent;
209 spin_lock_irqsave(&rnp->lock, flags);
210 cpu_quiet_msk(mask, &rcu_preempt_state, rnp, flags);
211 return;
212 }
213 spin_unlock(&rnp->lock);
214 }
215 local_irq_restore(flags);
216}
217
218/*
219 * Tree-preemptable RCU implementation for rcu_read_unlock().
220 * Decrement ->rcu_read_lock_nesting. If the result is zero (outermost
221 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
222 * invoke rcu_read_unlock_special() to clean up after a context switch
223 * in an RCU read-side critical section and other special cases.
224 */
225void __rcu_read_unlock(void)
226{
227 struct task_struct *t = current;
228
229 barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */
230 if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
231 unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
232 rcu_read_unlock_special(t);
233}
234EXPORT_SYMBOL_GPL(__rcu_read_unlock);
235
236#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
237
238/*
239 * Scan the current list of tasks blocked within RCU read-side critical
240 * sections, printing out the tid of each.
241 */
242static void rcu_print_task_stall(struct rcu_node *rnp)
243{
244 unsigned long flags;
245 struct list_head *lp;
246 int phase = rnp->gpnum & 0x1;
247 struct task_struct *t;
248
249 if (!list_empty(&rnp->blocked_tasks[phase])) {
250 spin_lock_irqsave(&rnp->lock, flags);
251 phase = rnp->gpnum & 0x1; /* re-read under lock. */
252 lp = &rnp->blocked_tasks[phase];
253 list_for_each_entry(t, lp, rcu_node_entry)
254 printk(" P%d", t->pid);
255 spin_unlock_irqrestore(&rnp->lock, flags);
256 }
257}
258
259#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
260
261/*
262 * Check for preempted RCU readers for the specified rcu_node structure.
263 * If the caller needs a reliable answer, it must hold the rcu_node's
264 * ->lock.
265 */
266static int rcu_preempted_readers(struct rcu_node *rnp)
267{
268 return !list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]);
269}
270
271#ifdef CONFIG_HOTPLUG_CPU
272
273/*
274 * Handle tasklist migration for case in which all CPUs covered by the
275 * specified rcu_node have gone offline. Move them up to the root
276 * rcu_node. The reason for not just moving them to the immediate
277 * parent is to remove the need for rcu_read_unlock_special() to
278 * make more than two attempts to acquire the target rcu_node's lock.
279 *
280 * The caller must hold rnp->lock with irqs disabled.
281 */
282static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
283 struct rcu_node *rnp)
284{
285 int i;
286 struct list_head *lp;
287 struct list_head *lp_root;
288 struct rcu_node *rnp_root = rcu_get_root(rsp);
289 struct task_struct *tp;
290
291 if (rnp == rnp_root) {
292 WARN_ONCE(1, "Last CPU thought to be offlined?");
293 return; /* Shouldn't happen: at least one CPU online. */
294 }
295
296 /*
297 * Move tasks up to root rcu_node. Rely on the fact that the
298 * root rcu_node can be at most one ahead of the rest of the
299 * rcu_nodes in terms of gpnum value. This fact allows us to
300 * move the blocked_tasks[] array directly, element by element.
301 */
302 for (i = 0; i < 2; i++) {
303 lp = &rnp->blocked_tasks[i];
304 lp_root = &rnp_root->blocked_tasks[i];
305 while (!list_empty(lp)) {
306 tp = list_entry(lp->next, typeof(*tp), rcu_node_entry);
307 spin_lock(&rnp_root->lock); /* irqs already disabled */
308 list_del(&tp->rcu_node_entry);
309 tp->rcu_blocked_node = rnp_root;
310 list_add(&tp->rcu_node_entry, lp_root);
311 spin_unlock(&rnp_root->lock); /* irqs remain disabled */
312 }
313 }
314}
315
316/*
317 * Do CPU-offline processing for preemptable RCU.
318 */
319static void rcu_preempt_offline_cpu(int cpu)
320{
321 __rcu_offline_cpu(cpu, &rcu_preempt_state);
322}
323
324#endif /* #ifdef CONFIG_HOTPLUG_CPU */
325
326/*
327 * Check for a quiescent state from the current CPU. When a task blocks,
328 * the task is recorded in the corresponding CPU's rcu_node structure,
329 * which is checked elsewhere.
330 *
331 * Caller must disable hard irqs.
332 */
333static void rcu_preempt_check_callbacks(int cpu)
334{
335 struct task_struct *t = current;
336
337 if (t->rcu_read_lock_nesting == 0) {
338 t->rcu_read_unlock_special &=
339 ~(RCU_READ_UNLOCK_NEED_QS | RCU_READ_UNLOCK_GOT_QS);
340 rcu_preempt_qs_record(cpu);
341 return;
342 }
343 if (per_cpu(rcu_preempt_data, cpu).qs_pending) {
344 if (t->rcu_read_unlock_special & RCU_READ_UNLOCK_GOT_QS) {
345 rcu_preempt_qs_record(cpu);
346 t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_GOT_QS;
347 } else if (!(t->rcu_read_unlock_special &
348 RCU_READ_UNLOCK_NEED_QS)) {
349 t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
350 }
351 }
352}
353
354/*
355 * Process callbacks for preemptable RCU.
356 */
357static void rcu_preempt_process_callbacks(void)
358{
359 __rcu_process_callbacks(&rcu_preempt_state,
360 &__get_cpu_var(rcu_preempt_data));
361}
362
363/*
364 * Queue a preemptable-RCU callback for invocation after a grace period.
365 */
366void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
367{
368 __call_rcu(head, func, &rcu_preempt_state);
369}
370EXPORT_SYMBOL_GPL(call_rcu);
371
372/*
373 * Check to see if there is any immediate preemptable-RCU-related work
374 * to be done.
375 */
376static int rcu_preempt_pending(int cpu)
377{
378 return __rcu_pending(&rcu_preempt_state,
379 &per_cpu(rcu_preempt_data, cpu));
380}
381
382/*
383 * Does preemptable RCU need the CPU to stay out of dynticks mode?
384 */
385static int rcu_preempt_needs_cpu(int cpu)
386{
387 return !!per_cpu(rcu_preempt_data, cpu).nxtlist;
388}
389
390/*
391 * Initialize preemptable RCU's per-CPU data.
392 */
393static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
394{
395 rcu_init_percpu_data(cpu, &rcu_preempt_state, 1);
396}
397
398/*
399 * Check for a task exiting while in a preemptable-RCU read-side
400 * critical section, clean up if so. No need to issue warnings,
401 * as debug_check_no_locks_held() already does this if lockdep
402 * is enabled.
403 */
404void exit_rcu(void)
405{
406 struct task_struct *t = current;
407
408 if (t->rcu_read_lock_nesting == 0)
409 return;
410 t->rcu_read_lock_nesting = 1;
411 rcu_read_unlock();
412}
413
414#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
415
416/*
417 * Tell them what RCU they are running.
418 */
419static inline void rcu_bootup_announce(void)
420{
421 printk(KERN_INFO "Hierarchical RCU implementation.\n");
422}
423
424/*
425 * Return the number of RCU batches processed thus far for debug & stats.
426 */
427long rcu_batches_completed(void)
428{
429 return rcu_batches_completed_sched();
430}
431EXPORT_SYMBOL_GPL(rcu_batches_completed);
432
433/*
434 * Because preemptable RCU does not exist, we never have to check for
435 * CPUs being in quiescent states.
436 */
437static void rcu_preempt_qs(int cpu)
438{
439}
440
441#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
442
443/*
444 * Because preemptable RCU does not exist, we never have to check for
445 * tasks blocked within RCU read-side critical sections.
446 */
447static void rcu_print_task_stall(struct rcu_node *rnp)
448{
449}
450
451#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
452
453/*
454 * Because preemptable RCU does not exist, there are never any preempted
455 * RCU readers.
456 */
457static int rcu_preempted_readers(struct rcu_node *rnp)
458{
459 return 0;
460}
461
462#ifdef CONFIG_HOTPLUG_CPU
463
464/*
465 * Because preemptable RCU does not exist, it never needs to migrate
466 * tasks that were blocked within RCU read-side critical sections.
467 */
468static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
469 struct rcu_node *rnp)
470{
471}
472
473/*
474 * Because preemptable RCU does not exist, it never needs CPU-offline
475 * processing.
476 */
477static void rcu_preempt_offline_cpu(int cpu)
478{
479}
480
481#endif /* #ifdef CONFIG_HOTPLUG_CPU */
482
483/*
484 * Because preemptable RCU does not exist, it never has any callbacks
485 * to check.
486 */
487void rcu_preempt_check_callbacks(int cpu)
488{
489}
490
491/*
492 * Because preemptable RCU does not exist, it never has any callbacks
493 * to process.
494 */
495void rcu_preempt_process_callbacks(void)
496{
497}
498
499/*
500 * In classic RCU, call_rcu() is just call_rcu_sched().
501 */
502void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
503{
504 call_rcu_sched(head, func);
505}
506EXPORT_SYMBOL_GPL(call_rcu);
507
508/*
509 * Because preemptable RCU does not exist, it never has any work to do.
510 */
511static int rcu_preempt_pending(int cpu)
512{
513 return 0;
514}
515
516/*
517 * Because preemptable RCU does not exist, it never needs any CPU.
518 */
519static int rcu_preempt_needs_cpu(int cpu)
520{
521 return 0;
522}
523
524/*
525 * Because preemptable RCU does not exist, there is no per-CPU
526 * data to initialize.
527 */
528static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
529{
530}
531
532#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
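
The read-side fast paths added above boil down to a per-task nesting counter plus a "special" flag that only the outermost rcu_read_unlock() consults. The following stand-alone C sketch (illustrative names only, and without the ACCESS_ONCE()/barrier() compiler constraints the kernel needs) isolates that counting scheme; the real cleanup work lives in rcu_read_unlock_special() above.

/*
 * Toy model of the __rcu_read_lock()/__rcu_read_unlock() fast paths.
 * The kernel keeps these fields in task_struct and defers the heavy
 * lifting to rcu_read_unlock_special(); here the "cleanup" is a printf.
 */
#include <stdio.h>

struct toy_task {
	int rcu_read_lock_nesting;	/* depth of nested read-side sections */
	int rcu_read_unlock_special;	/* nonzero if preempted/blocked inside one */
};

static struct toy_task task;

static void toy_rcu_read_lock(void)
{
	task.rcu_read_lock_nesting++;		/* fast path: just an increment */
}

static void toy_rcu_read_unlock(void)
{
	/* Only the outermost unlock looks at the special state. */
	if (--task.rcu_read_lock_nesting == 0 && task.rcu_read_unlock_special) {
		printf("outermost unlock: report the deferred quiescent state\n");
		task.rcu_read_unlock_special = 0;
	}
}

int main(void)
{
	toy_rcu_read_lock();
	toy_rcu_read_lock();			/* nested section */
	task.rcu_read_unlock_special = 1;	/* pretend we were preempted here */
	toy_rcu_read_unlock();			/* inner unlock: nothing to do */
	toy_rcu_read_unlock();			/* outer unlock: cleanup runs once */
	return 0;
}
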
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
index fe1dcdbf1ca3..0ea1bff69727 100644
--- a/kernel/rcutree_trace.c
+++ b/kernel/rcutree_trace.c
@@ -43,6 +43,7 @@
43#include <linux/debugfs.h> 43#include <linux/debugfs.h>
44#include <linux/seq_file.h> 44#include <linux/seq_file.h>
45 45
46#define RCU_TREE_NONCORE
46#include "rcutree.h" 47#include "rcutree.h"
47 48
48static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp) 49static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
@@ -76,8 +77,12 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
76 77
77static int show_rcudata(struct seq_file *m, void *unused) 78static int show_rcudata(struct seq_file *m, void *unused)
78{ 79{
79 seq_puts(m, "rcu:\n"); 80#ifdef CONFIG_TREE_PREEMPT_RCU
80 PRINT_RCU_DATA(rcu_data, print_one_rcu_data, m); 81 seq_puts(m, "rcu_preempt:\n");
82 PRINT_RCU_DATA(rcu_preempt_data, print_one_rcu_data, m);
83#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
84 seq_puts(m, "rcu_sched:\n");
85 PRINT_RCU_DATA(rcu_sched_data, print_one_rcu_data, m);
81 seq_puts(m, "rcu_bh:\n"); 86 seq_puts(m, "rcu_bh:\n");
82 PRINT_RCU_DATA(rcu_bh_data, print_one_rcu_data, m); 87 PRINT_RCU_DATA(rcu_bh_data, print_one_rcu_data, m);
83 return 0; 88 return 0;
@@ -102,7 +107,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
102 return; 107 return;
103 seq_printf(m, "%d,%s,%ld,%ld,%d,%ld,%d", 108 seq_printf(m, "%d,%s,%ld,%ld,%d,%ld,%d",
104 rdp->cpu, 109 rdp->cpu,
105 cpu_is_offline(rdp->cpu) ? "\"Y\"" : "\"N\"", 110 cpu_is_offline(rdp->cpu) ? "\"N\"" : "\"Y\"",
106 rdp->completed, rdp->gpnum, 111 rdp->completed, rdp->gpnum,
107 rdp->passed_quiesc, rdp->passed_quiesc_completed, 112 rdp->passed_quiesc, rdp->passed_quiesc_completed,
108 rdp->qs_pending); 113 rdp->qs_pending);
@@ -124,8 +129,12 @@ static int show_rcudata_csv(struct seq_file *m, void *unused)
124 seq_puts(m, "\"dt\",\"dt nesting\",\"dn\",\"df\","); 129 seq_puts(m, "\"dt\",\"dt nesting\",\"dn\",\"df\",");
125#endif /* #ifdef CONFIG_NO_HZ */ 130#endif /* #ifdef CONFIG_NO_HZ */
126 seq_puts(m, "\"of\",\"ri\",\"ql\",\"b\"\n"); 131 seq_puts(m, "\"of\",\"ri\",\"ql\",\"b\"\n");
127 seq_puts(m, "\"rcu:\"\n"); 132#ifdef CONFIG_TREE_PREEMPT_RCU
128 PRINT_RCU_DATA(rcu_data, print_one_rcu_data_csv, m); 133 seq_puts(m, "\"rcu_preempt:\"\n");
134 PRINT_RCU_DATA(rcu_preempt_data, print_one_rcu_data_csv, m);
135#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
136 seq_puts(m, "\"rcu_sched:\"\n");
137 PRINT_RCU_DATA(rcu_sched_data, print_one_rcu_data_csv, m);
129 seq_puts(m, "\"rcu_bh:\"\n"); 138 seq_puts(m, "\"rcu_bh:\"\n");
130 PRINT_RCU_DATA(rcu_bh_data, print_one_rcu_data_csv, m); 139 PRINT_RCU_DATA(rcu_bh_data, print_one_rcu_data_csv, m);
131 return 0; 140 return 0;
@@ -171,8 +180,12 @@ static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp)
171 180
172static int show_rcuhier(struct seq_file *m, void *unused) 181static int show_rcuhier(struct seq_file *m, void *unused)
173{ 182{
174 seq_puts(m, "rcu:\n"); 183#ifdef CONFIG_TREE_PREEMPT_RCU
175 print_one_rcu_state(m, &rcu_state); 184 seq_puts(m, "rcu_preempt:\n");
185 print_one_rcu_state(m, &rcu_preempt_state);
186#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
187 seq_puts(m, "rcu_sched:\n");
188 print_one_rcu_state(m, &rcu_sched_state);
176 seq_puts(m, "rcu_bh:\n"); 189 seq_puts(m, "rcu_bh:\n");
177 print_one_rcu_state(m, &rcu_bh_state); 190 print_one_rcu_state(m, &rcu_bh_state);
178 return 0; 191 return 0;
@@ -193,8 +206,12 @@ static struct file_operations rcuhier_fops = {
193 206
194static int show_rcugp(struct seq_file *m, void *unused) 207static int show_rcugp(struct seq_file *m, void *unused)
195{ 208{
196 seq_printf(m, "rcu: completed=%ld gpnum=%ld\n", 209#ifdef CONFIG_TREE_PREEMPT_RCU
197 rcu_state.completed, rcu_state.gpnum); 210 seq_printf(m, "rcu_preempt: completed=%ld gpnum=%ld\n",
211 rcu_preempt_state.completed, rcu_preempt_state.gpnum);
212#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
213 seq_printf(m, "rcu_sched: completed=%ld gpnum=%ld\n",
214 rcu_sched_state.completed, rcu_sched_state.gpnum);
198 seq_printf(m, "rcu_bh: completed=%ld gpnum=%ld\n", 215 seq_printf(m, "rcu_bh: completed=%ld gpnum=%ld\n",
199 rcu_bh_state.completed, rcu_bh_state.gpnum); 216 rcu_bh_state.completed, rcu_bh_state.gpnum);
200 return 0; 217 return 0;
@@ -243,8 +260,12 @@ static void print_rcu_pendings(struct seq_file *m, struct rcu_state *rsp)
243 260
244static int show_rcu_pending(struct seq_file *m, void *unused) 261static int show_rcu_pending(struct seq_file *m, void *unused)
245{ 262{
246 seq_puts(m, "rcu:\n"); 263#ifdef CONFIG_TREE_PREEMPT_RCU
247 print_rcu_pendings(m, &rcu_state); 264 seq_puts(m, "rcu_preempt:\n");
265 print_rcu_pendings(m, &rcu_preempt_state);
266#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
267 seq_puts(m, "rcu_sched:\n");
268 print_rcu_pendings(m, &rcu_sched_state);
248 seq_puts(m, "rcu_bh:\n"); 269 seq_puts(m, "rcu_bh:\n");
249 print_rcu_pendings(m, &rcu_bh_state); 270 print_rcu_pendings(m, &rcu_bh_state);
250 return 0; 271 return 0;
@@ -264,62 +285,47 @@ static struct file_operations rcu_pending_fops = {
264}; 285};
265 286
266static struct dentry *rcudir; 287static struct dentry *rcudir;
267static struct dentry *datadir;
268static struct dentry *datadir_csv;
269static struct dentry *gpdir;
270static struct dentry *hierdir;
271static struct dentry *rcu_pendingdir;
272 288
273static int __init rcuclassic_trace_init(void) 289static int __init rcuclassic_trace_init(void)
274{ 290{
291 struct dentry *retval;
292
275 rcudir = debugfs_create_dir("rcu", NULL); 293 rcudir = debugfs_create_dir("rcu", NULL);
276 if (!rcudir) 294 if (!rcudir)
277 goto out; 295 goto free_out;
278 296
279 datadir = debugfs_create_file("rcudata", 0444, rcudir, 297 retval = debugfs_create_file("rcudata", 0444, rcudir,
280 NULL, &rcudata_fops); 298 NULL, &rcudata_fops);
281 if (!datadir) 299 if (!retval)
282 goto free_out; 300 goto free_out;
283 301
284 datadir_csv = debugfs_create_file("rcudata.csv", 0444, rcudir, 302 retval = debugfs_create_file("rcudata.csv", 0444, rcudir,
285 NULL, &rcudata_csv_fops); 303 NULL, &rcudata_csv_fops);
286 if (!datadir_csv) 304 if (!retval)
287 goto free_out; 305 goto free_out;
288 306
289 gpdir = debugfs_create_file("rcugp", 0444, rcudir, NULL, &rcugp_fops); 307 retval = debugfs_create_file("rcugp", 0444, rcudir, NULL, &rcugp_fops);
290 if (!gpdir) 308 if (!retval)
291 goto free_out; 309 goto free_out;
292 310
293 hierdir = debugfs_create_file("rcuhier", 0444, rcudir, 311 retval = debugfs_create_file("rcuhier", 0444, rcudir,
294 NULL, &rcuhier_fops); 312 NULL, &rcuhier_fops);
295 if (!hierdir) 313 if (!retval)
296 goto free_out; 314 goto free_out;
297 315
298 rcu_pendingdir = debugfs_create_file("rcu_pending", 0444, rcudir, 316 retval = debugfs_create_file("rcu_pending", 0444, rcudir,
299 NULL, &rcu_pending_fops); 317 NULL, &rcu_pending_fops);
300 if (!rcu_pendingdir) 318 if (!retval)
301 goto free_out; 319 goto free_out;
302 return 0; 320 return 0;
303free_out: 321free_out:
304 if (datadir) 322 debugfs_remove_recursive(rcudir);
305 debugfs_remove(datadir);
306 if (datadir_csv)
307 debugfs_remove(datadir_csv);
308 if (gpdir)
309 debugfs_remove(gpdir);
310 debugfs_remove(rcudir);
311out:
312 return 1; 323 return 1;
313} 324}
314 325
315static void __exit rcuclassic_trace_cleanup(void) 326static void __exit rcuclassic_trace_cleanup(void)
316{ 327{
317 debugfs_remove(datadir); 328 debugfs_remove_recursive(rcudir);
318 debugfs_remove(datadir_csv);
319 debugfs_remove(gpdir);
320 debugfs_remove(hierdir);
321 debugfs_remove(rcu_pendingdir);
322 debugfs_remove(rcudir);
323} 329}
324 330
325 331
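
The rcuclassic_trace_init() rework above can drop the per-file dentry variables because a single debugfs_remove_recursive() on the directory tears down everything created beneath it, including on the error path. A minimal sketch of that pattern in a hypothetical, unrelated module (the "example" names and the u32 file are invented; debugfs_create_u32() still returned a dentry in kernels of this vintage):

/*
 * Sketch of the "remember only the directory" debugfs cleanup pattern.
 * Any file created under example_dir is freed by one
 * debugfs_remove_recursive() call, both on init failure and on exit.
 */
#include <linux/module.h>
#include <linux/debugfs.h>

static struct dentry *example_dir;
static u32 example_value;

static int __init example_debugfs_init(void)
{
	struct dentry *retval;

	example_dir = debugfs_create_dir("example", NULL);
	if (!example_dir)
		goto free_out;

	retval = debugfs_create_u32("value", 0444, example_dir, &example_value);
	if (!retval)
		goto free_out;

	return 0;

free_out:
	debugfs_remove_recursive(example_dir);	/* safe even if example_dir is NULL */
	return -ENOMEM;
}

static void __exit example_debugfs_exit(void)
{
	debugfs_remove_recursive(example_dir);
}

module_init(example_debugfs_init);
module_exit(example_debugfs_exit);
MODULE_LICENSE("GPL");
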
diff --git a/kernel/sched.c b/kernel/sched.c
index 1b59e265273b..e27a53685ed9 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -64,7 +64,6 @@
64#include <linux/tsacct_kern.h> 64#include <linux/tsacct_kern.h>
65#include <linux/kprobes.h> 65#include <linux/kprobes.h>
66#include <linux/delayacct.h> 66#include <linux/delayacct.h>
67#include <linux/reciprocal_div.h>
68#include <linux/unistd.h> 67#include <linux/unistd.h>
69#include <linux/pagemap.h> 68#include <linux/pagemap.h>
70#include <linux/hrtimer.h> 69#include <linux/hrtimer.h>
@@ -120,30 +119,8 @@
120 */ 119 */
121#define RUNTIME_INF ((u64)~0ULL) 120#define RUNTIME_INF ((u64)~0ULL)
122 121
123#ifdef CONFIG_SMP
124
125static void double_rq_lock(struct rq *rq1, struct rq *rq2); 122static void double_rq_lock(struct rq *rq1, struct rq *rq2);
126 123
127/*
128 * Divide a load by a sched group cpu_power : (load / sg->__cpu_power)
129 * Since cpu_power is a 'constant', we can use a reciprocal divide.
130 */
131static inline u32 sg_div_cpu_power(const struct sched_group *sg, u32 load)
132{
133 return reciprocal_divide(load, sg->reciprocal_cpu_power);
134}
135
136/*
137 * Each time a sched group cpu_power is changed,
138 * we must compute its reciprocal value
139 */
140static inline void sg_inc_cpu_power(struct sched_group *sg, u32 val)
141{
142 sg->__cpu_power += val;
143 sg->reciprocal_cpu_power = reciprocal_value(sg->__cpu_power);
144}
145#endif
146
147static inline int rt_policy(int policy) 124static inline int rt_policy(int policy)
148{ 125{
149 if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR)) 126 if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR))
@@ -309,8 +286,8 @@ void set_tg_uid(struct user_struct *user)
309 286
310/* 287/*
311 * Root task group. 288 * Root task group.
312 * Every UID task group (including init_task_group aka UID-0) will 289 * Every UID task group (including init_task_group aka UID-0) will
313 * be a child to this group. 290 * be a child to this group.
314 */ 291 */
315struct task_group root_task_group; 292struct task_group root_task_group;
316 293
@@ -318,7 +295,7 @@ struct task_group root_task_group;
318/* Default task group's sched entity on each cpu */ 295/* Default task group's sched entity on each cpu */
319static DEFINE_PER_CPU(struct sched_entity, init_sched_entity); 296static DEFINE_PER_CPU(struct sched_entity, init_sched_entity);
320/* Default task group's cfs_rq on each cpu */ 297/* Default task group's cfs_rq on each cpu */
321static DEFINE_PER_CPU(struct cfs_rq, init_cfs_rq) ____cacheline_aligned_in_smp; 298static DEFINE_PER_CPU(struct cfs_rq, init_tg_cfs_rq) ____cacheline_aligned_in_smp;
322#endif /* CONFIG_FAIR_GROUP_SCHED */ 299#endif /* CONFIG_FAIR_GROUP_SCHED */
323 300
324#ifdef CONFIG_RT_GROUP_SCHED 301#ifdef CONFIG_RT_GROUP_SCHED
@@ -616,6 +593,7 @@ struct rq {
616 593
617 unsigned char idle_at_tick; 594 unsigned char idle_at_tick;
618 /* For active balancing */ 595 /* For active balancing */
596 int post_schedule;
619 int active_balance; 597 int active_balance;
620 int push_cpu; 598 int push_cpu;
621 /* cpu of this runqueue: */ 599 /* cpu of this runqueue: */
@@ -626,6 +604,9 @@ struct rq {
626 604
627 struct task_struct *migration_thread; 605 struct task_struct *migration_thread;
628 struct list_head migration_queue; 606 struct list_head migration_queue;
607
608 u64 rt_avg;
609 u64 age_stamp;
629#endif 610#endif
630 611
631 /* calc_load related fields */ 612 /* calc_load related fields */
@@ -693,6 +674,7 @@ static inline int cpu_of(struct rq *rq)
693#define this_rq() (&__get_cpu_var(runqueues)) 674#define this_rq() (&__get_cpu_var(runqueues))
694#define task_rq(p) cpu_rq(task_cpu(p)) 675#define task_rq(p) cpu_rq(task_cpu(p))
695#define cpu_curr(cpu) (cpu_rq(cpu)->curr) 676#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
677#define raw_rq() (&__raw_get_cpu_var(runqueues))
696 678
697inline void update_rq_clock(struct rq *rq) 679inline void update_rq_clock(struct rq *rq)
698{ 680{
@@ -861,6 +843,14 @@ unsigned int sysctl_sched_shares_ratelimit = 250000;
861unsigned int sysctl_sched_shares_thresh = 4; 843unsigned int sysctl_sched_shares_thresh = 4;
862 844
863/* 845/*
846 * period over which we average the RT time consumption, measured
847 * in ms.
848 *
849 * default: 1s
850 */
851const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;
852
853/*
864 * period over which we measure -rt task cpu usage in us. 854 * period over which we measure -rt task cpu usage in us.
865 * default: 1s 855 * default: 1s
866 */ 856 */
@@ -1278,12 +1268,37 @@ void wake_up_idle_cpu(int cpu)
1278} 1268}
1279#endif /* CONFIG_NO_HZ */ 1269#endif /* CONFIG_NO_HZ */
1280 1270
1271static u64 sched_avg_period(void)
1272{
1273 return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
1274}
1275
1276static void sched_avg_update(struct rq *rq)
1277{
1278 s64 period = sched_avg_period();
1279
1280 while ((s64)(rq->clock - rq->age_stamp) > period) {
1281 rq->age_stamp += period;
1282 rq->rt_avg /= 2;
1283 }
1284}
1285
1286static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
1287{
1288 rq->rt_avg += rt_delta;
1289 sched_avg_update(rq);
1290}
1291
1281#else /* !CONFIG_SMP */ 1292#else /* !CONFIG_SMP */
1282static void resched_task(struct task_struct *p) 1293static void resched_task(struct task_struct *p)
1283{ 1294{
1284 assert_spin_locked(&task_rq(p)->lock); 1295 assert_spin_locked(&task_rq(p)->lock);
1285 set_tsk_need_resched(p); 1296 set_tsk_need_resched(p);
1286} 1297}
1298
1299static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
1300{
1301}
1287#endif /* CONFIG_SMP */ 1302#endif /* CONFIG_SMP */
1288 1303
1289#if BITS_PER_LONG == 32 1304#if BITS_PER_LONG == 32
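
The rq->rt_avg/rq->age_stamp pair introduced above is a coarse exponential average: sched_avg_update() halves the accumulated RT time once per sched_avg_period(), i.e. once per half of sysctl_sched_time_avg. Below is a stand-alone sketch of just that arithmetic, with invented tick and load values; scale_rt_power() further down in this patch turns the resulting average into the fraction of CPU capacity left for fair tasks.

/*
 * User-space model of sched_avg_update(): accumulate RT runtime into
 * rt_avg and halve it once per averaging period.  Numbers are made up.
 */
#include <stdio.h>
#include <stdint.h>

#define MSEC_PER_SEC	1000ULL
#define NSEC_PER_MSEC	1000000ULL

static const uint64_t sysctl_sched_time_avg = MSEC_PER_SEC;	/* 1s, in ms */

static uint64_t sched_avg_period(void)
{
	return sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
}

int main(void)
{
	uint64_t rt_avg = 0, age_stamp = 0, clock = 0;
	int tick;

	for (tick = 0; tick < 8; tick++) {
		clock += 250 * NSEC_PER_MSEC;	/* 250ms of wall clock per tick */
		rt_avg += 100 * NSEC_PER_MSEC;	/* 100ms of RT execution per tick */

		/* Same decay loop as sched_avg_update(). */
		while (clock - age_stamp > sched_avg_period()) {
			age_stamp += sched_avg_period();
			rt_avg /= 2;
		}
		printf("clock=%4llums rt_avg=%3llums\n",
		       (unsigned long long)(clock / NSEC_PER_MSEC),
		       (unsigned long long)(rt_avg / NSEC_PER_MSEC));
	}
	return 0;
}
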
@@ -1513,28 +1528,35 @@ static unsigned long cpu_avg_load_per_task(int cpu)
1513 1528
1514#ifdef CONFIG_FAIR_GROUP_SCHED 1529#ifdef CONFIG_FAIR_GROUP_SCHED
1515 1530
1531struct update_shares_data {
1532 unsigned long rq_weight[NR_CPUS];
1533};
1534
1535static DEFINE_PER_CPU(struct update_shares_data, update_shares_data);
1536
1516static void __set_se_shares(struct sched_entity *se, unsigned long shares); 1537static void __set_se_shares(struct sched_entity *se, unsigned long shares);
1517 1538
1518/* 1539/*
1519 * Calculate and set the cpu's group shares. 1540 * Calculate and set the cpu's group shares.
1520 */ 1541 */
1521static void 1542static void update_group_shares_cpu(struct task_group *tg, int cpu,
1522update_group_shares_cpu(struct task_group *tg, int cpu, 1543 unsigned long sd_shares,
1523 unsigned long sd_shares, unsigned long sd_rq_weight) 1544 unsigned long sd_rq_weight,
1545 struct update_shares_data *usd)
1524{ 1546{
1525 unsigned long shares; 1547 unsigned long shares, rq_weight;
1526 unsigned long rq_weight; 1548 int boost = 0;
1527
1528 if (!tg->se[cpu])
1529 return;
1530 1549
1531 rq_weight = tg->cfs_rq[cpu]->rq_weight; 1550 rq_weight = usd->rq_weight[cpu];
1551 if (!rq_weight) {
1552 boost = 1;
1553 rq_weight = NICE_0_LOAD;
1554 }
1532 1555
1533 /* 1556 /*
1534 * \Sum shares * rq_weight 1557 * \Sum_j shares_j * rq_weight_i
1535 * shares = ----------------------- 1558 * shares_i = -----------------------------
1536 * \Sum rq_weight 1559 * \Sum_j rq_weight_j
1537 *
1538 */ 1560 */
1539 shares = (sd_shares * rq_weight) / sd_rq_weight; 1561 shares = (sd_shares * rq_weight) / sd_rq_weight;
1540 shares = clamp_t(unsigned long, shares, MIN_SHARES, MAX_SHARES); 1562 shares = clamp_t(unsigned long, shares, MIN_SHARES, MAX_SHARES);
@@ -1545,8 +1567,8 @@ update_group_shares_cpu(struct task_group *tg, int cpu,
1545 unsigned long flags; 1567 unsigned long flags;
1546 1568
1547 spin_lock_irqsave(&rq->lock, flags); 1569 spin_lock_irqsave(&rq->lock, flags);
1548 tg->cfs_rq[cpu]->shares = shares; 1570 tg->cfs_rq[cpu]->rq_weight = boost ? 0 : rq_weight;
1549 1571 tg->cfs_rq[cpu]->shares = boost ? 0 : shares;
1550 __set_se_shares(tg->se[cpu], shares); 1572 __set_se_shares(tg->se[cpu], shares);
1551 spin_unlock_irqrestore(&rq->lock, flags); 1573 spin_unlock_irqrestore(&rq->lock, flags);
1552 } 1574 }
@@ -1559,22 +1581,30 @@ update_group_shares_cpu(struct task_group *tg, int cpu,
1559 */ 1581 */
1560static int tg_shares_up(struct task_group *tg, void *data) 1582static int tg_shares_up(struct task_group *tg, void *data)
1561{ 1583{
1562 unsigned long weight, rq_weight = 0; 1584 unsigned long weight, rq_weight = 0, shares = 0;
1563 unsigned long shares = 0; 1585 struct update_shares_data *usd;
1564 struct sched_domain *sd = data; 1586 struct sched_domain *sd = data;
1587 unsigned long flags;
1565 int i; 1588 int i;
1566 1589
1590 if (!tg->se[0])
1591 return 0;
1592
1593 local_irq_save(flags);
1594 usd = &__get_cpu_var(update_shares_data);
1595
1567 for_each_cpu(i, sched_domain_span(sd)) { 1596 for_each_cpu(i, sched_domain_span(sd)) {
1597 weight = tg->cfs_rq[i]->load.weight;
1598 usd->rq_weight[i] = weight;
1599
1568 /* 1600 /*
1569 * If there are currently no tasks on the cpu pretend there 1601 * If there are currently no tasks on the cpu pretend there
1570 * is one of average load so that when a new task gets to 1602 * is one of average load so that when a new task gets to
1571 * run here it will not get delayed by group starvation. 1603 * run here it will not get delayed by group starvation.
1572 */ 1604 */
1573 weight = tg->cfs_rq[i]->load.weight;
1574 if (!weight) 1605 if (!weight)
1575 weight = NICE_0_LOAD; 1606 weight = NICE_0_LOAD;
1576 1607
1577 tg->cfs_rq[i]->rq_weight = weight;
1578 rq_weight += weight; 1608 rq_weight += weight;
1579 shares += tg->cfs_rq[i]->shares; 1609 shares += tg->cfs_rq[i]->shares;
1580 } 1610 }
@@ -1586,7 +1616,9 @@ static int tg_shares_up(struct task_group *tg, void *data)
1586 shares = tg->shares; 1616 shares = tg->shares;
1587 1617
1588 for_each_cpu(i, sched_domain_span(sd)) 1618 for_each_cpu(i, sched_domain_span(sd))
1589 update_group_shares_cpu(tg, i, shares, rq_weight); 1619 update_group_shares_cpu(tg, i, shares, rq_weight, usd);
1620
1621 local_irq_restore(flags);
1590 1622
1591 return 0; 1623 return 0;
1592} 1624}
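
The rewritten update_group_shares_cpu()/tg_shares_up() above distribute a group's shares across CPUs in proportion to each CPU's runqueue weight, using the per-cpu snapshot in update_shares_data to keep the sum and the per-cpu values consistent. A stand-alone worked example of the proportion itself (the weights and the MIN/MAX limits below are invented for illustration; the kernel defines its own):

/*
 * shares_i = sd_shares * rq_weight_i / \Sum_j rq_weight_j, clamped.
 * With sd_shares = 1024 and weights 3072 and 1024 this prints 768 and 256.
 */
#include <stdio.h>

#define TOY_MIN_SHARES	2UL
#define TOY_MAX_SHARES	(1UL << 18)

static unsigned long clamp_shares(unsigned long s)
{
	if (s < TOY_MIN_SHARES)
		return TOY_MIN_SHARES;
	if (s > TOY_MAX_SHARES)
		return TOY_MAX_SHARES;
	return s;
}

int main(void)
{
	unsigned long sd_shares = 1024;			/* the group's total shares */
	unsigned long rq_weight[] = { 3072, 1024 };	/* invented per-cpu weights */
	unsigned long sd_rq_weight = 3072 + 1024;
	unsigned long i;

	for (i = 0; i < 2; i++)
		printf("cpu%lu: shares=%lu\n", i,
		       clamp_shares(sd_shares * rq_weight[i] / sd_rq_weight));
	return 0;
}

Idle CPUs get special treatment in the code above: tg_shares_up() substitutes NICE_0_LOAD for a zero weight when forming the sum, and update_group_shares_cpu() treats that case as the "boost" path.
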
@@ -1616,8 +1648,14 @@ static int tg_load_down(struct task_group *tg, void *data)
1616 1648
1617static void update_shares(struct sched_domain *sd) 1649static void update_shares(struct sched_domain *sd)
1618{ 1650{
1619 u64 now = cpu_clock(raw_smp_processor_id()); 1651 s64 elapsed;
1620 s64 elapsed = now - sd->last_update; 1652 u64 now;
1653
1654 if (root_task_group_empty())
1655 return;
1656
1657 now = cpu_clock(raw_smp_processor_id());
1658 elapsed = now - sd->last_update;
1621 1659
1622 if (elapsed >= (s64)(u64)sysctl_sched_shares_ratelimit) { 1660 if (elapsed >= (s64)(u64)sysctl_sched_shares_ratelimit) {
1623 sd->last_update = now; 1661 sd->last_update = now;
@@ -1627,6 +1665,9 @@ static void update_shares(struct sched_domain *sd)
1627 1665
1628static void update_shares_locked(struct rq *rq, struct sched_domain *sd) 1666static void update_shares_locked(struct rq *rq, struct sched_domain *sd)
1629{ 1667{
1668 if (root_task_group_empty())
1669 return;
1670
1630 spin_unlock(&rq->lock); 1671 spin_unlock(&rq->lock);
1631 update_shares(sd); 1672 update_shares(sd);
1632 spin_lock(&rq->lock); 1673 spin_lock(&rq->lock);
@@ -1634,6 +1675,9 @@ static void update_shares_locked(struct rq *rq, struct sched_domain *sd)
1634 1675
1635static void update_h_load(long cpu) 1676static void update_h_load(long cpu)
1636{ 1677{
1678 if (root_task_group_empty())
1679 return;
1680
1637 walk_tg_tree(tg_load_down, tg_nop, (void *)cpu); 1681 walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
1638} 1682}
1639 1683
@@ -2268,8 +2312,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
2268 } 2312 }
2269 2313
2270 /* Adjust by relative CPU power of the group */ 2314 /* Adjust by relative CPU power of the group */
2271 avg_load = sg_div_cpu_power(group, 2315 avg_load = (avg_load * SCHED_LOAD_SCALE) / group->cpu_power;
2272 avg_load * SCHED_LOAD_SCALE);
2273 2316
2274 if (local_group) { 2317 if (local_group) {
2275 this_load = avg_load; 2318 this_load = avg_load;
@@ -2637,9 +2680,32 @@ void sched_fork(struct task_struct *p, int clone_flags)
2637 set_task_cpu(p, cpu); 2680 set_task_cpu(p, cpu);
2638 2681
2639 /* 2682 /*
2640 * Make sure we do not leak PI boosting priority to the child: 2683 * Make sure we do not leak PI boosting priority to the child.
2641 */ 2684 */
2642 p->prio = current->normal_prio; 2685 p->prio = current->normal_prio;
2686
2687 /*
2688 * Revert to default priority/policy on fork if requested.
2689 */
2690 if (unlikely(p->sched_reset_on_fork)) {
2691 if (p->policy == SCHED_FIFO || p->policy == SCHED_RR)
2692 p->policy = SCHED_NORMAL;
2693
2694 if (p->normal_prio < DEFAULT_PRIO)
2695 p->prio = DEFAULT_PRIO;
2696
2697 if (PRIO_TO_NICE(p->static_prio) < 0) {
2698 p->static_prio = NICE_TO_PRIO(0);
2699 set_load_weight(p);
2700 }
2701
2702 /*
2703 * We don't need the reset flag anymore after the fork. It has
2704 * fulfilled its duty:
2705 */
2706 p->sched_reset_on_fork = 0;
2707 }
2708
2643 if (!rt_prio(p->prio)) 2709 if (!rt_prio(p->prio))
2644 p->sched_class = &fair_sched_class; 2710 p->sched_class = &fair_sched_class;
2645 2711
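
The sched_reset_on_fork handling added to sched_fork() above, together with the __sched_setscheduler() and sched_getscheduler() changes later in this diff, lets a real-time task ask that its children start out with the default policy and priority. Here is a user-space sketch of how the flag would be used (the fallback #define mirrors the value this series introduces in the headers; pid 0 means the calling thread, and SCHED_FIFO needs CAP_SYS_NICE):

/*
 * Request SCHED_FIFO for ourselves while asking the kernel to reset any
 * forked children back to SCHED_NORMAL / default priority.
 */
#include <stdio.h>
#include <sched.h>

#ifndef SCHED_RESET_ON_FORK
#define SCHED_RESET_ON_FORK	0x40000000
#endif

int main(void)
{
	struct sched_param sp = { .sched_priority = 10 };

	if (sched_setscheduler(0, SCHED_FIFO | SCHED_RESET_ON_FORK, &sp)) {
		perror("sched_setscheduler");
		return 1;
	}

	/* With this patch, the getscheduler path ORs the flag back in. */
	printf("policy word: 0x%x\n", sched_getscheduler(0));
	return 0;
}
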
@@ -2796,12 +2862,6 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
2796{ 2862{
2797 struct mm_struct *mm = rq->prev_mm; 2863 struct mm_struct *mm = rq->prev_mm;
2798 long prev_state; 2864 long prev_state;
2799#ifdef CONFIG_SMP
2800 int post_schedule = 0;
2801
2802 if (current->sched_class->needs_post_schedule)
2803 post_schedule = current->sched_class->needs_post_schedule(rq);
2804#endif
2805 2865
2806 rq->prev_mm = NULL; 2866 rq->prev_mm = NULL;
2807 2867
@@ -2820,10 +2880,6 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
2820 finish_arch_switch(prev); 2880 finish_arch_switch(prev);
2821 perf_counter_task_sched_in(current, cpu_of(rq)); 2881 perf_counter_task_sched_in(current, cpu_of(rq));
2822 finish_lock_switch(rq, prev); 2882 finish_lock_switch(rq, prev);
2823#ifdef CONFIG_SMP
2824 if (post_schedule)
2825 current->sched_class->post_schedule(rq);
2826#endif
2827 2883
2828 fire_sched_in_preempt_notifiers(current); 2884 fire_sched_in_preempt_notifiers(current);
2829 if (mm) 2885 if (mm)
@@ -2838,6 +2894,42 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
2838 } 2894 }
2839} 2895}
2840 2896
2897#ifdef CONFIG_SMP
2898
2899/* assumes rq->lock is held */
2900static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
2901{
2902 if (prev->sched_class->pre_schedule)
2903 prev->sched_class->pre_schedule(rq, prev);
2904}
2905
2906/* rq->lock is NOT held, but preemption is disabled */
2907static inline void post_schedule(struct rq *rq)
2908{
2909 if (rq->post_schedule) {
2910 unsigned long flags;
2911
2912 spin_lock_irqsave(&rq->lock, flags);
2913 if (rq->curr->sched_class->post_schedule)
2914 rq->curr->sched_class->post_schedule(rq);
2915 spin_unlock_irqrestore(&rq->lock, flags);
2916
2917 rq->post_schedule = 0;
2918 }
2919}
2920
2921#else
2922
2923static inline void pre_schedule(struct rq *rq, struct task_struct *p)
2924{
2925}
2926
2927static inline void post_schedule(struct rq *rq)
2928{
2929}
2930
2931#endif
2932
2841/** 2933/**
2842 * schedule_tail - first thing a freshly forked thread must call. 2934 * schedule_tail - first thing a freshly forked thread must call.
2843 * @prev: the thread we just switched away from. 2935 * @prev: the thread we just switched away from.
@@ -2848,6 +2940,13 @@ asmlinkage void schedule_tail(struct task_struct *prev)
2848 struct rq *rq = this_rq(); 2940 struct rq *rq = this_rq();
2849 2941
2850 finish_task_switch(rq, prev); 2942 finish_task_switch(rq, prev);
2943
2944 /*
2945 * FIXME: do we need to worry about rq being invalidated by the
2946 * task_switch?
2947 */
2948 post_schedule(rq);
2949
2851#ifdef __ARCH_WANT_UNLOCKED_CTXSW 2950#ifdef __ARCH_WANT_UNLOCKED_CTXSW
2852 /* In this case, finish_task_switch does not reenable preemption */ 2951 /* In this case, finish_task_switch does not reenable preemption */
2853 preempt_enable(); 2952 preempt_enable();
@@ -3379,9 +3478,10 @@ static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
3379{ 3478{
3380 const struct sched_class *class; 3479 const struct sched_class *class;
3381 3480
3382 for (class = sched_class_highest; class; class = class->next) 3481 for_each_class(class) {
3383 if (class->move_one_task(this_rq, this_cpu, busiest, sd, idle)) 3482 if (class->move_one_task(this_rq, this_cpu, busiest, sd, idle))
3384 return 1; 3483 return 1;
3484 }
3385 3485
3386 return 0; 3486 return 0;
3387} 3487}
@@ -3544,7 +3644,7 @@ static inline void update_sd_power_savings_stats(struct sched_group *group,
3544 * capacity but still has some space to pick up some load 3644 * capacity but still has some space to pick up some load
3545 * from other group and save more power 3645 * from other group and save more power
3546 */ 3646 */
3547 if (sgs->sum_nr_running > sgs->group_capacity - 1) 3647 if (sgs->sum_nr_running + 1 > sgs->group_capacity)
3548 return; 3648 return;
3549 3649
3550 if (sgs->sum_nr_running > sds->leader_nr_running || 3650 if (sgs->sum_nr_running > sds->leader_nr_running ||
@@ -3611,6 +3711,77 @@ static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
3611} 3711}
3612#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */ 3712#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
3613 3713
3714unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
3715{
3716 unsigned long weight = cpumask_weight(sched_domain_span(sd));
3717 unsigned long smt_gain = sd->smt_gain;
3718
3719 smt_gain /= weight;
3720
3721 return smt_gain;
3722}
3723
3724unsigned long scale_rt_power(int cpu)
3725{
3726 struct rq *rq = cpu_rq(cpu);
3727 u64 total, available;
3728
3729 sched_avg_update(rq);
3730
3731 total = sched_avg_period() + (rq->clock - rq->age_stamp);
3732 available = total - rq->rt_avg;
3733
3734 if (unlikely((s64)total < SCHED_LOAD_SCALE))
3735 total = SCHED_LOAD_SCALE;
3736
3737 total >>= SCHED_LOAD_SHIFT;
3738
3739 return div_u64(available, total);
3740}
3741
3742static void update_cpu_power(struct sched_domain *sd, int cpu)
3743{
3744 unsigned long weight = cpumask_weight(sched_domain_span(sd));
3745 unsigned long power = SCHED_LOAD_SCALE;
3746 struct sched_group *sdg = sd->groups;
3747
3748 /* here we could scale based on cpufreq */
3749
3750 if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
3751 power *= arch_scale_smt_power(sd, cpu);
3752 power >>= SCHED_LOAD_SHIFT;
3753 }
3754
3755 power *= scale_rt_power(cpu);
3756 power >>= SCHED_LOAD_SHIFT;
3757
3758 if (!power)
3759 power = 1;
3760
3761 sdg->cpu_power = power;
3762}
3763
3764static void update_group_power(struct sched_domain *sd, int cpu)
3765{
3766 struct sched_domain *child = sd->child;
3767 struct sched_group *group, *sdg = sd->groups;
3768 unsigned long power;
3769
3770 if (!child) {
3771 update_cpu_power(sd, cpu);
3772 return;
3773 }
3774
3775 power = 0;
3776
3777 group = child->groups;
3778 do {
3779 power += group->cpu_power;
3780 group = group->next;
3781 } while (group != child->groups);
3782
3783 sdg->cpu_power = power;
3784}
3614 3785
3615/** 3786/**
3616 * update_sg_lb_stats - Update sched_group's statistics for load balancing. 3787 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
@@ -3624,7 +3795,8 @@ static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
3624 * @balance: Should we balance. 3795 * @balance: Should we balance.
3625 * @sgs: variable to hold the statistics for this group. 3796 * @sgs: variable to hold the statistics for this group.
3626 */ 3797 */
3627static inline void update_sg_lb_stats(struct sched_group *group, int this_cpu, 3798static inline void update_sg_lb_stats(struct sched_domain *sd,
3799 struct sched_group *group, int this_cpu,
3628 enum cpu_idle_type idle, int load_idx, int *sd_idle, 3800 enum cpu_idle_type idle, int load_idx, int *sd_idle,
3629 int local_group, const struct cpumask *cpus, 3801 int local_group, const struct cpumask *cpus,
3630 int *balance, struct sg_lb_stats *sgs) 3802 int *balance, struct sg_lb_stats *sgs)
@@ -3635,8 +3807,11 @@ static inline void update_sg_lb_stats(struct sched_group *group, int this_cpu,
3635 unsigned long sum_avg_load_per_task; 3807 unsigned long sum_avg_load_per_task;
3636 unsigned long avg_load_per_task; 3808 unsigned long avg_load_per_task;
3637 3809
3638 if (local_group) 3810 if (local_group) {
3639 balance_cpu = group_first_cpu(group); 3811 balance_cpu = group_first_cpu(group);
3812 if (balance_cpu == this_cpu)
3813 update_group_power(sd, this_cpu);
3814 }
3640 3815
3641 /* Tally up the load of all CPUs in the group */ 3816 /* Tally up the load of all CPUs in the group */
3642 sum_avg_load_per_task = avg_load_per_task = 0; 3817 sum_avg_load_per_task = avg_load_per_task = 0;
@@ -3685,8 +3860,7 @@ static inline void update_sg_lb_stats(struct sched_group *group, int this_cpu,
3685 } 3860 }
3686 3861
3687 /* Adjust by relative CPU power of the group */ 3862 /* Adjust by relative CPU power of the group */
3688 sgs->avg_load = sg_div_cpu_power(group, 3863 sgs->avg_load = (sgs->group_load * SCHED_LOAD_SCALE) / group->cpu_power;
3689 sgs->group_load * SCHED_LOAD_SCALE);
3690 3864
3691 3865
3692 /* 3866 /*
@@ -3698,14 +3872,14 @@ static inline void update_sg_lb_stats(struct sched_group *group, int this_cpu,
3698 * normalized nr_running number somewhere that negates 3872 * normalized nr_running number somewhere that negates
3699 * the hierarchy? 3873 * the hierarchy?
3700 */ 3874 */
3701 avg_load_per_task = sg_div_cpu_power(group, 3875 avg_load_per_task = (sum_avg_load_per_task * SCHED_LOAD_SCALE) /
3702 sum_avg_load_per_task * SCHED_LOAD_SCALE); 3876 group->cpu_power;
3703 3877
3704 if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task) 3878 if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task)
3705 sgs->group_imb = 1; 3879 sgs->group_imb = 1;
3706 3880
3707 sgs->group_capacity = group->__cpu_power / SCHED_LOAD_SCALE; 3881 sgs->group_capacity =
3708 3882 DIV_ROUND_CLOSEST(group->cpu_power, SCHED_LOAD_SCALE);
3709} 3883}
3710 3884
3711/** 3885/**
@@ -3723,9 +3897,13 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
3723 const struct cpumask *cpus, int *balance, 3897 const struct cpumask *cpus, int *balance,
3724 struct sd_lb_stats *sds) 3898 struct sd_lb_stats *sds)
3725{ 3899{
3900 struct sched_domain *child = sd->child;
3726 struct sched_group *group = sd->groups; 3901 struct sched_group *group = sd->groups;
3727 struct sg_lb_stats sgs; 3902 struct sg_lb_stats sgs;
3728 int load_idx; 3903 int load_idx, prefer_sibling = 0;
3904
3905 if (child && child->flags & SD_PREFER_SIBLING)
3906 prefer_sibling = 1;
3729 3907
3730 init_sd_power_savings_stats(sd, sds, idle); 3908 init_sd_power_savings_stats(sd, sds, idle);
3731 load_idx = get_sd_load_idx(sd, idle); 3909 load_idx = get_sd_load_idx(sd, idle);
@@ -3736,14 +3914,22 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
3736 local_group = cpumask_test_cpu(this_cpu, 3914 local_group = cpumask_test_cpu(this_cpu,
3737 sched_group_cpus(group)); 3915 sched_group_cpus(group));
3738 memset(&sgs, 0, sizeof(sgs)); 3916 memset(&sgs, 0, sizeof(sgs));
3739 update_sg_lb_stats(group, this_cpu, idle, load_idx, sd_idle, 3917 update_sg_lb_stats(sd, group, this_cpu, idle, load_idx, sd_idle,
3740 local_group, cpus, balance, &sgs); 3918 local_group, cpus, balance, &sgs);
3741 3919
3742 if (local_group && balance && !(*balance)) 3920 if (local_group && balance && !(*balance))
3743 return; 3921 return;
3744 3922
3745 sds->total_load += sgs.group_load; 3923 sds->total_load += sgs.group_load;
3746 sds->total_pwr += group->__cpu_power; 3924 sds->total_pwr += group->cpu_power;
3925
3926 /*
3927 * In case the child domain prefers tasks go to siblings
3928 * first, lower the group capacity to one so that we'll try
3929 * and move all the excess tasks away.
3930 */
3931 if (prefer_sibling)
3932 sgs.group_capacity = min(sgs.group_capacity, 1UL);
3747 3933
3748 if (local_group) { 3934 if (local_group) {
3749 sds->this_load = sgs.avg_load; 3935 sds->this_load = sgs.avg_load;
@@ -3763,7 +3949,6 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
3763 update_sd_power_savings_stats(group, sds, local_group, &sgs); 3949 update_sd_power_savings_stats(group, sds, local_group, &sgs);
3764 group = group->next; 3950 group = group->next;
3765 } while (group != sd->groups); 3951 } while (group != sd->groups);
3766
3767} 3952}
3768 3953
3769/** 3954/**
@@ -3801,28 +3986,28 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
3801 * moving them. 3986 * moving them.
3802 */ 3987 */
3803 3988
3804 pwr_now += sds->busiest->__cpu_power * 3989 pwr_now += sds->busiest->cpu_power *
3805 min(sds->busiest_load_per_task, sds->max_load); 3990 min(sds->busiest_load_per_task, sds->max_load);
3806 pwr_now += sds->this->__cpu_power * 3991 pwr_now += sds->this->cpu_power *
3807 min(sds->this_load_per_task, sds->this_load); 3992 min(sds->this_load_per_task, sds->this_load);
3808 pwr_now /= SCHED_LOAD_SCALE; 3993 pwr_now /= SCHED_LOAD_SCALE;
3809 3994
3810 /* Amount of load we'd subtract */ 3995 /* Amount of load we'd subtract */
3811 tmp = sg_div_cpu_power(sds->busiest, 3996 tmp = (sds->busiest_load_per_task * SCHED_LOAD_SCALE) /
3812 sds->busiest_load_per_task * SCHED_LOAD_SCALE); 3997 sds->busiest->cpu_power;
3813 if (sds->max_load > tmp) 3998 if (sds->max_load > tmp)
3814 pwr_move += sds->busiest->__cpu_power * 3999 pwr_move += sds->busiest->cpu_power *
3815 min(sds->busiest_load_per_task, sds->max_load - tmp); 4000 min(sds->busiest_load_per_task, sds->max_load - tmp);
3816 4001
3817 /* Amount of load we'd add */ 4002 /* Amount of load we'd add */
3818 if (sds->max_load * sds->busiest->__cpu_power < 4003 if (sds->max_load * sds->busiest->cpu_power <
3819 sds->busiest_load_per_task * SCHED_LOAD_SCALE) 4004 sds->busiest_load_per_task * SCHED_LOAD_SCALE)
3820 tmp = sg_div_cpu_power(sds->this, 4005 tmp = (sds->max_load * sds->busiest->cpu_power) /
3821 sds->max_load * sds->busiest->__cpu_power); 4006 sds->this->cpu_power;
3822 else 4007 else
3823 tmp = sg_div_cpu_power(sds->this, 4008 tmp = (sds->busiest_load_per_task * SCHED_LOAD_SCALE) /
3824 sds->busiest_load_per_task * SCHED_LOAD_SCALE); 4009 sds->this->cpu_power;
3825 pwr_move += sds->this->__cpu_power * 4010 pwr_move += sds->this->cpu_power *
3826 min(sds->this_load_per_task, sds->this_load + tmp); 4011 min(sds->this_load_per_task, sds->this_load + tmp);
3827 pwr_move /= SCHED_LOAD_SCALE; 4012 pwr_move /= SCHED_LOAD_SCALE;
3828 4013
@@ -3857,8 +4042,8 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
3857 sds->max_load - sds->busiest_load_per_task); 4042 sds->max_load - sds->busiest_load_per_task);
3858 4043
3859 /* How much load to actually move to equalise the imbalance */ 4044 /* How much load to actually move to equalise the imbalance */
3860 *imbalance = min(max_pull * sds->busiest->__cpu_power, 4045 *imbalance = min(max_pull * sds->busiest->cpu_power,
3861 (sds->avg_load - sds->this_load) * sds->this->__cpu_power) 4046 (sds->avg_load - sds->this_load) * sds->this->cpu_power)
3862 / SCHED_LOAD_SCALE; 4047 / SCHED_LOAD_SCALE;
3863 4048
3864 /* 4049 /*
@@ -3976,6 +4161,26 @@ ret:
3976 return NULL; 4161 return NULL;
3977} 4162}
3978 4163
4164static struct sched_group *group_of(int cpu)
4165{
4166 struct sched_domain *sd = rcu_dereference(cpu_rq(cpu)->sd);
4167
4168 if (!sd)
4169 return NULL;
4170
4171 return sd->groups;
4172}
4173
4174static unsigned long power_of(int cpu)
4175{
4176 struct sched_group *group = group_of(cpu);
4177
4178 if (!group)
4179 return SCHED_LOAD_SCALE;
4180
4181 return group->cpu_power;
4182}
4183
3979/* 4184/*
3980 * find_busiest_queue - find the busiest runqueue among the cpus in group. 4185 * find_busiest_queue - find the busiest runqueue among the cpus in group.
3981 */ 4186 */
@@ -3988,15 +4193,18 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
3988 int i; 4193 int i;
3989 4194
3990 for_each_cpu(i, sched_group_cpus(group)) { 4195 for_each_cpu(i, sched_group_cpus(group)) {
4196 unsigned long power = power_of(i);
4197 unsigned long capacity = DIV_ROUND_CLOSEST(power, SCHED_LOAD_SCALE);
3991 unsigned long wl; 4198 unsigned long wl;
3992 4199
3993 if (!cpumask_test_cpu(i, cpus)) 4200 if (!cpumask_test_cpu(i, cpus))
3994 continue; 4201 continue;
3995 4202
3996 rq = cpu_rq(i); 4203 rq = cpu_rq(i);
3997 wl = weighted_cpuload(i); 4204 wl = weighted_cpuload(i) * SCHED_LOAD_SCALE;
4205 wl /= power;
3998 4206
3999 if (rq->nr_running == 1 && wl > imbalance) 4207 if (capacity && rq->nr_running == 1 && wl > imbalance)
4000 continue; 4208 continue;
4001 4209
4002 if (wl > max_load) { 4210 if (wl > max_load) {
@@ -5325,7 +5533,7 @@ need_resched:
5325 preempt_disable(); 5533 preempt_disable();
5326 cpu = smp_processor_id(); 5534 cpu = smp_processor_id();
5327 rq = cpu_rq(cpu); 5535 rq = cpu_rq(cpu);
5328 rcu_qsctr_inc(cpu); 5536 rcu_sched_qs(cpu);
5329 prev = rq->curr; 5537 prev = rq->curr;
5330 switch_count = &prev->nivcsw; 5538 switch_count = &prev->nivcsw;
5331 5539
@@ -5349,10 +5557,7 @@ need_resched_nonpreemptible:
5349 switch_count = &prev->nvcsw; 5557 switch_count = &prev->nvcsw;
5350 } 5558 }
5351 5559
5352#ifdef CONFIG_SMP 5560 pre_schedule(rq, prev);
5353 if (prev->sched_class->pre_schedule)
5354 prev->sched_class->pre_schedule(rq, prev);
5355#endif
5356 5561
5357 if (unlikely(!rq->nr_running)) 5562 if (unlikely(!rq->nr_running))
5358 idle_balance(cpu, rq); 5563 idle_balance(cpu, rq);
@@ -5378,6 +5583,8 @@ need_resched_nonpreemptible:
5378 } else 5583 } else
5379 spin_unlock_irq(&rq->lock); 5584 spin_unlock_irq(&rq->lock);
5380 5585
5586 post_schedule(rq);
5587
5381 if (unlikely(reacquire_kernel_lock(current) < 0)) 5588 if (unlikely(reacquire_kernel_lock(current) < 0))
5382 goto need_resched_nonpreemptible; 5589 goto need_resched_nonpreemptible;
5383 5590
@@ -6123,17 +6330,25 @@ static int __sched_setscheduler(struct task_struct *p, int policy,
6123 unsigned long flags; 6330 unsigned long flags;
6124 const struct sched_class *prev_class = p->sched_class; 6331 const struct sched_class *prev_class = p->sched_class;
6125 struct rq *rq; 6332 struct rq *rq;
6333 int reset_on_fork;
6126 6334
6127 /* may grab non-irq protected spin_locks */ 6335 /* may grab non-irq protected spin_locks */
6128 BUG_ON(in_interrupt()); 6336 BUG_ON(in_interrupt());
6129recheck: 6337recheck:
6130 /* double check policy once rq lock held */ 6338 /* double check policy once rq lock held */
6131 if (policy < 0) 6339 if (policy < 0) {
6340 reset_on_fork = p->sched_reset_on_fork;
6132 policy = oldpolicy = p->policy; 6341 policy = oldpolicy = p->policy;
6133 else if (policy != SCHED_FIFO && policy != SCHED_RR && 6342 } else {
6134 policy != SCHED_NORMAL && policy != SCHED_BATCH && 6343 reset_on_fork = !!(policy & SCHED_RESET_ON_FORK);
6135 policy != SCHED_IDLE) 6344 policy &= ~SCHED_RESET_ON_FORK;
6136 return -EINVAL; 6345
6346 if (policy != SCHED_FIFO && policy != SCHED_RR &&
6347 policy != SCHED_NORMAL && policy != SCHED_BATCH &&
6348 policy != SCHED_IDLE)
6349 return -EINVAL;
6350 }
6351
6137 /* 6352 /*
6138 * Valid priorities for SCHED_FIFO and SCHED_RR are 6353 * Valid priorities for SCHED_FIFO and SCHED_RR are
6139 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL, 6354 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
@@ -6177,6 +6392,10 @@ recheck:
6177 /* can't change other user's priorities */ 6392 /* can't change other user's priorities */
6178 if (!check_same_owner(p)) 6393 if (!check_same_owner(p))
6179 return -EPERM; 6394 return -EPERM;
6395
6396 /* Normal users shall not reset the sched_reset_on_fork flag */
6397 if (p->sched_reset_on_fork && !reset_on_fork)
6398 return -EPERM;
6180 } 6399 }
6181 6400
6182 if (user) { 6401 if (user) {
@@ -6220,6 +6439,8 @@ recheck:
6220 if (running) 6439 if (running)
6221 p->sched_class->put_prev_task(rq, p); 6440 p->sched_class->put_prev_task(rq, p);
6222 6441
6442 p->sched_reset_on_fork = reset_on_fork;
6443
6223 oldprio = p->prio; 6444 oldprio = p->prio;
6224 __setscheduler(rq, p, policy, param->sched_priority); 6445 __setscheduler(rq, p, policy, param->sched_priority);
6225 6446
@@ -6336,14 +6557,15 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
6336 if (p) { 6557 if (p) {
6337 retval = security_task_getscheduler(p); 6558 retval = security_task_getscheduler(p);
6338 if (!retval) 6559 if (!retval)
6339 retval = p->policy; 6560 retval = p->policy
6561 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
6340 } 6562 }
6341 read_unlock(&tasklist_lock); 6563 read_unlock(&tasklist_lock);
6342 return retval; 6564 return retval;
6343} 6565}
6344 6566
6345/** 6567/**
6346 * sys_sched_getscheduler - get the RT priority of a thread 6568 * sys_sched_getparam - get the RT priority of a thread
6347 * @pid: the pid in question. 6569 * @pid: the pid in question.
6348 * @param: structure containing the RT priority. 6570 * @param: structure containing the RT priority.
6349 */ 6571 */
@@ -6571,19 +6793,9 @@ static inline int should_resched(void)
6571 6793
6572static void __cond_resched(void) 6794static void __cond_resched(void)
6573{ 6795{
6574#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP 6796 add_preempt_count(PREEMPT_ACTIVE);
6575 __might_sleep(__FILE__, __LINE__); 6797 schedule();
6576#endif 6798 sub_preempt_count(PREEMPT_ACTIVE);
6577 /*
6578 * The BKS might be reacquired before we have dropped
6579 * PREEMPT_ACTIVE, which could trigger a second
6580 * cond_resched() call.
6581 */
6582 do {
6583 add_preempt_count(PREEMPT_ACTIVE);
6584 schedule();
6585 sub_preempt_count(PREEMPT_ACTIVE);
6586 } while (need_resched());
6587} 6799}
6588 6800
6589int __sched _cond_resched(void) 6801int __sched _cond_resched(void)
@@ -6597,18 +6809,20 @@ int __sched _cond_resched(void)
6597EXPORT_SYMBOL(_cond_resched); 6809EXPORT_SYMBOL(_cond_resched);
6598 6810
6599/* 6811/*
6600 * cond_resched_lock() - if a reschedule is pending, drop the given lock, 6812 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
6601 * call schedule, and on return reacquire the lock. 6813 * call schedule, and on return reacquire the lock.
6602 * 6814 *
6603 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level 6815 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
6604 * operations here to prevent schedule() from being called twice (once via 6816 * operations here to prevent schedule() from being called twice (once via
6605 * spin_unlock(), once by hand). 6817 * spin_unlock(), once by hand).
6606 */ 6818 */
6607int cond_resched_lock(spinlock_t *lock) 6819int __cond_resched_lock(spinlock_t *lock)
6608{ 6820{
6609 int resched = should_resched(); 6821 int resched = should_resched();
6610 int ret = 0; 6822 int ret = 0;
6611 6823
6824 lockdep_assert_held(lock);
6825
6612 if (spin_needbreak(lock) || resched) { 6826 if (spin_needbreak(lock) || resched) {
6613 spin_unlock(lock); 6827 spin_unlock(lock);
6614 if (resched) 6828 if (resched)
@@ -6620,9 +6834,9 @@ int cond_resched_lock(spinlock_t *lock)
6620 } 6834 }
6621 return ret; 6835 return ret;
6622} 6836}
6623EXPORT_SYMBOL(cond_resched_lock); 6837EXPORT_SYMBOL(__cond_resched_lock);
6624 6838
6625int __sched cond_resched_softirq(void) 6839int __sched __cond_resched_softirq(void)
6626{ 6840{
6627 BUG_ON(!in_softirq()); 6841 BUG_ON(!in_softirq());
6628 6842
@@ -6634,7 +6848,7 @@ int __sched cond_resched_softirq(void)
6634 } 6848 }
6635 return 0; 6849 return 0;
6636} 6850}
6637EXPORT_SYMBOL(cond_resched_softirq); 6851EXPORT_SYMBOL(__cond_resched_softirq);
6638 6852
6639/** 6853/**
6640 * yield - yield the current processor to other threads. 6854 * yield - yield the current processor to other threads.
@@ -6658,11 +6872,13 @@ EXPORT_SYMBOL(yield);
6658 */ 6872 */
6659void __sched io_schedule(void) 6873void __sched io_schedule(void)
6660{ 6874{
6661 struct rq *rq = &__raw_get_cpu_var(runqueues); 6875 struct rq *rq = raw_rq();
6662 6876
6663 delayacct_blkio_start(); 6877 delayacct_blkio_start();
6664 atomic_inc(&rq->nr_iowait); 6878 atomic_inc(&rq->nr_iowait);
6879 current->in_iowait = 1;
6665 schedule(); 6880 schedule();
6881 current->in_iowait = 0;
6666 atomic_dec(&rq->nr_iowait); 6882 atomic_dec(&rq->nr_iowait);
6667 delayacct_blkio_end(); 6883 delayacct_blkio_end();
6668} 6884}
@@ -6670,12 +6886,14 @@ EXPORT_SYMBOL(io_schedule);
6670 6886
6671long __sched io_schedule_timeout(long timeout) 6887long __sched io_schedule_timeout(long timeout)
6672{ 6888{
6673 struct rq *rq = &__raw_get_cpu_var(runqueues); 6889 struct rq *rq = raw_rq();
6674 long ret; 6890 long ret;
6675 6891
6676 delayacct_blkio_start(); 6892 delayacct_blkio_start();
6677 atomic_inc(&rq->nr_iowait); 6893 atomic_inc(&rq->nr_iowait);
6894 current->in_iowait = 1;
6678 ret = schedule_timeout(timeout); 6895 ret = schedule_timeout(timeout);
6896 current->in_iowait = 0;
6679 atomic_dec(&rq->nr_iowait); 6897 atomic_dec(&rq->nr_iowait);
6680 delayacct_blkio_end(); 6898 delayacct_blkio_end();
6681 return ret; 6899 return ret;
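
Both io_schedule() and io_schedule_timeout() now bracket the sleep with current->in_iowait, so the sleeper accounting added to sched_fair.c and sched_debug.c further down can tell I/O waits apart from ordinary sleeps. A small user-space sketch of the same bracketing idea (struct and function names here are invented for illustration, not kernel API):

#include <stdio.h>
#include <time.h>

struct task_stats {
	int    in_iowait;	/* analogue of current->in_iowait */
	long   iowait_count;	/* analogue of se.iowait_count    */
	double iowait_sum_ms;	/* analogue of se.iowait_sum      */
};

/* Bracket a blocking operation so the sleep can be accounted as iowait. */
static void io_wait(struct task_stats *ts)
{
	struct timespec delay = { 0, 10 * 1000 * 1000 };	/* 10 ms "I/O" */
	struct timespec a, b;

	clock_gettime(CLOCK_MONOTONIC, &a);
	ts->in_iowait = 1;
	nanosleep(&delay, NULL);	/* stands in for schedule() while blocked */
	ts->in_iowait = 0;
	clock_gettime(CLOCK_MONOTONIC, &b);

	ts->iowait_count++;
	ts->iowait_sum_ms += (b.tv_sec - a.tv_sec) * 1e3 +
			     (b.tv_nsec - a.tv_nsec) / 1e6;
}

int main(void)
{
	struct task_stats ts = { 0 };

	io_wait(&ts);
	printf("iowait: %ld sleeps, %.2f ms total\n",
	       ts.iowait_count, ts.iowait_sum_ms);
	return 0;
}
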
@@ -6992,8 +7210,12 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
6992 7210
6993 if (migrate_task(p, cpumask_any_and(cpu_online_mask, new_mask), &req)) { 7211 if (migrate_task(p, cpumask_any_and(cpu_online_mask, new_mask), &req)) {
6994 /* Need help from migration thread: drop lock and wait. */ 7212 /* Need help from migration thread: drop lock and wait. */
7213 struct task_struct *mt = rq->migration_thread;
7214
7215 get_task_struct(mt);
6995 task_rq_unlock(rq, &flags); 7216 task_rq_unlock(rq, &flags);
6996 wake_up_process(rq->migration_thread); 7217 wake_up_process(rq->migration_thread);
7218 put_task_struct(mt);
6997 wait_for_completion(&req.done); 7219 wait_for_completion(&req.done);
6998 tlb_migrate_finish(p->mm); 7220 tlb_migrate_finish(p->mm);
6999 return 0; 7221 return 0;
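
The set_cpus_allowed_ptr() hunk takes a reference on the migration thread before dropping the runqueue lock, so the kthread cannot go away between task_rq_unlock() and wake_up_process(). The general shape of that idiom, as a hedged user-space sketch (struct obj, get_ref() and put_ref() are illustrative stand-ins for get_task_struct()/put_task_struct()):

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct obj {
	atomic_int refs;
};

static struct obj *get_ref(struct obj *o)
{
	atomic_fetch_add(&o->refs, 1);
	return o;
}

static void put_ref(struct obj *o)
{
	if (atomic_fetch_sub(&o->refs, 1) == 1)
		free(o);		/* last reference dropped */
}

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj *shared;	/* may be replaced or torn down once 'lock' is dropped */

static void poke_shared(void)
{
	struct obj *o;

	pthread_mutex_lock(&lock);
	o = get_ref(shared);		/* pin it while still under the lock */
	pthread_mutex_unlock(&lock);	/* 'shared' itself may now change... */
	/* ...but 'o' stays valid until we drop our own reference. */
	put_ref(o);
}

int main(void)
{
	shared = calloc(1, sizeof(*shared));
	atomic_init(&shared->refs, 1);
	poke_shared();
	put_ref(shared);
	return 0;
}
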
@@ -7051,6 +7273,11 @@ fail:
7051 return ret; 7273 return ret;
7052} 7274}
7053 7275
7276#define RCU_MIGRATION_IDLE 0
7277#define RCU_MIGRATION_NEED_QS 1
7278#define RCU_MIGRATION_GOT_QS 2
7279#define RCU_MIGRATION_MUST_SYNC 3
7280
7054/* 7281/*
7055 * migration_thread - this is a highprio system thread that performs 7282 * migration_thread - this is a highprio system thread that performs
7056 * thread migration by bumping thread off CPU then 'pushing' onto 7283 * thread migration by bumping thread off CPU then 'pushing' onto
@@ -7058,6 +7285,7 @@ fail:
7058 */ 7285 */
7059static int migration_thread(void *data) 7286static int migration_thread(void *data)
7060{ 7287{
7288 int badcpu;
7061 int cpu = (long)data; 7289 int cpu = (long)data;
7062 struct rq *rq; 7290 struct rq *rq;
7063 7291
@@ -7092,8 +7320,17 @@ static int migration_thread(void *data)
7092 req = list_entry(head->next, struct migration_req, list); 7320 req = list_entry(head->next, struct migration_req, list);
7093 list_del_init(head->next); 7321 list_del_init(head->next);
7094 7322
7095 spin_unlock(&rq->lock); 7323 if (req->task != NULL) {
7096 __migrate_task(req->task, cpu, req->dest_cpu); 7324 spin_unlock(&rq->lock);
7325 __migrate_task(req->task, cpu, req->dest_cpu);
7326 } else if (likely(cpu == (badcpu = smp_processor_id()))) {
7327 req->dest_cpu = RCU_MIGRATION_GOT_QS;
7328 spin_unlock(&rq->lock);
7329 } else {
7330 req->dest_cpu = RCU_MIGRATION_MUST_SYNC;
7331 spin_unlock(&rq->lock);
7332 WARN_ONCE(1, "migration_thread() on CPU %d, expected %d\n", badcpu, cpu);
7333 }
7097 local_irq_enable(); 7334 local_irq_enable();
7098 7335
7099 complete(&req->done); 7336 complete(&req->done);
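
A migration_req with a NULL ->task is not a real migration at all: it is how synchronize_sched_expedited(), added at the bottom of this file, asks each CPU's migration thread to report a quiescent state, using ->dest_cpu to carry one of the RCU_MIGRATION_* values defined above. A compact sketch of that little state machine (the enum and function names here are illustrative, not the kernel's):

#include <stdio.h>

enum qs_state {
	QS_IDLE,	/* request not in flight                    */
	QS_NEED,	/* requester asked for a quiescent state    */
	QS_GOT,		/* migration thread ran on the right CPU    */
	QS_MUST_SYNC,	/* ran on the wrong CPU: fall back          */
};

/* What the migration thread does with a task-less request. */
static enum qs_state handle_probe(int req_cpu, int running_cpu)
{
	if (req_cpu == running_cpu)
		return QS_GOT;		/* a context switch on req_cpu is a quiescent state */
	return QS_MUST_SYNC;		/* caller must do a full synchronize_sched() */
}

int main(void)
{
	printf("same cpu  -> state %d (GOT)\n", handle_probe(3, 3));
	printf("wrong cpu -> state %d (MUST_SYNC)\n", handle_probe(3, 5));
	return 0;
}
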
@@ -7625,7 +7862,7 @@ static int __init migration_init(void)
7625 migration_call(&migration_notifier, CPU_ONLINE, cpu); 7862 migration_call(&migration_notifier, CPU_ONLINE, cpu);
7626 register_cpu_notifier(&migration_notifier); 7863 register_cpu_notifier(&migration_notifier);
7627 7864
7628 return err; 7865 return 0;
7629} 7866}
7630early_initcall(migration_init); 7867early_initcall(migration_init);
7631#endif 7868#endif
@@ -7672,7 +7909,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
7672 break; 7909 break;
7673 } 7910 }
7674 7911
7675 if (!group->__cpu_power) { 7912 if (!group->cpu_power) {
7676 printk(KERN_CONT "\n"); 7913 printk(KERN_CONT "\n");
7677 printk(KERN_ERR "ERROR: domain->cpu_power not " 7914 printk(KERN_ERR "ERROR: domain->cpu_power not "
7678 "set\n"); 7915 "set\n");
@@ -7696,9 +7933,9 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
7696 cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group)); 7933 cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
7697 7934
7698 printk(KERN_CONT " %s", str); 7935 printk(KERN_CONT " %s", str);
7699 if (group->__cpu_power != SCHED_LOAD_SCALE) { 7936 if (group->cpu_power != SCHED_LOAD_SCALE) {
7700 printk(KERN_CONT " (__cpu_power = %d)", 7937 printk(KERN_CONT " (cpu_power = %d)",
7701 group->__cpu_power); 7938 group->cpu_power);
7702 } 7939 }
7703 7940
7704 group = group->next; 7941 group = group->next;
@@ -7841,7 +8078,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
7841 rq->rd = rd; 8078 rq->rd = rd;
7842 8079
7843 cpumask_set_cpu(rq->cpu, rd->span); 8080 cpumask_set_cpu(rq->cpu, rd->span);
7844 if (cpumask_test_cpu(rq->cpu, cpu_online_mask)) 8081 if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
7845 set_rq_online(rq); 8082 set_rq_online(rq);
7846 8083
7847 spin_unlock_irqrestore(&rq->lock, flags); 8084 spin_unlock_irqrestore(&rq->lock, flags);
@@ -7983,7 +8220,7 @@ init_sched_build_groups(const struct cpumask *span,
7983 continue; 8220 continue;
7984 8221
7985 cpumask_clear(sched_group_cpus(sg)); 8222 cpumask_clear(sched_group_cpus(sg));
7986 sg->__cpu_power = 0; 8223 sg->cpu_power = 0;
7987 8224
7988 for_each_cpu(j, span) { 8225 for_each_cpu(j, span) {
7989 if (group_fn(j, cpu_map, NULL, tmpmask) != group) 8226 if (group_fn(j, cpu_map, NULL, tmpmask) != group)
@@ -8091,6 +8328,39 @@ struct static_sched_domain {
8091 DECLARE_BITMAP(span, CONFIG_NR_CPUS); 8328 DECLARE_BITMAP(span, CONFIG_NR_CPUS);
8092}; 8329};
8093 8330
8331struct s_data {
8332#ifdef CONFIG_NUMA
8333 int sd_allnodes;
8334 cpumask_var_t domainspan;
8335 cpumask_var_t covered;
8336 cpumask_var_t notcovered;
8337#endif
8338 cpumask_var_t nodemask;
8339 cpumask_var_t this_sibling_map;
8340 cpumask_var_t this_core_map;
8341 cpumask_var_t send_covered;
8342 cpumask_var_t tmpmask;
8343 struct sched_group **sched_group_nodes;
8344 struct root_domain *rd;
8345};
8346
8347enum s_alloc {
8348 sa_sched_groups = 0,
8349 sa_rootdomain,
8350 sa_tmpmask,
8351 sa_send_covered,
8352 sa_this_core_map,
8353 sa_this_sibling_map,
8354 sa_nodemask,
8355 sa_sched_group_nodes,
8356#ifdef CONFIG_NUMA
8357 sa_notcovered,
8358 sa_covered,
8359 sa_domainspan,
8360#endif
8361 sa_none,
8362};
8363
8094/* 8364/*
8095 * SMT sched-domains: 8365 * SMT sched-domains:
8096 */ 8366 */
@@ -8208,11 +8478,76 @@ static void init_numa_sched_groups_power(struct sched_group *group_head)
8208 continue; 8478 continue;
8209 } 8479 }
8210 8480
8211 sg_inc_cpu_power(sg, sd->groups->__cpu_power); 8481 sg->cpu_power += sd->groups->cpu_power;
8212 } 8482 }
8213 sg = sg->next; 8483 sg = sg->next;
8214 } while (sg != group_head); 8484 } while (sg != group_head);
8215} 8485}
8486
8487static int build_numa_sched_groups(struct s_data *d,
8488 const struct cpumask *cpu_map, int num)
8489{
8490 struct sched_domain *sd;
8491 struct sched_group *sg, *prev;
8492 int n, j;
8493
8494 cpumask_clear(d->covered);
8495 cpumask_and(d->nodemask, cpumask_of_node(num), cpu_map);
8496 if (cpumask_empty(d->nodemask)) {
8497 d->sched_group_nodes[num] = NULL;
8498 goto out;
8499 }
8500
8501 sched_domain_node_span(num, d->domainspan);
8502 cpumask_and(d->domainspan, d->domainspan, cpu_map);
8503
8504 sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
8505 GFP_KERNEL, num);
8506 if (!sg) {
8507 printk(KERN_WARNING "Can not alloc domain group for node %d\n",
8508 num);
8509 return -ENOMEM;
8510 }
8511 d->sched_group_nodes[num] = sg;
8512
8513 for_each_cpu(j, d->nodemask) {
8514 sd = &per_cpu(node_domains, j).sd;
8515 sd->groups = sg;
8516 }
8517
8518 sg->cpu_power = 0;
8519 cpumask_copy(sched_group_cpus(sg), d->nodemask);
8520 sg->next = sg;
8521 cpumask_or(d->covered, d->covered, d->nodemask);
8522
8523 prev = sg;
8524 for (j = 0; j < nr_node_ids; j++) {
8525 n = (num + j) % nr_node_ids;
8526 cpumask_complement(d->notcovered, d->covered);
8527 cpumask_and(d->tmpmask, d->notcovered, cpu_map);
8528 cpumask_and(d->tmpmask, d->tmpmask, d->domainspan);
8529 if (cpumask_empty(d->tmpmask))
8530 break;
8531 cpumask_and(d->tmpmask, d->tmpmask, cpumask_of_node(n));
8532 if (cpumask_empty(d->tmpmask))
8533 continue;
8534 sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
8535 GFP_KERNEL, num);
8536 if (!sg) {
8537 printk(KERN_WARNING
8538 "Can not alloc domain group for node %d\n", j);
8539 return -ENOMEM;
8540 }
8541 sg->cpu_power = 0;
8542 cpumask_copy(sched_group_cpus(sg), d->tmpmask);
8543 sg->next = prev->next;
8544 cpumask_or(d->covered, d->covered, d->tmpmask);
8545 prev->next = sg;
8546 prev = sg;
8547 }
8548out:
8549 return 0;
8550}
8216#endif /* CONFIG_NUMA */ 8551#endif /* CONFIG_NUMA */
8217 8552
8218#ifdef CONFIG_NUMA 8553#ifdef CONFIG_NUMA
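
build_numa_sched_groups() above stitches per-node groups into a circular singly-linked list: the first group points at itself, and each later group is spliced in after 'prev'. The do/while walkers elsewhere in this file (for example init_numa_sched_groups_power()) depend on that ring shape. A stand-alone sketch of the same splice (struct group here is illustrative, not struct sched_group):

#include <stdio.h>
#include <stdlib.h>

struct group {
	int id;
	unsigned long cpu_power;
	struct group *next;
};

static struct group *new_group(int id)
{
	struct group *g = calloc(1, sizeof(*g));

	g->id = id;
	g->next = g;		/* a lone group is a ring of one */
	return g;
}

/* Splice 'g' into the ring right after 'prev', as build_numa_sched_groups() does. */
static void splice_after(struct group *prev, struct group *g)
{
	g->next = prev->next;
	prev->next = g;
}

int main(void)
{
	struct group *head = new_group(0), *prev = head, *g;
	int i;

	for (i = 1; i < 4; i++) {
		g = new_group(i);
		splice_after(prev, g);
		prev = g;
	}

	g = head;
	do {			/* same walk shape as "do { ... } while (sg != group_head)" */
		printf("group %d\n", g->id);
		g = g->next;
	} while (g != head);
	return 0;
}
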
@@ -8266,15 +8601,13 @@ static void free_sched_groups(const struct cpumask *cpu_map,
8266 * there are asymmetries in the topology. If there are asymmetries, group 8601 * there are asymmetries in the topology. If there are asymmetries, group
8267 * having more cpu_power will pickup more load compared to the group having 8602 * having more cpu_power will pickup more load compared to the group having
8268 * less cpu_power. 8603 * less cpu_power.
8269 *
8270 * cpu_power will be a multiple of SCHED_LOAD_SCALE. This multiple represents
8271 * the maximum number of tasks a group can handle in the presence of other idle
8272 * or lightly loaded groups in the same sched domain.
8273 */ 8604 */
8274static void init_sched_groups_power(int cpu, struct sched_domain *sd) 8605static void init_sched_groups_power(int cpu, struct sched_domain *sd)
8275{ 8606{
8276 struct sched_domain *child; 8607 struct sched_domain *child;
8277 struct sched_group *group; 8608 struct sched_group *group;
8609 long power;
8610 int weight;
8278 8611
8279 WARN_ON(!sd || !sd->groups); 8612 WARN_ON(!sd || !sd->groups);
8280 8613
@@ -8283,28 +8616,32 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
8283 8616
8284 child = sd->child; 8617 child = sd->child;
8285 8618
8286 sd->groups->__cpu_power = 0; 8619 sd->groups->cpu_power = 0;
8287 8620
8288 /* 8621 if (!child) {
8289 * For perf policy, if the groups in child domain share resources 8622 power = SCHED_LOAD_SCALE;
8290 * (for example cores sharing some portions of the cache hierarchy 8623 weight = cpumask_weight(sched_domain_span(sd));
8291 * or SMT), then set this domain groups cpu_power such that each group 8624 /*
8292 * can handle only one task, when there are other idle groups in the 8625 * SMT siblings share the power of a single core.
8293 * same sched domain. 8626 * Usually multiple threads get a better yield out of
8294 */ 8627 * that one core than a single thread would have,
8295 if (!child || (!(sd->flags & SD_POWERSAVINGS_BALANCE) && 8628 * reflect that in sd->smt_gain.
8296 (child->flags & 8629 */
8297 (SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES)))) { 8630 if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
8298 sg_inc_cpu_power(sd->groups, SCHED_LOAD_SCALE); 8631 power *= sd->smt_gain;
8632 power /= weight;
8633 power >>= SCHED_LOAD_SHIFT;
8634 }
8635 sd->groups->cpu_power += power;
8299 return; 8636 return;
8300 } 8637 }
8301 8638
8302 /* 8639 /*
8303 * add cpu_power of each child group to this groups cpu_power 8640 * Add cpu_power of each child group to this groups cpu_power.
8304 */ 8641 */
8305 group = child->groups; 8642 group = child->groups;
8306 do { 8643 do {
8307 sg_inc_cpu_power(sd->groups, group->__cpu_power); 8644 sd->groups->cpu_power += group->cpu_power;
8308 group = group->next; 8645 group = group->next;
8309 } while (group != child->groups); 8646 } while (group != child->groups);
8310} 8647}
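
The new leaf-domain branch in init_sched_groups_power() computes an SMT sibling's share as power = SCHED_LOAD_SCALE * smt_gain / weight >> SCHED_LOAD_SHIFT. A worked example with illustrative constants (assuming SCHED_LOAD_SHIFT = 10, so SCHED_LOAD_SCALE = 1024, and an smt_gain of 1178, i.e. roughly a 15% bonus for running two threads on one core; treat both values as assumptions, not guaranteed defaults):

#include <stdio.h>

#define SCHED_LOAD_SHIFT	10			/* assumed, for illustration */
#define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)

int main(void)
{
	long smt_gain = 1178;	/* assumed ~1.15 * SCHED_LOAD_SCALE */
	int weight = 2;		/* two hardware threads in the sibling domain */
	long power = SCHED_LOAD_SCALE;

	power *= smt_gain;		/* 1024 * 1178 = 1206272 */
	power /= weight;		/* 603136 */
	power >>= SCHED_LOAD_SHIFT;	/* 589: each sibling gets a bit over half a core */

	printf("per-sibling cpu_power = %ld (both siblings together: %ld)\n",
	       power, power * weight);
	return 0;
}

So the two siblings together advertise about 1178, i.e. one core plus the SMT yield bonus, instead of the old fixed SCHED_LOAD_SCALE per group.
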
@@ -8378,280 +8715,285 @@ static void set_domain_attribute(struct sched_domain *sd,
8378 } 8715 }
8379} 8716}
8380 8717
8381/* 8718static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
8382 * Build sched domains for a given set of cpus and attach the sched domains 8719 const struct cpumask *cpu_map)
8383 * to the individual cpus 8720{
8384 */ 8721 switch (what) {
8385static int __build_sched_domains(const struct cpumask *cpu_map, 8722 case sa_sched_groups:
8386 struct sched_domain_attr *attr) 8723 free_sched_groups(cpu_map, d->tmpmask); /* fall through */
8387{ 8724 d->sched_group_nodes = NULL;
8388 int i, err = -ENOMEM; 8725 case sa_rootdomain:
8389 struct root_domain *rd; 8726 free_rootdomain(d->rd); /* fall through */
8390 cpumask_var_t nodemask, this_sibling_map, this_core_map, send_covered, 8727 case sa_tmpmask:
8391 tmpmask; 8728 free_cpumask_var(d->tmpmask); /* fall through */
8729 case sa_send_covered:
8730 free_cpumask_var(d->send_covered); /* fall through */
8731 case sa_this_core_map:
8732 free_cpumask_var(d->this_core_map); /* fall through */
8733 case sa_this_sibling_map:
8734 free_cpumask_var(d->this_sibling_map); /* fall through */
8735 case sa_nodemask:
8736 free_cpumask_var(d->nodemask); /* fall through */
8737 case sa_sched_group_nodes:
8392#ifdef CONFIG_NUMA 8738#ifdef CONFIG_NUMA
8393 cpumask_var_t domainspan, covered, notcovered; 8739 kfree(d->sched_group_nodes); /* fall through */
8394 struct sched_group **sched_group_nodes = NULL; 8740 case sa_notcovered:
8395 int sd_allnodes = 0; 8741 free_cpumask_var(d->notcovered); /* fall through */
8396 8742 case sa_covered:
8397 if (!alloc_cpumask_var(&domainspan, GFP_KERNEL)) 8743 free_cpumask_var(d->covered); /* fall through */
8398 goto out; 8744 case sa_domainspan:
8399 if (!alloc_cpumask_var(&covered, GFP_KERNEL)) 8745 free_cpumask_var(d->domainspan); /* fall through */
8400 goto free_domainspan; 8746#endif
8401 if (!alloc_cpumask_var(&notcovered, GFP_KERNEL)) 8747 case sa_none:
8402 goto free_covered; 8748 break;
8403#endif 8749 }
8404 8750}
8405 if (!alloc_cpumask_var(&nodemask, GFP_KERNEL))
8406 goto free_notcovered;
8407 if (!alloc_cpumask_var(&this_sibling_map, GFP_KERNEL))
8408 goto free_nodemask;
8409 if (!alloc_cpumask_var(&this_core_map, GFP_KERNEL))
8410 goto free_this_sibling_map;
8411 if (!alloc_cpumask_var(&send_covered, GFP_KERNEL))
8412 goto free_this_core_map;
8413 if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
8414 goto free_send_covered;
8415 8751
8752static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
8753 const struct cpumask *cpu_map)
8754{
8416#ifdef CONFIG_NUMA 8755#ifdef CONFIG_NUMA
8417 /* 8756 if (!alloc_cpumask_var(&d->domainspan, GFP_KERNEL))
8418 * Allocate the per-node list of sched groups 8757 return sa_none;
8419 */ 8758 if (!alloc_cpumask_var(&d->covered, GFP_KERNEL))
8420 sched_group_nodes = kcalloc(nr_node_ids, sizeof(struct sched_group *), 8759 return sa_domainspan;
8421 GFP_KERNEL); 8760 if (!alloc_cpumask_var(&d->notcovered, GFP_KERNEL))
8422 if (!sched_group_nodes) { 8761 return sa_covered;
8762 /* Allocate the per-node list of sched groups */
8763 d->sched_group_nodes = kcalloc(nr_node_ids,
8764 sizeof(struct sched_group *), GFP_KERNEL);
8765 if (!d->sched_group_nodes) {
8423 printk(KERN_WARNING "Can not alloc sched group node list\n"); 8766 printk(KERN_WARNING "Can not alloc sched group node list\n");
8424 goto free_tmpmask; 8767 return sa_notcovered;
8425 } 8768 }
8426#endif 8769 sched_group_nodes_bycpu[cpumask_first(cpu_map)] = d->sched_group_nodes;
8427 8770#endif
8428 rd = alloc_rootdomain(); 8771 if (!alloc_cpumask_var(&d->nodemask, GFP_KERNEL))
8429 if (!rd) { 8772 return sa_sched_group_nodes;
8773 if (!alloc_cpumask_var(&d->this_sibling_map, GFP_KERNEL))
8774 return sa_nodemask;
8775 if (!alloc_cpumask_var(&d->this_core_map, GFP_KERNEL))
8776 return sa_this_sibling_map;
8777 if (!alloc_cpumask_var(&d->send_covered, GFP_KERNEL))
8778 return sa_this_core_map;
8779 if (!alloc_cpumask_var(&d->tmpmask, GFP_KERNEL))
8780 return sa_send_covered;
8781 d->rd = alloc_rootdomain();
8782 if (!d->rd) {
8430 printk(KERN_WARNING "Cannot alloc root domain\n"); 8783 printk(KERN_WARNING "Cannot alloc root domain\n");
8431 goto free_sched_groups; 8784 return sa_tmpmask;
8432 } 8785 }
8786 return sa_rootdomain;
8787}
8433 8788
8789static struct sched_domain *__build_numa_sched_domains(struct s_data *d,
8790 const struct cpumask *cpu_map, struct sched_domain_attr *attr, int i)
8791{
8792 struct sched_domain *sd = NULL;
8434#ifdef CONFIG_NUMA 8793#ifdef CONFIG_NUMA
8435 sched_group_nodes_bycpu[cpumask_first(cpu_map)] = sched_group_nodes; 8794 struct sched_domain *parent;
8436#endif
8437
8438 /*
8439 * Set up domains for cpus specified by the cpu_map.
8440 */
8441 for_each_cpu(i, cpu_map) {
8442 struct sched_domain *sd = NULL, *p;
8443
8444 cpumask_and(nodemask, cpumask_of_node(cpu_to_node(i)), cpu_map);
8445
8446#ifdef CONFIG_NUMA
8447 if (cpumask_weight(cpu_map) >
8448 SD_NODES_PER_DOMAIN*cpumask_weight(nodemask)) {
8449 sd = &per_cpu(allnodes_domains, i).sd;
8450 SD_INIT(sd, ALLNODES);
8451 set_domain_attribute(sd, attr);
8452 cpumask_copy(sched_domain_span(sd), cpu_map);
8453 cpu_to_allnodes_group(i, cpu_map, &sd->groups, tmpmask);
8454 p = sd;
8455 sd_allnodes = 1;
8456 } else
8457 p = NULL;
8458 8795
8459 sd = &per_cpu(node_domains, i).sd; 8796 d->sd_allnodes = 0;
8460 SD_INIT(sd, NODE); 8797 if (cpumask_weight(cpu_map) >
8798 SD_NODES_PER_DOMAIN * cpumask_weight(d->nodemask)) {
8799 sd = &per_cpu(allnodes_domains, i).sd;
8800 SD_INIT(sd, ALLNODES);
8461 set_domain_attribute(sd, attr); 8801 set_domain_attribute(sd, attr);
8462 sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd)); 8802 cpumask_copy(sched_domain_span(sd), cpu_map);
8463 sd->parent = p; 8803 cpu_to_allnodes_group(i, cpu_map, &sd->groups, d->tmpmask);
8464 if (p) 8804 d->sd_allnodes = 1;
8465 p->child = sd; 8805 }
8466 cpumask_and(sched_domain_span(sd), 8806 parent = sd;
8467 sched_domain_span(sd), cpu_map); 8807
8808 sd = &per_cpu(node_domains, i).sd;
8809 SD_INIT(sd, NODE);
8810 set_domain_attribute(sd, attr);
8811 sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
8812 sd->parent = parent;
8813 if (parent)
8814 parent->child = sd;
8815 cpumask_and(sched_domain_span(sd), sched_domain_span(sd), cpu_map);
8468#endif 8816#endif
8817 return sd;
8818}
8469 8819
8470 p = sd; 8820static struct sched_domain *__build_cpu_sched_domain(struct s_data *d,
8471 sd = &per_cpu(phys_domains, i).sd; 8821 const struct cpumask *cpu_map, struct sched_domain_attr *attr,
8472 SD_INIT(sd, CPU); 8822 struct sched_domain *parent, int i)
8473 set_domain_attribute(sd, attr); 8823{
8474 cpumask_copy(sched_domain_span(sd), nodemask); 8824 struct sched_domain *sd;
8475 sd->parent = p; 8825 sd = &per_cpu(phys_domains, i).sd;
8476 if (p) 8826 SD_INIT(sd, CPU);
8477 p->child = sd; 8827 set_domain_attribute(sd, attr);
8478 cpu_to_phys_group(i, cpu_map, &sd->groups, tmpmask); 8828 cpumask_copy(sched_domain_span(sd), d->nodemask);
8829 sd->parent = parent;
8830 if (parent)
8831 parent->child = sd;
8832 cpu_to_phys_group(i, cpu_map, &sd->groups, d->tmpmask);
8833 return sd;
8834}
8479 8835
8836static struct sched_domain *__build_mc_sched_domain(struct s_data *d,
8837 const struct cpumask *cpu_map, struct sched_domain_attr *attr,
8838 struct sched_domain *parent, int i)
8839{
8840 struct sched_domain *sd = parent;
8480#ifdef CONFIG_SCHED_MC 8841#ifdef CONFIG_SCHED_MC
8481 p = sd; 8842 sd = &per_cpu(core_domains, i).sd;
8482 sd = &per_cpu(core_domains, i).sd; 8843 SD_INIT(sd, MC);
8483 SD_INIT(sd, MC); 8844 set_domain_attribute(sd, attr);
8484 set_domain_attribute(sd, attr); 8845 cpumask_and(sched_domain_span(sd), cpu_map, cpu_coregroup_mask(i));
8485 cpumask_and(sched_domain_span(sd), cpu_map, 8846 sd->parent = parent;
8486 cpu_coregroup_mask(i)); 8847 parent->child = sd;
8487 sd->parent = p; 8848 cpu_to_core_group(i, cpu_map, &sd->groups, d->tmpmask);
8488 p->child = sd;
8489 cpu_to_core_group(i, cpu_map, &sd->groups, tmpmask);
8490#endif 8849#endif
8850 return sd;
8851}
8491 8852
8853static struct sched_domain *__build_smt_sched_domain(struct s_data *d,
8854 const struct cpumask *cpu_map, struct sched_domain_attr *attr,
8855 struct sched_domain *parent, int i)
8856{
8857 struct sched_domain *sd = parent;
8492#ifdef CONFIG_SCHED_SMT 8858#ifdef CONFIG_SCHED_SMT
8493 p = sd; 8859 sd = &per_cpu(cpu_domains, i).sd;
8494 sd = &per_cpu(cpu_domains, i).sd; 8860 SD_INIT(sd, SIBLING);
8495 SD_INIT(sd, SIBLING); 8861 set_domain_attribute(sd, attr);
8496 set_domain_attribute(sd, attr); 8862 cpumask_and(sched_domain_span(sd), cpu_map, topology_thread_cpumask(i));
8497 cpumask_and(sched_domain_span(sd), 8863 sd->parent = parent;
8498 topology_thread_cpumask(i), cpu_map); 8864 parent->child = sd;
8499 sd->parent = p; 8865 cpu_to_cpu_group(i, cpu_map, &sd->groups, d->tmpmask);
8500 p->child = sd;
8501 cpu_to_cpu_group(i, cpu_map, &sd->groups, tmpmask);
8502#endif 8866#endif
8503 } 8867 return sd;
8868}
8504 8869
8870static void build_sched_groups(struct s_data *d, enum sched_domain_level l,
8871 const struct cpumask *cpu_map, int cpu)
8872{
8873 switch (l) {
8505#ifdef CONFIG_SCHED_SMT 8874#ifdef CONFIG_SCHED_SMT
8506 /* Set up CPU (sibling) groups */ 8875 case SD_LV_SIBLING: /* set up CPU (sibling) groups */
8507 for_each_cpu(i, cpu_map) { 8876 cpumask_and(d->this_sibling_map, cpu_map,
8508 cpumask_and(this_sibling_map, 8877 topology_thread_cpumask(cpu));
8509 topology_thread_cpumask(i), cpu_map); 8878 if (cpu == cpumask_first(d->this_sibling_map))
8510 if (i != cpumask_first(this_sibling_map)) 8879 init_sched_build_groups(d->this_sibling_map, cpu_map,
8511 continue; 8880 &cpu_to_cpu_group,
8512 8881 d->send_covered, d->tmpmask);
8513 init_sched_build_groups(this_sibling_map, cpu_map, 8882 break;
8514 &cpu_to_cpu_group,
8515 send_covered, tmpmask);
8516 }
8517#endif 8883#endif
8518
8519#ifdef CONFIG_SCHED_MC 8884#ifdef CONFIG_SCHED_MC
8520 /* Set up multi-core groups */ 8885 case SD_LV_MC: /* set up multi-core groups */
8521 for_each_cpu(i, cpu_map) { 8886 cpumask_and(d->this_core_map, cpu_map, cpu_coregroup_mask(cpu));
8522 cpumask_and(this_core_map, cpu_coregroup_mask(i), cpu_map); 8887 if (cpu == cpumask_first(d->this_core_map))
8523 if (i != cpumask_first(this_core_map)) 8888 init_sched_build_groups(d->this_core_map, cpu_map,
8524 continue; 8889 &cpu_to_core_group,
8525 8890 d->send_covered, d->tmpmask);
8526 init_sched_build_groups(this_core_map, cpu_map, 8891 break;
8527 &cpu_to_core_group,
8528 send_covered, tmpmask);
8529 }
8530#endif 8892#endif
8531 8893 case SD_LV_CPU: /* set up physical groups */
8532 /* Set up physical groups */ 8894 cpumask_and(d->nodemask, cpumask_of_node(cpu), cpu_map);
8533 for (i = 0; i < nr_node_ids; i++) { 8895 if (!cpumask_empty(d->nodemask))
8534 cpumask_and(nodemask, cpumask_of_node(i), cpu_map); 8896 init_sched_build_groups(d->nodemask, cpu_map,
8535 if (cpumask_empty(nodemask)) 8897 &cpu_to_phys_group,
8536 continue; 8898 d->send_covered, d->tmpmask);
8537 8899 break;
8538 init_sched_build_groups(nodemask, cpu_map,
8539 &cpu_to_phys_group,
8540 send_covered, tmpmask);
8541 }
8542
8543#ifdef CONFIG_NUMA 8900#ifdef CONFIG_NUMA
8544 /* Set up node groups */ 8901 case SD_LV_ALLNODES:
8545 if (sd_allnodes) { 8902 init_sched_build_groups(cpu_map, cpu_map, &cpu_to_allnodes_group,
8546 init_sched_build_groups(cpu_map, cpu_map, 8903 d->send_covered, d->tmpmask);
8547 &cpu_to_allnodes_group, 8904 break;
8548 send_covered, tmpmask); 8905#endif
8906 default:
8907 break;
8549 } 8908 }
8909}
8550 8910
8551 for (i = 0; i < nr_node_ids; i++) { 8911/*
8552 /* Set up node groups */ 8912 * Build sched domains for a given set of cpus and attach the sched domains
8553 struct sched_group *sg, *prev; 8913 * to the individual cpus
8554 int j; 8914 */
8555 8915static int __build_sched_domains(const struct cpumask *cpu_map,
8556 cpumask_clear(covered); 8916 struct sched_domain_attr *attr)
8557 cpumask_and(nodemask, cpumask_of_node(i), cpu_map); 8917{
8558 if (cpumask_empty(nodemask)) { 8918 enum s_alloc alloc_state = sa_none;
8559 sched_group_nodes[i] = NULL; 8919 struct s_data d;
8560 continue; 8920 struct sched_domain *sd;
8561 } 8921 int i;
8922#ifdef CONFIG_NUMA
8923 d.sd_allnodes = 0;
8924#endif
8562 8925
8563 sched_domain_node_span(i, domainspan); 8926 alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
8564 cpumask_and(domainspan, domainspan, cpu_map); 8927 if (alloc_state != sa_rootdomain)
8928 goto error;
8929 alloc_state = sa_sched_groups;
8565 8930
8566 sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(), 8931 /*
8567 GFP_KERNEL, i); 8932 * Set up domains for cpus specified by the cpu_map.
8568 if (!sg) { 8933 */
8569 printk(KERN_WARNING "Can not alloc domain group for " 8934 for_each_cpu(i, cpu_map) {
8570 "node %d\n", i); 8935 cpumask_and(d.nodemask, cpumask_of_node(cpu_to_node(i)),
8571 goto error; 8936 cpu_map);
8572 }
8573 sched_group_nodes[i] = sg;
8574 for_each_cpu(j, nodemask) {
8575 struct sched_domain *sd;
8576 8937
8577 sd = &per_cpu(node_domains, j).sd; 8938 sd = __build_numa_sched_domains(&d, cpu_map, attr, i);
8578 sd->groups = sg; 8939 sd = __build_cpu_sched_domain(&d, cpu_map, attr, sd, i);
8579 } 8940 sd = __build_mc_sched_domain(&d, cpu_map, attr, sd, i);
8580 sg->__cpu_power = 0; 8941 sd = __build_smt_sched_domain(&d, cpu_map, attr, sd, i);
8581 cpumask_copy(sched_group_cpus(sg), nodemask); 8942 }
8582 sg->next = sg;
8583 cpumask_or(covered, covered, nodemask);
8584 prev = sg;
8585 8943
8586 for (j = 0; j < nr_node_ids; j++) { 8944 for_each_cpu(i, cpu_map) {
8587 int n = (i + j) % nr_node_ids; 8945 build_sched_groups(&d, SD_LV_SIBLING, cpu_map, i);
8946 build_sched_groups(&d, SD_LV_MC, cpu_map, i);
8947 }
8588 8948
8589 cpumask_complement(notcovered, covered); 8949 /* Set up physical groups */
8590 cpumask_and(tmpmask, notcovered, cpu_map); 8950 for (i = 0; i < nr_node_ids; i++)
8591 cpumask_and(tmpmask, tmpmask, domainspan); 8951 build_sched_groups(&d, SD_LV_CPU, cpu_map, i);
8592 if (cpumask_empty(tmpmask))
8593 break;
8594 8952
8595 cpumask_and(tmpmask, tmpmask, cpumask_of_node(n)); 8953#ifdef CONFIG_NUMA
8596 if (cpumask_empty(tmpmask)) 8954 /* Set up node groups */
8597 continue; 8955 if (d.sd_allnodes)
8956 build_sched_groups(&d, SD_LV_ALLNODES, cpu_map, 0);
8598 8957
8599 sg = kmalloc_node(sizeof(struct sched_group) + 8958 for (i = 0; i < nr_node_ids; i++)
8600 cpumask_size(), 8959 if (build_numa_sched_groups(&d, cpu_map, i))
8601 GFP_KERNEL, i); 8960 goto error;
8602 if (!sg) {
8603 printk(KERN_WARNING
8604 "Can not alloc domain group for node %d\n", j);
8605 goto error;
8606 }
8607 sg->__cpu_power = 0;
8608 cpumask_copy(sched_group_cpus(sg), tmpmask);
8609 sg->next = prev->next;
8610 cpumask_or(covered, covered, tmpmask);
8611 prev->next = sg;
8612 prev = sg;
8613 }
8614 }
8615#endif 8961#endif
8616 8962
8617 /* Calculate CPU power for physical packages and nodes */ 8963 /* Calculate CPU power for physical packages and nodes */
8618#ifdef CONFIG_SCHED_SMT 8964#ifdef CONFIG_SCHED_SMT
8619 for_each_cpu(i, cpu_map) { 8965 for_each_cpu(i, cpu_map) {
8620 struct sched_domain *sd = &per_cpu(cpu_domains, i).sd; 8966 sd = &per_cpu(cpu_domains, i).sd;
8621
8622 init_sched_groups_power(i, sd); 8967 init_sched_groups_power(i, sd);
8623 } 8968 }
8624#endif 8969#endif
8625#ifdef CONFIG_SCHED_MC 8970#ifdef CONFIG_SCHED_MC
8626 for_each_cpu(i, cpu_map) { 8971 for_each_cpu(i, cpu_map) {
8627 struct sched_domain *sd = &per_cpu(core_domains, i).sd; 8972 sd = &per_cpu(core_domains, i).sd;
8628
8629 init_sched_groups_power(i, sd); 8973 init_sched_groups_power(i, sd);
8630 } 8974 }
8631#endif 8975#endif
8632 8976
8633 for_each_cpu(i, cpu_map) { 8977 for_each_cpu(i, cpu_map) {
8634 struct sched_domain *sd = &per_cpu(phys_domains, i).sd; 8978 sd = &per_cpu(phys_domains, i).sd;
8635
8636 init_sched_groups_power(i, sd); 8979 init_sched_groups_power(i, sd);
8637 } 8980 }
8638 8981
8639#ifdef CONFIG_NUMA 8982#ifdef CONFIG_NUMA
8640 for (i = 0; i < nr_node_ids; i++) 8983 for (i = 0; i < nr_node_ids; i++)
8641 init_numa_sched_groups_power(sched_group_nodes[i]); 8984 init_numa_sched_groups_power(d.sched_group_nodes[i]);
8642 8985
8643 if (sd_allnodes) { 8986 if (d.sd_allnodes) {
8644 struct sched_group *sg; 8987 struct sched_group *sg;
8645 8988
8646 cpu_to_allnodes_group(cpumask_first(cpu_map), cpu_map, &sg, 8989 cpu_to_allnodes_group(cpumask_first(cpu_map), cpu_map, &sg,
8647 tmpmask); 8990 d.tmpmask);
8648 init_numa_sched_groups_power(sg); 8991 init_numa_sched_groups_power(sg);
8649 } 8992 }
8650#endif 8993#endif
8651 8994
8652 /* Attach the domains */ 8995 /* Attach the domains */
8653 for_each_cpu(i, cpu_map) { 8996 for_each_cpu(i, cpu_map) {
8654 struct sched_domain *sd;
8655#ifdef CONFIG_SCHED_SMT 8997#ifdef CONFIG_SCHED_SMT
8656 sd = &per_cpu(cpu_domains, i).sd; 8998 sd = &per_cpu(cpu_domains, i).sd;
8657#elif defined(CONFIG_SCHED_MC) 8999#elif defined(CONFIG_SCHED_MC)
@@ -8659,44 +9001,16 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
8659#else 9001#else
8660 sd = &per_cpu(phys_domains, i).sd; 9002 sd = &per_cpu(phys_domains, i).sd;
8661#endif 9003#endif
8662 cpu_attach_domain(sd, rd, i); 9004 cpu_attach_domain(sd, d.rd, i);
8663 } 9005 }
8664 9006
8665 err = 0; 9007 d.sched_group_nodes = NULL; /* don't free this we still need it */
8666 9008 __free_domain_allocs(&d, sa_tmpmask, cpu_map);
8667free_tmpmask: 9009 return 0;
8668 free_cpumask_var(tmpmask);
8669free_send_covered:
8670 free_cpumask_var(send_covered);
8671free_this_core_map:
8672 free_cpumask_var(this_core_map);
8673free_this_sibling_map:
8674 free_cpumask_var(this_sibling_map);
8675free_nodemask:
8676 free_cpumask_var(nodemask);
8677free_notcovered:
8678#ifdef CONFIG_NUMA
8679 free_cpumask_var(notcovered);
8680free_covered:
8681 free_cpumask_var(covered);
8682free_domainspan:
8683 free_cpumask_var(domainspan);
8684out:
8685#endif
8686 return err;
8687
8688free_sched_groups:
8689#ifdef CONFIG_NUMA
8690 kfree(sched_group_nodes);
8691#endif
8692 goto free_tmpmask;
8693 9010
8694#ifdef CONFIG_NUMA
8695error: 9011error:
8696 free_sched_groups(cpu_map, tmpmask); 9012 __free_domain_allocs(&d, alloc_state, cpu_map);
8697 free_rootdomain(rd); 9013 return -ENOMEM;
8698 goto free_tmpmask;
8699#endif
8700} 9014}
8701 9015
8702static int build_sched_domains(const struct cpumask *cpu_map) 9016static int build_sched_domains(const struct cpumask *cpu_map)
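
The rewrite above replaces the old ladder of goto labels with an explicit allocation state: __visit_domain_allocation_hell() returns how far it got (enum s_alloc), and __free_domain_allocs() frees from that point downwards with a deliberately falling-through switch. The same pattern in miniature (all names here are illustrative, not the kernel's):

#include <stdlib.h>

enum stage { GOT_NOTHING, GOT_A, GOT_B, GOT_ALL };

struct ctx {
	void *a;
	void *b;
};

/* Free everything acquired up to 'how_far', relying on fall-through. */
static void teardown(struct ctx *c, enum stage how_far)
{
	switch (how_far) {
	case GOT_ALL:
	case GOT_B:
		free(c->b);	/* fall through */
	case GOT_A:
		free(c->a);	/* fall through */
	case GOT_NOTHING:
		break;
	}
}

/* Returns how far allocation got; the caller tears down from that stage. */
static enum stage setup(struct ctx *c)
{
	c->a = malloc(64);
	if (!c->a)
		return GOT_NOTHING;
	c->b = malloc(64);
	if (!c->b)
		return GOT_A;
	return GOT_ALL;
}

int main(void)
{
	struct ctx c = { 0 };
	enum stage got = setup(&c);

	/* Whether setup() succeeded or not, one call unwinds exactly
	 * what was acquired and nothing more. */
	teardown(&c, got);
	return got == GOT_ALL ? 0 : 1;
}
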
@@ -9304,11 +9618,11 @@ void __init sched_init(void)
9304 * system cpu resource, based on the weight assigned to root 9618 * system cpu resource, based on the weight assigned to root
9305 * user's cpu share (INIT_TASK_GROUP_LOAD). This is accomplished 9619 * user's cpu share (INIT_TASK_GROUP_LOAD). This is accomplished
9306 * by letting tasks of init_task_group sit in a separate cfs_rq 9620 * by letting tasks of init_task_group sit in a separate cfs_rq
9307 * (init_cfs_rq) and having one entity represent this group of 9621 * (init_tg_cfs_rq) and having one entity represent this group of
9308 * tasks in rq->cfs (i.e init_task_group->se[] != NULL). 9622 * tasks in rq->cfs (i.e init_task_group->se[] != NULL).
9309 */ 9623 */
9310 init_tg_cfs_entry(&init_task_group, 9624 init_tg_cfs_entry(&init_task_group,
9311 &per_cpu(init_cfs_rq, i), 9625 &per_cpu(init_tg_cfs_rq, i),
9312 &per_cpu(init_sched_entity, i), i, 1, 9626 &per_cpu(init_sched_entity, i), i, 1,
9313 root_task_group.se[i]); 9627 root_task_group.se[i]);
9314 9628
@@ -9334,6 +9648,7 @@ void __init sched_init(void)
9334#ifdef CONFIG_SMP 9648#ifdef CONFIG_SMP
9335 rq->sd = NULL; 9649 rq->sd = NULL;
9336 rq->rd = NULL; 9650 rq->rd = NULL;
9651 rq->post_schedule = 0;
9337 rq->active_balance = 0; 9652 rq->active_balance = 0;
9338 rq->next_balance = jiffies; 9653 rq->next_balance = jiffies;
9339 rq->push_cpu = 0; 9654 rq->push_cpu = 0;
@@ -9398,13 +9713,20 @@ void __init sched_init(void)
9398} 9713}
9399 9714
9400#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP 9715#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
9401void __might_sleep(char *file, int line) 9716static inline int preempt_count_equals(int preempt_offset)
9717{
9718 int nested = preempt_count() & ~PREEMPT_ACTIVE;
9719
9720 return (nested == PREEMPT_INATOMIC_BASE + preempt_offset);
9721}
9722
9723void __might_sleep(char *file, int line, int preempt_offset)
9402{ 9724{
9403#ifdef in_atomic 9725#ifdef in_atomic
9404 static unsigned long prev_jiffy; /* ratelimiting */ 9726 static unsigned long prev_jiffy; /* ratelimiting */
9405 9727
9406 if ((!in_atomic() && !irqs_disabled()) || 9728 if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
9407 system_state != SYSTEM_RUNNING || oops_in_progress) 9729 system_state != SYSTEM_RUNNING || oops_in_progress)
9408 return; 9730 return;
9409 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) 9731 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
9410 return; 9732 return;
@@ -10581,3 +10903,113 @@ struct cgroup_subsys cpuacct_subsys = {
10581 .subsys_id = cpuacct_subsys_id, 10903 .subsys_id = cpuacct_subsys_id,
10582}; 10904};
10583#endif /* CONFIG_CGROUP_CPUACCT */ 10905#endif /* CONFIG_CGROUP_CPUACCT */
10906
10907#ifndef CONFIG_SMP
10908
10909int rcu_expedited_torture_stats(char *page)
10910{
10911 return 0;
10912}
10913EXPORT_SYMBOL_GPL(rcu_expedited_torture_stats);
10914
10915void synchronize_sched_expedited(void)
10916{
10917}
10918EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
10919
10920#else /* #ifndef CONFIG_SMP */
10921
10922static DEFINE_PER_CPU(struct migration_req, rcu_migration_req);
10923static DEFINE_MUTEX(rcu_sched_expedited_mutex);
10924
10925#define RCU_EXPEDITED_STATE_POST -2
10926#define RCU_EXPEDITED_STATE_IDLE -1
10927
10928static int rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;
10929
10930int rcu_expedited_torture_stats(char *page)
10931{
10932 int cnt = 0;
10933 int cpu;
10934
10935 cnt += sprintf(&page[cnt], "state: %d /", rcu_expedited_state);
10936 for_each_online_cpu(cpu) {
10937 cnt += sprintf(&page[cnt], " %d:%d",
10938 cpu, per_cpu(rcu_migration_req, cpu).dest_cpu);
10939 }
10940 cnt += sprintf(&page[cnt], "\n");
10941 return cnt;
10942}
10943EXPORT_SYMBOL_GPL(rcu_expedited_torture_stats);
10944
10945static long synchronize_sched_expedited_count;
10946
10947/*
10948 * Wait for an rcu-sched grace period to elapse, but use "big hammer"
10949 * approach to force grace period to end quickly. This consumes
10950 * significant time on all CPUs, and is thus not recommended for
10951 * any sort of common-case code.
10952 *
10953 * Note that it is illegal to call this function while holding any
10954 * lock that is acquired by a CPU-hotplug notifier. Failing to
10955 * observe this restriction will result in deadlock.
10956 */
10957void synchronize_sched_expedited(void)
10958{
10959 int cpu;
10960 unsigned long flags;
10961 bool need_full_sync = 0;
10962 struct rq *rq;
10963 struct migration_req *req;
10964 long snap;
10965 int trycount = 0;
10966
10967 smp_mb(); /* ensure prior mod happens before capturing snap. */
10968 snap = ACCESS_ONCE(synchronize_sched_expedited_count) + 1;
10969 get_online_cpus();
10970 while (!mutex_trylock(&rcu_sched_expedited_mutex)) {
10971 put_online_cpus();
10972 if (trycount++ < 10)
10973 udelay(trycount * num_online_cpus());
10974 else {
10975 synchronize_sched();
10976 return;
10977 }
10978 if (ACCESS_ONCE(synchronize_sched_expedited_count) - snap > 0) {
10979 smp_mb(); /* ensure test happens before caller kfree */
10980 return;
10981 }
10982 get_online_cpus();
10983 }
10984 rcu_expedited_state = RCU_EXPEDITED_STATE_POST;
10985 for_each_online_cpu(cpu) {
10986 rq = cpu_rq(cpu);
10987 req = &per_cpu(rcu_migration_req, cpu);
10988 init_completion(&req->done);
10989 req->task = NULL;
10990 req->dest_cpu = RCU_MIGRATION_NEED_QS;
10991 spin_lock_irqsave(&rq->lock, flags);
10992 list_add(&req->list, &rq->migration_queue);
10993 spin_unlock_irqrestore(&rq->lock, flags);
10994 wake_up_process(rq->migration_thread);
10995 }
10996 for_each_online_cpu(cpu) {
10997 rcu_expedited_state = cpu;
10998 req = &per_cpu(rcu_migration_req, cpu);
10999 rq = cpu_rq(cpu);
11000 wait_for_completion(&req->done);
11001 spin_lock_irqsave(&rq->lock, flags);
11002 if (unlikely(req->dest_cpu == RCU_MIGRATION_MUST_SYNC))
11003 need_full_sync = 1;
11004 req->dest_cpu = RCU_MIGRATION_IDLE;
11005 spin_unlock_irqrestore(&rq->lock, flags);
11006 }
11007 rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;
11008 mutex_unlock(&rcu_sched_expedited_mutex);
11009 put_online_cpus();
11010 if (need_full_sync)
11011 synchronize_sched();
11012}
11013EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
11014
11015#endif /* #else #ifndef CONFIG_SMP */
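
synchronize_sched_expedited() combines two idioms: a generation snapshot taken before trying the mutex, so a caller that loses the trylock race can notice that somebody else's later pass already covers it and return early; and a fallback to the slow synchronize_sched() after a handful of retries. A reduced sketch of just that snapshot/trylock shape (the names and the do_expedited_work() stub are illustrative, and the retry/fallback details are simplified):

#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>

static pthread_mutex_t expedited_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_long generation;		/* bumped each time a full pass completes */

static void do_expedited_work(void)	/* stand-in for the per-CPU sweep */
{
}

static void synchronize_expedited_sketch(void)
{
	long snap = atomic_load(&generation) + 1;
	int tries = 0;

	while (pthread_mutex_trylock(&expedited_lock) != 0) {
		if (++tries > 10)
			return;		/* a real caller falls back to the slow path here */
		if (atomic_load(&generation) - snap > 0)
			return;		/* someone else's later pass already covers us */
		sched_yield();
	}
	do_expedited_work();
	atomic_fetch_add(&generation, 1);
	pthread_mutex_unlock(&expedited_lock);
}

int main(void)
{
	synchronize_expedited_sketch();
	return 0;
}
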
diff --git a/kernel/sched_cpupri.c b/kernel/sched_cpupri.c
index d014efbf947a..0f052fc674d5 100644
--- a/kernel/sched_cpupri.c
+++ b/kernel/sched_cpupri.c
@@ -127,21 +127,11 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
127 127
128 /* 128 /*
129 * If the cpu was currently mapped to a different value, we 129 * If the cpu was currently mapped to a different value, we
130 * first need to unmap the old value 130 * need to map it to the new value then remove the old value.
131 * Note, we must add the new value first, otherwise we risk the
132 * cpu being cleared from pri_active, and this cpu could be
133 * missed for a push or pull.
131 */ 134 */
132 if (likely(oldpri != CPUPRI_INVALID)) {
133 struct cpupri_vec *vec = &cp->pri_to_cpu[oldpri];
134
135 spin_lock_irqsave(&vec->lock, flags);
136
137 vec->count--;
138 if (!vec->count)
139 clear_bit(oldpri, cp->pri_active);
140 cpumask_clear_cpu(cpu, vec->mask);
141
142 spin_unlock_irqrestore(&vec->lock, flags);
143 }
144
145 if (likely(newpri != CPUPRI_INVALID)) { 135 if (likely(newpri != CPUPRI_INVALID)) {
146 struct cpupri_vec *vec = &cp->pri_to_cpu[newpri]; 136 struct cpupri_vec *vec = &cp->pri_to_cpu[newpri];
147 137
@@ -154,6 +144,18 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
154 144
155 spin_unlock_irqrestore(&vec->lock, flags); 145 spin_unlock_irqrestore(&vec->lock, flags);
156 } 146 }
147 if (likely(oldpri != CPUPRI_INVALID)) {
148 struct cpupri_vec *vec = &cp->pri_to_cpu[oldpri];
149
150 spin_lock_irqsave(&vec->lock, flags);
151
152 vec->count--;
153 if (!vec->count)
154 clear_bit(oldpri, cp->pri_active);
155 cpumask_clear_cpu(cpu, vec->mask);
156
157 spin_unlock_irqrestore(&vec->lock, flags);
158 }
157 159
158 *currpri = newpri; 160 *currpri = newpri;
159} 161}
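
The reordering in cpupri_set() is purely about visibility to concurrent scanners: the CPU is added to the vector for its new priority before it is removed from the old one, so there is never a window in which a walker of pri_active sees the CPU in neither vector and misses it for a push or pull. The same discipline in toy form (two plain bitmasks standing in for the old and new cpupri_vec; no real concurrency here, just the ordering):

#include <stdio.h>

/* Toy per-priority CPU masks; a concurrent scanner would OR these. */
static unsigned long vec_old_pri, vec_new_pri;

static void move_cpu(int cpu)
{
	/* Safe order: publish membership in the new vector first... */
	vec_new_pri |= 1UL << cpu;
	/* ...then retract it from the old one.  Clearing first would open
	 * a window where the cpu is in neither mask and gets skipped. */
	vec_old_pri &= ~(1UL << cpu);
}

int main(void)
{
	vec_old_pri = 1UL << 3;
	move_cpu(3);
	printf("cpu 3 visible somewhere: %s\n",
	       ((vec_old_pri | vec_new_pri) & (1UL << 3)) ? "yes" : "no");
	return 0;
}
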
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 70c7e0b79946..5ddbd0891267 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -409,6 +409,8 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
409 PN(se.wait_max); 409 PN(se.wait_max);
410 PN(se.wait_sum); 410 PN(se.wait_sum);
411 P(se.wait_count); 411 P(se.wait_count);
412 PN(se.iowait_sum);
413 P(se.iowait_count);
412 P(sched_info.bkl_count); 414 P(sched_info.bkl_count);
413 P(se.nr_migrations); 415 P(se.nr_migrations);
414 P(se.nr_migrations_cold); 416 P(se.nr_migrations_cold);
@@ -479,6 +481,8 @@ void proc_sched_set_task(struct task_struct *p)
479 p->se.wait_max = 0; 481 p->se.wait_max = 0;
480 p->se.wait_sum = 0; 482 p->se.wait_sum = 0;
481 p->se.wait_count = 0; 483 p->se.wait_count = 0;
484 p->se.iowait_sum = 0;
485 p->se.iowait_count = 0;
482 p->se.sleep_max = 0; 486 p->se.sleep_max = 0;
483 p->se.sum_sleep_runtime = 0; 487 p->se.sum_sleep_runtime = 0;
484 p->se.block_max = 0; 488 p->se.block_max = 0;
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 652e8bdef9aa..aa7f84121016 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -24,7 +24,7 @@
24 24
25/* 25/*
26 * Targeted preemption latency for CPU-bound tasks: 26 * Targeted preemption latency for CPU-bound tasks:
27 * (default: 20ms * (1 + ilog(ncpus)), units: nanoseconds) 27 * (default: 5ms * (1 + ilog(ncpus)), units: nanoseconds)
28 * 28 *
29 * NOTE: this latency value is not the same as the concept of 29 * NOTE: this latency value is not the same as the concept of
30 * 'timeslice length' - timeslices in CFS are of variable length 30 * 'timeslice length' - timeslices in CFS are of variable length
@@ -34,13 +34,13 @@
34 * (to see the precise effective timeslice length of your workload, 34 * (to see the precise effective timeslice length of your workload,
35 * run vmstat and monitor the context-switches (cs) field) 35 * run vmstat and monitor the context-switches (cs) field)
36 */ 36 */
37unsigned int sysctl_sched_latency = 20000000ULL; 37unsigned int sysctl_sched_latency = 5000000ULL;
38 38
39/* 39/*
40 * Minimal preemption granularity for CPU-bound tasks: 40 * Minimal preemption granularity for CPU-bound tasks:
41 * (default: 4 msec * (1 + ilog(ncpus)), units: nanoseconds) 41 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
42 */ 42 */
43unsigned int sysctl_sched_min_granularity = 4000000ULL; 43unsigned int sysctl_sched_min_granularity = 1000000ULL;
44 44
45/* 45/*
46 * is kept at sysctl_sched_latency / sysctl_sched_min_granularity 46 * is kept at sysctl_sched_latency / sysctl_sched_min_granularity
@@ -48,10 +48,10 @@ unsigned int sysctl_sched_min_granularity = 4000000ULL;
48static unsigned int sched_nr_latency = 5; 48static unsigned int sched_nr_latency = 5;
49 49
50/* 50/*
51 * After fork, child runs first. (default) If set to 0 then 51 * After fork, child runs first. If set to 0 (default) then
52 * parent will (try to) run first. 52 * parent will (try to) run first.
53 */ 53 */
54const_debug unsigned int sysctl_sched_child_runs_first = 1; 54unsigned int sysctl_sched_child_runs_first __read_mostly;
55 55
56/* 56/*
57 * sys_sched_yield() compat mode 57 * sys_sched_yield() compat mode
@@ -63,13 +63,13 @@ unsigned int __read_mostly sysctl_sched_compat_yield;
63 63
64/* 64/*
65 * SCHED_OTHER wake-up granularity. 65 * SCHED_OTHER wake-up granularity.
66 * (default: 5 msec * (1 + ilog(ncpus)), units: nanoseconds) 66 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
67 * 67 *
68 * This option delays the preemption effects of decoupled workloads 68 * This option delays the preemption effects of decoupled workloads
69 * and reduces their over-scheduling. Synchronous workloads will still 69 * and reduces their over-scheduling. Synchronous workloads will still
70 * have immediate wakeup/sleep latencies. 70 * have immediate wakeup/sleep latencies.
71 */ 71 */
72unsigned int sysctl_sched_wakeup_granularity = 5000000UL; 72unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
73 73
74const_debug unsigned int sysctl_sched_migration_cost = 500000UL; 74const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
75 75
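
The new defaults above are base values before CPU scaling; as the comments say, each is multiplied by (1 + ilog(ncpus)) at boot. A quick worked example (ilog2_u() is a plain C reimplementation, and the scaling rule is taken from the comments above, so treat the exact boot-time numbers as approximate):

#include <stdio.h>

static unsigned int ilog2_u(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned int ncpus = 8;				/* illustrative machine */
	unsigned int factor = 1 + ilog2_u(ncpus);	/* 1 + 3 = 4 */

	printf("latency            : %u ns\n", 5000000U * factor);	/* 20 ms */
	printf("min granularity    : %u ns\n", 1000000U * factor);	/*  4 ms */
	printf("wakeup granularity : %u ns\n", 1000000U * factor);	/*  4 ms */
	return 0;
}
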
@@ -79,11 +79,6 @@ static const struct sched_class fair_sched_class;
79 * CFS operations on generic schedulable entities: 79 * CFS operations on generic schedulable entities:
80 */ 80 */
81 81
82static inline struct task_struct *task_of(struct sched_entity *se)
83{
84 return container_of(se, struct task_struct, se);
85}
86
87#ifdef CONFIG_FAIR_GROUP_SCHED 82#ifdef CONFIG_FAIR_GROUP_SCHED
88 83
89/* cpu runqueue to which this cfs_rq is attached */ 84/* cpu runqueue to which this cfs_rq is attached */
@@ -95,6 +90,14 @@ static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
95/* An entity is a task if it doesn't "own" a runqueue */ 90/* An entity is a task if it doesn't "own" a runqueue */
96#define entity_is_task(se) (!se->my_q) 91#define entity_is_task(se) (!se->my_q)
97 92
93static inline struct task_struct *task_of(struct sched_entity *se)
94{
95#ifdef CONFIG_SCHED_DEBUG
96 WARN_ON_ONCE(!entity_is_task(se));
97#endif
98 return container_of(se, struct task_struct, se);
99}
100
98/* Walk up scheduling entities hierarchy */ 101/* Walk up scheduling entities hierarchy */
99#define for_each_sched_entity(se) \ 102#define for_each_sched_entity(se) \
100 for (; se; se = se->parent) 103 for (; se; se = se->parent)
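
task_of() is container_of() in disguise: it is only meaningful when the scheduling entity is embedded in a task_struct, which is exactly what the new WARN_ON_ONCE(!entity_is_task(se)) guards under CONFIG_SCHED_DEBUG. A self-contained illustration of the idiom (struct names invented, container_of spelled out rather than taken from kernel headers, and assert() standing in for the ratelimited warning):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct entity {
	int on_rq;
	void *my_q;		/* non-NULL would mean the entity owns a runqueue, i.e. a group */
};

struct task {
	int pid;
	struct entity se;	/* entity embedded in the task */
};

#define entity_is_task(se)	(!(se)->my_q)

static struct task *task_of(struct entity *se)
{
	assert(entity_is_task(se));	/* analogue of the WARN_ON_ONCE() debug check */
	return container_of(se, struct task, se);
}

int main(void)
{
	struct task t = { .pid = 42, .se = { 0 } };

	printf("recovered pid = %d\n", task_of(&t.se)->pid);
	return 0;
}
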
@@ -186,7 +189,12 @@ find_matching_se(struct sched_entity **se, struct sched_entity **pse)
186 } 189 }
187} 190}
188 191
189#else /* CONFIG_FAIR_GROUP_SCHED */ 192#else /* !CONFIG_FAIR_GROUP_SCHED */
193
194static inline struct task_struct *task_of(struct sched_entity *se)
195{
196 return container_of(se, struct task_struct, se);
197}
190 198
191static inline struct rq *rq_of(struct cfs_rq *cfs_rq) 199static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
192{ 200{
@@ -537,6 +545,12 @@ update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
537 schedstat_set(se->wait_count, se->wait_count + 1); 545 schedstat_set(se->wait_count, se->wait_count + 1);
538 schedstat_set(se->wait_sum, se->wait_sum + 546 schedstat_set(se->wait_sum, se->wait_sum +
539 rq_of(cfs_rq)->clock - se->wait_start); 547 rq_of(cfs_rq)->clock - se->wait_start);
548#ifdef CONFIG_SCHEDSTATS
549 if (entity_is_task(se)) {
550 trace_sched_stat_wait(task_of(se),
551 rq_of(cfs_rq)->clock - se->wait_start);
552 }
553#endif
540 schedstat_set(se->wait_start, 0); 554 schedstat_set(se->wait_start, 0);
541} 555}
542 556
@@ -628,8 +642,10 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
628 se->sleep_start = 0; 642 se->sleep_start = 0;
629 se->sum_sleep_runtime += delta; 643 se->sum_sleep_runtime += delta;
630 644
631 if (tsk) 645 if (tsk) {
632 account_scheduler_latency(tsk, delta >> 10, 1); 646 account_scheduler_latency(tsk, delta >> 10, 1);
647 trace_sched_stat_sleep(tsk, delta);
648 }
633 } 649 }
634 if (se->block_start) { 650 if (se->block_start) {
635 u64 delta = rq_of(cfs_rq)->clock - se->block_start; 651 u64 delta = rq_of(cfs_rq)->clock - se->block_start;
@@ -644,6 +660,12 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
644 se->sum_sleep_runtime += delta; 660 se->sum_sleep_runtime += delta;
645 661
646 if (tsk) { 662 if (tsk) {
663 if (tsk->in_iowait) {
664 se->iowait_sum += delta;
665 se->iowait_count++;
666 trace_sched_stat_iowait(tsk, delta);
667 }
668
647 /* 669 /*
648 * Blocking time is in units of nanosecs, so shift by 670 * Blocking time is in units of nanosecs, so shift by
649 * 20 to get a milliseconds-range estimation of the 671 * 20 to get a milliseconds-range estimation of the
@@ -705,11 +727,11 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
705 727
706 vruntime -= thresh; 728 vruntime -= thresh;
707 } 729 }
708
709 /* ensure we never gain time by being placed backwards. */
710 vruntime = max_vruntime(se->vruntime, vruntime);
711 } 730 }
712 731
732 /* ensure we never gain time by being placed backwards. */
733 vruntime = max_vruntime(se->vruntime, vruntime);
734
713 se->vruntime = vruntime; 735 se->vruntime = vruntime;
714} 736}
715 737
@@ -1046,17 +1068,21 @@ static void yield_task_fair(struct rq *rq)
1046 * search starts with cpus closest then further out as needed, 1068 * search starts with cpus closest then further out as needed,
1047 * so we always favor a closer, idle cpu. 1069 * so we always favor a closer, idle cpu.
1048 * Domains may include CPUs that are not usable for migration, 1070 * Domains may include CPUs that are not usable for migration,
1049 * hence we need to mask them out (cpu_active_mask) 1071 * hence we need to mask them out (rq->rd->online)
1050 * 1072 *
1051 * Returns the CPU we should wake onto. 1073 * Returns the CPU we should wake onto.
1052 */ 1074 */
1053#if defined(ARCH_HAS_SCHED_WAKE_IDLE) 1075#if defined(ARCH_HAS_SCHED_WAKE_IDLE)
1076
1077#define cpu_rd_active(cpu, rq) cpumask_test_cpu(cpu, rq->rd->online)
1078
1054static int wake_idle(int cpu, struct task_struct *p) 1079static int wake_idle(int cpu, struct task_struct *p)
1055{ 1080{
1056 struct sched_domain *sd; 1081 struct sched_domain *sd;
1057 int i; 1082 int i;
1058 unsigned int chosen_wakeup_cpu; 1083 unsigned int chosen_wakeup_cpu;
1059 int this_cpu; 1084 int this_cpu;
1085 struct rq *task_rq = task_rq(p);
1060 1086
1061 /* 1087 /*
1062 * At POWERSAVINGS_BALANCE_WAKEUP level, if both this_cpu and prev_cpu 1088 * At POWERSAVINGS_BALANCE_WAKEUP level, if both this_cpu and prev_cpu
@@ -1089,10 +1115,10 @@ static int wake_idle(int cpu, struct task_struct *p)
1089 for_each_domain(cpu, sd) { 1115 for_each_domain(cpu, sd) {
1090 if ((sd->flags & SD_WAKE_IDLE) 1116 if ((sd->flags & SD_WAKE_IDLE)
1091 || ((sd->flags & SD_WAKE_IDLE_FAR) 1117 || ((sd->flags & SD_WAKE_IDLE_FAR)
1092 && !task_hot(p, task_rq(p)->clock, sd))) { 1118 && !task_hot(p, task_rq->clock, sd))) {
1093 for_each_cpu_and(i, sched_domain_span(sd), 1119 for_each_cpu_and(i, sched_domain_span(sd),
1094 &p->cpus_allowed) { 1120 &p->cpus_allowed) {
1095 if (cpu_active(i) && idle_cpu(i)) { 1121 if (cpu_rd_active(i, task_rq) && idle_cpu(i)) {
1096 if (i != task_cpu(p)) { 1122 if (i != task_cpu(p)) {
1097 schedstat_inc(p, 1123 schedstat_inc(p,
1098 se.nr_wakeups_idle); 1124 se.nr_wakeups_idle);
@@ -1235,7 +1261,17 @@ wake_affine(struct sched_domain *this_sd, struct rq *this_rq,
1235 tg = task_group(p); 1261 tg = task_group(p);
1236 weight = p->se.load.weight; 1262 weight = p->se.load.weight;
1237 1263
1238 balanced = 100*(tl + effective_load(tg, this_cpu, weight, weight)) <= 1264 /*
1265 * In low-load situations, where prev_cpu is idle and this_cpu is idle
1266 * due to the sync cause above having dropped tl to 0, we'll always have
1267 * an imbalance, but there's really nothing you can do about that, so
1268 * that's good too.
1269 *
1270 * Otherwise check if either cpus are near enough in load to allow this
1271 * task to be woken on this_cpu.
1272 */
1273 balanced = !tl ||
1274 100*(tl + effective_load(tg, this_cpu, weight, weight)) <=
1239 imbalance*(load + effective_load(tg, prev_cpu, 0, weight)); 1275 imbalance*(load + effective_load(tg, prev_cpu, 0, weight));
1240 1276
1241 /* 1277 /*
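
Plugging illustrative numbers into the new balanced test: with tl == 0 (this_cpu idle after the sync discount) the first clause short-circuits and the wakeup counts as balanced. Otherwise, taking for example tl = 1024, load = 2048, both effective_load() terms = 1024 and imbalance = 125 (all values invented for the example), 100*(1024+1024) = 204800 <= 125*(2048+1024) = 384000, so the task may be woken on this_cpu. The same check as a tiny function:

#include <stdio.h>

/* All inputs are illustrative; eff_this/eff_prev stand in for effective_load(). */
static int balanced(unsigned long tl, unsigned long load,
		    unsigned long eff_this, unsigned long eff_prev,
		    unsigned int imbalance)
{
	return !tl ||
	       100 * (tl + eff_this) <= imbalance * (load + eff_prev);
}

int main(void)
{
	printf("idle this_cpu : %d\n", balanced(0, 2048, 1024, 1024, 125));
	printf("loaded example: %d\n", balanced(1024, 2048, 1024, 1024, 125));
	return 0;
}
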
@@ -1278,8 +1314,6 @@ static int select_task_rq_fair(struct task_struct *p, int sync)
1278 this_rq = cpu_rq(this_cpu); 1314 this_rq = cpu_rq(this_cpu);
1279 new_cpu = prev_cpu; 1315 new_cpu = prev_cpu;
1280 1316
1281 if (prev_cpu == this_cpu)
1282 goto out;
1283 /* 1317 /*
1284 * 'this_sd' is the first domain that both 1318 * 'this_sd' is the first domain that both
1285 * this_cpu and prev_cpu are present in: 1319 * this_cpu and prev_cpu are present in:
@@ -1721,6 +1755,8 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
1721 sched_info_queued(p); 1755 sched_info_queued(p);
1722 1756
1723 update_curr(cfs_rq); 1757 update_curr(cfs_rq);
1758 if (curr)
1759 se->vruntime = curr->vruntime;
1724 place_entity(cfs_rq, se, 1); 1760 place_entity(cfs_rq, se, 1);
1725 1761
1726 /* 'curr' will be NULL if the child belongs to a different group */ 1762 /* 'curr' will be NULL if the child belongs to a different group */
diff --git a/kernel/sched_features.h b/kernel/sched_features.h
index 4569bfa7df9b..e2dc63a5815d 100644
--- a/kernel/sched_features.h
+++ b/kernel/sched_features.h
@@ -1,4 +1,4 @@
1SCHED_FEAT(NEW_FAIR_SLEEPERS, 1) 1SCHED_FEAT(NEW_FAIR_SLEEPERS, 0)
2SCHED_FEAT(NORMALIZED_SLEEPER, 0) 2SCHED_FEAT(NORMALIZED_SLEEPER, 0)
3SCHED_FEAT(ADAPTIVE_GRAN, 1) 3SCHED_FEAT(ADAPTIVE_GRAN, 1)
4SCHED_FEAT(WAKEUP_PREEMPT, 1) 4SCHED_FEAT(WAKEUP_PREEMPT, 1)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 3918e01994e0..2eb4bd6a526c 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -3,15 +3,18 @@
3 * policies) 3 * policies)
4 */ 4 */
5 5
6#ifdef CONFIG_RT_GROUP_SCHED
7
8#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
9
6static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se) 10static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
7{ 11{
12#ifdef CONFIG_SCHED_DEBUG
13 WARN_ON_ONCE(!rt_entity_is_task(rt_se));
14#endif
8 return container_of(rt_se, struct task_struct, rt); 15 return container_of(rt_se, struct task_struct, rt);
9} 16}
10 17
11#ifdef CONFIG_RT_GROUP_SCHED
12
13#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
14
15static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) 18static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
16{ 19{
17 return rt_rq->rq; 20 return rt_rq->rq;
@@ -26,6 +29,11 @@ static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
26 29
27#define rt_entity_is_task(rt_se) (1) 30#define rt_entity_is_task(rt_se) (1)
28 31
32static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
33{
34 return container_of(rt_se, struct task_struct, rt);
35}
36
29static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) 37static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
30{ 38{
31 return container_of(rt_rq, struct rq, rt); 39 return container_of(rt_rq, struct rq, rt);
@@ -128,6 +136,11 @@ static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
128 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks); 136 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
129} 137}
130 138
139static inline int has_pushable_tasks(struct rq *rq)
140{
141 return !plist_head_empty(&rq->rt.pushable_tasks);
142}
143
131#else 144#else
132 145
133static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p) 146static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
@@ -602,6 +615,8 @@ static void update_curr_rt(struct rq *rq)
602 curr->se.exec_start = rq->clock; 615 curr->se.exec_start = rq->clock;
603 cpuacct_charge(curr, delta_exec); 616 cpuacct_charge(curr, delta_exec);
604 617
618 sched_rt_avg_update(rq, delta_exec);
619
605 if (!rt_bandwidth_enabled()) 620 if (!rt_bandwidth_enabled())
606 return; 621 return;
607 622
@@ -874,8 +889,6 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
874 889
875 if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1) 890 if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
876 enqueue_pushable_task(rq, p); 891 enqueue_pushable_task(rq, p);
877
878 inc_cpu_load(rq, p->se.load.weight);
879} 892}
880 893
881static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep) 894static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
@@ -886,8 +899,6 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
886 dequeue_rt_entity(rt_se); 899 dequeue_rt_entity(rt_se);
887 900
888 dequeue_pushable_task(rq, p); 901 dequeue_pushable_task(rq, p);
889
890 dec_cpu_load(rq, p->se.load.weight);
891} 902}
892 903
893/* 904/*
@@ -1064,6 +1075,14 @@ static struct task_struct *pick_next_task_rt(struct rq *rq)
1064 if (p) 1075 if (p)
1065 dequeue_pushable_task(rq, p); 1076 dequeue_pushable_task(rq, p);
1066 1077
1078#ifdef CONFIG_SMP
1079 /*
1080 * We detect this state here so that we can avoid taking the RQ
1081 * lock again later if there is no need to push
1082 */
1083 rq->post_schedule = has_pushable_tasks(rq);
1084#endif
1085
1067 return p; 1086 return p;
1068} 1087}
1069 1088
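
Setting rq->post_schedule while pick_next_task_rt() already holds the runqueue lock is a cheap way to remember "there may be tasks to push", so the post-schedule path can skip retaking the lock when there is nothing to do. In sketch form (the names mirror the idea only, not the kernel API):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t rq_lock = PTHREAD_MUTEX_INITIALIZER;
static bool post_work_pending;		/* written only with rq_lock held */

static bool have_pushable_tasks(void)	/* illustrative predicate */
{
	return false;
}

static void pick_next(void)
{
	pthread_mutex_lock(&rq_lock);
	/* ...choose the next task to run... */
	post_work_pending = have_pushable_tasks();	/* remember while we hold the lock */
	pthread_mutex_unlock(&rq_lock);
}

static void post_schedule(void)
{
	if (!post_work_pending)
		return;			/* common case: no extra lock round-trip at all */
	pthread_mutex_lock(&rq_lock);
	/* ...push tasks away... */
	post_work_pending = false;
	pthread_mutex_unlock(&rq_lock);
}

int main(void)
{
	pick_next();
	post_schedule();
	return 0;
}
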
@@ -1162,13 +1181,6 @@ static int find_lowest_rq(struct task_struct *task)
1162 return -1; /* No targets found */ 1181 return -1; /* No targets found */
1163 1182
1164 /* 1183 /*
1165 * Only consider CPUs that are usable for migration.
1166 * I guess we might want to change cpupri_find() to ignore those
1167 * in the first place.
1168 */
1169 cpumask_and(lowest_mask, lowest_mask, cpu_active_mask);
1170
1171 /*
1172 * At this point we have built a mask of cpus representing the 1184 * At this point we have built a mask of cpus representing the
1173 * lowest priority tasks in the system. Now we want to elect 1185 * lowest priority tasks in the system. Now we want to elect
1174 * the best one based on our affinity and topology. 1186 * the best one based on our affinity and topology.
@@ -1262,11 +1274,6 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1262 return lowest_rq; 1274 return lowest_rq;
1263} 1275}
1264 1276
1265static inline int has_pushable_tasks(struct rq *rq)
1266{
1267 return !plist_head_empty(&rq->rt.pushable_tasks);
1268}
1269
1270static struct task_struct *pick_next_pushable_task(struct rq *rq) 1277static struct task_struct *pick_next_pushable_task(struct rq *rq)
1271{ 1278{
1272 struct task_struct *p; 1279 struct task_struct *p;
@@ -1466,23 +1473,9 @@ static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
1466 pull_rt_task(rq); 1473 pull_rt_task(rq);
1467} 1474}
1468 1475
1469/*
1470 * assumes rq->lock is held
1471 */
1472static int needs_post_schedule_rt(struct rq *rq)
1473{
1474 return has_pushable_tasks(rq);
1475}
1476
1477static void post_schedule_rt(struct rq *rq) 1476static void post_schedule_rt(struct rq *rq)
1478{ 1477{
1479 /*
1480 * This is only called if needs_post_schedule_rt() indicates that
1481 * we need to push tasks away
1482 */
1483 spin_lock_irq(&rq->lock);
1484 push_rt_tasks(rq); 1478 push_rt_tasks(rq);
1485 spin_unlock_irq(&rq->lock);
1486} 1479}
1487 1480
1488/* 1481/*
@@ -1758,7 +1751,6 @@ static const struct sched_class rt_sched_class = {
1758 .rq_online = rq_online_rt, 1751 .rq_online = rq_online_rt,
1759 .rq_offline = rq_offline_rt, 1752 .rq_offline = rq_offline_rt,
1760 .pre_schedule = pre_schedule_rt, 1753 .pre_schedule = pre_schedule_rt,
1761 .needs_post_schedule = needs_post_schedule_rt,
1762 .post_schedule = post_schedule_rt, 1754 .post_schedule = post_schedule_rt,
1763 .task_wake_up = task_wake_up_rt, 1755 .task_wake_up = task_wake_up_rt,
1764 .switched_from = switched_from_rt, 1756 .switched_from = switched_from_rt,
diff --git a/kernel/softirq.c b/kernel/softirq.c
index eb5e131a0485..7db25067cd2d 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -227,7 +227,7 @@ restart:
227 preempt_count() = prev_count; 227 preempt_count() = prev_count;
228 } 228 }
229 229
230 rcu_bh_qsctr_inc(cpu); 230 rcu_bh_qs(cpu);
231 } 231 }
232 h++; 232 h++;
233 pending >>= 1; 233 pending >>= 1;
@@ -721,7 +721,7 @@ static int ksoftirqd(void * __bind_cpu)
721 preempt_enable_no_resched(); 721 preempt_enable_no_resched();
722 cond_resched(); 722 cond_resched();
723 preempt_disable(); 723 preempt_disable();
724 rcu_qsctr_inc((long)__bind_cpu); 724 rcu_sched_qs((long)__bind_cpu);
725 } 725 }
726 preempt_enable(); 726 preempt_enable();
727 set_current_state(TASK_INTERRUPTIBLE); 727 set_current_state(TASK_INTERRUPTIBLE);
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index 7932653c4ebd..5ddab730cb2f 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -21,44 +21,29 @@
21#include <linux/debug_locks.h> 21#include <linux/debug_locks.h>
22#include <linux/module.h> 22#include <linux/module.h>
23 23
24#ifndef _spin_trylock
24int __lockfunc _spin_trylock(spinlock_t *lock) 25int __lockfunc _spin_trylock(spinlock_t *lock)
25{ 26{
26 preempt_disable(); 27 return __spin_trylock(lock);
27 if (_raw_spin_trylock(lock)) {
28 spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
29 return 1;
30 }
31
32 preempt_enable();
33 return 0;
34} 28}
35EXPORT_SYMBOL(_spin_trylock); 29EXPORT_SYMBOL(_spin_trylock);
30#endif
36 31
32#ifndef _read_trylock
37int __lockfunc _read_trylock(rwlock_t *lock) 33int __lockfunc _read_trylock(rwlock_t *lock)
38{ 34{
39 preempt_disable(); 35 return __read_trylock(lock);
40 if (_raw_read_trylock(lock)) {
41 rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
42 return 1;
43 }
44
45 preempt_enable();
46 return 0;
47} 36}
48EXPORT_SYMBOL(_read_trylock); 37EXPORT_SYMBOL(_read_trylock);
38#endif
49 39
40#ifndef _write_trylock
50int __lockfunc _write_trylock(rwlock_t *lock) 41int __lockfunc _write_trylock(rwlock_t *lock)
51{ 42{
52 preempt_disable(); 43 return __write_trylock(lock);
53 if (_raw_write_trylock(lock)) {
54 rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
55 return 1;
56 }
57
58 preempt_enable();
59 return 0;
60} 44}
61EXPORT_SYMBOL(_write_trylock); 45EXPORT_SYMBOL(_write_trylock);
46#endif
62 47
63/* 48/*
64 * If lockdep is enabled then we use the non-preemption spin-ops 49 * If lockdep is enabled then we use the non-preemption spin-ops
@@ -67,132 +52,101 @@ EXPORT_SYMBOL(_write_trylock);
67 */ 52 */
68#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) 53#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
69 54
55#ifndef _read_lock
70void __lockfunc _read_lock(rwlock_t *lock) 56void __lockfunc _read_lock(rwlock_t *lock)
71{ 57{
72 preempt_disable(); 58 __read_lock(lock);
73 rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
74 LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
75} 59}
76EXPORT_SYMBOL(_read_lock); 60EXPORT_SYMBOL(_read_lock);
61#endif
77 62
63#ifndef _spin_lock_irqsave
78unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock) 64unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
79{ 65{
80 unsigned long flags; 66 return __spin_lock_irqsave(lock);
81
82 local_irq_save(flags);
83 preempt_disable();
84 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
85 /*
86 * On lockdep we dont want the hand-coded irq-enable of
87 * _raw_spin_lock_flags() code, because lockdep assumes
88 * that interrupts are not re-enabled during lock-acquire:
89 */
90#ifdef CONFIG_LOCKDEP
91 LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
92#else
93 _raw_spin_lock_flags(lock, &flags);
94#endif
95 return flags;
96} 67}
97EXPORT_SYMBOL(_spin_lock_irqsave); 68EXPORT_SYMBOL(_spin_lock_irqsave);
69#endif
98 70
71#ifndef _spin_lock_irq
99void __lockfunc _spin_lock_irq(spinlock_t *lock) 72void __lockfunc _spin_lock_irq(spinlock_t *lock)
100{ 73{
101 local_irq_disable(); 74 __spin_lock_irq(lock);
102 preempt_disable();
103 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
104 LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
105} 75}
106EXPORT_SYMBOL(_spin_lock_irq); 76EXPORT_SYMBOL(_spin_lock_irq);
77#endif
107 78
79#ifndef _spin_lock_bh
108void __lockfunc _spin_lock_bh(spinlock_t *lock) 80void __lockfunc _spin_lock_bh(spinlock_t *lock)
109{ 81{
110 local_bh_disable(); 82 __spin_lock_bh(lock);
111 preempt_disable();
112 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
113 LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
114} 83}
115EXPORT_SYMBOL(_spin_lock_bh); 84EXPORT_SYMBOL(_spin_lock_bh);
85#endif
116 86
87#ifndef _read_lock_irqsave
117unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock) 88unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
118{ 89{
119 unsigned long flags; 90 return __read_lock_irqsave(lock);
120
121 local_irq_save(flags);
122 preempt_disable();
123 rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
124 LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock,
125 _raw_read_lock_flags, &flags);
126 return flags;
127} 91}
128EXPORT_SYMBOL(_read_lock_irqsave); 92EXPORT_SYMBOL(_read_lock_irqsave);
93#endif
129 94
95#ifndef _read_lock_irq
130void __lockfunc _read_lock_irq(rwlock_t *lock) 96void __lockfunc _read_lock_irq(rwlock_t *lock)
131{ 97{
132 local_irq_disable(); 98 __read_lock_irq(lock);
133 preempt_disable();
134 rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
135 LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
136} 99}
137EXPORT_SYMBOL(_read_lock_irq); 100EXPORT_SYMBOL(_read_lock_irq);
101#endif
138 102
103#ifndef _read_lock_bh
139void __lockfunc _read_lock_bh(rwlock_t *lock) 104void __lockfunc _read_lock_bh(rwlock_t *lock)
140{ 105{
141 local_bh_disable(); 106 __read_lock_bh(lock);
142 preempt_disable();
143 rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
144 LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
145} 107}
146EXPORT_SYMBOL(_read_lock_bh); 108EXPORT_SYMBOL(_read_lock_bh);
109#endif
147 110
111#ifndef _write_lock_irqsave
148unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock) 112unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
149{ 113{
150 unsigned long flags; 114 return __write_lock_irqsave(lock);
151
152 local_irq_save(flags);
153 preempt_disable();
154 rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
155 LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock,
156 _raw_write_lock_flags, &flags);
157 return flags;
158} 115}
159EXPORT_SYMBOL(_write_lock_irqsave); 116EXPORT_SYMBOL(_write_lock_irqsave);
117#endif
160 118
119#ifndef _write_lock_irq
161void __lockfunc _write_lock_irq(rwlock_t *lock) 120void __lockfunc _write_lock_irq(rwlock_t *lock)
162{ 121{
163 local_irq_disable(); 122 __write_lock_irq(lock);
164 preempt_disable();
165 rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
166 LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
167} 123}
168EXPORT_SYMBOL(_write_lock_irq); 124EXPORT_SYMBOL(_write_lock_irq);
125#endif
169 126
127#ifndef _write_lock_bh
170void __lockfunc _write_lock_bh(rwlock_t *lock) 128void __lockfunc _write_lock_bh(rwlock_t *lock)
171{ 129{
172 local_bh_disable(); 130 __write_lock_bh(lock);
173 preempt_disable();
174 rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
175 LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
176} 131}
177EXPORT_SYMBOL(_write_lock_bh); 132EXPORT_SYMBOL(_write_lock_bh);
133#endif
178 134
135#ifndef _spin_lock
179void __lockfunc _spin_lock(spinlock_t *lock) 136void __lockfunc _spin_lock(spinlock_t *lock)
180{ 137{
181 preempt_disable(); 138 __spin_lock(lock);
182 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
183 LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
184} 139}
185
186EXPORT_SYMBOL(_spin_lock); 140EXPORT_SYMBOL(_spin_lock);
141#endif
187 142
143#ifndef _write_lock
188void __lockfunc _write_lock(rwlock_t *lock) 144void __lockfunc _write_lock(rwlock_t *lock)
189{ 145{
190 preempt_disable(); 146 __write_lock(lock);
191 rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
192 LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
193} 147}
194
195EXPORT_SYMBOL(_write_lock); 148EXPORT_SYMBOL(_write_lock);
149#endif
196 150
197#else /* CONFIG_PREEMPT: */ 151#else /* CONFIG_PREEMPT: */
198 152
@@ -318,125 +272,109 @@ EXPORT_SYMBOL(_spin_lock_nest_lock);
318 272
319#endif 273#endif
320 274
275#ifndef _spin_unlock
321void __lockfunc _spin_unlock(spinlock_t *lock) 276void __lockfunc _spin_unlock(spinlock_t *lock)
322{ 277{
323 spin_release(&lock->dep_map, 1, _RET_IP_); 278 __spin_unlock(lock);
324 _raw_spin_unlock(lock);
325 preempt_enable();
326} 279}
327EXPORT_SYMBOL(_spin_unlock); 280EXPORT_SYMBOL(_spin_unlock);
281#endif
328 282
283#ifndef _write_unlock
329void __lockfunc _write_unlock(rwlock_t *lock) 284void __lockfunc _write_unlock(rwlock_t *lock)
330{ 285{
331 rwlock_release(&lock->dep_map, 1, _RET_IP_); 286 __write_unlock(lock);
332 _raw_write_unlock(lock);
333 preempt_enable();
334} 287}
335EXPORT_SYMBOL(_write_unlock); 288EXPORT_SYMBOL(_write_unlock);
289#endif
336 290
291#ifndef _read_unlock
337void __lockfunc _read_unlock(rwlock_t *lock) 292void __lockfunc _read_unlock(rwlock_t *lock)
338{ 293{
339 rwlock_release(&lock->dep_map, 1, _RET_IP_); 294 __read_unlock(lock);
340 _raw_read_unlock(lock);
341 preempt_enable();
342} 295}
343EXPORT_SYMBOL(_read_unlock); 296EXPORT_SYMBOL(_read_unlock);
297#endif
344 298
299#ifndef _spin_unlock_irqrestore
345void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) 300void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
346{ 301{
347 spin_release(&lock->dep_map, 1, _RET_IP_); 302 __spin_unlock_irqrestore(lock, flags);
348 _raw_spin_unlock(lock);
349 local_irq_restore(flags);
350 preempt_enable();
351} 303}
352EXPORT_SYMBOL(_spin_unlock_irqrestore); 304EXPORT_SYMBOL(_spin_unlock_irqrestore);
305#endif
353 306
307#ifndef _spin_unlock_irq
354void __lockfunc _spin_unlock_irq(spinlock_t *lock) 308void __lockfunc _spin_unlock_irq(spinlock_t *lock)
355{ 309{
356 spin_release(&lock->dep_map, 1, _RET_IP_); 310 __spin_unlock_irq(lock);
357 _raw_spin_unlock(lock);
358 local_irq_enable();
359 preempt_enable();
360} 311}
361EXPORT_SYMBOL(_spin_unlock_irq); 312EXPORT_SYMBOL(_spin_unlock_irq);
313#endif
362 314
315#ifndef _spin_unlock_bh
363void __lockfunc _spin_unlock_bh(spinlock_t *lock) 316void __lockfunc _spin_unlock_bh(spinlock_t *lock)
364{ 317{
365 spin_release(&lock->dep_map, 1, _RET_IP_); 318 __spin_unlock_bh(lock);
366 _raw_spin_unlock(lock);
367 preempt_enable_no_resched();
368 local_bh_enable_ip((unsigned long)__builtin_return_address(0));
369} 319}
370EXPORT_SYMBOL(_spin_unlock_bh); 320EXPORT_SYMBOL(_spin_unlock_bh);
321#endif
371 322
323#ifndef _read_unlock_irqrestore
372void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) 324void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
373{ 325{
374 rwlock_release(&lock->dep_map, 1, _RET_IP_); 326 __read_unlock_irqrestore(lock, flags);
375 _raw_read_unlock(lock);
376 local_irq_restore(flags);
377 preempt_enable();
378} 327}
379EXPORT_SYMBOL(_read_unlock_irqrestore); 328EXPORT_SYMBOL(_read_unlock_irqrestore);
329#endif
380 330
331#ifndef _read_unlock_irq
381void __lockfunc _read_unlock_irq(rwlock_t *lock) 332void __lockfunc _read_unlock_irq(rwlock_t *lock)
382{ 333{
383 rwlock_release(&lock->dep_map, 1, _RET_IP_); 334 __read_unlock_irq(lock);
384 _raw_read_unlock(lock);
385 local_irq_enable();
386 preempt_enable();
387} 335}
388EXPORT_SYMBOL(_read_unlock_irq); 336EXPORT_SYMBOL(_read_unlock_irq);
337#endif
389 338
339#ifndef _read_unlock_bh
390void __lockfunc _read_unlock_bh(rwlock_t *lock) 340void __lockfunc _read_unlock_bh(rwlock_t *lock)
391{ 341{
392 rwlock_release(&lock->dep_map, 1, _RET_IP_); 342 __read_unlock_bh(lock);
393 _raw_read_unlock(lock);
394 preempt_enable_no_resched();
395 local_bh_enable_ip((unsigned long)__builtin_return_address(0));
396} 343}
397EXPORT_SYMBOL(_read_unlock_bh); 344EXPORT_SYMBOL(_read_unlock_bh);
345#endif
398 346
347#ifndef _write_unlock_irqrestore
399void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) 348void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
400{ 349{
401 rwlock_release(&lock->dep_map, 1, _RET_IP_); 350 __write_unlock_irqrestore(lock, flags);
402 _raw_write_unlock(lock);
403 local_irq_restore(flags);
404 preempt_enable();
405} 351}
406EXPORT_SYMBOL(_write_unlock_irqrestore); 352EXPORT_SYMBOL(_write_unlock_irqrestore);
353#endif
407 354
355#ifndef _write_unlock_irq
408void __lockfunc _write_unlock_irq(rwlock_t *lock) 356void __lockfunc _write_unlock_irq(rwlock_t *lock)
409{ 357{
410 rwlock_release(&lock->dep_map, 1, _RET_IP_); 358 __write_unlock_irq(lock);
411 _raw_write_unlock(lock);
412 local_irq_enable();
413 preempt_enable();
414} 359}
415EXPORT_SYMBOL(_write_unlock_irq); 360EXPORT_SYMBOL(_write_unlock_irq);
361#endif
416 362
363#ifndef _write_unlock_bh
417void __lockfunc _write_unlock_bh(rwlock_t *lock) 364void __lockfunc _write_unlock_bh(rwlock_t *lock)
418{ 365{
419 rwlock_release(&lock->dep_map, 1, _RET_IP_); 366 __write_unlock_bh(lock);
420 _raw_write_unlock(lock);
421 preempt_enable_no_resched();
422 local_bh_enable_ip((unsigned long)__builtin_return_address(0));
423} 367}
424EXPORT_SYMBOL(_write_unlock_bh); 368EXPORT_SYMBOL(_write_unlock_bh);
369#endif
425 370
371#ifndef _spin_trylock_bh
426int __lockfunc _spin_trylock_bh(spinlock_t *lock) 372int __lockfunc _spin_trylock_bh(spinlock_t *lock)
427{ 373{
428 local_bh_disable(); 374 return __spin_trylock_bh(lock);
429 preempt_disable();
430 if (_raw_spin_trylock(lock)) {
431 spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
432 return 1;
433 }
434
435 preempt_enable_no_resched();
436 local_bh_enable_ip((unsigned long)__builtin_return_address(0));
437 return 0;
438} 375}
439EXPORT_SYMBOL(_spin_trylock_bh); 376EXPORT_SYMBOL(_spin_trylock_bh);
377#endif
440 378
441notrace int in_lock_functions(unsigned long addr) 379notrace int in_lock_functions(unsigned long addr)
442{ 380{
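The spinlock.c rewrite above moves each lock body into a __spin_lock()-style inline and wraps every out-of-line wrapper in #ifndef, so an architecture that pre-defines the symbol can supply its own (typically inlined) variant while everyone else keeps the generic one. A compilable sketch of that override pattern, with invented names:

#include <stdio.h>

/* Generic inline body, shared by every "port". */
static inline int __demo_trylock(int *lock)
{
	return __sync_bool_compare_and_swap(lock, 0, 1);
}

/*
 * A port that wants an inlined fast path would do, in its header:
 *	#define demo_trylock(l)	__demo_trylock(l)
 * and this out-of-line definition would then not be emitted at all.
 */
#ifndef demo_trylock
int demo_trylock(int *lock)
{
	return __demo_trylock(lock);
}
#endif

int main(void)
{
	int lock = 0;

	printf("first try: %d, second try: %d\n",
	       demo_trylock(&lock), demo_trylock(&lock));
	return 0;
}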
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 58be76017fd0..3125cff1c570 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -49,7 +49,6 @@
49#include <linux/acpi.h> 49#include <linux/acpi.h>
50#include <linux/reboot.h> 50#include <linux/reboot.h>
51#include <linux/ftrace.h> 51#include <linux/ftrace.h>
52#include <linux/security.h>
53#include <linux/slow-work.h> 52#include <linux/slow-work.h>
54#include <linux/perf_counter.h> 53#include <linux/perf_counter.h>
55 54
@@ -246,6 +245,14 @@ static int max_wakeup_granularity_ns = NSEC_PER_SEC; /* 1 second */
246#endif 245#endif
247 246
248static struct ctl_table kern_table[] = { 247static struct ctl_table kern_table[] = {
248 {
249 .ctl_name = CTL_UNNUMBERED,
250 .procname = "sched_child_runs_first",
251 .data = &sysctl_sched_child_runs_first,
252 .maxlen = sizeof(unsigned int),
253 .mode = 0644,
254 .proc_handler = &proc_dointvec,
255 },
249#ifdef CONFIG_SCHED_DEBUG 256#ifdef CONFIG_SCHED_DEBUG
250 { 257 {
251 .ctl_name = CTL_UNNUMBERED, 258 .ctl_name = CTL_UNNUMBERED,
@@ -300,14 +307,6 @@ static struct ctl_table kern_table[] = {
300 }, 307 },
301 { 308 {
302 .ctl_name = CTL_UNNUMBERED, 309 .ctl_name = CTL_UNNUMBERED,
303 .procname = "sched_child_runs_first",
304 .data = &sysctl_sched_child_runs_first,
305 .maxlen = sizeof(unsigned int),
306 .mode = 0644,
307 .proc_handler = &proc_dointvec,
308 },
309 {
310 .ctl_name = CTL_UNNUMBERED,
311 .procname = "sched_features", 310 .procname = "sched_features",
312 .data = &sysctl_sched_features, 311 .data = &sysctl_sched_features,
313 .maxlen = sizeof(unsigned int), 312 .maxlen = sizeof(unsigned int),
@@ -332,6 +331,14 @@ static struct ctl_table kern_table[] = {
332 }, 331 },
333 { 332 {
334 .ctl_name = CTL_UNNUMBERED, 333 .ctl_name = CTL_UNNUMBERED,
334 .procname = "sched_time_avg",
335 .data = &sysctl_sched_time_avg,
336 .maxlen = sizeof(unsigned int),
337 .mode = 0644,
338 .proc_handler = &proc_dointvec,
339 },
340 {
341 .ctl_name = CTL_UNNUMBERED,
335 .procname = "timer_migration", 342 .procname = "timer_migration",
336 .data = &sysctl_timer_migration, 343 .data = &sysctl_timer_migration,
337 .maxlen = sizeof(unsigned int), 344 .maxlen = sizeof(unsigned int),
diff --git a/kernel/timer.c b/kernel/timer.c
index a7f07d5a6241..a3d25f415019 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1156,8 +1156,7 @@ void update_process_times(int user_tick)
1156 /* Note: this timer irq context must be accounted for as well. */ 1156 /* Note: this timer irq context must be accounted for as well. */
1157 account_process_tick(p, user_tick); 1157 account_process_tick(p, user_tick);
1158 run_local_timers(); 1158 run_local_timers();
1159 if (rcu_pending(cpu)) 1159 rcu_check_callbacks(cpu, user_tick);
1160 rcu_check_callbacks(cpu, user_tick);
1161 printk_tick(); 1160 printk_tick();
1162 scheduler_tick(); 1161 scheduler_tick();
1163 run_posix_cpu_timers(p); 1162 run_posix_cpu_timers(p);
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 019f380fd764..1ea0d1234f4a 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -41,7 +41,7 @@ config HAVE_FTRACE_MCOUNT_RECORD
41config HAVE_HW_BRANCH_TRACER 41config HAVE_HW_BRANCH_TRACER
42 bool 42 bool
43 43
44config HAVE_FTRACE_SYSCALLS 44config HAVE_SYSCALL_TRACEPOINTS
45 bool 45 bool
46 46
47config TRACER_MAX_TRACE 47config TRACER_MAX_TRACE
@@ -60,9 +60,14 @@ config EVENT_TRACING
60 bool 60 bool
61 61
62config CONTEXT_SWITCH_TRACER 62config CONTEXT_SWITCH_TRACER
63 select MARKERS
64 bool 63 bool
65 64
65config RING_BUFFER_ALLOW_SWAP
66 bool
67 help
68 Allow the use of ring_buffer_swap_cpu.
69 Adds a very slight overhead to tracing when enabled.
70
66# All tracer options should select GENERIC_TRACER. For those options that are 71# All tracer options should select GENERIC_TRACER. For those options that are
67# enabled by all tracers (context switch and event tracer) they select TRACING. 72# enabled by all tracers (context switch and event tracer) they select TRACING.
68# This allows those options to appear when no other tracer is selected. But the 73# This allows those options to appear when no other tracer is selected. But the
@@ -147,6 +152,7 @@ config IRQSOFF_TRACER
147 select TRACE_IRQFLAGS 152 select TRACE_IRQFLAGS
148 select GENERIC_TRACER 153 select GENERIC_TRACER
149 select TRACER_MAX_TRACE 154 select TRACER_MAX_TRACE
155 select RING_BUFFER_ALLOW_SWAP
150 help 156 help
151 This option measures the time spent in irqs-off critical 157 This option measures the time spent in irqs-off critical
152 sections, with microsecond accuracy. 158 sections, with microsecond accuracy.
@@ -168,6 +174,7 @@ config PREEMPT_TRACER
168 depends on PREEMPT 174 depends on PREEMPT
169 select GENERIC_TRACER 175 select GENERIC_TRACER
170 select TRACER_MAX_TRACE 176 select TRACER_MAX_TRACE
177 select RING_BUFFER_ALLOW_SWAP
171 help 178 help
172 This option measures the time spent in preemption off critical 179 This option measures the time spent in preemption off critical
173 sections, with microsecond accuracy. 180 sections, with microsecond accuracy.
@@ -211,7 +218,7 @@ config ENABLE_DEFAULT_TRACERS
211 218
212config FTRACE_SYSCALLS 219config FTRACE_SYSCALLS
213 bool "Trace syscalls" 220 bool "Trace syscalls"
214 depends on HAVE_FTRACE_SYSCALLS 221 depends on HAVE_SYSCALL_TRACEPOINTS
215 select GENERIC_TRACER 222 select GENERIC_TRACER
216 select KALLSYMS 223 select KALLSYMS
217 help 224 help
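RING_BUFFER_ALLOW_SWAP is a hidden bool selected only by the two latency tracers, so the per-CPU swap path carries its small overhead only when a tracer actually needs it. On the C side such an option is consumed with an #ifdef; the sketch below uses the function named in the help text, but its prototype and placeholder body are assumptions, not taken from this patch.

#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
/* assumed prototype; body reduced to a placeholder */
int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
			 struct ring_buffer *buffer_b, int cpu)
{
	/* exchange the per-cpu buffers of buffer_a and buffer_b */
	return 0;
}
#endif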
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 7a34cb563fec..3eb159c277c8 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -65,13 +65,15 @@ static void trace_note(struct blk_trace *bt, pid_t pid, int action,
65{ 65{
66 struct blk_io_trace *t; 66 struct blk_io_trace *t;
67 struct ring_buffer_event *event = NULL; 67 struct ring_buffer_event *event = NULL;
68 struct ring_buffer *buffer = NULL;
68 int pc = 0; 69 int pc = 0;
69 int cpu = smp_processor_id(); 70 int cpu = smp_processor_id();
70 bool blk_tracer = blk_tracer_enabled; 71 bool blk_tracer = blk_tracer_enabled;
71 72
72 if (blk_tracer) { 73 if (blk_tracer) {
74 buffer = blk_tr->buffer;
73 pc = preempt_count(); 75 pc = preempt_count();
74 event = trace_buffer_lock_reserve(blk_tr, TRACE_BLK, 76 event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
75 sizeof(*t) + len, 77 sizeof(*t) + len,
76 0, pc); 78 0, pc);
77 if (!event) 79 if (!event)
@@ -96,7 +98,7 @@ record_it:
96 memcpy((void *) t + sizeof(*t), data, len); 98 memcpy((void *) t + sizeof(*t), data, len);
97 99
98 if (blk_tracer) 100 if (blk_tracer)
99 trace_buffer_unlock_commit(blk_tr, event, 0, pc); 101 trace_buffer_unlock_commit(buffer, event, 0, pc);
100 } 102 }
101} 103}
102 104
@@ -179,6 +181,7 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
179{ 181{
180 struct task_struct *tsk = current; 182 struct task_struct *tsk = current;
181 struct ring_buffer_event *event = NULL; 183 struct ring_buffer_event *event = NULL;
184 struct ring_buffer *buffer = NULL;
182 struct blk_io_trace *t; 185 struct blk_io_trace *t;
183 unsigned long flags = 0; 186 unsigned long flags = 0;
184 unsigned long *sequence; 187 unsigned long *sequence;
@@ -204,8 +207,9 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
204 if (blk_tracer) { 207 if (blk_tracer) {
205 tracing_record_cmdline(current); 208 tracing_record_cmdline(current);
206 209
210 buffer = blk_tr->buffer;
207 pc = preempt_count(); 211 pc = preempt_count();
208 event = trace_buffer_lock_reserve(blk_tr, TRACE_BLK, 212 event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
209 sizeof(*t) + pdu_len, 213 sizeof(*t) + pdu_len,
210 0, pc); 214 0, pc);
211 if (!event) 215 if (!event)
@@ -252,7 +256,7 @@ record_it:
252 memcpy((void *) t + sizeof(*t), pdu_data, pdu_len); 256 memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);
253 257
254 if (blk_tracer) { 258 if (blk_tracer) {
255 trace_buffer_unlock_commit(blk_tr, event, 0, pc); 259 trace_buffer_unlock_commit(buffer, event, 0, pc);
256 return; 260 return;
257 } 261 }
258 } 262 }
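Both blktrace.c hunks make the same change: resolve blk_tr->buffer once and hand the ring buffer, rather than the trace_array, to trace_buffer_lock_reserve()/trace_buffer_unlock_commit(). Condensed, the reserve/fill/commit pattern used above looks like this; error paths and most fields are trimmed, and ring_buffer_event_data() is the usual accessor assumed here rather than shown in the hunk.

struct ring_buffer *buffer = blk_tr->buffer;	/* resolved once per event */
struct ring_buffer_event *event;
struct blk_io_trace *t;
int pc = preempt_count();

event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
				  sizeof(*t) + len, 0, pc);
if (!event)
	return;
t = ring_buffer_event_data(event);
/* ... fill *t, then copy the pdu/message payload after it ... */
trace_buffer_unlock_commit(buffer, event, 0, pc);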
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 25edd5cc5935..8c804e24f96f 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1016,71 +1016,35 @@ static int
1016__ftrace_replace_code(struct dyn_ftrace *rec, int enable) 1016__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
1017{ 1017{
1018 unsigned long ftrace_addr; 1018 unsigned long ftrace_addr;
1019 unsigned long ip, fl; 1019 unsigned long flag = 0UL;
1020 1020
1021 ftrace_addr = (unsigned long)FTRACE_ADDR; 1021 ftrace_addr = (unsigned long)FTRACE_ADDR;
1022 1022
1023 ip = rec->ip;
1024
1025 /* 1023 /*
1026 * If this record is not to be traced and 1024 * If this record is not to be traced or we want to disable it,
1027 * it is not enabled then do nothing. 1025 * then disable it.
1028 * 1026 *
1029 * If this record is not to be traced and 1027 * If we want to enable it and filtering is off, then enable it.
1030 * it is enabled then disable it.
1031 * 1028 *
1029 * If we want to enable it and filtering is on, enable it only if
1030 * it's filtered
1032 */ 1031 */
1033 if (rec->flags & FTRACE_FL_NOTRACE) { 1032 if (enable && !(rec->flags & FTRACE_FL_NOTRACE)) {
1034 if (rec->flags & FTRACE_FL_ENABLED) 1033 if (!ftrace_filtered || (rec->flags & FTRACE_FL_FILTER))
1035 rec->flags &= ~FTRACE_FL_ENABLED; 1034 flag = FTRACE_FL_ENABLED;
1036 else 1035 }
1037 return 0;
1038
1039 } else if (ftrace_filtered && enable) {
1040 /*
1041 * Filtering is on:
1042 */
1043
1044 fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);
1045
1046 /* Record is filtered and enabled, do nothing */
1047 if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
1048 return 0;
1049
1050 /* Record is not filtered or enabled, do nothing */
1051 if (!fl)
1052 return 0;
1053
1054 /* Record is not filtered but enabled, disable it */
1055 if (fl == FTRACE_FL_ENABLED)
1056 rec->flags &= ~FTRACE_FL_ENABLED;
1057 else
1058 /* Otherwise record is filtered but not enabled, enable it */
1059 rec->flags |= FTRACE_FL_ENABLED;
1060 } else {
1061 /* Disable or not filtered */
1062
1063 if (enable) {
1064 /* if record is enabled, do nothing */
1065 if (rec->flags & FTRACE_FL_ENABLED)
1066 return 0;
1067
1068 rec->flags |= FTRACE_FL_ENABLED;
1069
1070 } else {
1071 1036
1072 /* if record is not enabled, do nothing */ 1037 /* If the state of this record hasn't changed, then do nothing */
1073 if (!(rec->flags & FTRACE_FL_ENABLED)) 1038 if ((rec->flags & FTRACE_FL_ENABLED) == flag)
1074 return 0; 1039 return 0;
1075 1040
1076 rec->flags &= ~FTRACE_FL_ENABLED; 1041 if (flag) {
1077 } 1042 rec->flags |= FTRACE_FL_ENABLED;
1043 return ftrace_make_call(rec, ftrace_addr);
1078 } 1044 }
1079 1045
1080 if (rec->flags & FTRACE_FL_ENABLED) 1046 rec->flags &= ~FTRACE_FL_ENABLED;
1081 return ftrace_make_call(rec, ftrace_addr); 1047 return ftrace_make_nop(NULL, rec, ftrace_addr);
1082 else
1083 return ftrace_make_nop(NULL, rec, ftrace_addr);
1084} 1048}
1085 1049
1086static void ftrace_replace_code(int enable) 1050static void ftrace_replace_code(int enable)
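The rewritten __ftrace_replace_code() above collapses the old case-by-case analysis into one computed flag: decide what FTRACE_FL_ENABLED should become, return early if the record already matches, and otherwise patch the call site to a call or a nop. A small stand-alone rendering of that decision, with made-up flag values, which can be compiled and run to check the table:

#include <stdio.h>

#define FL_FILTER	0x1
#define FL_ENABLED	0x2
#define FL_NOTRACE	0x4

/* returns 0 = leave alone, 1 = patch in the call, -1 = patch in a nop */
static int replace_decision(unsigned flags, int enable, int filtering_on)
{
	unsigned flag = 0;

	if (enable && !(flags & FL_NOTRACE)) {
		if (!filtering_on || (flags & FL_FILTER))
			flag = FL_ENABLED;
	}
	if ((flags & FL_ENABLED) == flag)	/* state unchanged: nothing to do */
		return 0;
	return flag ? 1 : -1;
}

int main(void)
{
	/* filtered, disabled record; enable requested with filtering on -> call */
	printf("%d\n", replace_decision(FL_FILTER, 1, 1));
	/* enabled record; disable requested -> nop */
	printf("%d\n", replace_decision(FL_ENABLED, 0, 0));
	return 0;
}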
@@ -1375,7 +1339,6 @@ struct ftrace_iterator {
1375 unsigned flags; 1339 unsigned flags;
1376 unsigned char buffer[FTRACE_BUFF_MAX+1]; 1340 unsigned char buffer[FTRACE_BUFF_MAX+1];
1377 unsigned buffer_idx; 1341 unsigned buffer_idx;
1378 unsigned filtered;
1379}; 1342};
1380 1343
1381static void * 1344static void *
@@ -1438,18 +1401,13 @@ static int t_hash_show(struct seq_file *m, void *v)
1438{ 1401{
1439 struct ftrace_func_probe *rec; 1402 struct ftrace_func_probe *rec;
1440 struct hlist_node *hnd = v; 1403 struct hlist_node *hnd = v;
1441 char str[KSYM_SYMBOL_LEN];
1442 1404
1443 rec = hlist_entry(hnd, struct ftrace_func_probe, node); 1405 rec = hlist_entry(hnd, struct ftrace_func_probe, node);
1444 1406
1445 if (rec->ops->print) 1407 if (rec->ops->print)
1446 return rec->ops->print(m, rec->ip, rec->ops, rec->data); 1408 return rec->ops->print(m, rec->ip, rec->ops, rec->data);
1447 1409
1448 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); 1410 seq_printf(m, "%pf:%pf", (void *)rec->ip, (void *)rec->ops->func);
1449 seq_printf(m, "%s:", str);
1450
1451 kallsyms_lookup((unsigned long)rec->ops->func, NULL, NULL, NULL, str);
1452 seq_printf(m, "%s", str);
1453 1411
1454 if (rec->data) 1412 if (rec->data)
1455 seq_printf(m, ":%p", rec->data); 1413 seq_printf(m, ":%p", rec->data);
@@ -1547,7 +1505,6 @@ static int t_show(struct seq_file *m, void *v)
1547{ 1505{
1548 struct ftrace_iterator *iter = m->private; 1506 struct ftrace_iterator *iter = m->private;
1549 struct dyn_ftrace *rec = v; 1507 struct dyn_ftrace *rec = v;
1550 char str[KSYM_SYMBOL_LEN];
1551 1508
1552 if (iter->flags & FTRACE_ITER_HASH) 1509 if (iter->flags & FTRACE_ITER_HASH)
1553 return t_hash_show(m, v); 1510 return t_hash_show(m, v);
@@ -1560,9 +1517,7 @@ static int t_show(struct seq_file *m, void *v)
1560 if (!rec) 1517 if (!rec)
1561 return 0; 1518 return 0;
1562 1519
1563 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); 1520 seq_printf(m, "%pf\n", (void *)rec->ip);
1564
1565 seq_printf(m, "%s\n", str);
1566 1521
1567 return 0; 1522 return 0;
1568} 1523}
@@ -1601,17 +1556,6 @@ ftrace_avail_open(struct inode *inode, struct file *file)
1601 return ret; 1556 return ret;
1602} 1557}
1603 1558
1604int ftrace_avail_release(struct inode *inode, struct file *file)
1605{
1606 struct seq_file *m = (struct seq_file *)file->private_data;
1607 struct ftrace_iterator *iter = m->private;
1608
1609 seq_release(inode, file);
1610 kfree(iter);
1611
1612 return 0;
1613}
1614
1615static int 1559static int
1616ftrace_failures_open(struct inode *inode, struct file *file) 1560ftrace_failures_open(struct inode *inode, struct file *file)
1617{ 1561{
@@ -2317,7 +2261,6 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
2317 } 2261 }
2318 2262
2319 if (isspace(ch)) { 2263 if (isspace(ch)) {
2320 iter->filtered++;
2321 iter->buffer[iter->buffer_idx] = 0; 2264 iter->buffer[iter->buffer_idx] = 0;
2322 ret = ftrace_process_regex(iter->buffer, 2265 ret = ftrace_process_regex(iter->buffer,
2323 iter->buffer_idx, enable); 2266 iter->buffer_idx, enable);
@@ -2448,7 +2391,6 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable)
2448 iter = file->private_data; 2391 iter = file->private_data;
2449 2392
2450 if (iter->buffer_idx) { 2393 if (iter->buffer_idx) {
2451 iter->filtered++;
2452 iter->buffer[iter->buffer_idx] = 0; 2394 iter->buffer[iter->buffer_idx] = 0;
2453 ftrace_match_records(iter->buffer, iter->buffer_idx, enable); 2395 ftrace_match_records(iter->buffer, iter->buffer_idx, enable);
2454 } 2396 }
@@ -2479,14 +2421,14 @@ static const struct file_operations ftrace_avail_fops = {
2479 .open = ftrace_avail_open, 2421 .open = ftrace_avail_open,
2480 .read = seq_read, 2422 .read = seq_read,
2481 .llseek = seq_lseek, 2423 .llseek = seq_lseek,
2482 .release = ftrace_avail_release, 2424 .release = seq_release_private,
2483}; 2425};
2484 2426
2485static const struct file_operations ftrace_failures_fops = { 2427static const struct file_operations ftrace_failures_fops = {
2486 .open = ftrace_failures_open, 2428 .open = ftrace_failures_open,
2487 .read = seq_read, 2429 .read = seq_read,
2488 .llseek = seq_lseek, 2430 .llseek = seq_lseek,
2489 .release = ftrace_avail_release, 2431 .release = seq_release_private,
2490}; 2432};
2491 2433
2492static const struct file_operations ftrace_filter_fops = { 2434static const struct file_operations ftrace_filter_fops = {
@@ -2548,7 +2490,6 @@ static void g_stop(struct seq_file *m, void *p)
2548static int g_show(struct seq_file *m, void *v) 2490static int g_show(struct seq_file *m, void *v)
2549{ 2491{
2550 unsigned long *ptr = v; 2492 unsigned long *ptr = v;
2551 char str[KSYM_SYMBOL_LEN];
2552 2493
2553 if (!ptr) 2494 if (!ptr)
2554 return 0; 2495 return 0;
@@ -2558,9 +2499,7 @@ static int g_show(struct seq_file *m, void *v)
2558 return 0; 2499 return 0;
2559 } 2500 }
2560 2501
2561 kallsyms_lookup(*ptr, NULL, NULL, NULL, str); 2502 seq_printf(m, "%pf\n", (void *)*ptr);
2562
2563 seq_printf(m, "%s\n", str);
2564 2503
2565 return 0; 2504 return 0;
2566} 2505}
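Several hunks above drop kallsyms_lookup() into a KSYM_SYMBOL_LEN stack buffer in favour of the kernel's %pf printk extension, which resolves a function address to its symbol name at print time (a kernel-only conversion, not standard printf). The before/after shape, taken from t_hash_show():

/* before: explicit lookups into a stack buffer */
char str[KSYM_SYMBOL_LEN];
kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
seq_printf(m, "%s:", str);

/* after: let the printk core resolve the symbol */
seq_printf(m, "%pf:%pf", (void *)rec->ip, (void *)rec->ops->func);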
diff --git a/kernel/trace/kmemtrace.c b/kernel/trace/kmemtrace.c
index 1edaa9516e81..81b1645c8549 100644
--- a/kernel/trace/kmemtrace.c
+++ b/kernel/trace/kmemtrace.c
@@ -183,11 +183,9 @@ static void kmemtrace_stop_probes(void)
183 183
184static int kmem_trace_init(struct trace_array *tr) 184static int kmem_trace_init(struct trace_array *tr)
185{ 185{
186 int cpu;
187 kmemtrace_array = tr; 186 kmemtrace_array = tr;
188 187
189 for_each_cpu(cpu, cpu_possible_mask) 188 tracing_reset_online_cpus(tr);
190 tracing_reset(tr, cpu);
191 189
192 kmemtrace_start_probes(); 190 kmemtrace_start_probes();
193 191
@@ -239,12 +237,52 @@ struct kmemtrace_user_event_alloc {
239}; 237};
240 238
241static enum print_line_t 239static enum print_line_t
242kmemtrace_print_alloc_user(struct trace_iterator *iter, 240kmemtrace_print_alloc(struct trace_iterator *iter, int flags)
243 struct kmemtrace_alloc_entry *entry)
244{ 241{
245 struct kmemtrace_user_event_alloc *ev_alloc;
246 struct trace_seq *s = &iter->seq; 242 struct trace_seq *s = &iter->seq;
243 struct kmemtrace_alloc_entry *entry;
244 int ret;
245
246 trace_assign_type(entry, iter->ent);
247
248 ret = trace_seq_printf(s, "type_id %d call_site %pF ptr %lu "
249 "bytes_req %lu bytes_alloc %lu gfp_flags %lu node %d\n",
250 entry->type_id, (void *)entry->call_site, (unsigned long)entry->ptr,
251 (unsigned long)entry->bytes_req, (unsigned long)entry->bytes_alloc,
252 (unsigned long)entry->gfp_flags, entry->node);
253
254 if (!ret)
255 return TRACE_TYPE_PARTIAL_LINE;
256 return TRACE_TYPE_HANDLED;
257}
258
259static enum print_line_t
260kmemtrace_print_free(struct trace_iterator *iter, int flags)
261{
262 struct trace_seq *s = &iter->seq;
263 struct kmemtrace_free_entry *entry;
264 int ret;
265
266 trace_assign_type(entry, iter->ent);
267
268 ret = trace_seq_printf(s, "type_id %d call_site %pF ptr %lu\n",
269 entry->type_id, (void *)entry->call_site,
270 (unsigned long)entry->ptr);
271
272 if (!ret)
273 return TRACE_TYPE_PARTIAL_LINE;
274 return TRACE_TYPE_HANDLED;
275}
276
277static enum print_line_t
278kmemtrace_print_alloc_user(struct trace_iterator *iter, int flags)
279{
280 struct trace_seq *s = &iter->seq;
281 struct kmemtrace_alloc_entry *entry;
247 struct kmemtrace_user_event *ev; 282 struct kmemtrace_user_event *ev;
283 struct kmemtrace_user_event_alloc *ev_alloc;
284
285 trace_assign_type(entry, iter->ent);
248 286
249 ev = trace_seq_reserve(s, sizeof(*ev)); 287 ev = trace_seq_reserve(s, sizeof(*ev));
250 if (!ev) 288 if (!ev)
@@ -271,12 +309,14 @@ kmemtrace_print_alloc_user(struct trace_iterator *iter,
271} 309}
272 310
273static enum print_line_t 311static enum print_line_t
274kmemtrace_print_free_user(struct trace_iterator *iter, 312kmemtrace_print_free_user(struct trace_iterator *iter, int flags)
275 struct kmemtrace_free_entry *entry)
276{ 313{
277 struct trace_seq *s = &iter->seq; 314 struct trace_seq *s = &iter->seq;
315 struct kmemtrace_free_entry *entry;
278 struct kmemtrace_user_event *ev; 316 struct kmemtrace_user_event *ev;
279 317
318 trace_assign_type(entry, iter->ent);
319
280 ev = trace_seq_reserve(s, sizeof(*ev)); 320 ev = trace_seq_reserve(s, sizeof(*ev));
281 if (!ev) 321 if (!ev)
282 return TRACE_TYPE_PARTIAL_LINE; 322 return TRACE_TYPE_PARTIAL_LINE;
@@ -294,12 +334,14 @@ kmemtrace_print_free_user(struct trace_iterator *iter,
294 334
295/* The two other following provide a more minimalistic output */ 335/* The two other following provide a more minimalistic output */
296static enum print_line_t 336static enum print_line_t
297kmemtrace_print_alloc_compress(struct trace_iterator *iter, 337kmemtrace_print_alloc_compress(struct trace_iterator *iter)
298 struct kmemtrace_alloc_entry *entry)
299{ 338{
339 struct kmemtrace_alloc_entry *entry;
300 struct trace_seq *s = &iter->seq; 340 struct trace_seq *s = &iter->seq;
301 int ret; 341 int ret;
302 342
343 trace_assign_type(entry, iter->ent);
344
303 /* Alloc entry */ 345 /* Alloc entry */
304 ret = trace_seq_printf(s, " + "); 346 ret = trace_seq_printf(s, " + ");
305 if (!ret) 347 if (!ret)
@@ -345,29 +387,24 @@ kmemtrace_print_alloc_compress(struct trace_iterator *iter,
345 if (!ret) 387 if (!ret)
346 return TRACE_TYPE_PARTIAL_LINE; 388 return TRACE_TYPE_PARTIAL_LINE;
347 389
348 /* Node */ 390 /* Node and call site */
349 ret = trace_seq_printf(s, "%4d ", entry->node); 391 ret = trace_seq_printf(s, "%4d %pf\n", entry->node,
350 if (!ret) 392 (void *)entry->call_site);
351 return TRACE_TYPE_PARTIAL_LINE;
352
353 /* Call site */
354 ret = seq_print_ip_sym(s, entry->call_site, 0);
355 if (!ret) 393 if (!ret)
356 return TRACE_TYPE_PARTIAL_LINE; 394 return TRACE_TYPE_PARTIAL_LINE;
357 395
358 if (!trace_seq_printf(s, "\n"))
359 return TRACE_TYPE_PARTIAL_LINE;
360
361 return TRACE_TYPE_HANDLED; 396 return TRACE_TYPE_HANDLED;
362} 397}
363 398
364static enum print_line_t 399static enum print_line_t
365kmemtrace_print_free_compress(struct trace_iterator *iter, 400kmemtrace_print_free_compress(struct trace_iterator *iter)
366 struct kmemtrace_free_entry *entry)
367{ 401{
402 struct kmemtrace_free_entry *entry;
368 struct trace_seq *s = &iter->seq; 403 struct trace_seq *s = &iter->seq;
369 int ret; 404 int ret;
370 405
406 trace_assign_type(entry, iter->ent);
407
371 /* Free entry */ 408 /* Free entry */
372 ret = trace_seq_printf(s, " - "); 409 ret = trace_seq_printf(s, " - ");
373 if (!ret) 410 if (!ret)
@@ -401,19 +438,11 @@ kmemtrace_print_free_compress(struct trace_iterator *iter,
401 if (!ret) 438 if (!ret)
402 return TRACE_TYPE_PARTIAL_LINE; 439 return TRACE_TYPE_PARTIAL_LINE;
403 440
404 /* Skip node */ 441 /* Skip node and print call site */
405 ret = trace_seq_printf(s, " "); 442 ret = trace_seq_printf(s, " %pf\n", (void *)entry->call_site);
406 if (!ret) 443 if (!ret)
407 return TRACE_TYPE_PARTIAL_LINE; 444 return TRACE_TYPE_PARTIAL_LINE;
408 445
409 /* Call site */
410 ret = seq_print_ip_sym(s, entry->call_site, 0);
411 if (!ret)
412 return TRACE_TYPE_PARTIAL_LINE;
413
414 if (!trace_seq_printf(s, "\n"))
415 return TRACE_TYPE_PARTIAL_LINE;
416
417 return TRACE_TYPE_HANDLED; 446 return TRACE_TYPE_HANDLED;
418} 447}
419 448
@@ -421,32 +450,31 @@ static enum print_line_t kmemtrace_print_line(struct trace_iterator *iter)
421{ 450{
422 struct trace_entry *entry = iter->ent; 451 struct trace_entry *entry = iter->ent;
423 452
424 switch (entry->type) { 453 if (!(kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL))
425 case TRACE_KMEM_ALLOC: { 454 return TRACE_TYPE_UNHANDLED;
426 struct kmemtrace_alloc_entry *field;
427
428 trace_assign_type(field, entry);
429 if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)
430 return kmemtrace_print_alloc_compress(iter, field);
431 else
432 return kmemtrace_print_alloc_user(iter, field);
433 }
434
435 case TRACE_KMEM_FREE: {
436 struct kmemtrace_free_entry *field;
437
438 trace_assign_type(field, entry);
439 if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)
440 return kmemtrace_print_free_compress(iter, field);
441 else
442 return kmemtrace_print_free_user(iter, field);
443 }
444 455
456 switch (entry->type) {
457 case TRACE_KMEM_ALLOC:
458 return kmemtrace_print_alloc_compress(iter);
459 case TRACE_KMEM_FREE:
460 return kmemtrace_print_free_compress(iter);
445 default: 461 default:
446 return TRACE_TYPE_UNHANDLED; 462 return TRACE_TYPE_UNHANDLED;
447 } 463 }
448} 464}
449 465
466static struct trace_event kmem_trace_alloc = {
467 .type = TRACE_KMEM_ALLOC,
468 .trace = kmemtrace_print_alloc,
469 .binary = kmemtrace_print_alloc_user,
470};
471
472static struct trace_event kmem_trace_free = {
473 .type = TRACE_KMEM_FREE,
474 .trace = kmemtrace_print_free,
475 .binary = kmemtrace_print_free_user,
476};
477
450static struct tracer kmem_tracer __read_mostly = { 478static struct tracer kmem_tracer __read_mostly = {
451 .name = "kmemtrace", 479 .name = "kmemtrace",
452 .init = kmem_trace_init, 480 .init = kmem_trace_init,
@@ -463,6 +491,21 @@ void kmemtrace_init(void)
463 491
464static int __init init_kmem_tracer(void) 492static int __init init_kmem_tracer(void)
465{ 493{
466 return register_tracer(&kmem_tracer); 494 if (!register_ftrace_event(&kmem_trace_alloc)) {
495 pr_warning("Warning: could not register kmem events\n");
496 return 1;
497 }
498
499 if (!register_ftrace_event(&kmem_trace_free)) {
500 pr_warning("Warning: could not register kmem events\n");
501 return 1;
502 }
503
504 if (!register_tracer(&kmem_tracer)) {
505 pr_warning("Warning: could not register the kmem tracer\n");
506 return 1;
507 }
508
509 return 0;
467} 510}
468device_initcall(init_kmem_tracer); 511device_initcall(init_kmem_tracer);
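Instead of switching on entry->type inside kmemtrace_print_line(), the rework registers a struct trace_event per entry type, with .trace producing the text output and .binary the original user-event format. A minimal sketch of that shape; the my_* names and the callback body are illustrative, while the fields and the register_ftrace_event() failure convention (0 on failure) come from the hunk above.

static enum print_line_t my_print_alloc(struct trace_iterator *iter, int flags)
{
	struct kmemtrace_alloc_entry *entry;

	trace_assign_type(entry, iter->ent);
	if (!trace_seq_printf(&iter->seq, "alloc %pF %lu bytes\n",
			      (void *)entry->call_site,
			      (unsigned long)entry->bytes_req))
		return TRACE_TYPE_PARTIAL_LINE;
	return TRACE_TYPE_HANDLED;
}

static struct trace_event my_alloc_event = {
	.type	= TRACE_KMEM_ALLOC,
	.trace	= my_print_alloc,	/* human-readable output */
	/* .binary = ...		   raw user-event format */
};

/* at init time: register_ftrace_event() returns 0 on failure */
if (!register_ftrace_event(&my_alloc_event))
	pr_warning("could not register kmem alloc event\n");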
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index a330513d96ce..454e74e718cf 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -218,17 +218,12 @@ enum {
218 218
219static inline int rb_null_event(struct ring_buffer_event *event) 219static inline int rb_null_event(struct ring_buffer_event *event)
220{ 220{
221 return event->type_len == RINGBUF_TYPE_PADDING 221 return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
222 && event->time_delta == 0;
223}
224
225static inline int rb_discarded_event(struct ring_buffer_event *event)
226{
227 return event->type_len == RINGBUF_TYPE_PADDING && event->time_delta;
228} 222}
229 223
230static void rb_event_set_padding(struct ring_buffer_event *event) 224static void rb_event_set_padding(struct ring_buffer_event *event)
231{ 225{
226 /* padding has a NULL time_delta */
232 event->type_len = RINGBUF_TYPE_PADDING; 227 event->type_len = RINGBUF_TYPE_PADDING;
233 event->time_delta = 0; 228 event->time_delta = 0;
234} 229}
@@ -322,6 +317,14 @@ struct buffer_data_page {
322 unsigned char data[]; /* data of buffer page */ 317 unsigned char data[]; /* data of buffer page */
323}; 318};
324 319
320/*
321 * Note, the buffer_page list must be first. The buffer pages
322 * are allocated in cache lines, which means that each buffer
323 * page will be at the beginning of a cache line, and thus
324 * the least significant bits will be zero. We use this to
325 * add flags in the list struct pointers, to make the ring buffer
326 * lockless.
327 */
325struct buffer_page { 328struct buffer_page {
326 struct list_head list; /* list of buffer pages */ 329 struct list_head list; /* list of buffer pages */
327 local_t write; /* index for next write */ 330 local_t write; /* index for next write */
@@ -330,6 +333,21 @@ struct buffer_page {
330 struct buffer_data_page *page; /* Actual data page */ 333 struct buffer_data_page *page; /* Actual data page */
331}; 334};
332 335
336/*
337 * The buffer page counters, write and entries, must be reset
338 * atomically when crossing page boundaries. To synchronize this
339 * update, two counters are inserted into the number. One is
340 * the actual counter for the write position or count on the page.
341 *
342 * The other is a counter of updaters. Before an update happens
343 * the update partition of the counter is incremented. This will
344 * allow the updater to update the counter atomically.
345 *
346 * The counter is 20 bits, and the state data is 12.
347 */
348#define RB_WRITE_MASK 0xfffff
349#define RB_WRITE_INTCNT (1 << 20)
350
333static void rb_init_page(struct buffer_data_page *bpage) 351static void rb_init_page(struct buffer_data_page *bpage)
334{ 352{
335 local_set(&bpage->commit, 0); 353 local_set(&bpage->commit, 0);
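The comment above says the write and entries counters now carry two values in one word: the low 20 bits are the real index, and everything above RB_WRITE_MASK counts nested updaters. A tiny stand-alone program with the same masks shows how the two parts are read back and how a reset keeps only the updater bits:

#include <stdio.h>

#define RB_WRITE_MASK	0xfffffUL
#define RB_WRITE_INTCNT	(1UL << 20)

int main(void)
{
	unsigned long write = 1234;		/* current write index */
	unsigned long old = write;
	unsigned long val;

	/* an updater announces itself before touching the page */
	write += RB_WRITE_INTCNT;

	printf("index  = %lu\n", old & RB_WRITE_MASK);	/* 1234 */
	printf("nested = %lu\n", write >> 20);		/* one updater in flight */

	/* resetting the page keeps only the updater count, zeroing the index */
	val = write & ~RB_WRITE_MASK;
	printf("reset  = 0x%lx (index %lu)\n", val, val & RB_WRITE_MASK);
	return 0;
}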
@@ -403,21 +421,20 @@ int ring_buffer_print_page_header(struct trace_seq *s)
403struct ring_buffer_per_cpu { 421struct ring_buffer_per_cpu {
404 int cpu; 422 int cpu;
405 struct ring_buffer *buffer; 423 struct ring_buffer *buffer;
406 spinlock_t reader_lock; /* serialize readers */ 424 spinlock_t reader_lock; /* serialize readers */
407 raw_spinlock_t lock; 425 raw_spinlock_t lock;
408 struct lock_class_key lock_key; 426 struct lock_class_key lock_key;
409 struct list_head pages; 427 struct list_head *pages;
410 struct buffer_page *head_page; /* read from head */ 428 struct buffer_page *head_page; /* read from head */
411 struct buffer_page *tail_page; /* write to tail */ 429 struct buffer_page *tail_page; /* write to tail */
412 struct buffer_page *commit_page; /* committed pages */ 430 struct buffer_page *commit_page; /* committed pages */
413 struct buffer_page *reader_page; 431 struct buffer_page *reader_page;
414 unsigned long nmi_dropped; 432 local_t commit_overrun;
415 unsigned long commit_overrun; 433 local_t overrun;
416 unsigned long overrun;
417 unsigned long read;
418 local_t entries; 434 local_t entries;
419 local_t committing; 435 local_t committing;
420 local_t commits; 436 local_t commits;
437 unsigned long read;
421 u64 write_stamp; 438 u64 write_stamp;
422 u64 read_stamp; 439 u64 read_stamp;
423 atomic_t record_disabled; 440 atomic_t record_disabled;
@@ -450,14 +467,19 @@ struct ring_buffer_iter {
450}; 467};
451 468
452/* buffer may be either ring_buffer or ring_buffer_per_cpu */ 469/* buffer may be either ring_buffer or ring_buffer_per_cpu */
453#define RB_WARN_ON(buffer, cond) \ 470#define RB_WARN_ON(b, cond) \
454 ({ \ 471 ({ \
455 int _____ret = unlikely(cond); \ 472 int _____ret = unlikely(cond); \
456 if (_____ret) { \ 473 if (_____ret) { \
457 atomic_inc(&buffer->record_disabled); \ 474 if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
458 WARN_ON(1); \ 475 struct ring_buffer_per_cpu *__b = \
459 } \ 476 (void *)b; \
460 _____ret; \ 477 atomic_inc(&__b->buffer->record_disabled); \
478 } else \
479 atomic_inc(&b->record_disabled); \
480 WARN_ON(1); \
481 } \
482 _____ret; \
461 }) 483 })
462 484
463/* Up this if you want to test the TIME_EXTENTS and normalization */ 485/* Up this if you want to test the TIME_EXTENTS and normalization */
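The new RB_WARN_ON() accepts either a ring_buffer or a ring_buffer_per_cpu and bumps the right record_disabled by dispatching on __same_type(), which in the kernel wraps __builtin_types_compatible_p(). A compilable user-space miniature of that compile-time dispatch, with invented struct names:

#include <stdio.h>

#define same_type(a, b)	__builtin_types_compatible_p(typeof(a), typeof(b))

struct outer { int disabled; };
struct inner { struct outer *parent; };

#define BUMP(p)							\
	do {							\
		if (same_type(*(p), struct inner)) {		\
			struct inner *__i = (void *)(p);	\
			__i->parent->disabled++;		\
		} else {					\
			struct outer *__o = (void *)(p);	\
			__o->disabled++;			\
		}						\
	} while (0)

int main(void)
{
	struct outer o = { 0 };
	struct inner i = { &o };

	BUMP(&o);
	BUMP(&i);
	printf("disabled = %d\n", o.disabled);	/* 2 */
	return 0;
}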
@@ -489,6 +511,390 @@ void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
489} 511}
490EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp); 512EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
491 513
514/*
515 * Making the ring buffer lockless makes things tricky.
516 * Although writes only happen on the CPU that they are on,
517 * and they only need to worry about interrupts. Reads can
518 * happen on any CPU.
519 *
520 * The reader page is always off the ring buffer, but when the
521 * reader finishes with a page, it needs to swap its page with
522 * a new one from the buffer. The reader needs to take from
523 * the head (writes go to the tail). But if a writer is in overwrite
524 * mode and wraps, it must push the head page forward.
525 *
526 * Here lies the problem.
527 *
528 * The reader must be careful to replace only the head page, and
529 * not another one. As described at the top of the file in the
530 * ASCII art, the reader sets its old page to point to the next
531 * page after head. It then sets the page after head to point to
532 * the old reader page. But if the writer moves the head page
533 * during this operation, the reader could end up with the tail.
534 *
535 * We use cmpxchg to help prevent this race. We also do something
536 * special with the page before head. We set the LSB to 1.
537 *
538 * When the writer must push the page forward, it will clear the
539 * bit that points to the head page, move the head, and then set
540 * the bit that points to the new head page.
541 *
542 * We also don't want an interrupt coming in and moving the head
543 * page on another writer. Thus we use the second LSB to catch
544 * that too. Thus:
545 *
546 * head->list->prev->next bit 1 bit 0
547 * ------- -------
548 * Normal page 0 0
549 * Points to head page 0 1
550 * New head page 1 0
551 *
552 * Note we can not trust the prev pointer of the head page, because:
553 *
554 * +----+ +-----+ +-----+
555 * | |------>| T |---X--->| N |
556 * | |<------| | | |
557 * +----+ +-----+ +-----+
558 * ^ ^ |
559 * | +-----+ | |
560 * +----------| R |----------+ |
561 * | |<-----------+
562 * +-----+
563 *
564 * Key: ---X--> HEAD flag set in pointer
565 * T Tail page
566 * R Reader page
567 * N Next page
568 *
569 * (see __rb_reserve_next() to see where this happens)
570 *
571 * What the above shows is that the reader just swapped out
572 * the reader page with a page in the buffer, but before it
573 * could make the new header point back to the new page added
574 * it was preempted by a writer. The writer moved forward onto
575 * the new page added by the reader and is about to move forward
576 * again.
577 *
578 * You can see, it is legitimate for the previous pointer of
579 * the head (or any page) not to point back to itself. But only
580 * temporarily.
581 */
582
583#define RB_PAGE_NORMAL 0UL
584#define RB_PAGE_HEAD 1UL
585#define RB_PAGE_UPDATE 2UL
586
587
588#define RB_FLAG_MASK 3UL
589
590/* PAGE_MOVED is not part of the mask */
591#define RB_PAGE_MOVED 4UL
592
593/*
594 * rb_list_head - remove any bit
595 */
596static struct list_head *rb_list_head(struct list_head *list)
597{
598 unsigned long val = (unsigned long)list;
599
600 return (struct list_head *)(val & ~RB_FLAG_MASK);
601}
602
603/*
604 * rb_is_head_page - test if the given page is the head page
605 *
606 * Because the reader may move the head_page pointer, we can
607 * not trust what the head page is (it may be pointing to
608 * the reader page). But if the next page is a header page,
609 * its flags will be non zero.
610 */
611static inline int
612rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
613 struct buffer_page *page, struct list_head *list)
614{
615 unsigned long val;
616
617 val = (unsigned long)list->next;
618
619 if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
620 return RB_PAGE_MOVED;
621
622 return val & RB_FLAG_MASK;
623}
624
625/*
626 * rb_is_reader_page
627 *
628 * The unique thing about the reader page is that, if the
629 * writer is ever on it, the previous pointer never points
630 * back to the reader page.
631 */
632static int rb_is_reader_page(struct buffer_page *page)
633{
634 struct list_head *list = page->list.prev;
635
636 return rb_list_head(list->next) != &page->list;
637}
638
639/*
640 * rb_set_list_to_head - set a list_head to be pointing to head.
641 */
642static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer,
643 struct list_head *list)
644{
645 unsigned long *ptr;
646
647 ptr = (unsigned long *)&list->next;
648 *ptr |= RB_PAGE_HEAD;
649 *ptr &= ~RB_PAGE_UPDATE;
650}
651
652/*
653 * rb_head_page_activate - sets up head page
654 */
655static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
656{
657 struct buffer_page *head;
658
659 head = cpu_buffer->head_page;
660 if (!head)
661 return;
662
663 /*
664 * Set the previous list pointer to have the HEAD flag.
665 */
666 rb_set_list_to_head(cpu_buffer, head->list.prev);
667}
668
669static void rb_list_head_clear(struct list_head *list)
670{
671 unsigned long *ptr = (unsigned long *)&list->next;
672
673 *ptr &= ~RB_FLAG_MASK;
674}
675
676/*
677 * rb_head_page_deactivate - clears head page ptr (for free list)
678 */
679static void
680rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
681{
682 struct list_head *hd;
683
684 /* Go through the whole list and clear any pointers found. */
685 rb_list_head_clear(cpu_buffer->pages);
686
687 list_for_each(hd, cpu_buffer->pages)
688 rb_list_head_clear(hd);
689}
690
691static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
692 struct buffer_page *head,
693 struct buffer_page *prev,
694 int old_flag, int new_flag)
695{
696 struct list_head *list;
697 unsigned long val = (unsigned long)&head->list;
698 unsigned long ret;
699
700 list = &prev->list;
701
702 val &= ~RB_FLAG_MASK;
703
704 ret = (unsigned long)cmpxchg(&list->next,
705 val | old_flag, val | new_flag);
706
707 /* check if the reader took the page */
708 if ((ret & ~RB_FLAG_MASK) != val)
709 return RB_PAGE_MOVED;
710
711 return ret & RB_FLAG_MASK;
712}
713
714static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
715 struct buffer_page *head,
716 struct buffer_page *prev,
717 int old_flag)
718{
719 return rb_head_page_set(cpu_buffer, head, prev,
720 old_flag, RB_PAGE_UPDATE);
721}
722
723static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
724 struct buffer_page *head,
725 struct buffer_page *prev,
726 int old_flag)
727{
728 return rb_head_page_set(cpu_buffer, head, prev,
729 old_flag, RB_PAGE_HEAD);
730}
731
732static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
733 struct buffer_page *head,
734 struct buffer_page *prev,
735 int old_flag)
736{
737 return rb_head_page_set(cpu_buffer, head, prev,
738 old_flag, RB_PAGE_NORMAL);
739}
740
741static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
742 struct buffer_page **bpage)
743{
744 struct list_head *p = rb_list_head((*bpage)->list.next);
745
746 *bpage = list_entry(p, struct buffer_page, list);
747}
748
749static struct buffer_page *
750rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
751{
752 struct buffer_page *head;
753 struct buffer_page *page;
754 struct list_head *list;
755 int i;
756
757 if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
758 return NULL;
759
760 /* sanity check */
761 list = cpu_buffer->pages;
762 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
763 return NULL;
764
765 page = head = cpu_buffer->head_page;
766 /*
767 * It is possible that the writer moves the header behind
768 * where we started, and we miss in one loop.
769 * A second loop should grab the header, but we'll do
770 * three loops just because I'm paranoid.
771 */
772 for (i = 0; i < 3; i++) {
773 do {
774 if (rb_is_head_page(cpu_buffer, page, page->list.prev)) {
775 cpu_buffer->head_page = page;
776 return page;
777 }
778 rb_inc_page(cpu_buffer, &page);
779 } while (page != head);
780 }
781
782 RB_WARN_ON(cpu_buffer, 1);
783
784 return NULL;
785}
786
787static int rb_head_page_replace(struct buffer_page *old,
788 struct buffer_page *new)
789{
790 unsigned long *ptr = (unsigned long *)&old->list.prev->next;
791 unsigned long val;
792 unsigned long ret;
793
794 val = *ptr & ~RB_FLAG_MASK;
795 val |= RB_PAGE_HEAD;
796
797 ret = cmpxchg(ptr, val, &new->list);
798
799 return ret == val;
800}
801
802/*
803 * rb_tail_page_update - move the tail page forward
804 *
805 * Returns 1 if moved tail page, 0 if someone else did.
806 */
807static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
808 struct buffer_page *tail_page,
809 struct buffer_page *next_page)
810{
811 struct buffer_page *old_tail;
812 unsigned long old_entries;
813 unsigned long old_write;
814 int ret = 0;
815
816 /*
817 * The tail page now needs to be moved forward.
818 *
819 * We need to reset the tail page, but without messing
820 * with possible erasing of data brought in by interrupts
821 * that have moved the tail page and are currently on it.
822 *
823 * We add a counter to the write field to denote this.
824 */
825 old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
826 old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
827
828 /*
829 * Just make sure we have seen our old_write and synchronize
830 * with any interrupts that come in.
831 */
832 barrier();
833
834 /*
835 * If the tail page is still the same as what we think
836 * it is, then it is up to us to update the tail
837 * pointer.
838 */
839 if (tail_page == cpu_buffer->tail_page) {
840 /* Zero the write counter */
841 unsigned long val = old_write & ~RB_WRITE_MASK;
842 unsigned long eval = old_entries & ~RB_WRITE_MASK;
843
844 /*
845 * This will only succeed if an interrupt did
846 * not come in and change it. In which case, we
847 * do not want to modify it.
848 *
849 * We add (void) to let the compiler know that we do not care
850 * about the return value of these functions. We use the
851 * cmpxchg to only update if an interrupt did not already
852 * do it for us. If the cmpxchg fails, we don't care.
853 */
854 (void)local_cmpxchg(&next_page->write, old_write, val);
855 (void)local_cmpxchg(&next_page->entries, old_entries, eval);
856
857 /*
858 * No need to worry about races with clearing out the commit.
859 * it only can increment when a commit takes place. But that
860 * only happens in the outer most nested commit.
861 */
862 local_set(&next_page->page->commit, 0);
863
864 old_tail = cmpxchg(&cpu_buffer->tail_page,
865 tail_page, next_page);
866
867 if (old_tail == tail_page)
868 ret = 1;
869 }
870
871 return ret;
872}
873
874static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
875 struct buffer_page *bpage)
876{
877 unsigned long val = (unsigned long)bpage;
878
879 if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
880 return 1;
881
882 return 0;
883}
884
885/**
886 * rb_check_list - make sure a pointer to a list has the last bits zero
887 */
888static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
889 struct list_head *list)
890{
891 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
892 return 1;
893 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
894 return 1;
895 return 0;
896}
897
492/** 898/**
493 * check_pages - integrity check of buffer pages 899 * check_pages - integrity check of buffer pages
494 * @cpu_buffer: CPU buffer with pages to test 900 * @cpu_buffer: CPU buffer with pages to test
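The design comment at the top of this hunk keeps the HEAD/UPDATE state in the two low bits of the ->next pointer of the page before head (buffer pages are cache-line aligned, so those bits are otherwise zero), and moves the head with cmpxchg so a reader swapping pages and a writer pushing the head cannot both win. A user-space miniature of the pointer tagging and the claim step, with the kernel's cmpxchg reduced to a GCC builtin:

#include <stdio.h>
#include <stdint.h>

#define PAGE_HEAD	1UL
#define PAGE_UPDATE	2UL
#define FLAG_MASK	3UL

struct page { uintptr_t next; };	/* pages are at least 4-byte aligned */

static void *strip(uintptr_t v) { return (void *)(v & ~FLAG_MASK); }

int main(void)
{
	static struct page a, b;
	uintptr_t old, new, got;

	a.next = (uintptr_t)&b | PAGE_HEAD;	/* b is the head page */

	/* writer claims the head for update, as rb_head_page_set() does */
	old = (uintptr_t)&b | PAGE_HEAD;
	new = (uintptr_t)&b | PAGE_UPDATE;
	got = __sync_val_compare_and_swap(&a.next, old, new);

	printf("claimed: %s, head is still %p\n",
	       got == old ? "yes" : "no (a reader moved it)", strip(a.next));
	return 0;
}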
@@ -498,14 +904,19 @@ EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
498 */ 904 */
499static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer) 905static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
500{ 906{
501 struct list_head *head = &cpu_buffer->pages; 907 struct list_head *head = cpu_buffer->pages;
502 struct buffer_page *bpage, *tmp; 908 struct buffer_page *bpage, *tmp;
503 909
910 rb_head_page_deactivate(cpu_buffer);
911
504 if (RB_WARN_ON(cpu_buffer, head->next->prev != head)) 912 if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
505 return -1; 913 return -1;
506 if (RB_WARN_ON(cpu_buffer, head->prev->next != head)) 914 if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
507 return -1; 915 return -1;
508 916
917 if (rb_check_list(cpu_buffer, head))
918 return -1;
919
509 list_for_each_entry_safe(bpage, tmp, head, list) { 920 list_for_each_entry_safe(bpage, tmp, head, list) {
510 if (RB_WARN_ON(cpu_buffer, 921 if (RB_WARN_ON(cpu_buffer,
511 bpage->list.next->prev != &bpage->list)) 922 bpage->list.next->prev != &bpage->list))
@@ -513,25 +924,33 @@ static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
513 if (RB_WARN_ON(cpu_buffer, 924 if (RB_WARN_ON(cpu_buffer,
514 bpage->list.prev->next != &bpage->list)) 925 bpage->list.prev->next != &bpage->list))
515 return -1; 926 return -1;
927 if (rb_check_list(cpu_buffer, &bpage->list))
928 return -1;
516 } 929 }
517 930
931 rb_head_page_activate(cpu_buffer);
932
518 return 0; 933 return 0;
519} 934}
520 935
521static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, 936static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
522 unsigned nr_pages) 937 unsigned nr_pages)
523{ 938{
524 struct list_head *head = &cpu_buffer->pages;
525 struct buffer_page *bpage, *tmp; 939 struct buffer_page *bpage, *tmp;
526 unsigned long addr; 940 unsigned long addr;
527 LIST_HEAD(pages); 941 LIST_HEAD(pages);
528 unsigned i; 942 unsigned i;
529 943
944 WARN_ON(!nr_pages);
945
530 for (i = 0; i < nr_pages; i++) { 946 for (i = 0; i < nr_pages; i++) {
531 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()), 947 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
532 GFP_KERNEL, cpu_to_node(cpu_buffer->cpu)); 948 GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
533 if (!bpage) 949 if (!bpage)
534 goto free_pages; 950 goto free_pages;
951
952 rb_check_bpage(cpu_buffer, bpage);
953
535 list_add(&bpage->list, &pages); 954 list_add(&bpage->list, &pages);
536 955
537 addr = __get_free_page(GFP_KERNEL); 956 addr = __get_free_page(GFP_KERNEL);
@@ -541,7 +960,13 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
541 rb_init_page(bpage->page); 960 rb_init_page(bpage->page);
542 } 961 }
543 962
544 list_splice(&pages, head); 963 /*
964 * The ring buffer page list is a circular list that does not
965 * start and end with a list head. All page list items point to
966 * other pages.
967 */
968 cpu_buffer->pages = pages.next;
969 list_del(&pages);
545 970
546 rb_check_pages(cpu_buffer); 971 rb_check_pages(cpu_buffer);
547 972
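The comment above describes the key data-structure change in this hunk: cpu_buffer->pages is now just a cursor into a circular list with no dedicated head node. A toy walk of such a headless ring; the names are illustrative, not the kernel's.

#include <stdio.h>

struct page { struct page *next; int id; };

/* visit every page exactly once, stopping when we wrap back to the start */
static void walk_ring(struct page *start)
{
	struct page *p = start;

	do {
		printf("page %d\n", p->id);
		p = p->next;
	} while (p != start);
}

int main(void)
{
	struct page a = { .id = 0 }, b = { .id = 1 }, c = { .id = 2 };

	a.next = &b; b.next = &c; c.next = &a;	/* a ring with no dedicated head node */
	walk_ring(&a);
	return 0;
}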
@@ -573,13 +998,14 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
573 spin_lock_init(&cpu_buffer->reader_lock); 998 spin_lock_init(&cpu_buffer->reader_lock);
574 lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key); 999 lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
575 cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; 1000 cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
576 INIT_LIST_HEAD(&cpu_buffer->pages);
577 1001
578 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()), 1002 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
579 GFP_KERNEL, cpu_to_node(cpu)); 1003 GFP_KERNEL, cpu_to_node(cpu));
580 if (!bpage) 1004 if (!bpage)
581 goto fail_free_buffer; 1005 goto fail_free_buffer;
582 1006
1007 rb_check_bpage(cpu_buffer, bpage);
1008
583 cpu_buffer->reader_page = bpage; 1009 cpu_buffer->reader_page = bpage;
584 addr = __get_free_page(GFP_KERNEL); 1010 addr = __get_free_page(GFP_KERNEL);
585 if (!addr) 1011 if (!addr)
@@ -594,9 +1020,11 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
594 goto fail_free_reader; 1020 goto fail_free_reader;
595 1021
596 cpu_buffer->head_page 1022 cpu_buffer->head_page
597 = list_entry(cpu_buffer->pages.next, struct buffer_page, list); 1023 = list_entry(cpu_buffer->pages, struct buffer_page, list);
598 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page; 1024 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
599 1025
1026 rb_head_page_activate(cpu_buffer);
1027
600 return cpu_buffer; 1028 return cpu_buffer;
601 1029
602 fail_free_reader: 1030 fail_free_reader:
@@ -609,15 +1037,22 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
609 1037
610static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) 1038static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
611{ 1039{
612 struct list_head *head = &cpu_buffer->pages; 1040 struct list_head *head = cpu_buffer->pages;
613 struct buffer_page *bpage, *tmp; 1041 struct buffer_page *bpage, *tmp;
614 1042
615 free_buffer_page(cpu_buffer->reader_page); 1043 free_buffer_page(cpu_buffer->reader_page);
616 1044
617 list_for_each_entry_safe(bpage, tmp, head, list) { 1045 rb_head_page_deactivate(cpu_buffer);
618 list_del_init(&bpage->list); 1046
1047 if (head) {
1048 list_for_each_entry_safe(bpage, tmp, head, list) {
1049 list_del_init(&bpage->list);
1050 free_buffer_page(bpage);
1051 }
1052 bpage = list_entry(head, struct buffer_page, list);
619 free_buffer_page(bpage); 1053 free_buffer_page(bpage);
620 } 1054 }
1055
621 kfree(cpu_buffer); 1056 kfree(cpu_buffer);
622} 1057}
623 1058
@@ -760,15 +1195,17 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
760 atomic_inc(&cpu_buffer->record_disabled); 1195 atomic_inc(&cpu_buffer->record_disabled);
761 synchronize_sched(); 1196 synchronize_sched();
762 1197
1198 rb_head_page_deactivate(cpu_buffer);
1199
763 for (i = 0; i < nr_pages; i++) { 1200 for (i = 0; i < nr_pages; i++) {
764 if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages))) 1201 if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
765 return; 1202 return;
766 p = cpu_buffer->pages.next; 1203 p = cpu_buffer->pages->next;
767 bpage = list_entry(p, struct buffer_page, list); 1204 bpage = list_entry(p, struct buffer_page, list);
768 list_del_init(&bpage->list); 1205 list_del_init(&bpage->list);
769 free_buffer_page(bpage); 1206 free_buffer_page(bpage);
770 } 1207 }
771 if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages))) 1208 if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
772 return; 1209 return;
773 1210
774 rb_reset_cpu(cpu_buffer); 1211 rb_reset_cpu(cpu_buffer);
@@ -790,15 +1227,19 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
790 atomic_inc(&cpu_buffer->record_disabled); 1227 atomic_inc(&cpu_buffer->record_disabled);
791 synchronize_sched(); 1228 synchronize_sched();
792 1229
1230 spin_lock_irq(&cpu_buffer->reader_lock);
1231 rb_head_page_deactivate(cpu_buffer);
1232
793 for (i = 0; i < nr_pages; i++) { 1233 for (i = 0; i < nr_pages; i++) {
794 if (RB_WARN_ON(cpu_buffer, list_empty(pages))) 1234 if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
795 return; 1235 return;
796 p = pages->next; 1236 p = pages->next;
797 bpage = list_entry(p, struct buffer_page, list); 1237 bpage = list_entry(p, struct buffer_page, list);
798 list_del_init(&bpage->list); 1238 list_del_init(&bpage->list);
799 list_add_tail(&bpage->list, &cpu_buffer->pages); 1239 list_add_tail(&bpage->list, cpu_buffer->pages);
800 } 1240 }
801 rb_reset_cpu(cpu_buffer); 1241 rb_reset_cpu(cpu_buffer);
1242 spin_unlock_irq(&cpu_buffer->reader_lock);
802 1243
803 rb_check_pages(cpu_buffer); 1244 rb_check_pages(cpu_buffer);
804 1245
@@ -949,21 +1390,14 @@ rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
949} 1390}
950 1391
951static inline struct ring_buffer_event * 1392static inline struct ring_buffer_event *
952rb_head_event(struct ring_buffer_per_cpu *cpu_buffer)
953{
954 return __rb_page_index(cpu_buffer->head_page,
955 cpu_buffer->head_page->read);
956}
957
958static inline struct ring_buffer_event *
959rb_iter_head_event(struct ring_buffer_iter *iter) 1393rb_iter_head_event(struct ring_buffer_iter *iter)
960{ 1394{
961 return __rb_page_index(iter->head_page, iter->head); 1395 return __rb_page_index(iter->head_page, iter->head);
962} 1396}
963 1397
964static inline unsigned rb_page_write(struct buffer_page *bpage) 1398static inline unsigned long rb_page_write(struct buffer_page *bpage)
965{ 1399{
966 return local_read(&bpage->write); 1400 return local_read(&bpage->write) & RB_WRITE_MASK;
967} 1401}
968 1402
969static inline unsigned rb_page_commit(struct buffer_page *bpage) 1403static inline unsigned rb_page_commit(struct buffer_page *bpage)
@@ -971,6 +1405,11 @@ static inline unsigned rb_page_commit(struct buffer_page *bpage)
971 return local_read(&bpage->page->commit); 1405 return local_read(&bpage->page->commit);
972} 1406}
973 1407
1408static inline unsigned long rb_page_entries(struct buffer_page *bpage)
1409{
1410 return local_read(&bpage->entries) & RB_WRITE_MASK;
1411}
1412
974/* Size is determined by what has been committed */ 1413
975static inline unsigned rb_page_size(struct buffer_page *bpage) 1414static inline unsigned rb_page_size(struct buffer_page *bpage)
976{ 1415{
@@ -983,22 +1422,6 @@ rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
983 return rb_page_commit(cpu_buffer->commit_page); 1422 return rb_page_commit(cpu_buffer->commit_page);
984} 1423}
985 1424
986static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer)
987{
988 return rb_page_commit(cpu_buffer->head_page);
989}
990
991static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
992 struct buffer_page **bpage)
993{
994 struct list_head *p = (*bpage)->list.next;
995
996 if (p == &cpu_buffer->pages)
997 p = p->next;
998
999 *bpage = list_entry(p, struct buffer_page, list);
1000}
1001
1002static inline unsigned 1425static inline unsigned
1003rb_event_index(struct ring_buffer_event *event) 1426rb_event_index(struct ring_buffer_event *event)
1004{ 1427{
@@ -1024,6 +1447,8 @@ rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
1024static void 1447static void
1025rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer) 1448rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
1026{ 1449{
1450 unsigned long max_count;
1451
1027 /* 1452 /*
1028 * We only race with interrupts and NMIs on this CPU. 1453 * We only race with interrupts and NMIs on this CPU.
1029 * If we own the commit event, then we can commit 1454 * If we own the commit event, then we can commit
@@ -1033,9 +1458,16 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
1033 * assign the commit to the tail. 1458 * assign the commit to the tail.
1034 */ 1459 */
1035 again: 1460 again:
1461 max_count = cpu_buffer->buffer->pages * 100;
1462
1036 while (cpu_buffer->commit_page != cpu_buffer->tail_page) { 1463 while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
1037 cpu_buffer->commit_page->page->commit = 1464 if (RB_WARN_ON(cpu_buffer, !(--max_count)))
1038 cpu_buffer->commit_page->write; 1465 return;
1466 if (RB_WARN_ON(cpu_buffer,
1467 rb_is_reader_page(cpu_buffer->tail_page)))
1468 return;
1469 local_set(&cpu_buffer->commit_page->page->commit,
1470 rb_page_write(cpu_buffer->commit_page));
1039 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page); 1471 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
1040 cpu_buffer->write_stamp = 1472 cpu_buffer->write_stamp =
1041 cpu_buffer->commit_page->page->time_stamp; 1473 cpu_buffer->commit_page->page->time_stamp;
@@ -1044,8 +1476,12 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
1044 } 1476 }
1045 while (rb_commit_index(cpu_buffer) != 1477 while (rb_commit_index(cpu_buffer) !=
1046 rb_page_write(cpu_buffer->commit_page)) { 1478 rb_page_write(cpu_buffer->commit_page)) {
1047 cpu_buffer->commit_page->page->commit = 1479
1048 cpu_buffer->commit_page->write; 1480 local_set(&cpu_buffer->commit_page->page->commit,
1481 rb_page_write(cpu_buffer->commit_page));
1482 RB_WARN_ON(cpu_buffer,
1483 local_read(&cpu_buffer->commit_page->page->commit) &
1484 ~RB_WRITE_MASK);
1049 barrier(); 1485 barrier();
1050 } 1486 }
1051 1487
@@ -1078,7 +1514,7 @@ static void rb_inc_iter(struct ring_buffer_iter *iter)
1078 * to the head page instead of next. 1514 * to the head page instead of next.
1079 */ 1515 */
1080 if (iter->head_page == cpu_buffer->reader_page) 1516 if (iter->head_page == cpu_buffer->reader_page)
1081 iter->head_page = cpu_buffer->head_page; 1517 iter->head_page = rb_set_head_page(cpu_buffer);
1082 else 1518 else
1083 rb_inc_page(cpu_buffer, &iter->head_page); 1519 rb_inc_page(cpu_buffer, &iter->head_page);
1084 1520
@@ -1122,6 +1558,163 @@ rb_update_event(struct ring_buffer_event *event,
1122 } 1558 }
1123} 1559}
1124 1560
1561/*
1562 * rb_handle_head_page - writer hit the head page
1563 *
1564 * Returns: +1 to retry page
1565 * 0 to continue
1566 * -1 on error
1567 */
1568static int
1569rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
1570 struct buffer_page *tail_page,
1571 struct buffer_page *next_page)
1572{
1573 struct buffer_page *new_head;
1574 int entries;
1575 int type;
1576 int ret;
1577
1578 entries = rb_page_entries(next_page);
1579
1580 /*
1581 * The hard part is here. We need to move the head
1582 * forward, and protect against both readers on
1583 * other CPUs and writers coming in via interrupts.
1584 */
1585 type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
1586 RB_PAGE_HEAD);
1587
1588 /*
1589 * type can be one of four:
1590 * NORMAL - an interrupt already moved it for us
1591 * HEAD - we are the first to get here.
1592 * UPDATE - we are the interrupt interrupting
1593 * a current move.
1594 * MOVED - a reader on another CPU moved the next
1595 * pointer to its reader page. Give up
1596 * and try again.
1597 */
1598
1599 switch (type) {
1600 case RB_PAGE_HEAD:
1601 /*
1602 * We changed the head to UPDATE, thus
1603 * it is our responsibility to update
1604 * the counters.
1605 */
1606 local_add(entries, &cpu_buffer->overrun);
1607
1608 /*
1609 * The entries will be zeroed out when we move the
1610 * tail page.
1611 */
1612
1613 /* still more to do */
1614 break;
1615
1616 case RB_PAGE_UPDATE:
1617 /*
1618		 * This is an interrupt that interrupted the
1619 * previous update. Still more to do.
1620 */
1621 break;
1622 case RB_PAGE_NORMAL:
1623 /*
1624 * An interrupt came in before the update
1625 * and processed this for us.
1626 * Nothing left to do.
1627 */
1628 return 1;
1629 case RB_PAGE_MOVED:
1630 /*
1631 * The reader is on another CPU and just did
1632 * a swap with our next_page.
1633 * Try again.
1634 */
1635 return 1;
1636 default:
1637 RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
1638 return -1;
1639 }
1640
1641 /*
1642 * Now that we are here, the old head pointer is
1643 * set to UPDATE. This will keep the reader from
1644 * swapping the head page with the reader page.
1645 * The reader (on another CPU) will spin till
1646 * we are finished.
1647 *
1648 * We just need to protect against interrupts
1649 * doing the job. We will set the next pointer
1650 * to HEAD. After that, we set the old pointer
1651	 * to NORMAL, but only if it was HEAD before;
1652	 * otherwise we are an interrupt, and only
1653	 * want the outermost commit to reset it.
1654 */
1655 new_head = next_page;
1656 rb_inc_page(cpu_buffer, &new_head);
1657
1658 ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
1659 RB_PAGE_NORMAL);
1660
1661 /*
1662 * Valid returns are:
1663 * HEAD - an interrupt came in and already set it.
1664 * NORMAL - One of two things:
1665 * 1) We really set it.
1666 * 2) A bunch of interrupts came in and moved
1667 * the page forward again.
1668 */
1669 switch (ret) {
1670 case RB_PAGE_HEAD:
1671 case RB_PAGE_NORMAL:
1672 /* OK */
1673 break;
1674 default:
1675 RB_WARN_ON(cpu_buffer, 1);
1676 return -1;
1677 }
1678
1679 /*
1680 * It is possible that an interrupt came in,
1681 * set the head up, then more interrupts came in
1682 * and moved it again. When we get back here,
1683 * the page would have been set to NORMAL but we
1684 * just set it back to HEAD.
1685 *
1686 * How do you detect this? Well, if that happened
1687 * the tail page would have moved.
1688 */
1689 if (ret == RB_PAGE_NORMAL) {
1690 /*
1691		 * If the tail had moved past next, then we need
1692 * to reset the pointer.
1693 */
1694 if (cpu_buffer->tail_page != tail_page &&
1695 cpu_buffer->tail_page != next_page)
1696 rb_head_page_set_normal(cpu_buffer, new_head,
1697 next_page,
1698 RB_PAGE_HEAD);
1699 }
1700
1701 /*
1702	 * If this was the outermost commit (the one that
1703 * changed the original pointer from HEAD to UPDATE),
1704 * then it is up to us to reset it to NORMAL.
1705 */
1706 if (type == RB_PAGE_HEAD) {
1707 ret = rb_head_page_set_normal(cpu_buffer, next_page,
1708 tail_page,
1709 RB_PAGE_UPDATE);
1710 if (RB_WARN_ON(cpu_buffer,
1711 ret != RB_PAGE_UPDATE))
1712 return -1;
1713 }
1714
1715 return 0;
1716}
1717
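rb_handle_head_page() above resolves writer/interrupt races with compare-and-swap transitions between a few per-page tag states. A toy model of that protocol, with made-up state values and __sync_val_compare_and_swap() standing in for the rb_head_page_set_*() helpers.

#include <stdio.h>

enum { PG_NORMAL = 0, PG_HEAD = 1, PG_UPDATE = 2 };	/* illustrative values only */

/* returns the tag value actually found, like the rb_head_page_set_*() helpers */
static unsigned long tag_set(unsigned long *tag, unsigned long expect, unsigned long newval)
{
	return __sync_val_compare_and_swap(tag, expect, newval);
}

int main(void)
{
	unsigned long tag = PG_HEAD;

	switch (tag_set(&tag, PG_HEAD, PG_UPDATE)) {
	case PG_HEAD:
		printf("we won the race and own the update\n");
		break;
	case PG_UPDATE:
		printf("a nested writer is already updating\n");
		break;
	default:
		printf("someone else moved the head; retry\n");
		break;
	}
	return 0;
}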
1125static unsigned rb_calculate_event_length(unsigned length) 1718static unsigned rb_calculate_event_length(unsigned length)
1126{ 1719{
1127 struct ring_buffer_event event; /* Used only for sizeof array */ 1720 struct ring_buffer_event event; /* Used only for sizeof array */
@@ -1185,9 +1778,6 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
1185 event->type_len = RINGBUF_TYPE_PADDING; 1778 event->type_len = RINGBUF_TYPE_PADDING;
1186 /* time delta must be non zero */ 1779 /* time delta must be non zero */
1187 event->time_delta = 1; 1780 event->time_delta = 1;
1188 /* Account for this as an entry */
1189 local_inc(&tail_page->entries);
1190 local_inc(&cpu_buffer->entries);
1191 1781
1192 /* Set write to end of buffer */ 1782 /* Set write to end of buffer */
1193 length = (tail + length) - BUF_PAGE_SIZE; 1783 length = (tail + length) - BUF_PAGE_SIZE;
@@ -1200,96 +1790,93 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
1200 struct buffer_page *commit_page, 1790 struct buffer_page *commit_page,
1201 struct buffer_page *tail_page, u64 *ts) 1791 struct buffer_page *tail_page, u64 *ts)
1202{ 1792{
1203 struct buffer_page *next_page, *head_page, *reader_page;
1204 struct ring_buffer *buffer = cpu_buffer->buffer; 1793 struct ring_buffer *buffer = cpu_buffer->buffer;
1205 bool lock_taken = false; 1794 struct buffer_page *next_page;
1206 unsigned long flags; 1795 int ret;
1207 1796
1208 next_page = tail_page; 1797 next_page = tail_page;
1209 1798
1210 local_irq_save(flags);
1211 /*
1212 * Since the write to the buffer is still not
1213 * fully lockless, we must be careful with NMIs.
1214 * The locks in the writers are taken when a write
1215 * crosses to a new page. The locks protect against
1216 * races with the readers (this will soon be fixed
1217 * with a lockless solution).
1218 *
1219 * Because we can not protect against NMIs, and we
1220 * want to keep traces reentrant, we need to manage
1221 * what happens when we are in an NMI.
1222 *
1223 * NMIs can happen after we take the lock.
1224 * If we are in an NMI, only take the lock
1225 * if it is not already taken. Otherwise
1226 * simply fail.
1227 */
1228 if (unlikely(in_nmi())) {
1229 if (!__raw_spin_trylock(&cpu_buffer->lock)) {
1230 cpu_buffer->nmi_dropped++;
1231 goto out_reset;
1232 }
1233 } else
1234 __raw_spin_lock(&cpu_buffer->lock);
1235
1236 lock_taken = true;
1237
1238 rb_inc_page(cpu_buffer, &next_page); 1799 rb_inc_page(cpu_buffer, &next_page);
1239 1800
1240 head_page = cpu_buffer->head_page;
1241 reader_page = cpu_buffer->reader_page;
1242
1243 /* we grabbed the lock before incrementing */
1244 if (RB_WARN_ON(cpu_buffer, next_page == reader_page))
1245 goto out_reset;
1246
1247 /* 1801 /*
1248 * If for some reason, we had an interrupt storm that made 1802 * If for some reason, we had an interrupt storm that made
1249 * it all the way around the buffer, bail, and warn 1803 * it all the way around the buffer, bail, and warn
1250 * about it. 1804 * about it.
1251 */ 1805 */
1252 if (unlikely(next_page == commit_page)) { 1806 if (unlikely(next_page == commit_page)) {
1253 cpu_buffer->commit_overrun++; 1807 local_inc(&cpu_buffer->commit_overrun);
1254 goto out_reset; 1808 goto out_reset;
1255 } 1809 }
1256 1810
1257 if (next_page == head_page) { 1811 /*
1258 if (!(buffer->flags & RB_FL_OVERWRITE)) 1812 * This is where the fun begins!
1259 goto out_reset; 1813 *
1260 1814 * We are fighting against races between a reader that
1261 /* tail_page has not moved yet? */ 1815 * could be on another CPU trying to swap its reader
1262 if (tail_page == cpu_buffer->tail_page) { 1816 * page with the buffer head.
1263 /* count overflows */ 1817 *
1264 cpu_buffer->overrun += 1818 * We are also fighting against interrupts coming in and
1265 local_read(&head_page->entries); 1819 * moving the head or tail on us as well.
1820 *
1821 * If the next page is the head page then we have filled
1822 * the buffer, unless the commit page is still on the
1823 * reader page.
1824 */
1825 if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) {
1266 1826
1267 rb_inc_page(cpu_buffer, &head_page); 1827 /*
1268 cpu_buffer->head_page = head_page; 1828 * If the commit is not on the reader page, then
1269 cpu_buffer->head_page->read = 0; 1829 * move the header page.
1830 */
1831 if (!rb_is_reader_page(cpu_buffer->commit_page)) {
1832 /*
1833 * If we are not in overwrite mode,
1834 * this is easy, just stop here.
1835 */
1836 if (!(buffer->flags & RB_FL_OVERWRITE))
1837 goto out_reset;
1838
1839 ret = rb_handle_head_page(cpu_buffer,
1840 tail_page,
1841 next_page);
1842 if (ret < 0)
1843 goto out_reset;
1844 if (ret)
1845 goto out_again;
1846 } else {
1847 /*
1848 * We need to be careful here too. The
1849 * commit page could still be on the reader
1850 * page. We could have a small buffer, and
1851 * have filled up the buffer with events
1852 * from interrupts and such, and wrapped.
1853 *
1854			 * Note, if the tail page is also on the
1855 * reader_page, we let it move out.
1856 */
1857 if (unlikely((cpu_buffer->commit_page !=
1858 cpu_buffer->tail_page) &&
1859 (cpu_buffer->commit_page ==
1860 cpu_buffer->reader_page))) {
1861 local_inc(&cpu_buffer->commit_overrun);
1862 goto out_reset;
1863 }
1270 } 1864 }
1271 } 1865 }
1272 1866
1273 /* 1867 ret = rb_tail_page_update(cpu_buffer, tail_page, next_page);
1274 * If the tail page is still the same as what we think 1868 if (ret) {
1275 * it is, then it is up to us to update the tail 1869 /*
1276 * pointer. 1870 * Nested commits always have zero deltas, so
1277 */ 1871 * just reread the time stamp
1278 if (tail_page == cpu_buffer->tail_page) { 1872 */
1279 local_set(&next_page->write, 0);
1280 local_set(&next_page->entries, 0);
1281 local_set(&next_page->page->commit, 0);
1282 cpu_buffer->tail_page = next_page;
1283
1284 /* reread the time stamp */
1285 *ts = rb_time_stamp(buffer, cpu_buffer->cpu); 1873 *ts = rb_time_stamp(buffer, cpu_buffer->cpu);
1286 cpu_buffer->tail_page->page->time_stamp = *ts; 1874 next_page->page->time_stamp = *ts;
1287 } 1875 }
1288 1876
1289 rb_reset_tail(cpu_buffer, tail_page, tail, length); 1877 out_again:
1290 1878
1291 __raw_spin_unlock(&cpu_buffer->lock); 1879 rb_reset_tail(cpu_buffer, tail_page, tail, length);
1292 local_irq_restore(flags);
1293 1880
1294 /* fail and let the caller try again */ 1881 /* fail and let the caller try again */
1295 return ERR_PTR(-EAGAIN); 1882 return ERR_PTR(-EAGAIN);
@@ -1298,9 +1885,6 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
1298 /* reset write */ 1885 /* reset write */
1299 rb_reset_tail(cpu_buffer, tail_page, tail, length); 1886 rb_reset_tail(cpu_buffer, tail_page, tail, length);
1300 1887
1301 if (likely(lock_taken))
1302 __raw_spin_unlock(&cpu_buffer->lock);
1303 local_irq_restore(flags);
1304 return NULL; 1888 return NULL;
1305} 1889}
1306 1890
@@ -1317,6 +1901,9 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
1317 barrier(); 1901 barrier();
1318 tail_page = cpu_buffer->tail_page; 1902 tail_page = cpu_buffer->tail_page;
1319 write = local_add_return(length, &tail_page->write); 1903 write = local_add_return(length, &tail_page->write);
1904
1905 /* set write to only the index of the write */
1906 write &= RB_WRITE_MASK;
1320 tail = write - length; 1907 tail = write - length;
1321 1908
1322	 /* See if we shot past the end of this buffer page */ 1909
@@ -1361,12 +1948,16 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
1361 bpage = cpu_buffer->tail_page; 1948 bpage = cpu_buffer->tail_page;
1362 1949
1363 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) { 1950 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
1951 unsigned long write_mask =
1952 local_read(&bpage->write) & ~RB_WRITE_MASK;
1364 /* 1953 /*
1365 * This is on the tail page. It is possible that 1954 * This is on the tail page. It is possible that
1366 * a write could come in and move the tail page 1955 * a write could come in and move the tail page
1367 * and write to the next page. That is fine 1956 * and write to the next page. That is fine
1368 * because we just shorten what is on this page. 1957 * because we just shorten what is on this page.
1369 */ 1958 */
1959 old_index += write_mask;
1960 new_index += write_mask;
1370 index = local_cmpxchg(&bpage->write, old_index, new_index); 1961 index = local_cmpxchg(&bpage->write, old_index, new_index);
1371 if (index == old_index) 1962 if (index == old_index)
1372 return 1; 1963 return 1;
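The write_mask arithmetic above exists because the write word now packs an event index in its low bits and a nesting count above them; a cmpxchg on the index can only match if the current high bits are folded into both operands. A stand-alone sketch assuming a 20-bit split, with a GCC builtin in place of local_cmpxchg().

#include <stdio.h>

#define WRITE_MASK 0xfffffUL	/* assumed split: low 20 bits = index, high bits = nesting count */

static int shrink_index(unsigned long *write, unsigned long old_index, unsigned long new_index)
{
	unsigned long high = *write & ~WRITE_MASK;

	/* fold the current high bits into both operands so the compare can match */
	old_index += high;
	new_index += high;
	return __sync_val_compare_and_swap(write, old_index, new_index) == old_index;
}

int main(void)
{
	unsigned long write = (2UL << 20) | 100;	/* nesting 2, index 100 */

	printf("discard %s\n", shrink_index(&write, 100, 80) ? "ok" : "lost the race");
	printf("write = %#lx\n", write);		/* nesting kept, index now 80 */
	return 0;
}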
@@ -1482,7 +2073,8 @@ static void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
1482} 2073}
1483 2074
1484static struct ring_buffer_event * 2075static struct ring_buffer_event *
1485rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer, 2076rb_reserve_next_event(struct ring_buffer *buffer,
2077 struct ring_buffer_per_cpu *cpu_buffer,
1486 unsigned long length) 2078 unsigned long length)
1487{ 2079{
1488 struct ring_buffer_event *event; 2080 struct ring_buffer_event *event;
@@ -1492,6 +2084,21 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
1492 2084
1493 rb_start_commit(cpu_buffer); 2085 rb_start_commit(cpu_buffer);
1494 2086
2087#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2088 /*
2089 * Due to the ability to swap a cpu buffer from a buffer
2090 * it is possible it was swapped before we committed.
2091 * (committing stops a swap). We check for it here and
2092 * if it happened, we have to fail the write.
2093 */
2094 barrier();
2095 if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
2096 local_dec(&cpu_buffer->committing);
2097 local_dec(&cpu_buffer->commits);
2098 return NULL;
2099 }
2100#endif
2101
1495 length = rb_calculate_event_length(length); 2102 length = rb_calculate_event_length(length);
1496 again: 2103 again:
1497 /* 2104 /*
@@ -1652,7 +2259,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
1652 if (length > BUF_MAX_DATA_SIZE) 2259 if (length > BUF_MAX_DATA_SIZE)
1653 goto out; 2260 goto out;
1654 2261
1655 event = rb_reserve_next_event(cpu_buffer, length); 2262 event = rb_reserve_next_event(buffer, cpu_buffer, length);
1656 if (!event) 2263 if (!event)
1657 goto out; 2264 goto out;
1658 2265
@@ -1675,18 +2282,23 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
1675} 2282}
1676EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve); 2283EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
1677 2284
1678static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer, 2285static void
2286rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer,
1679 struct ring_buffer_event *event) 2287 struct ring_buffer_event *event)
1680{ 2288{
1681 local_inc(&cpu_buffer->entries);
1682
1683 /* 2289 /*
1684 * The event first in the commit queue updates the 2290 * The event first in the commit queue updates the
1685 * time stamp. 2291 * time stamp.
1686 */ 2292 */
1687 if (rb_event_is_commit(cpu_buffer, event)) 2293 if (rb_event_is_commit(cpu_buffer, event))
1688 cpu_buffer->write_stamp += event->time_delta; 2294 cpu_buffer->write_stamp += event->time_delta;
2295}
1689 2296
2297static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
2298 struct ring_buffer_event *event)
2299{
2300 local_inc(&cpu_buffer->entries);
2301 rb_update_write_stamp(cpu_buffer, event);
1690 rb_end_commit(cpu_buffer); 2302 rb_end_commit(cpu_buffer);
1691} 2303}
1692 2304
@@ -1733,32 +2345,57 @@ static inline void rb_event_discard(struct ring_buffer_event *event)
1733 event->time_delta = 1; 2345 event->time_delta = 1;
1734} 2346}
1735 2347
1736/** 2348/*
1737 * ring_buffer_event_discard - discard any event in the ring buffer 2349 * Decrement the entries to the page that an event is on.
1738 * @event: the event to discard 2350 * The event does not even need to exist, only the pointer
1739 * 2351 * to the page it is on. This may only be called before the commit
1740 * Sometimes a event that is in the ring buffer needs to be ignored. 2352 * takes place.
1741 * This function lets the user discard an event in the ring buffer
1742 * and then that event will not be read later.
1743 *
1744 * Note, it is up to the user to be careful with this, and protect
1745 * against races. If the user discards an event that has been consumed
1746 * it is possible that it could corrupt the ring buffer.
1747 */ 2353 */
1748void ring_buffer_event_discard(struct ring_buffer_event *event) 2354static inline void
2355rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
2356 struct ring_buffer_event *event)
1749{ 2357{
1750 rb_event_discard(event); 2358 unsigned long addr = (unsigned long)event;
2359 struct buffer_page *bpage = cpu_buffer->commit_page;
2360 struct buffer_page *start;
2361
2362 addr &= PAGE_MASK;
2363
2364 /* Do the likely case first */
2365 if (likely(bpage->page == (void *)addr)) {
2366 local_dec(&bpage->entries);
2367 return;
2368 }
2369
2370 /*
2371 * Because the commit page may be on the reader page we
2372 * start with the next page and check the end loop there.
2373 */
2374 rb_inc_page(cpu_buffer, &bpage);
2375 start = bpage;
2376 do {
2377 if (bpage->page == (void *)addr) {
2378 local_dec(&bpage->entries);
2379 return;
2380 }
2381 rb_inc_page(cpu_buffer, &bpage);
2382 } while (bpage != start);
2383
2384 /* commit not part of this buffer?? */
2385 RB_WARN_ON(cpu_buffer, 1);
1751} 2386}
1752EXPORT_SYMBOL_GPL(ring_buffer_event_discard);
1753 2387
1754/** 2388/**
1755 * ring_buffer_commit_discard - discard an event that has not been committed 2389 * ring_buffer_commit_discard - discard an event that has not been committed
1756 * @buffer: the ring buffer 2390 * @buffer: the ring buffer
1757 * @event: non committed event to discard 2391 * @event: non committed event to discard
1758 * 2392 *
1759 * This is similar to ring_buffer_event_discard but must only be 2393 * Sometimes an event that is in the ring buffer needs to be ignored.
1760 * performed on an event that has not been committed yet. The difference 2394 * This function lets the user discard an event in the ring buffer
1761 * is that this will also try to free the event from the ring buffer 2395 * and then that event will not be read later.
2396 *
2397 * This function only works if it is called before the item has been
2398 * committed. It will try to free the event from the ring buffer
1762 * if another event has not been added behind it. 2399 * if another event has not been added behind it.
1763 * 2400 *
1764 * If another event has been added behind it, it will set the event 2401 * If another event has been added behind it, it will set the event
@@ -1786,14 +2423,15 @@ void ring_buffer_discard_commit(struct ring_buffer *buffer,
1786 */ 2423 */
1787 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing)); 2424 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
1788 2425
2426 rb_decrement_entry(cpu_buffer, event);
1789 if (rb_try_to_discard(cpu_buffer, event)) 2427 if (rb_try_to_discard(cpu_buffer, event))
1790 goto out; 2428 goto out;
1791 2429
1792 /* 2430 /*
1793 * The commit is still visible to the reader, so we 2431 * The commit is still visible to the reader, so we
1794 * must increment entries. 2432 * must still update the timestamp.
1795 */ 2433 */
1796 local_inc(&cpu_buffer->entries); 2434 rb_update_write_stamp(cpu_buffer, event);
1797 out: 2435 out:
1798 rb_end_commit(cpu_buffer); 2436 rb_end_commit(cpu_buffer);
1799 2437
@@ -1854,7 +2492,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
1854 if (length > BUF_MAX_DATA_SIZE) 2492 if (length > BUF_MAX_DATA_SIZE)
1855 goto out; 2493 goto out;
1856 2494
1857 event = rb_reserve_next_event(cpu_buffer, length); 2495 event = rb_reserve_next_event(buffer, cpu_buffer, length);
1858 if (!event) 2496 if (!event)
1859 goto out; 2497 goto out;
1860 2498
@@ -1875,9 +2513,13 @@ EXPORT_SYMBOL_GPL(ring_buffer_write);
1875static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) 2513static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
1876{ 2514{
1877 struct buffer_page *reader = cpu_buffer->reader_page; 2515 struct buffer_page *reader = cpu_buffer->reader_page;
1878 struct buffer_page *head = cpu_buffer->head_page; 2516 struct buffer_page *head = rb_set_head_page(cpu_buffer);
1879 struct buffer_page *commit = cpu_buffer->commit_page; 2517 struct buffer_page *commit = cpu_buffer->commit_page;
1880 2518
2519 /* In case of error, head will be NULL */
2520 if (unlikely(!head))
2521 return 1;
2522
1881 return reader->read == rb_page_commit(reader) && 2523 return reader->read == rb_page_commit(reader) &&
1882 (commit == reader || 2524 (commit == reader ||
1883 (commit == head && 2525 (commit == head &&
@@ -1968,7 +2610,7 @@ unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
1968 return 0; 2610 return 0;
1969 2611
1970 cpu_buffer = buffer->buffers[cpu]; 2612 cpu_buffer = buffer->buffers[cpu];
1971 ret = (local_read(&cpu_buffer->entries) - cpu_buffer->overrun) 2613 ret = (local_read(&cpu_buffer->entries) - local_read(&cpu_buffer->overrun))
1972 - cpu_buffer->read; 2614 - cpu_buffer->read;
1973 2615
1974 return ret; 2616 return ret;
@@ -1989,33 +2631,13 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
1989 return 0; 2631 return 0;
1990 2632
1991 cpu_buffer = buffer->buffers[cpu]; 2633 cpu_buffer = buffer->buffers[cpu];
1992 ret = cpu_buffer->overrun; 2634 ret = local_read(&cpu_buffer->overrun);
1993 2635
1994 return ret; 2636 return ret;
1995} 2637}
1996EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu); 2638EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
1997 2639
1998/** 2640/**
1999 * ring_buffer_nmi_dropped_cpu - get the number of nmis that were dropped
2000 * @buffer: The ring buffer
2001 * @cpu: The per CPU buffer to get the number of overruns from
2002 */
2003unsigned long ring_buffer_nmi_dropped_cpu(struct ring_buffer *buffer, int cpu)
2004{
2005 struct ring_buffer_per_cpu *cpu_buffer;
2006 unsigned long ret;
2007
2008 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2009 return 0;
2010
2011 cpu_buffer = buffer->buffers[cpu];
2012 ret = cpu_buffer->nmi_dropped;
2013
2014 return ret;
2015}
2016EXPORT_SYMBOL_GPL(ring_buffer_nmi_dropped_cpu);
2017
2018/**
2019 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by commits 2641 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by commits
2020 * @buffer: The ring buffer 2642 * @buffer: The ring buffer
2021 * @cpu: The per CPU buffer to get the number of overruns from 2643 * @cpu: The per CPU buffer to get the number of overruns from
@@ -2030,7 +2652,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
2030 return 0; 2652 return 0;
2031 2653
2032 cpu_buffer = buffer->buffers[cpu]; 2654 cpu_buffer = buffer->buffers[cpu];
2033 ret = cpu_buffer->commit_overrun; 2655 ret = local_read(&cpu_buffer->commit_overrun);
2034 2656
2035 return ret; 2657 return ret;
2036} 2658}
@@ -2053,7 +2675,7 @@ unsigned long ring_buffer_entries(struct ring_buffer *buffer)
2053 for_each_buffer_cpu(buffer, cpu) { 2675 for_each_buffer_cpu(buffer, cpu) {
2054 cpu_buffer = buffer->buffers[cpu]; 2676 cpu_buffer = buffer->buffers[cpu];
2055 entries += (local_read(&cpu_buffer->entries) - 2677 entries += (local_read(&cpu_buffer->entries) -
2056 cpu_buffer->overrun) - cpu_buffer->read; 2678 local_read(&cpu_buffer->overrun)) - cpu_buffer->read;
2057 } 2679 }
2058 2680
2059 return entries; 2681 return entries;
@@ -2076,7 +2698,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
2076 /* if you care about this being correct, lock the buffer */ 2698 /* if you care about this being correct, lock the buffer */
2077 for_each_buffer_cpu(buffer, cpu) { 2699 for_each_buffer_cpu(buffer, cpu) {
2078 cpu_buffer = buffer->buffers[cpu]; 2700 cpu_buffer = buffer->buffers[cpu];
2079 overruns += cpu_buffer->overrun; 2701 overruns += local_read(&cpu_buffer->overrun);
2080 } 2702 }
2081 2703
2082 return overruns; 2704 return overruns;
@@ -2089,8 +2711,10 @@ static void rb_iter_reset(struct ring_buffer_iter *iter)
2089 2711
2090 /* Iterator usage is expected to have record disabled */ 2712 /* Iterator usage is expected to have record disabled */
2091 if (list_empty(&cpu_buffer->reader_page->list)) { 2713 if (list_empty(&cpu_buffer->reader_page->list)) {
2092 iter->head_page = cpu_buffer->head_page; 2714 iter->head_page = rb_set_head_page(cpu_buffer);
2093 iter->head = cpu_buffer->head_page->read; 2715 if (unlikely(!iter->head_page))
2716 return;
2717 iter->head = iter->head_page->read;
2094 } else { 2718 } else {
2095 iter->head_page = cpu_buffer->reader_page; 2719 iter->head_page = cpu_buffer->reader_page;
2096 iter->head = cpu_buffer->reader_page->read; 2720 iter->head = cpu_buffer->reader_page->read;
@@ -2207,6 +2831,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
2207 struct buffer_page *reader = NULL; 2831 struct buffer_page *reader = NULL;
2208 unsigned long flags; 2832 unsigned long flags;
2209 int nr_loops = 0; 2833 int nr_loops = 0;
2834 int ret;
2210 2835
2211 local_irq_save(flags); 2836 local_irq_save(flags);
2212 __raw_spin_lock(&cpu_buffer->lock); 2837 __raw_spin_lock(&cpu_buffer->lock);
@@ -2240,30 +2865,56 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
2240 goto out; 2865 goto out;
2241 2866
2242 /* 2867 /*
2243 * Splice the empty reader page into the list around the head.
2244 * Reset the reader page to size zero. 2868 * Reset the reader page to size zero.
2245 */ 2869 */
2870 local_set(&cpu_buffer->reader_page->write, 0);
2871 local_set(&cpu_buffer->reader_page->entries, 0);
2872 local_set(&cpu_buffer->reader_page->page->commit, 0);
2246 2873
2247 reader = cpu_buffer->head_page; 2874 spin:
2875 /*
2876 * Splice the empty reader page into the list around the head.
2877 */
2878 reader = rb_set_head_page(cpu_buffer);
2248 cpu_buffer->reader_page->list.next = reader->list.next; 2879 cpu_buffer->reader_page->list.next = reader->list.next;
2249 cpu_buffer->reader_page->list.prev = reader->list.prev; 2880 cpu_buffer->reader_page->list.prev = reader->list.prev;
2250 2881
2251 local_set(&cpu_buffer->reader_page->write, 0); 2882 /*
2252 local_set(&cpu_buffer->reader_page->entries, 0); 2883 * cpu_buffer->pages just needs to point to the buffer, it
2253 local_set(&cpu_buffer->reader_page->page->commit, 0); 2884	 * has no specific buffer page to point to. Let's move it out
2885	 * of our way so we don't accidentally swap it.
2886 */
2887 cpu_buffer->pages = reader->list.prev;
2254 2888
2255 /* Make the reader page now replace the head */ 2889 /* The reader page will be pointing to the new head */
2256 reader->list.prev->next = &cpu_buffer->reader_page->list; 2890 rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);
2257 reader->list.next->prev = &cpu_buffer->reader_page->list;
2258 2891
2259 /* 2892 /*
2260 * If the tail is on the reader, then we must set the head 2893 * Here's the tricky part.
2261 * to the inserted page, otherwise we set it one before. 2894 *
2895 * We need to move the pointer past the header page.
2896 * But we can only do that if a writer is not currently
2897 * moving it. The page before the header page has the
2898	 * flag bit '1' set if it is pointing to the page we want,
2899	 * but if the writer is in the process of moving it
2900	 * then it will be '2' or already moved '0'.
2262 */ 2901 */
2263 cpu_buffer->head_page = cpu_buffer->reader_page;
2264 2902
2265 if (cpu_buffer->commit_page != reader) 2903 ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
2266 rb_inc_page(cpu_buffer, &cpu_buffer->head_page); 2904
2905 /*
2906 * If we did not convert it, then we must try again.
2907 */
2908 if (!ret)
2909 goto spin;
2910
2911 /*
2912 * Yeah! We succeeded in replacing the page.
2913 *
2914 * Now make the new head point back to the reader page.
2915 */
2916 reader->list.next->prev = &cpu_buffer->reader_page->list;
2917 rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
2267 2918
2268 /* Finally update the reader page to the new head */ 2919 /* Finally update the reader page to the new head */
2269 cpu_buffer->reader_page = reader; 2920 cpu_buffer->reader_page = reader;
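The splice above is the heart of the new reader path: the reader owns one spare page and trades it for the current head. A much simplified, single-threaded model of that exchange, ignoring the flag-bit races the spin loop resolves; the names and the singly linked ring are illustrative only.

#include <stdio.h>

struct page { struct page *next; int id; };

/* splice the spare page in after prev; the page it displaces becomes the reader's */
static struct page *swap_reader(struct page *prev, struct page *spare)
{
	struct page *old = prev->next;	/* the page the reader wants */

	spare->next = old->next;	/* spare takes its place in the ring */
	prev->next = spare;
	old->next = NULL;		/* the old page now belongs to the reader */
	return old;
}

int main(void)
{
	struct page a = { .id = 1 }, b = { .id = 2 }, spare = { .id = 99 };

	a.next = &b; b.next = &a;			/* two-page ring */
	struct page *got = swap_reader(&b, &spare);	/* take the page after b */
	printf("reader got page %d, ring is now %d -> %d\n",
	       got->id, b.id, b.next->id);
	return 0;
}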
@@ -2292,8 +2943,7 @@ static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
2292 2943
2293 event = rb_reader_event(cpu_buffer); 2944 event = rb_reader_event(cpu_buffer);
2294 2945
2295 if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX 2946 if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
2296 || rb_discarded_event(event))
2297 cpu_buffer->read++; 2947 cpu_buffer->read++;
2298 2948
2299 rb_update_read_stamp(cpu_buffer, event); 2949 rb_update_read_stamp(cpu_buffer, event);
@@ -2525,10 +3175,8 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
2525 spin_unlock(&cpu_buffer->reader_lock); 3175 spin_unlock(&cpu_buffer->reader_lock);
2526 local_irq_restore(flags); 3176 local_irq_restore(flags);
2527 3177
2528 if (event && event->type_len == RINGBUF_TYPE_PADDING) { 3178 if (event && event->type_len == RINGBUF_TYPE_PADDING)
2529 cpu_relax();
2530 goto again; 3179 goto again;
2531 }
2532 3180
2533 return event; 3181 return event;
2534} 3182}
@@ -2553,10 +3201,8 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
2553 event = rb_iter_peek(iter, ts); 3201 event = rb_iter_peek(iter, ts);
2554 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 3202 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2555 3203
2556 if (event && event->type_len == RINGBUF_TYPE_PADDING) { 3204 if (event && event->type_len == RINGBUF_TYPE_PADDING)
2557 cpu_relax();
2558 goto again; 3205 goto again;
2559 }
2560 3206
2561 return event; 3207 return event;
2562} 3208}
@@ -2602,10 +3248,8 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
2602 out: 3248 out:
2603 preempt_enable(); 3249 preempt_enable();
2604 3250
2605 if (event && event->type_len == RINGBUF_TYPE_PADDING) { 3251 if (event && event->type_len == RINGBUF_TYPE_PADDING)
2606 cpu_relax();
2607 goto again; 3252 goto again;
2608 }
2609 3253
2610 return event; 3254 return event;
2611} 3255}
@@ -2685,21 +3329,19 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
2685 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 3329 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2686 unsigned long flags; 3330 unsigned long flags;
2687 3331
2688 again:
2689 spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 3332 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3333 again:
2690 event = rb_iter_peek(iter, ts); 3334 event = rb_iter_peek(iter, ts);
2691 if (!event) 3335 if (!event)
2692 goto out; 3336 goto out;
2693 3337
3338 if (event->type_len == RINGBUF_TYPE_PADDING)
3339 goto again;
3340
2694 rb_advance_iter(iter); 3341 rb_advance_iter(iter);
2695 out: 3342 out:
2696 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 3343 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2697 3344
2698 if (event && event->type_len == RINGBUF_TYPE_PADDING) {
2699 cpu_relax();
2700 goto again;
2701 }
2702
2703 return event; 3345 return event;
2704} 3346}
2705EXPORT_SYMBOL_GPL(ring_buffer_read); 3347EXPORT_SYMBOL_GPL(ring_buffer_read);
@@ -2717,8 +3359,10 @@ EXPORT_SYMBOL_GPL(ring_buffer_size);
2717static void 3359static void
2718rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) 3360rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
2719{ 3361{
3362 rb_head_page_deactivate(cpu_buffer);
3363
2720 cpu_buffer->head_page 3364 cpu_buffer->head_page
2721 = list_entry(cpu_buffer->pages.next, struct buffer_page, list); 3365 = list_entry(cpu_buffer->pages, struct buffer_page, list);
2722 local_set(&cpu_buffer->head_page->write, 0); 3366 local_set(&cpu_buffer->head_page->write, 0);
2723 local_set(&cpu_buffer->head_page->entries, 0); 3367 local_set(&cpu_buffer->head_page->entries, 0);
2724 local_set(&cpu_buffer->head_page->page->commit, 0); 3368 local_set(&cpu_buffer->head_page->page->commit, 0);
@@ -2734,16 +3378,17 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
2734 local_set(&cpu_buffer->reader_page->page->commit, 0); 3378 local_set(&cpu_buffer->reader_page->page->commit, 0);
2735 cpu_buffer->reader_page->read = 0; 3379 cpu_buffer->reader_page->read = 0;
2736 3380
2737 cpu_buffer->nmi_dropped = 0; 3381 local_set(&cpu_buffer->commit_overrun, 0);
2738 cpu_buffer->commit_overrun = 0; 3382 local_set(&cpu_buffer->overrun, 0);
2739 cpu_buffer->overrun = 0;
2740 cpu_buffer->read = 0;
2741 local_set(&cpu_buffer->entries, 0); 3383 local_set(&cpu_buffer->entries, 0);
2742 local_set(&cpu_buffer->committing, 0); 3384 local_set(&cpu_buffer->committing, 0);
2743 local_set(&cpu_buffer->commits, 0); 3385 local_set(&cpu_buffer->commits, 0);
3386 cpu_buffer->read = 0;
2744 3387
2745 cpu_buffer->write_stamp = 0; 3388 cpu_buffer->write_stamp = 0;
2746 cpu_buffer->read_stamp = 0; 3389 cpu_buffer->read_stamp = 0;
3390
3391 rb_head_page_activate(cpu_buffer);
2747} 3392}
2748 3393
2749/** 3394/**
@@ -2763,12 +3408,16 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
2763 3408
2764 spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 3409 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2765 3410
3411 if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
3412 goto out;
3413
2766 __raw_spin_lock(&cpu_buffer->lock); 3414 __raw_spin_lock(&cpu_buffer->lock);
2767 3415
2768 rb_reset_cpu(cpu_buffer); 3416 rb_reset_cpu(cpu_buffer);
2769 3417
2770 __raw_spin_unlock(&cpu_buffer->lock); 3418 __raw_spin_unlock(&cpu_buffer->lock);
2771 3419
3420 out:
2772 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 3421 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2773 3422
2774 atomic_dec(&cpu_buffer->record_disabled); 3423 atomic_dec(&cpu_buffer->record_disabled);
@@ -2851,6 +3500,7 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
2851} 3500}
2852EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu); 3501EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
2853 3502
3503#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2854/** 3504/**
2855 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers 3505 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
2856 * @buffer_a: One buffer to swap with 3506 * @buffer_a: One buffer to swap with
@@ -2905,20 +3555,28 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
2905 atomic_inc(&cpu_buffer_a->record_disabled); 3555 atomic_inc(&cpu_buffer_a->record_disabled);
2906 atomic_inc(&cpu_buffer_b->record_disabled); 3556 atomic_inc(&cpu_buffer_b->record_disabled);
2907 3557
3558 ret = -EBUSY;
3559 if (local_read(&cpu_buffer_a->committing))
3560 goto out_dec;
3561 if (local_read(&cpu_buffer_b->committing))
3562 goto out_dec;
3563
2908 buffer_a->buffers[cpu] = cpu_buffer_b; 3564 buffer_a->buffers[cpu] = cpu_buffer_b;
2909 buffer_b->buffers[cpu] = cpu_buffer_a; 3565 buffer_b->buffers[cpu] = cpu_buffer_a;
2910 3566
2911 cpu_buffer_b->buffer = buffer_a; 3567 cpu_buffer_b->buffer = buffer_a;
2912 cpu_buffer_a->buffer = buffer_b; 3568 cpu_buffer_a->buffer = buffer_b;
2913 3569
3570 ret = 0;
3571
3572out_dec:
2914 atomic_dec(&cpu_buffer_a->record_disabled); 3573 atomic_dec(&cpu_buffer_a->record_disabled);
2915 atomic_dec(&cpu_buffer_b->record_disabled); 3574 atomic_dec(&cpu_buffer_b->record_disabled);
2916
2917 ret = 0;
2918out: 3575out:
2919 return ret; 3576 return ret;
2920} 3577}
2921EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu); 3578EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
3579#endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */
2922 3580
2923/** 3581/**
2924 * ring_buffer_alloc_read_page - allocate a page to read from buffer 3582 * ring_buffer_alloc_read_page - allocate a page to read from buffer
@@ -3091,7 +3749,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
3091 read = 0; 3749 read = 0;
3092 } else { 3750 } else {
3093 /* update the entry counter */ 3751 /* update the entry counter */
3094 cpu_buffer->read += local_read(&reader->entries); 3752 cpu_buffer->read += rb_page_entries(reader);
3095 3753
3096 /* swap the pages */ 3754 /* swap the pages */
3097 rb_init_page(bpage); 3755 rb_init_page(bpage);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 8c358395d338..5c75deeefe30 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -43,14 +43,11 @@
43 43
44#define TRACE_BUFFER_FLAGS (RB_FL_OVERWRITE) 44#define TRACE_BUFFER_FLAGS (RB_FL_OVERWRITE)
45 45
46unsigned long __read_mostly tracing_max_latency;
47unsigned long __read_mostly tracing_thresh;
48
49/* 46/*
50 * On boot up, the ring buffer is set to the minimum size, so that 47 * On boot up, the ring buffer is set to the minimum size, so that
51 * we do not waste memory on systems that are not using tracing. 48 * we do not waste memory on systems that are not using tracing.
52 */ 49 */
53static int ring_buffer_expanded; 50int ring_buffer_expanded;
54 51
55/* 52/*
56 * We need to change this state when a selftest is running. 53 * We need to change this state when a selftest is running.
@@ -64,7 +61,7 @@ static bool __read_mostly tracing_selftest_running;
64/* 61/*
65 * If a tracer is running, we do not want to run SELFTEST. 62 * If a tracer is running, we do not want to run SELFTEST.
66 */ 63 */
67static bool __read_mostly tracing_selftest_disabled; 64bool __read_mostly tracing_selftest_disabled;
68 65
69/* For tracers that don't implement custom flags */ 66/* For tracers that don't implement custom flags */
70static struct tracer_opt dummy_tracer_opt[] = { 67static struct tracer_opt dummy_tracer_opt[] = {
@@ -89,7 +86,7 @@ static int dummy_set_flag(u32 old_flags, u32 bit, int set)
89 */ 86 */
90static int tracing_disabled = 1; 87static int tracing_disabled = 1;
91 88
92static DEFINE_PER_CPU(local_t, ftrace_cpu_disabled); 89DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);
93 90
94static inline void ftrace_disable_cpu(void) 91static inline void ftrace_disable_cpu(void)
95{ 92{
@@ -172,10 +169,11 @@ static struct trace_array global_trace;
172 169
173static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu); 170static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);
174 171
175int filter_current_check_discard(struct ftrace_event_call *call, void *rec, 172int filter_current_check_discard(struct ring_buffer *buffer,
173 struct ftrace_event_call *call, void *rec,
176 struct ring_buffer_event *event) 174 struct ring_buffer_event *event)
177{ 175{
178 return filter_check_discard(call, rec, global_trace.buffer, event); 176 return filter_check_discard(call, rec, buffer, event);
179} 177}
180EXPORT_SYMBOL_GPL(filter_current_check_discard); 178EXPORT_SYMBOL_GPL(filter_current_check_discard);
181 179
@@ -266,6 +264,9 @@ unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
266 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME | 264 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
267 TRACE_ITER_GRAPH_TIME; 265 TRACE_ITER_GRAPH_TIME;
268 266
267static int trace_stop_count;
268static DEFINE_SPINLOCK(tracing_start_lock);
269
269/** 270/**
270 * trace_wake_up - wake up tasks waiting for trace input 271 * trace_wake_up - wake up tasks waiting for trace input
271 * 272 *
@@ -323,50 +324,20 @@ static const char *trace_options[] = {
323 "printk-msg-only", 324 "printk-msg-only",
324 "context-info", 325 "context-info",
325 "latency-format", 326 "latency-format",
326 "global-clock",
327 "sleep-time", 327 "sleep-time",
328 "graph-time", 328 "graph-time",
329 NULL 329 NULL
330}; 330};
331 331
332/* 332static struct {
333 * ftrace_max_lock is used to protect the swapping of buffers 333 u64 (*func)(void);
334 * when taking a max snapshot. The buffers themselves are 334 const char *name;
335 * protected by per_cpu spinlocks. But the action of the swap 335} trace_clocks[] = {
336 * needs its own lock. 336 { trace_clock_local, "local" },
337 * 337 { trace_clock_global, "global" },
338 * This is defined as a raw_spinlock_t in order to help 338};
339 * with performance when lockdep debugging is enabled.
340 */
341static raw_spinlock_t ftrace_max_lock =
342 (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
343
344/*
345 * Copy the new maximum trace into the separate maximum-trace
346 * structure. (this way the maximum trace is permanently saved,
347 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
348 */
349static void
350__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
351{
352 struct trace_array_cpu *data = tr->data[cpu];
353
354 max_tr.cpu = cpu;
355 max_tr.time_start = data->preempt_timestamp;
356 339
357 data = max_tr.data[cpu]; 340int trace_clock_id;
358 data->saved_latency = tracing_max_latency;
359
360 memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
361 data->pid = tsk->pid;
362 data->uid = task_uid(tsk);
363 data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
364 data->policy = tsk->policy;
365 data->rt_priority = tsk->rt_priority;
366
367 /* record this tasks comm */
368 tracing_record_cmdline(tsk);
369}
370 341
371ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt) 342ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
372{ 343{
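The trace_clocks[] table added earlier in this hunk is a small registry of { function, name } pairs selected by trace_clock_id. A minimal stand-alone version of the same pattern; the clock sources and names below are placeholders, not the tracer's.

#include <stdio.h>

typedef unsigned long long u64;

static u64 clock_local(void)  { return 1; }	/* placeholder time sources */
static u64 clock_global(void) { return 2; }

static struct {
	u64 (*func)(void);
	const char *name;
} clocks[] = {
	{ clock_local,  "local"  },
	{ clock_global, "global" },
};

static int clock_id;	/* index of the currently selected clock */

int main(void)
{
	printf("using the %s clock: %llu\n",
	       clocks[clock_id].name, clocks[clock_id].func());
	return 0;
}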
@@ -411,6 +382,56 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
411 return cnt; 382 return cnt;
412} 383}
413 384
385/*
386 * ftrace_max_lock is used to protect the swapping of buffers
387 * when taking a max snapshot. The buffers themselves are
388 * protected by per_cpu spinlocks. But the action of the swap
389 * needs its own lock.
390 *
391 * This is defined as a raw_spinlock_t in order to help
392 * with performance when lockdep debugging is enabled.
393 *
394 * It is also used in other places outside the update_max_tr
395 * so it needs to be defined outside of the
396 * CONFIG_TRACER_MAX_TRACE.
397 */
398static raw_spinlock_t ftrace_max_lock =
399 (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
400
401#ifdef CONFIG_TRACER_MAX_TRACE
402unsigned long __read_mostly tracing_max_latency;
403unsigned long __read_mostly tracing_thresh;
404
405/*
406 * Copy the new maximum trace into the separate maximum-trace
407 * structure. (this way the maximum trace is permanently saved,
408 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
409 */
410static void
411__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
412{
413 struct trace_array_cpu *data = tr->data[cpu];
414 struct trace_array_cpu *max_data = tr->data[cpu];
415
416 max_tr.cpu = cpu;
417 max_tr.time_start = data->preempt_timestamp;
418
419 max_data = max_tr.data[cpu];
420 max_data->saved_latency = tracing_max_latency;
421 max_data->critical_start = data->critical_start;
422 max_data->critical_end = data->critical_end;
423
424 memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
425 max_data->pid = tsk->pid;
426 max_data->uid = task_uid(tsk);
427 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
428 max_data->policy = tsk->policy;
429 max_data->rt_priority = tsk->rt_priority;
430
431	 /* record this task's comm */
432 tracing_record_cmdline(tsk);
433}
434
414/** 435/**
415 * update_max_tr - snapshot all trace buffers from global_trace to max_tr 436 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
416 * @tr: tracer 437 * @tr: tracer
@@ -425,16 +446,15 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
425{ 446{
426 struct ring_buffer *buf = tr->buffer; 447 struct ring_buffer *buf = tr->buffer;
427 448
449 if (trace_stop_count)
450 return;
451
428 WARN_ON_ONCE(!irqs_disabled()); 452 WARN_ON_ONCE(!irqs_disabled());
429 __raw_spin_lock(&ftrace_max_lock); 453 __raw_spin_lock(&ftrace_max_lock);
430 454
431 tr->buffer = max_tr.buffer; 455 tr->buffer = max_tr.buffer;
432 max_tr.buffer = buf; 456 max_tr.buffer = buf;
433 457
434 ftrace_disable_cpu();
435 ring_buffer_reset(tr->buffer);
436 ftrace_enable_cpu();
437
438 __update_max_tr(tr, tsk, cpu); 458 __update_max_tr(tr, tsk, cpu);
439 __raw_spin_unlock(&ftrace_max_lock); 459 __raw_spin_unlock(&ftrace_max_lock);
440} 460}
@@ -452,21 +472,35 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
452{ 472{
453 int ret; 473 int ret;
454 474
475 if (trace_stop_count)
476 return;
477
455 WARN_ON_ONCE(!irqs_disabled()); 478 WARN_ON_ONCE(!irqs_disabled());
456 __raw_spin_lock(&ftrace_max_lock); 479 __raw_spin_lock(&ftrace_max_lock);
457 480
458 ftrace_disable_cpu(); 481 ftrace_disable_cpu();
459 482
460 ring_buffer_reset(max_tr.buffer);
461 ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu); 483 ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);
462 484
485 if (ret == -EBUSY) {
486 /*
487 * We failed to swap the buffer due to a commit taking
488 * place on this CPU. We fail to record, but we reset
489 * the max trace buffer (no one writes directly to it)
490 * and flag that it failed.
491 */
492 trace_array_printk(&max_tr, _THIS_IP_,
493 "Failed to swap buffers due to commit in progress\n");
494 }
495
463 ftrace_enable_cpu(); 496 ftrace_enable_cpu();
464 497
465 WARN_ON_ONCE(ret && ret != -EAGAIN); 498 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
466 499
467 __update_max_tr(tr, tsk, cpu); 500 __update_max_tr(tr, tsk, cpu);
468 __raw_spin_unlock(&ftrace_max_lock); 501 __raw_spin_unlock(&ftrace_max_lock);
469} 502}
503#endif /* CONFIG_TRACER_MAX_TRACE */
470 504
471/** 505/**
472 * register_tracer - register a tracer with the ftrace system. 506 * register_tracer - register a tracer with the ftrace system.
@@ -523,7 +557,6 @@ __acquires(kernel_lock)
523 if (type->selftest && !tracing_selftest_disabled) { 557 if (type->selftest && !tracing_selftest_disabled) {
524 struct tracer *saved_tracer = current_trace; 558 struct tracer *saved_tracer = current_trace;
525 struct trace_array *tr = &global_trace; 559 struct trace_array *tr = &global_trace;
526 int i;
527 560
528 /* 561 /*
529 * Run a selftest on this tracer. 562 * Run a selftest on this tracer.
@@ -532,8 +565,7 @@ __acquires(kernel_lock)
532 * internal tracing to verify that everything is in order. 565 * internal tracing to verify that everything is in order.
533 * If we fail, we do not register this tracer. 566 * If we fail, we do not register this tracer.
534 */ 567 */
535 for_each_tracing_cpu(i) 568 tracing_reset_online_cpus(tr);
536 tracing_reset(tr, i);
537 569
538 current_trace = type; 570 current_trace = type;
539 /* the test is responsible for initializing and enabling */ 571 /* the test is responsible for initializing and enabling */
@@ -546,8 +578,7 @@ __acquires(kernel_lock)
546 goto out; 578 goto out;
547 } 579 }
548 /* Only reset on passing, to avoid touching corrupted buffers */ 580 /* Only reset on passing, to avoid touching corrupted buffers */
549 for_each_tracing_cpu(i) 581 tracing_reset_online_cpus(tr);
550 tracing_reset(tr, i);
551 582
552 printk(KERN_CONT "PASSED\n"); 583 printk(KERN_CONT "PASSED\n");
553 } 584 }
@@ -622,21 +653,42 @@ void unregister_tracer(struct tracer *type)
622 mutex_unlock(&trace_types_lock); 653 mutex_unlock(&trace_types_lock);
623} 654}
624 655
625void tracing_reset(struct trace_array *tr, int cpu) 656static void __tracing_reset(struct trace_array *tr, int cpu)
626{ 657{
627 ftrace_disable_cpu(); 658 ftrace_disable_cpu();
628 ring_buffer_reset_cpu(tr->buffer, cpu); 659 ring_buffer_reset_cpu(tr->buffer, cpu);
629 ftrace_enable_cpu(); 660 ftrace_enable_cpu();
630} 661}
631 662
663void tracing_reset(struct trace_array *tr, int cpu)
664{
665 struct ring_buffer *buffer = tr->buffer;
666
667 ring_buffer_record_disable(buffer);
668
669 /* Make sure all commits have finished */
670 synchronize_sched();
671 __tracing_reset(tr, cpu);
672
673 ring_buffer_record_enable(buffer);
674}
675
632void tracing_reset_online_cpus(struct trace_array *tr) 676void tracing_reset_online_cpus(struct trace_array *tr)
633{ 677{
678 struct ring_buffer *buffer = tr->buffer;
634 int cpu; 679 int cpu;
635 680
681 ring_buffer_record_disable(buffer);
682
683 /* Make sure all commits have finished */
684 synchronize_sched();
685
636 tr->time_start = ftrace_now(tr->cpu); 686 tr->time_start = ftrace_now(tr->cpu);
637 687
638 for_each_online_cpu(cpu) 688 for_each_online_cpu(cpu)
639 tracing_reset(tr, cpu); 689 __tracing_reset(tr, cpu);
690
691 ring_buffer_record_enable(buffer);
640} 692}
641 693
642void tracing_reset_current(int cpu) 694void tracing_reset_current(int cpu)
@@ -667,9 +719,6 @@ static void trace_init_cmdlines(void)
667 cmdline_idx = 0; 719 cmdline_idx = 0;
668} 720}
669 721
670static int trace_stop_count;
671static DEFINE_SPINLOCK(tracing_start_lock);
672
673/** 722/**
674 * ftrace_off_permanent - disable all ftrace code permanently 723 * ftrace_off_permanent - disable all ftrace code permanently
675 * 724 *
@@ -850,14 +899,15 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
850} 899}
851EXPORT_SYMBOL_GPL(tracing_generic_entry_update); 900EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
852 901
853struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr, 902struct ring_buffer_event *
854 int type, 903trace_buffer_lock_reserve(struct ring_buffer *buffer,
855 unsigned long len, 904 int type,
856 unsigned long flags, int pc) 905 unsigned long len,
906 unsigned long flags, int pc)
857{ 907{
858 struct ring_buffer_event *event; 908 struct ring_buffer_event *event;
859 909
860 event = ring_buffer_lock_reserve(tr->buffer, len); 910 event = ring_buffer_lock_reserve(buffer, len);
861 if (event != NULL) { 911 if (event != NULL) {
862 struct trace_entry *ent = ring_buffer_event_data(event); 912 struct trace_entry *ent = ring_buffer_event_data(event);
863 913
@@ -867,58 +917,60 @@ struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
867 917
868 return event; 918 return event;
869} 919}
870static void ftrace_trace_stack(struct trace_array *tr,
871 unsigned long flags, int skip, int pc);
872static void ftrace_trace_userstack(struct trace_array *tr,
873 unsigned long flags, int pc);
874 920
875static inline void __trace_buffer_unlock_commit(struct trace_array *tr, 921static inline void
876 struct ring_buffer_event *event, 922__trace_buffer_unlock_commit(struct ring_buffer *buffer,
877 unsigned long flags, int pc, 923 struct ring_buffer_event *event,
878 int wake) 924 unsigned long flags, int pc,
925 int wake)
879{ 926{
880 ring_buffer_unlock_commit(tr->buffer, event); 927 ring_buffer_unlock_commit(buffer, event);
881 928
882 ftrace_trace_stack(tr, flags, 6, pc); 929 ftrace_trace_stack(buffer, flags, 6, pc);
883 ftrace_trace_userstack(tr, flags, pc); 930 ftrace_trace_userstack(buffer, flags, pc);
884 931
885 if (wake) 932 if (wake)
886 trace_wake_up(); 933 trace_wake_up();
887} 934}
888 935
889void trace_buffer_unlock_commit(struct trace_array *tr, 936void trace_buffer_unlock_commit(struct ring_buffer *buffer,
890 struct ring_buffer_event *event, 937 struct ring_buffer_event *event,
891 unsigned long flags, int pc) 938 unsigned long flags, int pc)
892{ 939{
893 __trace_buffer_unlock_commit(tr, event, flags, pc, 1); 940 __trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
894} 941}
895 942
896struct ring_buffer_event * 943struct ring_buffer_event *
897trace_current_buffer_lock_reserve(int type, unsigned long len, 944trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
945 int type, unsigned long len,
898 unsigned long flags, int pc) 946 unsigned long flags, int pc)
899{ 947{
900 return trace_buffer_lock_reserve(&global_trace, 948 *current_rb = global_trace.buffer;
949 return trace_buffer_lock_reserve(*current_rb,
901 type, len, flags, pc); 950 type, len, flags, pc);
902} 951}
903EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve); 952EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
904 953
905void trace_current_buffer_unlock_commit(struct ring_buffer_event *event, 954void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
955 struct ring_buffer_event *event,
906 unsigned long flags, int pc) 956 unsigned long flags, int pc)
907{ 957{
908 __trace_buffer_unlock_commit(&global_trace, event, flags, pc, 1); 958 __trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
909} 959}
910EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit); 960EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
911 961
912void trace_nowake_buffer_unlock_commit(struct ring_buffer_event *event, 962void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
913 unsigned long flags, int pc) 963 struct ring_buffer_event *event,
964 unsigned long flags, int pc)
914{ 965{
915 __trace_buffer_unlock_commit(&global_trace, event, flags, pc, 0); 966 __trace_buffer_unlock_commit(buffer, event, flags, pc, 0);
916} 967}
917EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit); 968EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit);
918 969
919void trace_current_buffer_discard_commit(struct ring_buffer_event *event) 970void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
971 struct ring_buffer_event *event)
920{ 972{
921 ring_buffer_discard_commit(global_trace.buffer, event); 973 ring_buffer_discard_commit(buffer, event);
922} 974}
923EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit); 975EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
924 976
@@ -928,6 +980,7 @@ trace_function(struct trace_array *tr,
928 int pc) 980 int pc)
929{ 981{
930 struct ftrace_event_call *call = &event_function; 982 struct ftrace_event_call *call = &event_function;
983 struct ring_buffer *buffer = tr->buffer;
931 struct ring_buffer_event *event; 984 struct ring_buffer_event *event;
932 struct ftrace_entry *entry; 985 struct ftrace_entry *entry;
933 986
@@ -935,7 +988,7 @@ trace_function(struct trace_array *tr,
935 if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled)))) 988 if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
936 return; 989 return;
937 990
938 event = trace_buffer_lock_reserve(tr, TRACE_FN, sizeof(*entry), 991 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
939 flags, pc); 992 flags, pc);
940 if (!event) 993 if (!event)
941 return; 994 return;
@@ -943,58 +996,10 @@ trace_function(struct trace_array *tr,
943 entry->ip = ip; 996 entry->ip = ip;
944 entry->parent_ip = parent_ip; 997 entry->parent_ip = parent_ip;
945 998
946 if (!filter_check_discard(call, entry, tr->buffer, event)) 999 if (!filter_check_discard(call, entry, buffer, event))
947 ring_buffer_unlock_commit(tr->buffer, event); 1000 ring_buffer_unlock_commit(buffer, event);
948}
949
950#ifdef CONFIG_FUNCTION_GRAPH_TRACER
951static int __trace_graph_entry(struct trace_array *tr,
952 struct ftrace_graph_ent *trace,
953 unsigned long flags,
954 int pc)
955{
956 struct ftrace_event_call *call = &event_funcgraph_entry;
957 struct ring_buffer_event *event;
958 struct ftrace_graph_ent_entry *entry;
959
960 if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
961 return 0;
962
963 event = trace_buffer_lock_reserve(&global_trace, TRACE_GRAPH_ENT,
964 sizeof(*entry), flags, pc);
965 if (!event)
966 return 0;
967 entry = ring_buffer_event_data(event);
968 entry->graph_ent = *trace;
969 if (!filter_current_check_discard(call, entry, event))
970 ring_buffer_unlock_commit(global_trace.buffer, event);
971
972 return 1;
973} 1001}
974 1002
975static void __trace_graph_return(struct trace_array *tr,
976 struct ftrace_graph_ret *trace,
977 unsigned long flags,
978 int pc)
979{
980 struct ftrace_event_call *call = &event_funcgraph_exit;
981 struct ring_buffer_event *event;
982 struct ftrace_graph_ret_entry *entry;
983
984 if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
985 return;
986
987 event = trace_buffer_lock_reserve(&global_trace, TRACE_GRAPH_RET,
988 sizeof(*entry), flags, pc);
989 if (!event)
990 return;
991 entry = ring_buffer_event_data(event);
992 entry->ret = *trace;
993 if (!filter_current_check_discard(call, entry, event))
994 ring_buffer_unlock_commit(global_trace.buffer, event);
995}
996#endif
997
998void 1003void
999ftrace(struct trace_array *tr, struct trace_array_cpu *data, 1004ftrace(struct trace_array *tr, struct trace_array_cpu *data,
1000 unsigned long ip, unsigned long parent_ip, unsigned long flags, 1005 unsigned long ip, unsigned long parent_ip, unsigned long flags,
@@ -1004,17 +1009,17 @@ ftrace(struct trace_array *tr, struct trace_array_cpu *data,
1004 trace_function(tr, ip, parent_ip, flags, pc); 1009 trace_function(tr, ip, parent_ip, flags, pc);
1005} 1010}
1006 1011
1007static void __ftrace_trace_stack(struct trace_array *tr, 1012#ifdef CONFIG_STACKTRACE
1013static void __ftrace_trace_stack(struct ring_buffer *buffer,
1008 unsigned long flags, 1014 unsigned long flags,
1009 int skip, int pc) 1015 int skip, int pc)
1010{ 1016{
1011#ifdef CONFIG_STACKTRACE
1012 struct ftrace_event_call *call = &event_kernel_stack; 1017 struct ftrace_event_call *call = &event_kernel_stack;
1013 struct ring_buffer_event *event; 1018 struct ring_buffer_event *event;
1014 struct stack_entry *entry; 1019 struct stack_entry *entry;
1015 struct stack_trace trace; 1020 struct stack_trace trace;
1016 1021
1017 event = trace_buffer_lock_reserve(tr, TRACE_STACK, 1022 event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
1018 sizeof(*entry), flags, pc); 1023 sizeof(*entry), flags, pc);
1019 if (!event) 1024 if (!event)
1020 return; 1025 return;
@@ -1027,32 +1032,28 @@ static void __ftrace_trace_stack(struct trace_array *tr,
1027 trace.entries = entry->caller; 1032 trace.entries = entry->caller;
1028 1033
1029 save_stack_trace(&trace); 1034 save_stack_trace(&trace);
1030 if (!filter_check_discard(call, entry, tr->buffer, event)) 1035 if (!filter_check_discard(call, entry, buffer, event))
1031 ring_buffer_unlock_commit(tr->buffer, event); 1036 ring_buffer_unlock_commit(buffer, event);
1032#endif
1033} 1037}
1034 1038
1035static void ftrace_trace_stack(struct trace_array *tr, 1039void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
1036 unsigned long flags, 1040 int skip, int pc)
1037 int skip, int pc)
1038{ 1041{
1039 if (!(trace_flags & TRACE_ITER_STACKTRACE)) 1042 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1040 return; 1043 return;
1041 1044
1042 __ftrace_trace_stack(tr, flags, skip, pc); 1045 __ftrace_trace_stack(buffer, flags, skip, pc);
1043} 1046}
1044 1047
1045void __trace_stack(struct trace_array *tr, 1048void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1046 unsigned long flags, 1049 int pc)
1047 int skip, int pc)
1048{ 1050{
1049 __ftrace_trace_stack(tr, flags, skip, pc); 1051 __ftrace_trace_stack(tr->buffer, flags, skip, pc);
1050} 1052}
1051 1053
1052static void ftrace_trace_userstack(struct trace_array *tr, 1054void
1053 unsigned long flags, int pc) 1055ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
1054{ 1056{
1055#ifdef CONFIG_STACKTRACE
1056 struct ftrace_event_call *call = &event_user_stack; 1057 struct ftrace_event_call *call = &event_user_stack;
1057 struct ring_buffer_event *event; 1058 struct ring_buffer_event *event;
1058 struct userstack_entry *entry; 1059 struct userstack_entry *entry;
@@ -1061,7 +1062,7 @@ static void ftrace_trace_userstack(struct trace_array *tr,
1061 if (!(trace_flags & TRACE_ITER_USERSTACKTRACE)) 1062 if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1062 return; 1063 return;
1063 1064
1064 event = trace_buffer_lock_reserve(tr, TRACE_USER_STACK, 1065 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
1065 sizeof(*entry), flags, pc); 1066 sizeof(*entry), flags, pc);
1066 if (!event) 1067 if (!event)
1067 return; 1068 return;
@@ -1075,9 +1076,8 @@ static void ftrace_trace_userstack(struct trace_array *tr,
1075 trace.entries = entry->caller; 1076 trace.entries = entry->caller;
1076 1077
1077 save_stack_trace_user(&trace); 1078 save_stack_trace_user(&trace);
1078 if (!filter_check_discard(call, entry, tr->buffer, event)) 1079 if (!filter_check_discard(call, entry, buffer, event))
1079 ring_buffer_unlock_commit(tr->buffer, event); 1080 ring_buffer_unlock_commit(buffer, event);
1080#endif
1081} 1081}
1082 1082
1083#ifdef UNUSED 1083#ifdef UNUSED
@@ -1087,6 +1087,8 @@ static void __trace_userstack(struct trace_array *tr, unsigned long flags)
1087} 1087}
1088#endif /* UNUSED */ 1088#endif /* UNUSED */
1089 1089
1090#endif /* CONFIG_STACKTRACE */
1091
1090static void 1092static void
1091ftrace_trace_special(void *__tr, 1093ftrace_trace_special(void *__tr,
1092 unsigned long arg1, unsigned long arg2, unsigned long arg3, 1094 unsigned long arg1, unsigned long arg2, unsigned long arg3,
@@ -1094,9 +1096,10 @@ ftrace_trace_special(void *__tr,
1094{ 1096{
1095 struct ring_buffer_event *event; 1097 struct ring_buffer_event *event;
1096 struct trace_array *tr = __tr; 1098 struct trace_array *tr = __tr;
1099 struct ring_buffer *buffer = tr->buffer;
1097 struct special_entry *entry; 1100 struct special_entry *entry;
1098 1101
1099 event = trace_buffer_lock_reserve(tr, TRACE_SPECIAL, 1102 event = trace_buffer_lock_reserve(buffer, TRACE_SPECIAL,
1100 sizeof(*entry), 0, pc); 1103 sizeof(*entry), 0, pc);
1101 if (!event) 1104 if (!event)
1102 return; 1105 return;
@@ -1104,7 +1107,7 @@ ftrace_trace_special(void *__tr,
1104 entry->arg1 = arg1; 1107 entry->arg1 = arg1;
1105 entry->arg2 = arg2; 1108 entry->arg2 = arg2;
1106 entry->arg3 = arg3; 1109 entry->arg3 = arg3;
1107 trace_buffer_unlock_commit(tr, event, 0, pc); 1110 trace_buffer_unlock_commit(buffer, event, 0, pc);
1108} 1111}
1109 1112
1110void 1113void
@@ -1115,62 +1118,6 @@ __trace_special(void *__tr, void *__data,
1115} 1118}
1116 1119
1117void 1120void
1118tracing_sched_switch_trace(struct trace_array *tr,
1119 struct task_struct *prev,
1120 struct task_struct *next,
1121 unsigned long flags, int pc)
1122{
1123 struct ftrace_event_call *call = &event_context_switch;
1124 struct ring_buffer_event *event;
1125 struct ctx_switch_entry *entry;
1126
1127 event = trace_buffer_lock_reserve(tr, TRACE_CTX,
1128 sizeof(*entry), flags, pc);
1129 if (!event)
1130 return;
1131 entry = ring_buffer_event_data(event);
1132 entry->prev_pid = prev->pid;
1133 entry->prev_prio = prev->prio;
1134 entry->prev_state = prev->state;
1135 entry->next_pid = next->pid;
1136 entry->next_prio = next->prio;
1137 entry->next_state = next->state;
1138 entry->next_cpu = task_cpu(next);
1139
1140 if (!filter_check_discard(call, entry, tr->buffer, event))
1141 trace_buffer_unlock_commit(tr, event, flags, pc);
1142}
1143
1144void
1145tracing_sched_wakeup_trace(struct trace_array *tr,
1146 struct task_struct *wakee,
1147 struct task_struct *curr,
1148 unsigned long flags, int pc)
1149{
1150 struct ftrace_event_call *call = &event_wakeup;
1151 struct ring_buffer_event *event;
1152 struct ctx_switch_entry *entry;
1153
1154 event = trace_buffer_lock_reserve(tr, TRACE_WAKE,
1155 sizeof(*entry), flags, pc);
1156 if (!event)
1157 return;
1158 entry = ring_buffer_event_data(event);
1159 entry->prev_pid = curr->pid;
1160 entry->prev_prio = curr->prio;
1161 entry->prev_state = curr->state;
1162 entry->next_pid = wakee->pid;
1163 entry->next_prio = wakee->prio;
1164 entry->next_state = wakee->state;
1165 entry->next_cpu = task_cpu(wakee);
1166
1167 if (!filter_check_discard(call, entry, tr->buffer, event))
1168 ring_buffer_unlock_commit(tr->buffer, event);
1169 ftrace_trace_stack(tr, flags, 6, pc);
1170 ftrace_trace_userstack(tr, flags, pc);
1171}
1172
1173void
1174ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) 1121ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
1175{ 1122{
1176 struct trace_array *tr = &global_trace; 1123 struct trace_array *tr = &global_trace;
@@ -1194,68 +1141,6 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
1194 local_irq_restore(flags); 1141 local_irq_restore(flags);
1195} 1142}
1196 1143
1197#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1198int trace_graph_entry(struct ftrace_graph_ent *trace)
1199{
1200 struct trace_array *tr = &global_trace;
1201 struct trace_array_cpu *data;
1202 unsigned long flags;
1203 long disabled;
1204 int ret;
1205 int cpu;
1206 int pc;
1207
1208 if (!ftrace_trace_task(current))
1209 return 0;
1210
1211 if (!ftrace_graph_addr(trace->func))
1212 return 0;
1213
1214 local_irq_save(flags);
1215 cpu = raw_smp_processor_id();
1216 data = tr->data[cpu];
1217 disabled = atomic_inc_return(&data->disabled);
1218 if (likely(disabled == 1)) {
1219 pc = preempt_count();
1220 ret = __trace_graph_entry(tr, trace, flags, pc);
1221 } else {
1222 ret = 0;
1223 }
1224 /* Only do the atomic if it is not already set */
1225 if (!test_tsk_trace_graph(current))
1226 set_tsk_trace_graph(current);
1227
1228 atomic_dec(&data->disabled);
1229 local_irq_restore(flags);
1230
1231 return ret;
1232}
1233
1234void trace_graph_return(struct ftrace_graph_ret *trace)
1235{
1236 struct trace_array *tr = &global_trace;
1237 struct trace_array_cpu *data;
1238 unsigned long flags;
1239 long disabled;
1240 int cpu;
1241 int pc;
1242
1243 local_irq_save(flags);
1244 cpu = raw_smp_processor_id();
1245 data = tr->data[cpu];
1246 disabled = atomic_inc_return(&data->disabled);
1247 if (likely(disabled == 1)) {
1248 pc = preempt_count();
1249 __trace_graph_return(tr, trace, flags, pc);
1250 }
1251 if (!trace->depth)
1252 clear_tsk_trace_graph(current);
1253 atomic_dec(&data->disabled);
1254 local_irq_restore(flags);
1255}
1256#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1257
1258
1259/** 1144/**
1260 * trace_vbprintk - write binary msg to tracing buffer 1145 * trace_vbprintk - write binary msg to tracing buffer
1261 * 1146 *
@@ -1268,6 +1153,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
1268 1153
1269 struct ftrace_event_call *call = &event_bprint; 1154 struct ftrace_event_call *call = &event_bprint;
1270 struct ring_buffer_event *event; 1155 struct ring_buffer_event *event;
1156 struct ring_buffer *buffer;
1271 struct trace_array *tr = &global_trace; 1157 struct trace_array *tr = &global_trace;
1272 struct trace_array_cpu *data; 1158 struct trace_array_cpu *data;
1273 struct bprint_entry *entry; 1159 struct bprint_entry *entry;
@@ -1300,7 +1186,9 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
1300 goto out_unlock; 1186 goto out_unlock;
1301 1187
1302 size = sizeof(*entry) + sizeof(u32) * len; 1188 size = sizeof(*entry) + sizeof(u32) * len;
1303 event = trace_buffer_lock_reserve(tr, TRACE_BPRINT, size, flags, pc); 1189 buffer = tr->buffer;
1190 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
1191 flags, pc);
1304 if (!event) 1192 if (!event)
1305 goto out_unlock; 1193 goto out_unlock;
1306 entry = ring_buffer_event_data(event); 1194 entry = ring_buffer_event_data(event);
@@ -1308,8 +1196,8 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
1308 entry->fmt = fmt; 1196 entry->fmt = fmt;
1309 1197
1310 memcpy(entry->buf, trace_buf, sizeof(u32) * len); 1198 memcpy(entry->buf, trace_buf, sizeof(u32) * len);
1311 if (!filter_check_discard(call, entry, tr->buffer, event)) 1199 if (!filter_check_discard(call, entry, buffer, event))
1312 ring_buffer_unlock_commit(tr->buffer, event); 1200 ring_buffer_unlock_commit(buffer, event);
1313 1201
1314out_unlock: 1202out_unlock:
1315 __raw_spin_unlock(&trace_buf_lock); 1203 __raw_spin_unlock(&trace_buf_lock);
@@ -1324,14 +1212,30 @@ out:
1324} 1212}
1325EXPORT_SYMBOL_GPL(trace_vbprintk); 1213EXPORT_SYMBOL_GPL(trace_vbprintk);
1326 1214
1327int trace_vprintk(unsigned long ip, const char *fmt, va_list args) 1215int trace_array_printk(struct trace_array *tr,
1216 unsigned long ip, const char *fmt, ...)
1217{
1218 int ret;
1219 va_list ap;
1220
1221 if (!(trace_flags & TRACE_ITER_PRINTK))
1222 return 0;
1223
1224 va_start(ap, fmt);
1225 ret = trace_array_vprintk(tr, ip, fmt, ap);
1226 va_end(ap);
1227 return ret;
1228}
1229
1230int trace_array_vprintk(struct trace_array *tr,
1231 unsigned long ip, const char *fmt, va_list args)
1328{ 1232{
1329 static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED; 1233 static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
1330 static char trace_buf[TRACE_BUF_SIZE]; 1234 static char trace_buf[TRACE_BUF_SIZE];
1331 1235
1332 struct ftrace_event_call *call = &event_print; 1236 struct ftrace_event_call *call = &event_print;
1333 struct ring_buffer_event *event; 1237 struct ring_buffer_event *event;
1334 struct trace_array *tr = &global_trace; 1238 struct ring_buffer *buffer;
1335 struct trace_array_cpu *data; 1239 struct trace_array_cpu *data;
1336 int cpu, len = 0, size, pc; 1240 int cpu, len = 0, size, pc;
1337 struct print_entry *entry; 1241 struct print_entry *entry;
@@ -1359,7 +1263,9 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
1359 trace_buf[len] = 0; 1263 trace_buf[len] = 0;
1360 1264
1361 size = sizeof(*entry) + len + 1; 1265 size = sizeof(*entry) + len + 1;
1362 event = trace_buffer_lock_reserve(tr, TRACE_PRINT, size, irq_flags, pc); 1266 buffer = tr->buffer;
1267 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
1268 irq_flags, pc);
1363 if (!event) 1269 if (!event)
1364 goto out_unlock; 1270 goto out_unlock;
1365 entry = ring_buffer_event_data(event); 1271 entry = ring_buffer_event_data(event);
@@ -1367,8 +1273,8 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
1367 1273
1368 memcpy(&entry->buf, trace_buf, len); 1274 memcpy(&entry->buf, trace_buf, len);
1369 entry->buf[len] = 0; 1275 entry->buf[len] = 0;
1370 if (!filter_check_discard(call, entry, tr->buffer, event)) 1276 if (!filter_check_discard(call, entry, buffer, event))
1371 ring_buffer_unlock_commit(tr->buffer, event); 1277 ring_buffer_unlock_commit(buffer, event);
1372 1278
1373 out_unlock: 1279 out_unlock:
1374 __raw_spin_unlock(&trace_buf_lock); 1280 __raw_spin_unlock(&trace_buf_lock);
@@ -1380,6 +1286,11 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
1380 1286
1381 return len; 1287 return len;
1382} 1288}
1289
1290int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
1291{
1292	return trace_array_vprintk(&global_trace, ip, fmt, args);
1293}
1383EXPORT_SYMBOL_GPL(trace_vprintk); 1294EXPORT_SYMBOL_GPL(trace_vprintk);
1384 1295
1385enum trace_file_type { 1296enum trace_file_type {
@@ -1519,6 +1430,37 @@ static void *s_next(struct seq_file *m, void *v, loff_t *pos)
1519 return ent; 1430 return ent;
1520} 1431}
1521 1432
1433static void tracing_iter_reset(struct trace_iterator *iter, int cpu)
1434{
1435 struct trace_array *tr = iter->tr;
1436 struct ring_buffer_event *event;
1437 struct ring_buffer_iter *buf_iter;
1438 unsigned long entries = 0;
1439 u64 ts;
1440
1441 tr->data[cpu]->skipped_entries = 0;
1442
1443 if (!iter->buffer_iter[cpu])
1444 return;
1445
1446 buf_iter = iter->buffer_iter[cpu];
1447 ring_buffer_iter_reset(buf_iter);
1448
1449 /*
1450	 * With the max latency tracers we can have the case that
1451	 * a reset never took place on a cpu. This is evidenced by
1452	 * timestamps that lie before the start of the trace.
1453 */
1454 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
1455 if (ts >= iter->tr->time_start)
1456 break;
1457 entries++;
1458 ring_buffer_read(buf_iter, NULL);
1459 }
1460
1461 tr->data[cpu]->skipped_entries = entries;
1462}
1463
1522/* 1464/*
1523 * No locking is necessary here. The worst thing which can 1465 * No locking is necessary here. The worst thing which can
1524 * happen is losing events consumed at the same time 1466 * happen is losing events consumed at the same time
@@ -1557,10 +1499,9 @@ static void *s_start(struct seq_file *m, loff_t *pos)
1557 1499
1558 if (cpu_file == TRACE_PIPE_ALL_CPU) { 1500 if (cpu_file == TRACE_PIPE_ALL_CPU) {
1559 for_each_tracing_cpu(cpu) 1501 for_each_tracing_cpu(cpu)
1560 ring_buffer_iter_reset(iter->buffer_iter[cpu]); 1502 tracing_iter_reset(iter, cpu);
1561 } else 1503 } else
1562 ring_buffer_iter_reset(iter->buffer_iter[cpu_file]); 1504 tracing_iter_reset(iter, cpu_file);
1563
1564 1505
1565 ftrace_enable_cpu(); 1506 ftrace_enable_cpu();
1566 1507
@@ -1609,16 +1550,32 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
1609 struct trace_array *tr = iter->tr; 1550 struct trace_array *tr = iter->tr;
1610 struct trace_array_cpu *data = tr->data[tr->cpu]; 1551 struct trace_array_cpu *data = tr->data[tr->cpu];
1611 struct tracer *type = current_trace; 1552 struct tracer *type = current_trace;
1612 unsigned long total; 1553 unsigned long entries = 0;
1613 unsigned long entries; 1554 unsigned long total = 0;
1555 unsigned long count;
1614 const char *name = "preemption"; 1556 const char *name = "preemption";
1557 int cpu;
1615 1558
1616 if (type) 1559 if (type)
1617 name = type->name; 1560 name = type->name;
1618 1561
1619 entries = ring_buffer_entries(iter->tr->buffer); 1562
1620 total = entries + 1563 for_each_tracing_cpu(cpu) {
1621 ring_buffer_overruns(iter->tr->buffer); 1564 count = ring_buffer_entries_cpu(tr->buffer, cpu);
1565 /*
1566 * If this buffer has skipped entries, then we hold all
1567 * entries for the trace and we need to ignore the
1568 * ones before the time stamp.
1569 */
1570 if (tr->data[cpu]->skipped_entries) {
1571 count -= tr->data[cpu]->skipped_entries;
1572 /* total is the same as the entries */
1573 total += count;
1574 } else
1575 total += count +
1576 ring_buffer_overrun_cpu(tr->buffer, cpu);
1577 entries += count;
1578 }
1622 1579
1623 seq_printf(m, "# %s latency trace v1.1.5 on %s\n", 1580 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
1624 name, UTS_RELEASE); 1581 name, UTS_RELEASE);
@@ -1660,7 +1617,7 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
1660 seq_puts(m, "\n# => ended at: "); 1617 seq_puts(m, "\n# => ended at: ");
1661 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags); 1618 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
1662 trace_print_seq(m, &iter->seq); 1619 trace_print_seq(m, &iter->seq);
1663 seq_puts(m, "#\n"); 1620 seq_puts(m, "\n#\n");
1664 } 1621 }
1665 1622
1666 seq_puts(m, "#\n"); 1623 seq_puts(m, "#\n");
@@ -1679,6 +1636,9 @@ static void test_cpu_buff_start(struct trace_iterator *iter)
1679 if (cpumask_test_cpu(iter->cpu, iter->started)) 1636 if (cpumask_test_cpu(iter->cpu, iter->started))
1680 return; 1637 return;
1681 1638
1639 if (iter->tr->data[iter->cpu]->skipped_entries)
1640 return;
1641
1682 cpumask_set_cpu(iter->cpu, iter->started); 1642 cpumask_set_cpu(iter->cpu, iter->started);
1683 1643
1684 /* Don't print started cpu buffer for the first entry of the trace */ 1644 /* Don't print started cpu buffer for the first entry of the trace */
@@ -1941,19 +1901,23 @@ __tracing_open(struct inode *inode, struct file *file)
1941 if (ring_buffer_overruns(iter->tr->buffer)) 1901 if (ring_buffer_overruns(iter->tr->buffer))
1942 iter->iter_flags |= TRACE_FILE_ANNOTATE; 1902 iter->iter_flags |= TRACE_FILE_ANNOTATE;
1943 1903
1904 /* stop the trace while dumping */
1905 tracing_stop();
1906
1944 if (iter->cpu_file == TRACE_PIPE_ALL_CPU) { 1907 if (iter->cpu_file == TRACE_PIPE_ALL_CPU) {
1945 for_each_tracing_cpu(cpu) { 1908 for_each_tracing_cpu(cpu) {
1946 1909
1947 iter->buffer_iter[cpu] = 1910 iter->buffer_iter[cpu] =
1948 ring_buffer_read_start(iter->tr->buffer, cpu); 1911 ring_buffer_read_start(iter->tr->buffer, cpu);
1912 tracing_iter_reset(iter, cpu);
1949 } 1913 }
1950 } else { 1914 } else {
1951 cpu = iter->cpu_file; 1915 cpu = iter->cpu_file;
1952 iter->buffer_iter[cpu] = 1916 iter->buffer_iter[cpu] =
1953 ring_buffer_read_start(iter->tr->buffer, cpu); 1917 ring_buffer_read_start(iter->tr->buffer, cpu);
1918 tracing_iter_reset(iter, cpu);
1954 } 1919 }
1955 1920
1956 /* TODO stop tracer */
1957 ret = seq_open(file, &tracer_seq_ops); 1921 ret = seq_open(file, &tracer_seq_ops);
1958 if (ret < 0) { 1922 if (ret < 0) {
1959 fail_ret = ERR_PTR(ret); 1923 fail_ret = ERR_PTR(ret);
@@ -1963,9 +1927,6 @@ __tracing_open(struct inode *inode, struct file *file)
1963 m = file->private_data; 1927 m = file->private_data;
1964 m->private = iter; 1928 m->private = iter;
1965 1929
1966 /* stop the trace while dumping */
1967 tracing_stop();
1968
1969 mutex_unlock(&trace_types_lock); 1930 mutex_unlock(&trace_types_lock);
1970 1931
1971 return iter; 1932 return iter;
@@ -1976,6 +1937,7 @@ __tracing_open(struct inode *inode, struct file *file)
1976 ring_buffer_read_finish(iter->buffer_iter[cpu]); 1937 ring_buffer_read_finish(iter->buffer_iter[cpu]);
1977 } 1938 }
1978 free_cpumask_var(iter->started); 1939 free_cpumask_var(iter->started);
1940 tracing_start();
1979 fail: 1941 fail:
1980 mutex_unlock(&trace_types_lock); 1942 mutex_unlock(&trace_types_lock);
1981 kfree(iter->trace); 1943 kfree(iter->trace);
@@ -2257,8 +2219,8 @@ tracing_trace_options_read(struct file *filp, char __user *ubuf,
2257 len += 3; /* "no" and newline */ 2219 len += 3; /* "no" and newline */
2258 } 2220 }
2259 2221
2260 /* +2 for \n and \0 */ 2222 /* +1 for \0 */
2261 buf = kmalloc(len + 2, GFP_KERNEL); 2223 buf = kmalloc(len + 1, GFP_KERNEL);
2262 if (!buf) { 2224 if (!buf) {
2263 mutex_unlock(&trace_types_lock); 2225 mutex_unlock(&trace_types_lock);
2264 return -ENOMEM; 2226 return -ENOMEM;
@@ -2281,7 +2243,7 @@ tracing_trace_options_read(struct file *filp, char __user *ubuf,
2281 } 2243 }
2282 mutex_unlock(&trace_types_lock); 2244 mutex_unlock(&trace_types_lock);
2283 2245
2284 WARN_ON(r >= len + 2); 2246 WARN_ON(r >= len + 1);
2285 2247
2286 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 2248 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2287 2249
@@ -2292,23 +2254,23 @@ tracing_trace_options_read(struct file *filp, char __user *ubuf,
2292/* Try to assign a tracer specific option */ 2254/* Try to assign a tracer specific option */
2293static int set_tracer_option(struct tracer *trace, char *cmp, int neg) 2255static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
2294{ 2256{
2295 struct tracer_flags *trace_flags = trace->flags; 2257 struct tracer_flags *tracer_flags = trace->flags;
2296 struct tracer_opt *opts = NULL; 2258 struct tracer_opt *opts = NULL;
2297 int ret = 0, i = 0; 2259 int ret = 0, i = 0;
2298 int len; 2260 int len;
2299 2261
2300 for (i = 0; trace_flags->opts[i].name; i++) { 2262 for (i = 0; tracer_flags->opts[i].name; i++) {
2301 opts = &trace_flags->opts[i]; 2263 opts = &tracer_flags->opts[i];
2302 len = strlen(opts->name); 2264 len = strlen(opts->name);
2303 2265
2304 if (strncmp(cmp, opts->name, len) == 0) { 2266 if (strncmp(cmp, opts->name, len) == 0) {
2305 ret = trace->set_flag(trace_flags->val, 2267 ret = trace->set_flag(tracer_flags->val,
2306 opts->bit, !neg); 2268 opts->bit, !neg);
2307 break; 2269 break;
2308 } 2270 }
2309 } 2271 }
2310 /* Not found */ 2272 /* Not found */
2311 if (!trace_flags->opts[i].name) 2273 if (!tracer_flags->opts[i].name)
2312 return -EINVAL; 2274 return -EINVAL;
2313 2275
2314 /* Refused to handle */ 2276 /* Refused to handle */
@@ -2316,9 +2278,9 @@ static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
2316 return ret; 2278 return ret;
2317 2279
2318 if (neg) 2280 if (neg)
2319 trace_flags->val &= ~opts->bit; 2281 tracer_flags->val &= ~opts->bit;
2320 else 2282 else
2321 trace_flags->val |= opts->bit; 2283 tracer_flags->val |= opts->bit;
2322 2284
2323 return 0; 2285 return 0;
2324} 2286}
@@ -2333,22 +2295,6 @@ static void set_tracer_flags(unsigned int mask, int enabled)
2333 trace_flags |= mask; 2295 trace_flags |= mask;
2334 else 2296 else
2335 trace_flags &= ~mask; 2297 trace_flags &= ~mask;
2336
2337 if (mask == TRACE_ITER_GLOBAL_CLK) {
2338 u64 (*func)(void);
2339
2340 if (enabled)
2341 func = trace_clock_global;
2342 else
2343 func = trace_clock_local;
2344
2345 mutex_lock(&trace_types_lock);
2346 ring_buffer_set_clock(global_trace.buffer, func);
2347
2348 if (max_tr.buffer)
2349 ring_buffer_set_clock(max_tr.buffer, func);
2350 mutex_unlock(&trace_types_lock);
2351 }
2352} 2298}
2353 2299
2354static ssize_t 2300static ssize_t
@@ -3316,6 +3262,62 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
3316 return cnt; 3262 return cnt;
3317} 3263}
3318 3264
3265static ssize_t tracing_clock_read(struct file *filp, char __user *ubuf,
3266 size_t cnt, loff_t *ppos)
3267{
3268 char buf[64];
3269 int bufiter = 0;
3270 int i;
3271
3272 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
3273 bufiter += snprintf(buf + bufiter, sizeof(buf) - bufiter,
3274 "%s%s%s%s", i ? " " : "",
3275 i == trace_clock_id ? "[" : "", trace_clocks[i].name,
3276 i == trace_clock_id ? "]" : "");
3277 bufiter += snprintf(buf + bufiter, sizeof(buf) - bufiter, "\n");
3278
3279 return simple_read_from_buffer(ubuf, cnt, ppos, buf, bufiter);
3280}
3281
3282static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
3283 size_t cnt, loff_t *fpos)
3284{
3285 char buf[64];
3286 const char *clockstr;
3287 int i;
3288
3289 if (cnt >= sizeof(buf))
3290 return -EINVAL;
3291
3292 if (copy_from_user(&buf, ubuf, cnt))
3293 return -EFAULT;
3294
3295 buf[cnt] = 0;
3296
3297 clockstr = strstrip(buf);
3298
3299 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
3300 if (strcmp(trace_clocks[i].name, clockstr) == 0)
3301 break;
3302 }
3303 if (i == ARRAY_SIZE(trace_clocks))
3304 return -EINVAL;
3305
3306 trace_clock_id = i;
3307
3308 mutex_lock(&trace_types_lock);
3309
3310 ring_buffer_set_clock(global_trace.buffer, trace_clocks[i].func);
3311 if (max_tr.buffer)
3312 ring_buffer_set_clock(max_tr.buffer, trace_clocks[i].func);
3313
3314 mutex_unlock(&trace_types_lock);
3315
3316 *fpos += cnt;
3317
3318 return cnt;
3319}
3320
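The tracing_clock_read()/tracing_clock_write() pair above backs the new trace_clock debugfs file registered further down. A minimal user-space sketch of driving it, assuming debugfs is mounted at /sys/kernel/debug and the stock "local" and "global" entries in trace_clocks[]:

#include <stdio.h>

int main(void)
{
	char clocks[128];
	FILE *f;

	/* The read handler lists every clock and brackets the current one,
	 * e.g. "[local] global". */
	f = fopen("/sys/kernel/debug/tracing/trace_clock", "r");
	if (f) {
		if (fgets(clocks, sizeof(clocks), f))
			printf("available clocks: %s", clocks);
		fclose(f);
	}

	/* The write handler strips whitespace, matches the name against
	 * trace_clocks[] and switches both global_trace and max_tr. */
	f = fopen("/sys/kernel/debug/tracing/trace_clock", "w");
	if (f) {
		fputs("global\n", f);
		fclose(f);
	}
	return 0;
}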
3319static const struct file_operations tracing_max_lat_fops = { 3321static const struct file_operations tracing_max_lat_fops = {
3320 .open = tracing_open_generic, 3322 .open = tracing_open_generic,
3321 .read = tracing_max_lat_read, 3323 .read = tracing_max_lat_read,
@@ -3353,6 +3355,12 @@ static const struct file_operations tracing_mark_fops = {
3353 .write = tracing_mark_write, 3355 .write = tracing_mark_write,
3354}; 3356};
3355 3357
3358static const struct file_operations trace_clock_fops = {
3359 .open = tracing_open_generic,
3360 .read = tracing_clock_read,
3361 .write = tracing_clock_write,
3362};
3363
3356struct ftrace_buffer_info { 3364struct ftrace_buffer_info {
3357 struct trace_array *tr; 3365 struct trace_array *tr;
3358 void *spare; 3366 void *spare;
@@ -3633,9 +3641,6 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
3633 cnt = ring_buffer_commit_overrun_cpu(tr->buffer, cpu); 3641 cnt = ring_buffer_commit_overrun_cpu(tr->buffer, cpu);
3634 trace_seq_printf(s, "commit overrun: %ld\n", cnt); 3642 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
3635 3643
3636 cnt = ring_buffer_nmi_dropped_cpu(tr->buffer, cpu);
3637 trace_seq_printf(s, "nmi dropped: %ld\n", cnt);
3638
3639 count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len); 3644 count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
3640 3645
3641 kfree(s); 3646 kfree(s);
@@ -4066,11 +4071,13 @@ static __init int tracer_init_debugfs(void)
4066 trace_create_file("current_tracer", 0644, d_tracer, 4071 trace_create_file("current_tracer", 0644, d_tracer,
4067 &global_trace, &set_tracer_fops); 4072 &global_trace, &set_tracer_fops);
4068 4073
4074#ifdef CONFIG_TRACER_MAX_TRACE
4069 trace_create_file("tracing_max_latency", 0644, d_tracer, 4075 trace_create_file("tracing_max_latency", 0644, d_tracer,
4070 &tracing_max_latency, &tracing_max_lat_fops); 4076 &tracing_max_latency, &tracing_max_lat_fops);
4071 4077
4072 trace_create_file("tracing_thresh", 0644, d_tracer, 4078 trace_create_file("tracing_thresh", 0644, d_tracer,
4073 &tracing_thresh, &tracing_max_lat_fops); 4079 &tracing_thresh, &tracing_max_lat_fops);
4080#endif
4074 4081
4075 trace_create_file("README", 0444, d_tracer, 4082 trace_create_file("README", 0444, d_tracer,
4076 NULL, &tracing_readme_fops); 4083 NULL, &tracing_readme_fops);
@@ -4087,6 +4094,9 @@ static __init int tracer_init_debugfs(void)
4087 trace_create_file("saved_cmdlines", 0444, d_tracer, 4094 trace_create_file("saved_cmdlines", 0444, d_tracer,
4088 NULL, &tracing_saved_cmdlines_fops); 4095 NULL, &tracing_saved_cmdlines_fops);
4089 4096
4097 trace_create_file("trace_clock", 0644, d_tracer, NULL,
4098 &trace_clock_fops);
4099
4090#ifdef CONFIG_DYNAMIC_FTRACE 4100#ifdef CONFIG_DYNAMIC_FTRACE
4091 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer, 4101 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
4092 &ftrace_update_tot_cnt, &tracing_dyn_info_fops); 4102 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
@@ -4265,7 +4275,6 @@ void ftrace_dump(void)
4265 4275
4266__init static int tracer_alloc_buffers(void) 4276__init static int tracer_alloc_buffers(void)
4267{ 4277{
4268 struct trace_array_cpu *data;
4269 int ring_buf_size; 4278 int ring_buf_size;
4270 int i; 4279 int i;
4271 int ret = -ENOMEM; 4280 int ret = -ENOMEM;
@@ -4315,7 +4324,7 @@ __init static int tracer_alloc_buffers(void)
4315 4324
4316 /* Allocate the first page for all buffers */ 4325 /* Allocate the first page for all buffers */
4317 for_each_tracing_cpu(i) { 4326 for_each_tracing_cpu(i) {
4318 data = global_trace.data[i] = &per_cpu(global_trace_cpu, i); 4327 global_trace.data[i] = &per_cpu(global_trace_cpu, i);
4319 max_tr.data[i] = &per_cpu(max_data, i); 4328 max_tr.data[i] = &per_cpu(max_data, i);
4320 } 4329 }
4321 4330
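The common thread of the trace.c hunks above is that every reserve/commit path now works on a struct ring_buffer rather than a struct trace_array. A minimal sketch of the resulting pattern for code writing to the current tracer's buffer (a hypothetical probe; the TRACE_FN record type and struct ftrace_entry are reused purely for illustration):

static void my_probe(unsigned long ip, unsigned long flags, int pc)
{
	struct ring_buffer *buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	/* the reserve hands back the ring buffer it reserved on ... */
	event = trace_current_buffer_lock_reserve(&buffer, TRACE_FN,
						  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = 0;

	/* ... so the commit targets exactly that buffer */
	trace_current_buffer_unlock_commit(buffer, event, flags, pc);
}

Real call sites, as in trace_function() above, also run filter_check_discard() against the same buffer before committing.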
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 8b9f4f6e9559..fa1dccb579d5 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -34,8 +34,6 @@ enum trace_type {
34 TRACE_GRAPH_ENT, 34 TRACE_GRAPH_ENT,
35 TRACE_USER_STACK, 35 TRACE_USER_STACK,
36 TRACE_HW_BRANCHES, 36 TRACE_HW_BRANCHES,
37 TRACE_SYSCALL_ENTER,
38 TRACE_SYSCALL_EXIT,
39 TRACE_KMEM_ALLOC, 37 TRACE_KMEM_ALLOC,
40 TRACE_KMEM_FREE, 38 TRACE_KMEM_FREE,
41 TRACE_POWER, 39 TRACE_POWER,
@@ -236,9 +234,6 @@ struct trace_array_cpu {
236 atomic_t disabled; 234 atomic_t disabled;
237 void *buffer_page; /* ring buffer spare */ 235 void *buffer_page; /* ring buffer spare */
238 236
239 /* these fields get copied into max-trace: */
240 unsigned long trace_idx;
241 unsigned long overrun;
242 unsigned long saved_latency; 237 unsigned long saved_latency;
243 unsigned long critical_start; 238 unsigned long critical_start;
244 unsigned long critical_end; 239 unsigned long critical_end;
@@ -246,6 +241,7 @@ struct trace_array_cpu {
246 unsigned long nice; 241 unsigned long nice;
247 unsigned long policy; 242 unsigned long policy;
248 unsigned long rt_priority; 243 unsigned long rt_priority;
244 unsigned long skipped_entries;
249 cycle_t preempt_timestamp; 245 cycle_t preempt_timestamp;
250 pid_t pid; 246 pid_t pid;
251 uid_t uid; 247 uid_t uid;
@@ -319,10 +315,6 @@ extern void __ftrace_bad_type(void);
319 TRACE_KMEM_ALLOC); \ 315 TRACE_KMEM_ALLOC); \
320 IF_ASSIGN(var, ent, struct kmemtrace_free_entry, \ 316 IF_ASSIGN(var, ent, struct kmemtrace_free_entry, \
321 TRACE_KMEM_FREE); \ 317 TRACE_KMEM_FREE); \
322 IF_ASSIGN(var, ent, struct syscall_trace_enter, \
323 TRACE_SYSCALL_ENTER); \
324 IF_ASSIGN(var, ent, struct syscall_trace_exit, \
325 TRACE_SYSCALL_EXIT); \
326 __ftrace_bad_type(); \ 318 __ftrace_bad_type(); \
327 } while (0) 319 } while (0)
328 320
@@ -423,12 +415,13 @@ void init_tracer_sysprof_debugfs(struct dentry *d_tracer);
423 415
424struct ring_buffer_event; 416struct ring_buffer_event;
425 417
426struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr, 418struct ring_buffer_event *
427 int type, 419trace_buffer_lock_reserve(struct ring_buffer *buffer,
428 unsigned long len, 420 int type,
429 unsigned long flags, 421 unsigned long len,
430 int pc); 422 unsigned long flags,
431void trace_buffer_unlock_commit(struct trace_array *tr, 423 int pc);
424void trace_buffer_unlock_commit(struct ring_buffer *buffer,
432 struct ring_buffer_event *event, 425 struct ring_buffer_event *event,
433 unsigned long flags, int pc); 426 unsigned long flags, int pc);
434 427
@@ -467,6 +460,7 @@ void trace_function(struct trace_array *tr,
467 460
468void trace_graph_return(struct ftrace_graph_ret *trace); 461void trace_graph_return(struct ftrace_graph_ret *trace);
469int trace_graph_entry(struct ftrace_graph_ent *trace); 462int trace_graph_entry(struct ftrace_graph_ent *trace);
463void set_graph_array(struct trace_array *tr);
470 464
471void tracing_start_cmdline_record(void); 465void tracing_start_cmdline_record(void);
472void tracing_stop_cmdline_record(void); 466void tracing_stop_cmdline_record(void);
@@ -478,16 +472,40 @@ void unregister_tracer(struct tracer *type);
478 472
479extern unsigned long nsecs_to_usecs(unsigned long nsecs); 473extern unsigned long nsecs_to_usecs(unsigned long nsecs);
480 474
475#ifdef CONFIG_TRACER_MAX_TRACE
481extern unsigned long tracing_max_latency; 476extern unsigned long tracing_max_latency;
482extern unsigned long tracing_thresh; 477extern unsigned long tracing_thresh;
483 478
484void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu); 479void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
485void update_max_tr_single(struct trace_array *tr, 480void update_max_tr_single(struct trace_array *tr,
486 struct task_struct *tsk, int cpu); 481 struct task_struct *tsk, int cpu);
482#endif /* CONFIG_TRACER_MAX_TRACE */
487 483
488void __trace_stack(struct trace_array *tr, 484#ifdef CONFIG_STACKTRACE
489 unsigned long flags, 485void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
490 int skip, int pc); 486 int skip, int pc);
487
488void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
489 int pc);
490
491void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
492 int pc);
493#else
494static inline void ftrace_trace_stack(struct trace_array *tr,
495 unsigned long flags, int skip, int pc)
496{
497}
498
499static inline void ftrace_trace_userstack(struct trace_array *tr,
500 unsigned long flags, int pc)
501{
502}
503
504static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
505 int skip, int pc)
506{
507}
508#endif /* CONFIG_STACKTRACE */
491 509
492extern cycle_t ftrace_now(int cpu); 510extern cycle_t ftrace_now(int cpu);
493 511
@@ -513,6 +531,10 @@ extern unsigned long ftrace_update_tot_cnt;
513extern int DYN_FTRACE_TEST_NAME(void); 531extern int DYN_FTRACE_TEST_NAME(void);
514#endif 532#endif
515 533
534extern int ring_buffer_expanded;
535extern bool tracing_selftest_disabled;
536DECLARE_PER_CPU(local_t, ftrace_cpu_disabled);
537
516#ifdef CONFIG_FTRACE_STARTUP_TEST 538#ifdef CONFIG_FTRACE_STARTUP_TEST
517extern int trace_selftest_startup_function(struct tracer *trace, 539extern int trace_selftest_startup_function(struct tracer *trace,
518 struct trace_array *tr); 540 struct trace_array *tr);
@@ -544,9 +566,16 @@ extern int
544trace_vbprintk(unsigned long ip, const char *fmt, va_list args); 566trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
545extern int 567extern int
546trace_vprintk(unsigned long ip, const char *fmt, va_list args); 568trace_vprintk(unsigned long ip, const char *fmt, va_list args);
569extern int
570trace_array_vprintk(struct trace_array *tr,
571 unsigned long ip, const char *fmt, va_list args);
572int trace_array_printk(struct trace_array *tr,
573 unsigned long ip, const char *fmt, ...);
547 574
548extern unsigned long trace_flags; 575extern unsigned long trace_flags;
549 576
577extern int trace_clock_id;
578
550/* Standard output formatting function used for function return traces */ 579/* Standard output formatting function used for function return traces */
551#ifdef CONFIG_FUNCTION_GRAPH_TRACER 580#ifdef CONFIG_FUNCTION_GRAPH_TRACER
552extern enum print_line_t print_graph_function(struct trace_iterator *iter); 581extern enum print_line_t print_graph_function(struct trace_iterator *iter);
@@ -635,9 +664,8 @@ enum trace_iterator_flags {
635 TRACE_ITER_PRINTK_MSGONLY = 0x10000, 664 TRACE_ITER_PRINTK_MSGONLY = 0x10000,
636 TRACE_ITER_CONTEXT_INFO = 0x20000, /* Print pid/cpu/time */ 665 TRACE_ITER_CONTEXT_INFO = 0x20000, /* Print pid/cpu/time */
637 TRACE_ITER_LATENCY_FMT = 0x40000, 666 TRACE_ITER_LATENCY_FMT = 0x40000,
638 TRACE_ITER_GLOBAL_CLK = 0x80000, 667 TRACE_ITER_SLEEP_TIME = 0x80000,
639 TRACE_ITER_SLEEP_TIME = 0x100000, 668 TRACE_ITER_GRAPH_TIME = 0x100000,
640 TRACE_ITER_GRAPH_TIME = 0x200000,
641}; 669};
642 670
643/* 671/*
@@ -734,6 +762,7 @@ struct ftrace_event_field {
734 struct list_head link; 762 struct list_head link;
735 char *name; 763 char *name;
736 char *type; 764 char *type;
765 int filter_type;
737 int offset; 766 int offset;
738 int size; 767 int size;
739 int is_signed; 768 int is_signed;
@@ -743,13 +772,15 @@ struct event_filter {
743 int n_preds; 772 int n_preds;
744 struct filter_pred **preds; 773 struct filter_pred **preds;
745 char *filter_string; 774 char *filter_string;
775 bool no_reset;
746}; 776};
747 777
748struct event_subsystem { 778struct event_subsystem {
749 struct list_head list; 779 struct list_head list;
750 const char *name; 780 const char *name;
751 struct dentry *entry; 781 struct dentry *entry;
752 void *filter; 782 struct event_filter *filter;
783 int nr_events;
753}; 784};
754 785
755struct filter_pred; 786struct filter_pred;
@@ -777,6 +808,7 @@ extern int apply_subsystem_event_filter(struct event_subsystem *system,
777 char *filter_string); 808 char *filter_string);
778extern void print_subsystem_event_filter(struct event_subsystem *system, 809extern void print_subsystem_event_filter(struct event_subsystem *system,
779 struct trace_seq *s); 810 struct trace_seq *s);
811extern int filter_assign_type(const char *type);
780 812
781static inline int 813static inline int
782filter_check_discard(struct ftrace_event_call *call, void *rec, 814filter_check_discard(struct ftrace_event_call *call, void *rec,
diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c
index a29ef23ffb47..19bfc75d467e 100644
--- a/kernel/trace/trace_boot.c
+++ b/kernel/trace/trace_boot.c
@@ -41,14 +41,12 @@ void disable_boot_trace(void)
41 41
42static int boot_trace_init(struct trace_array *tr) 42static int boot_trace_init(struct trace_array *tr)
43{ 43{
44 int cpu;
45 boot_trace = tr; 44 boot_trace = tr;
46 45
47 if (!tr) 46 if (!tr)
48 return 0; 47 return 0;
49 48
50 for_each_cpu(cpu, cpu_possible_mask) 49 tracing_reset_online_cpus(tr);
51 tracing_reset(tr, cpu);
52 50
53 tracing_sched_switch_assign_trace(tr); 51 tracing_sched_switch_assign_trace(tr);
54 return 0; 52 return 0;
@@ -132,6 +130,7 @@ struct tracer boot_tracer __read_mostly =
132void trace_boot_call(struct boot_trace_call *bt, initcall_t fn) 130void trace_boot_call(struct boot_trace_call *bt, initcall_t fn)
133{ 131{
134 struct ring_buffer_event *event; 132 struct ring_buffer_event *event;
133 struct ring_buffer *buffer;
135 struct trace_boot_call *entry; 134 struct trace_boot_call *entry;
136 struct trace_array *tr = boot_trace; 135 struct trace_array *tr = boot_trace;
137 136
@@ -144,13 +143,14 @@ void trace_boot_call(struct boot_trace_call *bt, initcall_t fn)
144 sprint_symbol(bt->func, (unsigned long)fn); 143 sprint_symbol(bt->func, (unsigned long)fn);
145 preempt_disable(); 144 preempt_disable();
146 145
147 event = trace_buffer_lock_reserve(tr, TRACE_BOOT_CALL, 146 buffer = tr->buffer;
147 event = trace_buffer_lock_reserve(buffer, TRACE_BOOT_CALL,
148 sizeof(*entry), 0, 0); 148 sizeof(*entry), 0, 0);
149 if (!event) 149 if (!event)
150 goto out; 150 goto out;
151 entry = ring_buffer_event_data(event); 151 entry = ring_buffer_event_data(event);
152 entry->boot_call = *bt; 152 entry->boot_call = *bt;
153 trace_buffer_unlock_commit(tr, event, 0, 0); 153 trace_buffer_unlock_commit(buffer, event, 0, 0);
154 out: 154 out:
155 preempt_enable(); 155 preempt_enable();
156} 156}
@@ -158,6 +158,7 @@ void trace_boot_call(struct boot_trace_call *bt, initcall_t fn)
158void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn) 158void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn)
159{ 159{
160 struct ring_buffer_event *event; 160 struct ring_buffer_event *event;
161 struct ring_buffer *buffer;
161 struct trace_boot_ret *entry; 162 struct trace_boot_ret *entry;
162 struct trace_array *tr = boot_trace; 163 struct trace_array *tr = boot_trace;
163 164
@@ -167,13 +168,14 @@ void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn)
167 sprint_symbol(bt->func, (unsigned long)fn); 168 sprint_symbol(bt->func, (unsigned long)fn);
168 preempt_disable(); 169 preempt_disable();
169 170
170 event = trace_buffer_lock_reserve(tr, TRACE_BOOT_RET, 171 buffer = tr->buffer;
172 event = trace_buffer_lock_reserve(buffer, TRACE_BOOT_RET,
171 sizeof(*entry), 0, 0); 173 sizeof(*entry), 0, 0);
172 if (!event) 174 if (!event)
173 goto out; 175 goto out;
174 entry = ring_buffer_event_data(event); 176 entry = ring_buffer_event_data(event);
175 entry->boot_ret = *bt; 177 entry->boot_ret = *bt;
176 trace_buffer_unlock_commit(tr, event, 0, 0); 178 trace_buffer_unlock_commit(buffer, event, 0, 0);
177 out: 179 out:
178 preempt_enable(); 180 preempt_enable();
179} 181}
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index e75276a49cf5..78b1ed230177 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -17,6 +17,8 @@
17#include <linux/ctype.h> 17#include <linux/ctype.h>
18#include <linux/delay.h> 18#include <linux/delay.h>
19 19
20#include <asm/setup.h>
21
20#include "trace_output.h" 22#include "trace_output.h"
21 23
22#define TRACE_SYSTEM "TRACE_SYSTEM" 24#define TRACE_SYSTEM "TRACE_SYSTEM"
@@ -25,8 +27,9 @@ DEFINE_MUTEX(event_mutex);
25 27
26LIST_HEAD(ftrace_events); 28LIST_HEAD(ftrace_events);
27 29
28int trace_define_field(struct ftrace_event_call *call, char *type, 30int trace_define_field(struct ftrace_event_call *call, const char *type,
29 char *name, int offset, int size, int is_signed) 31 const char *name, int offset, int size, int is_signed,
32 int filter_type)
30{ 33{
31 struct ftrace_event_field *field; 34 struct ftrace_event_field *field;
32 35
@@ -42,9 +45,15 @@ int trace_define_field(struct ftrace_event_call *call, char *type,
42 if (!field->type) 45 if (!field->type)
43 goto err; 46 goto err;
44 47
48 if (filter_type == FILTER_OTHER)
49 field->filter_type = filter_assign_type(type);
50 else
51 field->filter_type = filter_type;
52
45 field->offset = offset; 53 field->offset = offset;
46 field->size = size; 54 field->size = size;
47 field->is_signed = is_signed; 55 field->is_signed = is_signed;
56
48 list_add(&field->link, &call->fields); 57 list_add(&field->link, &call->fields);
49 58
50 return 0; 59 return 0;
@@ -60,6 +69,29 @@ err:
60} 69}
61EXPORT_SYMBOL_GPL(trace_define_field); 70EXPORT_SYMBOL_GPL(trace_define_field);
62 71
72#define __common_field(type, item) \
73 ret = trace_define_field(call, #type, "common_" #item, \
74 offsetof(typeof(ent), item), \
75 sizeof(ent.item), \
76 is_signed_type(type), FILTER_OTHER); \
77 if (ret) \
78 return ret;
79
80int trace_define_common_fields(struct ftrace_event_call *call)
81{
82 int ret;
83 struct trace_entry ent;
84
85 __common_field(unsigned short, type);
86 __common_field(unsigned char, flags);
87 __common_field(unsigned char, preempt_count);
88 __common_field(int, pid);
89 __common_field(int, tgid);
90
91 return ret;
92}
93EXPORT_SYMBOL_GPL(trace_define_common_fields);
94
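With trace_define_common_fields() and the extra filter_type argument to trace_define_field(), an event's define_fields callback (which is now handed the call itself) would look roughly like the sketch below; the callback name and the ip field, borrowed from struct ftrace_entry, are illustrative only:

static int my_event_define_fields(struct ftrace_event_call *call)
{
	int ret;

	/* common fields: type, flags, preempt_count, pid, tgid */
	ret = trace_define_common_fields(call);
	if (ret)
		return ret;

	/* FILTER_OTHER lets filter_assign_type() derive the filter type
	 * from the type string. */
	return trace_define_field(call, "unsigned long", "ip",
				  offsetof(struct ftrace_entry, ip),
				  sizeof(unsigned long), 0, FILTER_OTHER);
}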
63#ifdef CONFIG_MODULES 95#ifdef CONFIG_MODULES
64 96
65static void trace_destroy_fields(struct ftrace_event_call *call) 97static void trace_destroy_fields(struct ftrace_event_call *call)
@@ -84,14 +116,14 @@ static void ftrace_event_enable_disable(struct ftrace_event_call *call,
84 if (call->enabled) { 116 if (call->enabled) {
85 call->enabled = 0; 117 call->enabled = 0;
86 tracing_stop_cmdline_record(); 118 tracing_stop_cmdline_record();
87 call->unregfunc(); 119 call->unregfunc(call->data);
88 } 120 }
89 break; 121 break;
90 case 1: 122 case 1:
91 if (!call->enabled) { 123 if (!call->enabled) {
92 call->enabled = 1; 124 call->enabled = 1;
93 tracing_start_cmdline_record(); 125 tracing_start_cmdline_record();
94 call->regfunc(); 126 call->regfunc(call->data);
95 } 127 }
96 break; 128 break;
97 } 129 }
@@ -574,7 +606,7 @@ event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
574 trace_seq_printf(s, "format:\n"); 606 trace_seq_printf(s, "format:\n");
575 trace_write_header(s); 607 trace_write_header(s);
576 608
577 r = call->show_format(s); 609 r = call->show_format(call, s);
578 if (!r) { 610 if (!r) {
579 /* 611 /*
580 * ug! The format output is bigger than a PAGE!! 612 * ug! The format output is bigger than a PAGE!!
@@ -849,8 +881,10 @@ event_subsystem_dir(const char *name, struct dentry *d_events)
849 881
850 /* First see if we did not already create this dir */ 882 /* First see if we did not already create this dir */
851 list_for_each_entry(system, &event_subsystems, list) { 883 list_for_each_entry(system, &event_subsystems, list) {
852 if (strcmp(system->name, name) == 0) 884 if (strcmp(system->name, name) == 0) {
885 system->nr_events++;
853 return system->entry; 886 return system->entry;
887 }
854 } 888 }
855 889
856 /* need to create new entry */ 890 /* need to create new entry */
@@ -869,6 +903,7 @@ event_subsystem_dir(const char *name, struct dentry *d_events)
869 return d_events; 903 return d_events;
870 } 904 }
871 905
906 system->nr_events = 1;
872 system->name = kstrdup(name, GFP_KERNEL); 907 system->name = kstrdup(name, GFP_KERNEL);
873 if (!system->name) { 908 if (!system->name) {
874 debugfs_remove(system->entry); 909 debugfs_remove(system->entry);
@@ -920,15 +955,6 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
920 if (strcmp(call->system, TRACE_SYSTEM) != 0) 955 if (strcmp(call->system, TRACE_SYSTEM) != 0)
921 d_events = event_subsystem_dir(call->system, d_events); 956 d_events = event_subsystem_dir(call->system, d_events);
922 957
923 if (call->raw_init) {
924 ret = call->raw_init();
925 if (ret < 0) {
926 pr_warning("Could not initialize trace point"
927 " events/%s\n", call->name);
928 return ret;
929 }
930 }
931
932 call->dir = debugfs_create_dir(call->name, d_events); 958 call->dir = debugfs_create_dir(call->name, d_events);
933 if (!call->dir) { 959 if (!call->dir) {
934 pr_warning("Could not create debugfs " 960 pr_warning("Could not create debugfs "
@@ -945,7 +971,7 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
945 id); 971 id);
946 972
947 if (call->define_fields) { 973 if (call->define_fields) {
948 ret = call->define_fields(); 974 ret = call->define_fields(call);
949 if (ret < 0) { 975 if (ret < 0) {
950 pr_warning("Could not initialize trace point" 976 pr_warning("Could not initialize trace point"
951 " events/%s\n", call->name); 977 " events/%s\n", call->name);
@@ -987,6 +1013,32 @@ struct ftrace_module_file_ops {
987 struct file_operations filter; 1013 struct file_operations filter;
988}; 1014};
989 1015
1016static void remove_subsystem_dir(const char *name)
1017{
1018 struct event_subsystem *system;
1019
1020 if (strcmp(name, TRACE_SYSTEM) == 0)
1021 return;
1022
1023 list_for_each_entry(system, &event_subsystems, list) {
1024 if (strcmp(system->name, name) == 0) {
1025 if (!--system->nr_events) {
1026 struct event_filter *filter = system->filter;
1027
1028 debugfs_remove_recursive(system->entry);
1029 list_del(&system->list);
1030 if (filter) {
1031 kfree(filter->filter_string);
1032 kfree(filter);
1033 }
1034 kfree(system->name);
1035 kfree(system);
1036 }
1037 break;
1038 }
1039 }
1040}
1041
990static struct ftrace_module_file_ops * 1042static struct ftrace_module_file_ops *
991trace_create_file_ops(struct module *mod) 1043trace_create_file_ops(struct module *mod)
992{ 1044{
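
Together, the event_subsystem_dir() and remove_subsystem_dir() hunks above give each subsystem a simple user count: nr_events goes up when an event registers under an existing directory, and the directory (plus its filter and name) is torn down when the last event goes away. A rough sketch of that count-and-free-on-last-put pattern, with hypothetical names rather than the kernel structures:

#include <stdlib.h>
#include <string.h>

struct subsys {
	char *name;
	int nr_events;
	struct subsys *next;
};

static struct subsys *subsystems;

/* Find or create a subsystem and count one more event using it. */
static struct subsys *subsys_get(const char *name)
{
	struct subsys *s;

	for (s = subsystems; s; s = s->next) {
		if (strcmp(s->name, name) == 0) {
			s->nr_events++;
			return s;
		}
	}

	s = calloc(1, sizeof(*s));
	if (!s)
		return NULL;
	s->name = strdup(name);
	s->nr_events = 1;
	s->next = subsystems;
	subsystems = s;
	return s;
}

/* Drop one user; tear the subsystem down when the last event is gone. */
static void subsys_put(const char *name)
{
	struct subsys **p, *s;

	for (p = &subsystems; (s = *p) != NULL; p = &s->next) {
		if (strcmp(s->name, name) != 0)
			continue;
		if (--s->nr_events == 0) {
			*p = s->next;
			free(s->name);
			free(s);
		}
		return;
	}
}

int main(void)
{
	subsys_get("sched");
	subsys_get("sched");	/* second event in the same subsystem */
	subsys_put("sched");	/* still one user left */
	subsys_put("sched");	/* last user: subsystem freed */
	return 0;
}
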
@@ -1027,6 +1079,7 @@ static void trace_module_add_events(struct module *mod)
1027 struct ftrace_module_file_ops *file_ops = NULL; 1079 struct ftrace_module_file_ops *file_ops = NULL;
1028 struct ftrace_event_call *call, *start, *end; 1080 struct ftrace_event_call *call, *start, *end;
1029 struct dentry *d_events; 1081 struct dentry *d_events;
1082 int ret;
1030 1083
1031 start = mod->trace_events; 1084 start = mod->trace_events;
1032 end = mod->trace_events + mod->num_trace_events; 1085 end = mod->trace_events + mod->num_trace_events;
@@ -1042,7 +1095,15 @@ static void trace_module_add_events(struct module *mod)
1042 /* The linker may leave blanks */ 1095 /* The linker may leave blanks */
1043 if (!call->name) 1096 if (!call->name)
1044 continue; 1097 continue;
1045 1098 if (call->raw_init) {
1099 ret = call->raw_init();
1100 if (ret < 0) {
1101 if (ret != -ENOSYS)
1102 pr_warning("Could not initialize trace "
1103 "point events/%s\n", call->name);
1104 continue;
1105 }
1106 }
1046 /* 1107 /*
1047 * This module has events, create file ops for this module 1108 * This module has events, create file ops for this module
1048 * if not already done. 1109 * if not already done.
@@ -1077,6 +1138,7 @@ static void trace_module_remove_events(struct module *mod)
1077 list_del(&call->list); 1138 list_del(&call->list);
1078 trace_destroy_fields(call); 1139 trace_destroy_fields(call);
1079 destroy_preds(call); 1140 destroy_preds(call);
1141 remove_subsystem_dir(call->system);
1080 } 1142 }
1081 } 1143 }
1082 1144
@@ -1133,6 +1195,18 @@ struct notifier_block trace_module_nb = {
1133extern struct ftrace_event_call __start_ftrace_events[]; 1195extern struct ftrace_event_call __start_ftrace_events[];
1134extern struct ftrace_event_call __stop_ftrace_events[]; 1196extern struct ftrace_event_call __stop_ftrace_events[];
1135 1197
1198static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
1199
1200static __init int setup_trace_event(char *str)
1201{
1202 strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
1203 ring_buffer_expanded = 1;
1204 tracing_selftest_disabled = 1;
1205
1206 return 1;
1207}
1208__setup("trace_event=", setup_trace_event);
1209
1136static __init int event_trace_init(void) 1210static __init int event_trace_init(void)
1137{ 1211{
1138 struct ftrace_event_call *call; 1212 struct ftrace_event_call *call;
@@ -1140,6 +1214,8 @@ static __init int event_trace_init(void)
1140 struct dentry *entry; 1214 struct dentry *entry;
1141 struct dentry *d_events; 1215 struct dentry *d_events;
1142 int ret; 1216 int ret;
1217 char *buf = bootup_event_buf;
1218 char *token;
1143 1219
1144 d_tracer = tracing_init_dentry(); 1220 d_tracer = tracing_init_dentry();
1145 if (!d_tracer) 1221 if (!d_tracer)
@@ -1179,12 +1255,34 @@ static __init int event_trace_init(void)
1179 /* The linker may leave blanks */ 1255 /* The linker may leave blanks */
1180 if (!call->name) 1256 if (!call->name)
1181 continue; 1257 continue;
1258 if (call->raw_init) {
1259 ret = call->raw_init();
1260 if (ret < 0) {
1261 if (ret != -ENOSYS)
1262 pr_warning("Could not initialize trace "
1263 "point events/%s\n", call->name);
1264 continue;
1265 }
1266 }
1182 list_add(&call->list, &ftrace_events); 1267 list_add(&call->list, &ftrace_events);
1183 event_create_dir(call, d_events, &ftrace_event_id_fops, 1268 event_create_dir(call, d_events, &ftrace_event_id_fops,
1184 &ftrace_enable_fops, &ftrace_event_filter_fops, 1269 &ftrace_enable_fops, &ftrace_event_filter_fops,
1185 &ftrace_event_format_fops); 1270 &ftrace_event_format_fops);
1186 } 1271 }
1187 1272
1273 while (true) {
1274 token = strsep(&buf, ",");
1275
1276 if (!token)
1277 break;
1278 if (!*token)
1279 continue;
1280
1281 ret = ftrace_set_clr_event(token, 1);
1282 if (ret)
1283 pr_warning("Failed to enable trace event: %s\n", token);
1284 }
1285
1188 ret = register_module_notifier(&trace_module_nb); 1286 ret = register_module_notifier(&trace_module_nb);
1189 if (ret) 1287 if (ret)
1190 pr_warning("Failed to register trace events module notifier\n"); 1288 pr_warning("Failed to register trace events module notifier\n");
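
Boot-time event enabling works by stashing the trace_event= argument in bootup_event_buf via __setup() and later splitting it on commas with strsep(), enabling each token. A standalone sketch of that tokenizing loop (strsep() is a glibc/BSD extension; enable_event() stands in for ftrace_set_clr_event()):

#define _DEFAULT_SOURCE		/* for strsep() on glibc */
#include <stdio.h>
#include <string.h>

static int enable_event(const char *name)
{
	/* Stand-in for ftrace_set_clr_event(name, 1). */
	printf("enabling %s\n", name);
	return 0;
}

int main(void)
{
	char cmdline[] = "sched:sched_switch,irq,,kmem:kmalloc";
	char *buf = cmdline;
	char *token;

	while (1) {
		token = strsep(&buf, ",");
		if (!token)
			break;		/* string exhausted */
		if (!*token)
			continue;	/* skip empty entries like ",," */
		if (enable_event(token))
			fprintf(stderr, "failed to enable %s\n", token);
	}
	return 0;
}
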
@@ -1340,6 +1438,7 @@ static void
1340function_test_events_call(unsigned long ip, unsigned long parent_ip) 1438function_test_events_call(unsigned long ip, unsigned long parent_ip)
1341{ 1439{
1342 struct ring_buffer_event *event; 1440 struct ring_buffer_event *event;
1441 struct ring_buffer *buffer;
1343 struct ftrace_entry *entry; 1442 struct ftrace_entry *entry;
1344 unsigned long flags; 1443 unsigned long flags;
1345 long disabled; 1444 long disabled;
@@ -1357,7 +1456,8 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip)
1357 1456
1358 local_save_flags(flags); 1457 local_save_flags(flags);
1359 1458
1360 event = trace_current_buffer_lock_reserve(TRACE_FN, sizeof(*entry), 1459 event = trace_current_buffer_lock_reserve(&buffer,
1460 TRACE_FN, sizeof(*entry),
1361 flags, pc); 1461 flags, pc);
1362 if (!event) 1462 if (!event)
1363 goto out; 1463 goto out;
@@ -1365,7 +1465,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip)
1365 entry->ip = ip; 1465 entry->ip = ip;
1366 entry->parent_ip = parent_ip; 1466 entry->parent_ip = parent_ip;
1367 1467
1368 trace_nowake_buffer_unlock_commit(event, flags, pc); 1468 trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);
1369 1469
1370 out: 1470 out:
1371 atomic_dec(&per_cpu(test_event_disable, cpu)); 1471 atomic_dec(&per_cpu(test_event_disable, cpu));
@@ -1392,10 +1492,10 @@ static __init void event_trace_self_test_with_function(void)
1392 1492
1393static __init int event_trace_self_tests_init(void) 1493static __init int event_trace_self_tests_init(void)
1394{ 1494{
1395 1495 if (!tracing_selftest_disabled) {
1396 event_trace_self_tests(); 1496 event_trace_self_tests();
1397 1497 event_trace_self_test_with_function();
1398 event_trace_self_test_with_function(); 1498 }
1399 1499
1400 return 0; 1500 return 0;
1401} 1501}
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index f32dc9d1ea7b..93660fbbf629 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -163,6 +163,20 @@ static int filter_pred_string(struct filter_pred *pred, void *event,
163 return match; 163 return match;
164} 164}
165 165
166/* Filter predicate for char * pointers */
167static int filter_pred_pchar(struct filter_pred *pred, void *event,
168 int val1, int val2)
169{
170 char **addr = (char **)(event + pred->offset);
171 int cmp, match;
172
173 cmp = strncmp(*addr, pred->str_val, pred->str_len);
174
175 match = (!cmp) ^ pred->not;
176
177 return match;
178}
179
166/* 180/*
167 * Filter predicate for dynamic sized arrays of characters. 181 * Filter predicate for dynamic sized arrays of characters.
168 * These are implemented through a list of strings at the end 182 * These are implemented through a list of strings at the end
@@ -176,11 +190,13 @@ static int filter_pred_string(struct filter_pred *pred, void *event,
176static int filter_pred_strloc(struct filter_pred *pred, void *event, 190static int filter_pred_strloc(struct filter_pred *pred, void *event,
177 int val1, int val2) 191 int val1, int val2)
178{ 192{
179 unsigned short str_loc = *(unsigned short *)(event + pred->offset); 193 u32 str_item = *(u32 *)(event + pred->offset);
194 int str_loc = str_item & 0xffff;
195 int str_len = str_item >> 16;
180 char *addr = (char *)(event + str_loc); 196 char *addr = (char *)(event + str_loc);
181 int cmp, match; 197 int cmp, match;
182 198
183 cmp = strncmp(addr, pred->str_val, pred->str_len); 199 cmp = strncmp(addr, pred->str_val, str_len);
184 200
185 match = (!cmp) ^ pred->not; 201 match = (!cmp) ^ pred->not;
186 202
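
filter_pred_strloc() now decodes a single u32 descriptor for dynamic strings: the low 16 bits give the string's offset inside the event record and the high 16 bits give its length. A small sketch of that pack/unpack convention (the helper name is illustrative):

#include <stdint.h>
#include <stdio.h>

/* Pack a dynamic-array descriptor: offset in bits 0-15, length in bits 16-31. */
static inline uint32_t data_loc_pack(uint16_t offset, uint16_t len)
{
	return (uint32_t)len << 16 | offset;
}

int main(void)
{
	uint32_t loc = data_loc_pack(40, 12);	/* string at offset 40, 12 bytes */
	unsigned int offset = loc & 0xffff;
	unsigned int len = loc >> 16;

	printf("offset=%u len=%u\n", offset, len);
	return 0;
}

Because both halves travel in one 32-bit field, the predicate needs a single load to find and bound the string it compares against.
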
@@ -293,7 +309,7 @@ void print_event_filter(struct ftrace_event_call *call, struct trace_seq *s)
293 struct event_filter *filter = call->filter; 309 struct event_filter *filter = call->filter;
294 310
295 mutex_lock(&event_mutex); 311 mutex_lock(&event_mutex);
296 if (filter->filter_string) 312 if (filter && filter->filter_string)
297 trace_seq_printf(s, "%s\n", filter->filter_string); 313 trace_seq_printf(s, "%s\n", filter->filter_string);
298 else 314 else
299 trace_seq_printf(s, "none\n"); 315 trace_seq_printf(s, "none\n");
@@ -306,7 +322,7 @@ void print_subsystem_event_filter(struct event_subsystem *system,
306 struct event_filter *filter = system->filter; 322 struct event_filter *filter = system->filter;
307 323
308 mutex_lock(&event_mutex); 324 mutex_lock(&event_mutex);
309 if (filter->filter_string) 325 if (filter && filter->filter_string)
310 trace_seq_printf(s, "%s\n", filter->filter_string); 326 trace_seq_printf(s, "%s\n", filter->filter_string);
311 else 327 else
312 trace_seq_printf(s, "none\n"); 328 trace_seq_printf(s, "none\n");
@@ -374,6 +390,9 @@ void destroy_preds(struct ftrace_event_call *call)
374 struct event_filter *filter = call->filter; 390 struct event_filter *filter = call->filter;
375 int i; 391 int i;
376 392
393 if (!filter)
394 return;
395
377 for (i = 0; i < MAX_FILTER_PRED; i++) { 396 for (i = 0; i < MAX_FILTER_PRED; i++) {
378 if (filter->preds[i]) 397 if (filter->preds[i])
379 filter_free_pred(filter->preds[i]); 398 filter_free_pred(filter->preds[i]);
@@ -384,17 +403,19 @@ void destroy_preds(struct ftrace_event_call *call)
384 call->filter = NULL; 403 call->filter = NULL;
385} 404}
386 405
387int init_preds(struct ftrace_event_call *call) 406static int init_preds(struct ftrace_event_call *call)
388{ 407{
389 struct event_filter *filter; 408 struct event_filter *filter;
390 struct filter_pred *pred; 409 struct filter_pred *pred;
391 int i; 410 int i;
392 411
412 if (call->filter)
413 return 0;
414
393 filter = call->filter = kzalloc(sizeof(*filter), GFP_KERNEL); 415 filter = call->filter = kzalloc(sizeof(*filter), GFP_KERNEL);
394 if (!call->filter) 416 if (!call->filter)
395 return -ENOMEM; 417 return -ENOMEM;
396 418
397 call->filter_active = 0;
398 filter->n_preds = 0; 419 filter->n_preds = 0;
399 420
400 filter->preds = kzalloc(MAX_FILTER_PRED * sizeof(pred), GFP_KERNEL); 421 filter->preds = kzalloc(MAX_FILTER_PRED * sizeof(pred), GFP_KERNEL);
@@ -416,30 +437,55 @@ oom:
416 437
417 return -ENOMEM; 438 return -ENOMEM;
418} 439}
419EXPORT_SYMBOL_GPL(init_preds);
420 440
421static void filter_free_subsystem_preds(struct event_subsystem *system) 441static int init_subsystem_preds(struct event_subsystem *system)
422{ 442{
423 struct event_filter *filter = system->filter;
424 struct ftrace_event_call *call; 443 struct ftrace_event_call *call;
425 int i; 444 int err;
426 445
427 if (filter->n_preds) { 446 list_for_each_entry(call, &ftrace_events, list) {
428 for (i = 0; i < filter->n_preds; i++) 447 if (!call->define_fields)
429 filter_free_pred(filter->preds[i]); 448 continue;
430 kfree(filter->preds); 449
431 filter->preds = NULL; 450 if (strcmp(call->system, system->name) != 0)
432 filter->n_preds = 0; 451 continue;
452
453 err = init_preds(call);
454 if (err)
455 return err;
433 } 456 }
434 457
458 return 0;
459}
460
461enum {
462 FILTER_DISABLE_ALL,
463 FILTER_INIT_NO_RESET,
464 FILTER_SKIP_NO_RESET,
465};
466
467static void filter_free_subsystem_preds(struct event_subsystem *system,
468 int flag)
469{
470 struct ftrace_event_call *call;
471
435 list_for_each_entry(call, &ftrace_events, list) { 472 list_for_each_entry(call, &ftrace_events, list) {
436 if (!call->define_fields) 473 if (!call->define_fields)
437 continue; 474 continue;
438 475
439 if (!strcmp(call->system, system->name)) { 476 if (strcmp(call->system, system->name) != 0)
440 filter_disable_preds(call); 477 continue;
441 remove_filter_string(call->filter); 478
479 if (flag == FILTER_INIT_NO_RESET) {
480 call->filter->no_reset = false;
481 continue;
442 } 482 }
483
484 if (flag == FILTER_SKIP_NO_RESET && call->filter->no_reset)
485 continue;
486
487 filter_disable_preds(call);
488 remove_filter_string(call->filter);
443 } 489 }
444} 490}
445 491
@@ -468,12 +514,7 @@ static int filter_add_pred_fn(struct filter_parse_state *ps,
468 return 0; 514 return 0;
469} 515}
470 516
471enum { 517int filter_assign_type(const char *type)
472 FILTER_STATIC_STRING = 1,
473 FILTER_DYN_STRING
474};
475
476static int is_string_field(const char *type)
477{ 518{
478 if (strstr(type, "__data_loc") && strstr(type, "char")) 519 if (strstr(type, "__data_loc") && strstr(type, "char"))
479 return FILTER_DYN_STRING; 520 return FILTER_DYN_STRING;
@@ -481,12 +522,19 @@ static int is_string_field(const char *type)
481 if (strchr(type, '[') && strstr(type, "char")) 522 if (strchr(type, '[') && strstr(type, "char"))
482 return FILTER_STATIC_STRING; 523 return FILTER_STATIC_STRING;
483 524
484 return 0; 525 return FILTER_OTHER;
526}
527
528static bool is_string_field(struct ftrace_event_field *field)
529{
530 return field->filter_type == FILTER_DYN_STRING ||
531 field->filter_type == FILTER_STATIC_STRING ||
532 field->filter_type == FILTER_PTR_STRING;
485} 533}
486 534
487static int is_legal_op(struct ftrace_event_field *field, int op) 535static int is_legal_op(struct ftrace_event_field *field, int op)
488{ 536{
489 if (is_string_field(field->type) && (op != OP_EQ && op != OP_NE)) 537 if (is_string_field(field) && (op != OP_EQ && op != OP_NE))
490 return 0; 538 return 0;
491 539
492 return 1; 540 return 1;
@@ -537,22 +585,24 @@ static filter_pred_fn_t select_comparison_fn(int op, int field_size,
537 585
538static int filter_add_pred(struct filter_parse_state *ps, 586static int filter_add_pred(struct filter_parse_state *ps,
539 struct ftrace_event_call *call, 587 struct ftrace_event_call *call,
540 struct filter_pred *pred) 588 struct filter_pred *pred,
589 bool dry_run)
541{ 590{
542 struct ftrace_event_field *field; 591 struct ftrace_event_field *field;
543 filter_pred_fn_t fn; 592 filter_pred_fn_t fn;
544 unsigned long long val; 593 unsigned long long val;
545 int string_type;
546 int ret; 594 int ret;
547 595
548 pred->fn = filter_pred_none; 596 pred->fn = filter_pred_none;
549 597
550 if (pred->op == OP_AND) { 598 if (pred->op == OP_AND) {
551 pred->pop_n = 2; 599 pred->pop_n = 2;
552 return filter_add_pred_fn(ps, call, pred, filter_pred_and); 600 fn = filter_pred_and;
601 goto add_pred_fn;
553 } else if (pred->op == OP_OR) { 602 } else if (pred->op == OP_OR) {
554 pred->pop_n = 2; 603 pred->pop_n = 2;
555 return filter_add_pred_fn(ps, call, pred, filter_pred_or); 604 fn = filter_pred_or;
605 goto add_pred_fn;
556 } 606 }
557 607
558 field = find_event_field(call, pred->field_name); 608 field = find_event_field(call, pred->field_name);
@@ -568,16 +618,17 @@ static int filter_add_pred(struct filter_parse_state *ps,
568 return -EINVAL; 618 return -EINVAL;
569 } 619 }
570 620
571 string_type = is_string_field(field->type); 621 if (is_string_field(field)) {
572 if (string_type) { 622 pred->str_len = field->size;
573 if (string_type == FILTER_STATIC_STRING) 623
624 if (field->filter_type == FILTER_STATIC_STRING)
574 fn = filter_pred_string; 625 fn = filter_pred_string;
575 else 626 else if (field->filter_type == FILTER_DYN_STRING)
576 fn = filter_pred_strloc; 627 fn = filter_pred_strloc;
577 pred->str_len = field->size; 628 else {
578 if (pred->op == OP_NE) 629 fn = filter_pred_pchar;
579 pred->not = 1; 630 pred->str_len = strlen(pred->str_val);
580 return filter_add_pred_fn(ps, call, pred, fn); 631 }
581 } else { 632 } else {
582 if (field->is_signed) 633 if (field->is_signed)
583 ret = strict_strtoll(pred->str_val, 0, &val); 634 ret = strict_strtoll(pred->str_val, 0, &val);
@@ -588,41 +639,33 @@ static int filter_add_pred(struct filter_parse_state *ps,
588 return -EINVAL; 639 return -EINVAL;
589 } 640 }
590 pred->val = val; 641 pred->val = val;
591 }
592 642
593 fn = select_comparison_fn(pred->op, field->size, field->is_signed); 643 fn = select_comparison_fn(pred->op, field->size,
594 if (!fn) { 644 field->is_signed);
595 parse_error(ps, FILT_ERR_INVALID_OP, 0); 645 if (!fn) {
596 return -EINVAL; 646 parse_error(ps, FILT_ERR_INVALID_OP, 0);
647 return -EINVAL;
648 }
597 } 649 }
598 650
599 if (pred->op == OP_NE) 651 if (pred->op == OP_NE)
600 pred->not = 1; 652 pred->not = 1;
601 653
602 return filter_add_pred_fn(ps, call, pred, fn); 654add_pred_fn:
655 if (!dry_run)
656 return filter_add_pred_fn(ps, call, pred, fn);
657 return 0;
603} 658}
604 659
605static int filter_add_subsystem_pred(struct filter_parse_state *ps, 660static int filter_add_subsystem_pred(struct filter_parse_state *ps,
606 struct event_subsystem *system, 661 struct event_subsystem *system,
607 struct filter_pred *pred, 662 struct filter_pred *pred,
608 char *filter_string) 663 char *filter_string,
664 bool dry_run)
609{ 665{
610 struct event_filter *filter = system->filter;
611 struct ftrace_event_call *call; 666 struct ftrace_event_call *call;
612 int err = 0; 667 int err = 0;
613 668 bool fail = true;
614 if (!filter->preds) {
615 filter->preds = kzalloc(MAX_FILTER_PRED * sizeof(pred),
616 GFP_KERNEL);
617
618 if (!filter->preds)
619 return -ENOMEM;
620 }
621
622 if (filter->n_preds == MAX_FILTER_PRED) {
623 parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
624 return -ENOSPC;
625 }
626 669
627 list_for_each_entry(call, &ftrace_events, list) { 670 list_for_each_entry(call, &ftrace_events, list) {
628 671
@@ -632,19 +675,24 @@ static int filter_add_subsystem_pred(struct filter_parse_state *ps,
632 if (strcmp(call->system, system->name)) 675 if (strcmp(call->system, system->name))
633 continue; 676 continue;
634 677
635 err = filter_add_pred(ps, call, pred); 678 if (call->filter->no_reset)
636 if (err) { 679 continue;
637 filter_free_subsystem_preds(system); 680
638 parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0); 681 err = filter_add_pred(ps, call, pred, dry_run);
639 goto out; 682 if (err)
640 } 683 call->filter->no_reset = true;
641 replace_filter_string(call->filter, filter_string); 684 else
685 fail = false;
686
687 if (!dry_run)
688 replace_filter_string(call->filter, filter_string);
642 } 689 }
643 690
644 filter->preds[filter->n_preds] = pred; 691 if (fail) {
645 filter->n_preds++; 692 parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
646out: 693 return err;
647 return err; 694 }
695 return 0;
648} 696}
649 697
650static void parse_init(struct filter_parse_state *ps, 698static void parse_init(struct filter_parse_state *ps,
@@ -1003,12 +1051,14 @@ static int check_preds(struct filter_parse_state *ps)
1003static int replace_preds(struct event_subsystem *system, 1051static int replace_preds(struct event_subsystem *system,
1004 struct ftrace_event_call *call, 1052 struct ftrace_event_call *call,
1005 struct filter_parse_state *ps, 1053 struct filter_parse_state *ps,
1006 char *filter_string) 1054 char *filter_string,
1055 bool dry_run)
1007{ 1056{
1008 char *operand1 = NULL, *operand2 = NULL; 1057 char *operand1 = NULL, *operand2 = NULL;
1009 struct filter_pred *pred; 1058 struct filter_pred *pred;
1010 struct postfix_elt *elt; 1059 struct postfix_elt *elt;
1011 int err; 1060 int err;
1061 int n_preds = 0;
1012 1062
1013 err = check_preds(ps); 1063 err = check_preds(ps);
1014 if (err) 1064 if (err)
@@ -1027,24 +1077,14 @@ static int replace_preds(struct event_subsystem *system,
1027 continue; 1077 continue;
1028 } 1078 }
1029 1079
1080 if (n_preds++ == MAX_FILTER_PRED) {
1081 parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
1082 return -ENOSPC;
1083 }
1084
1030 if (elt->op == OP_AND || elt->op == OP_OR) { 1085 if (elt->op == OP_AND || elt->op == OP_OR) {
1031 pred = create_logical_pred(elt->op); 1086 pred = create_logical_pred(elt->op);
1032 if (!pred) 1087 goto add_pred;
1033 return -ENOMEM;
1034 if (call) {
1035 err = filter_add_pred(ps, call, pred);
1036 filter_free_pred(pred);
1037 } else {
1038 err = filter_add_subsystem_pred(ps, system,
1039 pred, filter_string);
1040 if (err)
1041 filter_free_pred(pred);
1042 }
1043 if (err)
1044 return err;
1045
1046 operand1 = operand2 = NULL;
1047 continue;
1048 } 1088 }
1049 1089
1050 if (!operand1 || !operand2) { 1090 if (!operand1 || !operand2) {
@@ -1053,17 +1093,15 @@ static int replace_preds(struct event_subsystem *system,
1053 } 1093 }
1054 1094
1055 pred = create_pred(elt->op, operand1, operand2); 1095 pred = create_pred(elt->op, operand1, operand2);
1096add_pred:
1056 if (!pred) 1097 if (!pred)
1057 return -ENOMEM; 1098 return -ENOMEM;
1058 if (call) { 1099 if (call)
1059 err = filter_add_pred(ps, call, pred); 1100 err = filter_add_pred(ps, call, pred, false);
1060 filter_free_pred(pred); 1101 else
1061 } else {
1062 err = filter_add_subsystem_pred(ps, system, pred, 1102 err = filter_add_subsystem_pred(ps, system, pred,
1063 filter_string); 1103 filter_string, dry_run);
1064 if (err) 1104 filter_free_pred(pred);
1065 filter_free_pred(pred);
1066 }
1067 if (err) 1105 if (err)
1068 return err; 1106 return err;
1069 1107
@@ -1081,6 +1119,10 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
1081 1119
1082 mutex_lock(&event_mutex); 1120 mutex_lock(&event_mutex);
1083 1121
1122 err = init_preds(call);
1123 if (err)
1124 goto out_unlock;
1125
1084 if (!strcmp(strstrip(filter_string), "0")) { 1126 if (!strcmp(strstrip(filter_string), "0")) {
1085 filter_disable_preds(call); 1127 filter_disable_preds(call);
1086 remove_filter_string(call->filter); 1128 remove_filter_string(call->filter);
@@ -1103,7 +1145,7 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
1103 goto out; 1145 goto out;
1104 } 1146 }
1105 1147
1106 err = replace_preds(NULL, call, ps, filter_string); 1148 err = replace_preds(NULL, call, ps, filter_string, false);
1107 if (err) 1149 if (err)
1108 append_filter_err(ps, call->filter); 1150 append_filter_err(ps, call->filter);
1109 1151
@@ -1126,8 +1168,12 @@ int apply_subsystem_event_filter(struct event_subsystem *system,
1126 1168
1127 mutex_lock(&event_mutex); 1169 mutex_lock(&event_mutex);
1128 1170
1171 err = init_subsystem_preds(system);
1172 if (err)
1173 goto out_unlock;
1174
1129 if (!strcmp(strstrip(filter_string), "0")) { 1175 if (!strcmp(strstrip(filter_string), "0")) {
1130 filter_free_subsystem_preds(system); 1176 filter_free_subsystem_preds(system, FILTER_DISABLE_ALL);
1131 remove_filter_string(system->filter); 1177 remove_filter_string(system->filter);
1132 mutex_unlock(&event_mutex); 1178 mutex_unlock(&event_mutex);
1133 return 0; 1179 return 0;
@@ -1138,7 +1184,6 @@ int apply_subsystem_event_filter(struct event_subsystem *system,
1138 if (!ps) 1184 if (!ps)
1139 goto out_unlock; 1185 goto out_unlock;
1140 1186
1141 filter_free_subsystem_preds(system);
1142 replace_filter_string(system->filter, filter_string); 1187 replace_filter_string(system->filter, filter_string);
1143 1188
1144 parse_init(ps, filter_ops, filter_string); 1189 parse_init(ps, filter_ops, filter_string);
@@ -1148,9 +1193,23 @@ int apply_subsystem_event_filter(struct event_subsystem *system,
1148 goto out; 1193 goto out;
1149 } 1194 }
1150 1195
1151 err = replace_preds(system, NULL, ps, filter_string); 1196 filter_free_subsystem_preds(system, FILTER_INIT_NO_RESET);
1152 if (err) 1197
 1198	/* see which events the filter can be applied to */
1199 err = replace_preds(system, NULL, ps, filter_string, true);
1200 if (err) {
1153 append_filter_err(ps, system->filter); 1201 append_filter_err(ps, system->filter);
1202 goto out;
1203 }
1204
1205 filter_free_subsystem_preds(system, FILTER_SKIP_NO_RESET);
1206
1207 /* really apply the filter to the events */
1208 err = replace_preds(system, NULL, ps, filter_string, false);
1209 if (err) {
1210 append_filter_err(ps, system->filter);
 1211		filter_free_subsystem_preds(system, FILTER_SKIP_NO_RESET);
1212 }
1154 1213
1155out: 1214out:
1156 filter_opstack_clear(ps); 1215 filter_opstack_clear(ps);
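
apply_subsystem_event_filter() now works in two passes: a dry run first binds the expression against every event in the subsystem and flags the ones whose fields do not match (no_reset), then only the events that passed are reset and get the filter installed for real. A toy sketch of that validate-then-commit shape; the types and the try_apply() helper are illustrative, not the kernel API:

#include <errno.h>
#include <stdio.h>
#include <string.h>

struct event {
	const char *field;	/* the one field this toy event exposes */
	const char *filter;	/* the filter currently installed, if any */
	int no_reset;		/* set when the filter cannot apply here */
};

/* Stand-in for filter_add_pred(): the filter "fits" only if it mentions a
 * field the event actually has; with dry_run set nothing is installed. */
static int try_apply(struct event *ev, const char *filter, int dry_run)
{
	if (!strstr(filter, ev->field))
		return -EINVAL;
	if (!dry_run)
		ev->filter = filter;
	return 0;
}

static int apply_to_subsystem(struct event *events, int nr, const char *filter)
{
	int i, all_failed = 1;

	/* Pass 1: dry run - learn which events can take this filter. */
	for (i = 0; i < nr; i++) {
		events[i].no_reset = 0;
		if (try_apply(&events[i], filter, 1))
			events[i].no_reset = 1;	/* keeps its old filter */
		else
			all_failed = 0;
	}
	if (all_failed)
		return -EINVAL;

	/* Pass 2: apply for real, skipping the events that failed pass 1. */
	for (i = 0; i < nr; i++) {
		if (!events[i].no_reset)
			try_apply(&events[i], filter, 0);
	}
	return 0;
}

int main(void)
{
	struct event ev[] = {
		{ .field = "prev_pid" },
		{ .field = "irq" },
	};

	if (apply_to_subsystem(ev, 2, "prev_pid == 1") == 0)
		printf("filter installed on: %s\n",
		       ev[0].filter ? ev[0].field : "(none)");
	return 0;
}

The dry run is what lets a subsystem-wide filter apply partially: events that cannot take it simply keep whatever filter they already had.
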
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index d06cf898dc86..df1bf6e48bb9 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -60,7 +60,8 @@ extern void __bad_type_size(void);
60#undef TRACE_EVENT_FORMAT 60#undef TRACE_EVENT_FORMAT
61#define TRACE_EVENT_FORMAT(call, proto, args, fmt, tstruct, tpfmt) \ 61#define TRACE_EVENT_FORMAT(call, proto, args, fmt, tstruct, tpfmt) \
62static int \ 62static int \
63ftrace_format_##call(struct trace_seq *s) \ 63ftrace_format_##call(struct ftrace_event_call *unused, \
64 struct trace_seq *s) \
64{ \ 65{ \
65 struct args field; \ 66 struct args field; \
66 int ret; \ 67 int ret; \
@@ -76,7 +77,8 @@ ftrace_format_##call(struct trace_seq *s) \
76#define TRACE_EVENT_FORMAT_NOFILTER(call, proto, args, fmt, tstruct, \ 77#define TRACE_EVENT_FORMAT_NOFILTER(call, proto, args, fmt, tstruct, \
77 tpfmt) \ 78 tpfmt) \
78static int \ 79static int \
79ftrace_format_##call(struct trace_seq *s) \ 80ftrace_format_##call(struct ftrace_event_call *unused, \
81 struct trace_seq *s) \
80{ \ 82{ \
81 struct args field; \ 83 struct args field; \
82 int ret; \ 84 int ret; \
@@ -117,7 +119,7 @@ ftrace_format_##call(struct trace_seq *s) \
117 119
118#undef TRACE_EVENT_FORMAT 120#undef TRACE_EVENT_FORMAT
119#define TRACE_EVENT_FORMAT(call, proto, args, fmt, tstruct, tpfmt) \ 121#define TRACE_EVENT_FORMAT(call, proto, args, fmt, tstruct, tpfmt) \
120int ftrace_define_fields_##call(void); \ 122int ftrace_define_fields_##call(struct ftrace_event_call *event_call); \
121static int ftrace_raw_init_event_##call(void); \ 123static int ftrace_raw_init_event_##call(void); \
122 \ 124 \
123struct ftrace_event_call __used \ 125struct ftrace_event_call __used \
@@ -133,7 +135,6 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
133static int ftrace_raw_init_event_##call(void) \ 135static int ftrace_raw_init_event_##call(void) \
134{ \ 136{ \
135 INIT_LIST_HEAD(&event_##call.fields); \ 137 INIT_LIST_HEAD(&event_##call.fields); \
136 init_preds(&event_##call); \
137 return 0; \ 138 return 0; \
138} \ 139} \
139 140
@@ -156,7 +157,8 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
156#define TRACE_FIELD(type, item, assign) \ 157#define TRACE_FIELD(type, item, assign) \
157 ret = trace_define_field(event_call, #type, #item, \ 158 ret = trace_define_field(event_call, #type, #item, \
158 offsetof(typeof(field), item), \ 159 offsetof(typeof(field), item), \
159 sizeof(field.item), is_signed_type(type)); \ 160 sizeof(field.item), \
161 is_signed_type(type), FILTER_OTHER); \
160 if (ret) \ 162 if (ret) \
161 return ret; 163 return ret;
162 164
@@ -164,7 +166,7 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
164#define TRACE_FIELD_SPECIAL(type, item, len, cmd) \ 166#define TRACE_FIELD_SPECIAL(type, item, len, cmd) \
165 ret = trace_define_field(event_call, #type "[" #len "]", #item, \ 167 ret = trace_define_field(event_call, #type "[" #len "]", #item, \
166 offsetof(typeof(field), item), \ 168 offsetof(typeof(field), item), \
167 sizeof(field.item), 0); \ 169 sizeof(field.item), 0, FILTER_OTHER); \
168 if (ret) \ 170 if (ret) \
169 return ret; 171 return ret;
170 172
@@ -172,7 +174,8 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
172#define TRACE_FIELD_SIGN(type, item, assign, is_signed) \ 174#define TRACE_FIELD_SIGN(type, item, assign, is_signed) \
173 ret = trace_define_field(event_call, #type, #item, \ 175 ret = trace_define_field(event_call, #type, #item, \
174 offsetof(typeof(field), item), \ 176 offsetof(typeof(field), item), \
175 sizeof(field.item), is_signed); \ 177 sizeof(field.item), is_signed, \
178 FILTER_OTHER); \
176 if (ret) \ 179 if (ret) \
177 return ret; 180 return ret;
178 181
@@ -182,17 +185,14 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
182#undef TRACE_EVENT_FORMAT 185#undef TRACE_EVENT_FORMAT
183#define TRACE_EVENT_FORMAT(call, proto, args, fmt, tstruct, tpfmt) \ 186#define TRACE_EVENT_FORMAT(call, proto, args, fmt, tstruct, tpfmt) \
184int \ 187int \
185ftrace_define_fields_##call(void) \ 188ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
186{ \ 189{ \
187 struct ftrace_event_call *event_call = &event_##call; \
188 struct args field; \ 190 struct args field; \
189 int ret; \ 191 int ret; \
190 \ 192 \
191 __common_field(unsigned char, type, 0); \ 193 ret = trace_define_common_fields(event_call); \
192 __common_field(unsigned char, flags, 0); \ 194 if (ret) \
193 __common_field(unsigned char, preempt_count, 0); \ 195 return ret; \
194 __common_field(int, pid, 1); \
195 __common_field(int, tgid, 1); \
196 \ 196 \
197 tstruct; \ 197 tstruct; \
198 \ 198 \
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 75ef000613c3..5b01b94518fc 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -288,11 +288,9 @@ static int
288ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip, 288ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
289 struct ftrace_probe_ops *ops, void *data) 289 struct ftrace_probe_ops *ops, void *data)
290{ 290{
291 char str[KSYM_SYMBOL_LEN];
292 long count = (long)data; 291 long count = (long)data;
293 292
294 kallsyms_lookup(ip, NULL, NULL, NULL, str); 293 seq_printf(m, "%pf:", (void *)ip);
295 seq_printf(m, "%s:", str);
296 294
297 if (ops == &traceon_probe_ops) 295 if (ops == &traceon_probe_ops)
298 seq_printf(m, "traceon"); 296 seq_printf(m, "traceon");
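
The trace_functions.c hunk drops the manual kallsyms_lookup() into a stack buffer and lets the kernel's printf extension do the work: "%pf" symbolizes a function pointer directly in the format string. There is no userspace equivalent of %pf, but dladdr() gives a rough analogue (build with -rdynamic, and -ldl on older glibc, so the name resolves):

#define _GNU_SOURCE
#include <dlfcn.h>
#include <stdio.h>

void interesting_function(void)
{
}

/* Print "name:" for a code address, falling back to the raw pointer. */
static void print_symbolized(void *addr)
{
	Dl_info info;

	if (dladdr(addr, &info) && info.dli_sname)
		printf("%s:\n", info.dli_sname);
	else
		printf("%p:\n", addr);
}

int main(void)
{
	print_symbolized((void *)interesting_function);
	return 0;
}
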
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 420ec3487579..b3749a2c3132 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -52,7 +52,7 @@ static struct tracer_flags tracer_flags = {
52 .opts = trace_opts 52 .opts = trace_opts
53}; 53};
54 54
55/* pid on the last trace processed */ 55static struct trace_array *graph_array;
56 56
57 57
58/* Add a function return address to the trace stack on thread info.*/ 58/* Add a function return address to the trace stack on thread info.*/
@@ -166,10 +166,123 @@ unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
166 return ret; 166 return ret;
167} 167}
168 168
169static int __trace_graph_entry(struct trace_array *tr,
170 struct ftrace_graph_ent *trace,
171 unsigned long flags,
172 int pc)
173{
174 struct ftrace_event_call *call = &event_funcgraph_entry;
175 struct ring_buffer_event *event;
176 struct ring_buffer *buffer = tr->buffer;
177 struct ftrace_graph_ent_entry *entry;
178
179 if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
180 return 0;
181
182 event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
183 sizeof(*entry), flags, pc);
184 if (!event)
185 return 0;
186 entry = ring_buffer_event_data(event);
187 entry->graph_ent = *trace;
188 if (!filter_current_check_discard(buffer, call, entry, event))
189 ring_buffer_unlock_commit(buffer, event);
190
191 return 1;
192}
193
194int trace_graph_entry(struct ftrace_graph_ent *trace)
195{
196 struct trace_array *tr = graph_array;
197 struct trace_array_cpu *data;
198 unsigned long flags;
199 long disabled;
200 int ret;
201 int cpu;
202 int pc;
203
204 if (unlikely(!tr))
205 return 0;
206
207 if (!ftrace_trace_task(current))
208 return 0;
209
210 if (!ftrace_graph_addr(trace->func))
211 return 0;
212
213 local_irq_save(flags);
214 cpu = raw_smp_processor_id();
215 data = tr->data[cpu];
216 disabled = atomic_inc_return(&data->disabled);
217 if (likely(disabled == 1)) {
218 pc = preempt_count();
219 ret = __trace_graph_entry(tr, trace, flags, pc);
220 } else {
221 ret = 0;
222 }
223 /* Only do the atomic if it is not already set */
224 if (!test_tsk_trace_graph(current))
225 set_tsk_trace_graph(current);
226
227 atomic_dec(&data->disabled);
228 local_irq_restore(flags);
229
230 return ret;
231}
232
233static void __trace_graph_return(struct trace_array *tr,
234 struct ftrace_graph_ret *trace,
235 unsigned long flags,
236 int pc)
237{
238 struct ftrace_event_call *call = &event_funcgraph_exit;
239 struct ring_buffer_event *event;
240 struct ring_buffer *buffer = tr->buffer;
241 struct ftrace_graph_ret_entry *entry;
242
243 if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
244 return;
245
246 event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
247 sizeof(*entry), flags, pc);
248 if (!event)
249 return;
250 entry = ring_buffer_event_data(event);
251 entry->ret = *trace;
252 if (!filter_current_check_discard(buffer, call, entry, event))
253 ring_buffer_unlock_commit(buffer, event);
254}
255
256void trace_graph_return(struct ftrace_graph_ret *trace)
257{
258 struct trace_array *tr = graph_array;
259 struct trace_array_cpu *data;
260 unsigned long flags;
261 long disabled;
262 int cpu;
263 int pc;
264
265 local_irq_save(flags);
266 cpu = raw_smp_processor_id();
267 data = tr->data[cpu];
268 disabled = atomic_inc_return(&data->disabled);
269 if (likely(disabled == 1)) {
270 pc = preempt_count();
271 __trace_graph_return(tr, trace, flags, pc);
272 }
273 if (!trace->depth)
274 clear_tsk_trace_graph(current);
275 atomic_dec(&data->disabled);
276 local_irq_restore(flags);
277}
278
169static int graph_trace_init(struct trace_array *tr) 279static int graph_trace_init(struct trace_array *tr)
170{ 280{
171 int ret = register_ftrace_graph(&trace_graph_return, 281 int ret;
172 &trace_graph_entry); 282
283 graph_array = tr;
284 ret = register_ftrace_graph(&trace_graph_return,
285 &trace_graph_entry);
173 if (ret) 286 if (ret)
174 return ret; 287 return ret;
175 tracing_start_cmdline_record(); 288 tracing_start_cmdline_record();
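
Both trace_graph_entry() and trace_graph_return() above guard against recursing into the tracer on the same CPU: they bump a per-cpu disabled counter with atomic_inc_return() and only record when the increment returned 1. A single-counter sketch of that reentrancy guard using C11 atomics (the kernel keeps one counter per CPU):

#include <stdatomic.h>
#include <stdio.h>

static atomic_long disabled;	/* one counter per CPU in the kernel */

static void record_event(const char *what)
{
	long nested = atomic_fetch_add(&disabled, 1) + 1;

	/* Only the outermost caller records; a nested call (the tracer
	 * tracing itself) is silently dropped. */
	if (nested == 1)
		printf("recorded: %s\n", what);

	atomic_fetch_sub(&disabled, 1);
}

int main(void)
{
	record_event("funcgraph_entry");
	return 0;
}
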
@@ -177,49 +290,30 @@ static int graph_trace_init(struct trace_array *tr)
177 return 0; 290 return 0;
178} 291}
179 292
293void set_graph_array(struct trace_array *tr)
294{
295 graph_array = tr;
296}
297
180static void graph_trace_reset(struct trace_array *tr) 298static void graph_trace_reset(struct trace_array *tr)
181{ 299{
182 tracing_stop_cmdline_record(); 300 tracing_stop_cmdline_record();
183 unregister_ftrace_graph(); 301 unregister_ftrace_graph();
184} 302}
185 303
186static inline int log10_cpu(int nb) 304static int max_bytes_for_cpu;
187{
188 if (nb / 100)
189 return 3;
190 if (nb / 10)
191 return 2;
192 return 1;
193}
194 305
195static enum print_line_t 306static enum print_line_t
196print_graph_cpu(struct trace_seq *s, int cpu) 307print_graph_cpu(struct trace_seq *s, int cpu)
197{ 308{
198 int i;
199 int ret; 309 int ret;
200 int log10_this = log10_cpu(cpu);
201 int log10_all = log10_cpu(cpumask_weight(cpu_online_mask));
202
203 310
204 /* 311 /*
205 * Start with a space character - to make it stand out 312 * Start with a space character - to make it stand out
206 * to the right a bit when trace output is pasted into 313 * to the right a bit when trace output is pasted into
207 * email: 314 * email:
208 */ 315 */
209 ret = trace_seq_printf(s, " "); 316 ret = trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
210
211 /*
212 * Tricky - we space the CPU field according to the max
213 * number of online CPUs. On a 2-cpu system it would take
214 * a maximum of 1 digit - on a 128 cpu system it would
215 * take up to 3 digits:
216 */
217 for (i = 0; i < log10_all - log10_this; i++) {
218 ret = trace_seq_printf(s, " ");
219 if (!ret)
220 return TRACE_TYPE_PARTIAL_LINE;
221 }
222 ret = trace_seq_printf(s, "%d) ", cpu);
223 if (!ret) 317 if (!ret)
224 return TRACE_TYPE_PARTIAL_LINE; 318 return TRACE_TYPE_PARTIAL_LINE;
225 319
@@ -565,11 +659,7 @@ print_graph_entry_leaf(struct trace_iterator *iter,
565 return TRACE_TYPE_PARTIAL_LINE; 659 return TRACE_TYPE_PARTIAL_LINE;
566 } 660 }
567 661
568 ret = seq_print_ip_sym(s, call->func, 0); 662 ret = trace_seq_printf(s, "%pf();\n", (void *)call->func);
569 if (!ret)
570 return TRACE_TYPE_PARTIAL_LINE;
571
572 ret = trace_seq_printf(s, "();\n");
573 if (!ret) 663 if (!ret)
574 return TRACE_TYPE_PARTIAL_LINE; 664 return TRACE_TYPE_PARTIAL_LINE;
575 665
@@ -612,11 +702,7 @@ print_graph_entry_nested(struct trace_iterator *iter,
612 return TRACE_TYPE_PARTIAL_LINE; 702 return TRACE_TYPE_PARTIAL_LINE;
613 } 703 }
614 704
615 ret = seq_print_ip_sym(s, call->func, 0); 705 ret = trace_seq_printf(s, "%pf() {\n", (void *)call->func);
616 if (!ret)
617 return TRACE_TYPE_PARTIAL_LINE;
618
619 ret = trace_seq_printf(s, "() {\n");
620 if (!ret) 706 if (!ret)
621 return TRACE_TYPE_PARTIAL_LINE; 707 return TRACE_TYPE_PARTIAL_LINE;
622 708
@@ -934,6 +1020,8 @@ static struct tracer graph_trace __read_mostly = {
934 1020
935static __init int init_graph_trace(void) 1021static __init int init_graph_trace(void)
936{ 1022{
1023 max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);
1024
937 return register_tracer(&graph_trace); 1025 return register_tracer(&graph_trace);
938} 1026}
939 1027
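
init_graph_trace() replaces the hand-rolled log10_cpu() with snprintf(NULL, 0, "%d", nr_cpu_ids - 1), which returns how many characters the largest CPU id needs, and print_graph_cpu() then pads every CPU column to that width with "%*d". The same trick in plain C:

#include <stdio.h>

int main(void)
{
	int nr_cpu_ids = 128;
	/* snprintf with a NULL buffer and size 0 only computes the length. */
	int width = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);
	int cpu;

	for (cpu = 0; cpu < 3; cpu++)
		printf(" %*d) ...\n", width, cpu);	/* right-aligned column */
	return 0;
}
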
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index b923d13e2fad..5555b75a0d12 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -178,7 +178,6 @@ out_unlock:
178out: 178out:
179 data->critical_sequence = max_sequence; 179 data->critical_sequence = max_sequence;
180 data->preempt_timestamp = ftrace_now(cpu); 180 data->preempt_timestamp = ftrace_now(cpu);
181 tracing_reset(tr, cpu);
182 trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc); 181 trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
183} 182}
184 183
@@ -208,7 +207,6 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip)
208 data->critical_sequence = max_sequence; 207 data->critical_sequence = max_sequence;
209 data->preempt_timestamp = ftrace_now(cpu); 208 data->preempt_timestamp = ftrace_now(cpu);
210 data->critical_start = parent_ip ? : ip; 209 data->critical_start = parent_ip ? : ip;
211 tracing_reset(tr, cpu);
212 210
213 local_save_flags(flags); 211 local_save_flags(flags);
214 212
@@ -379,6 +377,7 @@ static void __irqsoff_tracer_init(struct trace_array *tr)
379 irqsoff_trace = tr; 377 irqsoff_trace = tr;
380 /* make sure that the tracer is visible */ 378 /* make sure that the tracer is visible */
381 smp_wmb(); 379 smp_wmb();
380 tracing_reset_online_cpus(tr);
382 start_irqsoff_tracer(tr); 381 start_irqsoff_tracer(tr);
383} 382}
384 383
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index d53b45ed0806..c4c9bbda53d3 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -307,11 +307,12 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
307 struct trace_array_cpu *data, 307 struct trace_array_cpu *data,
308 struct mmiotrace_rw *rw) 308 struct mmiotrace_rw *rw)
309{ 309{
310 struct ring_buffer *buffer = tr->buffer;
310 struct ring_buffer_event *event; 311 struct ring_buffer_event *event;
311 struct trace_mmiotrace_rw *entry; 312 struct trace_mmiotrace_rw *entry;
312 int pc = preempt_count(); 313 int pc = preempt_count();
313 314
314 event = trace_buffer_lock_reserve(tr, TRACE_MMIO_RW, 315 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
315 sizeof(*entry), 0, pc); 316 sizeof(*entry), 0, pc);
316 if (!event) { 317 if (!event) {
317 atomic_inc(&dropped_count); 318 atomic_inc(&dropped_count);
@@ -319,7 +320,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
319 } 320 }
320 entry = ring_buffer_event_data(event); 321 entry = ring_buffer_event_data(event);
321 entry->rw = *rw; 322 entry->rw = *rw;
322 trace_buffer_unlock_commit(tr, event, 0, pc); 323 trace_buffer_unlock_commit(buffer, event, 0, pc);
323} 324}
324 325
325void mmio_trace_rw(struct mmiotrace_rw *rw) 326void mmio_trace_rw(struct mmiotrace_rw *rw)
@@ -333,11 +334,12 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
333 struct trace_array_cpu *data, 334 struct trace_array_cpu *data,
334 struct mmiotrace_map *map) 335 struct mmiotrace_map *map)
335{ 336{
337 struct ring_buffer *buffer = tr->buffer;
336 struct ring_buffer_event *event; 338 struct ring_buffer_event *event;
337 struct trace_mmiotrace_map *entry; 339 struct trace_mmiotrace_map *entry;
338 int pc = preempt_count(); 340 int pc = preempt_count();
339 341
340 event = trace_buffer_lock_reserve(tr, TRACE_MMIO_MAP, 342 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
341 sizeof(*entry), 0, pc); 343 sizeof(*entry), 0, pc);
342 if (!event) { 344 if (!event) {
343 atomic_inc(&dropped_count); 345 atomic_inc(&dropped_count);
@@ -345,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
345 } 347 }
346 entry = ring_buffer_event_data(event); 348 entry = ring_buffer_event_data(event);
347 entry->map = *map; 349 entry->map = *map;
348 trace_buffer_unlock_commit(tr, event, 0, pc); 350 trace_buffer_unlock_commit(buffer, event, 0, pc);
349} 351}
350 352
351void mmio_trace_mapping(struct mmiotrace_map *map) 353void mmio_trace_mapping(struct mmiotrace_map *map)
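
Like the rest of this series, the mmiotrace writers now take the struct ring_buffer * explicitly (buffer = tr->buffer) and hand it to the reserve and commit calls instead of passing the trace_array around. A toy userspace sketch of the reserve, fill, commit shape; the buffer type and helpers here are stand-ins, not the real ring-buffer API:

#include <stdio.h>

struct ring_buffer {
	unsigned char data[256];
	size_t head;
};

struct mmio_entry {
	unsigned long addr;
	int is_write;
};

/* Reserve len bytes; NULL means the event is dropped. */
static void *buffer_lock_reserve(struct ring_buffer *rb, size_t len)
{
	if (rb->head + len > sizeof(rb->data))
		return NULL;
	return rb->data + rb->head;
}

/* Make the reserved bytes visible to readers. */
static void buffer_unlock_commit(struct ring_buffer *rb, size_t len)
{
	rb->head += len;
}

static void trace_mmio(struct ring_buffer *rb, unsigned long addr, int is_write)
{
	struct mmio_entry *entry;

	entry = buffer_lock_reserve(rb, sizeof(*entry));
	if (!entry)
		return;			/* counted as dropped in real code */
	entry->addr = addr;
	entry->is_write = is_write;
	buffer_unlock_commit(rb, sizeof(*entry));
}

int main(void)
{
	struct ring_buffer rb = { .head = 0 };

	trace_mmio(&rb, 0xfee00000UL, 1);
	printf("%zu bytes committed\n", rb.head);
	return 0;
}
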
diff --git a/kernel/trace/trace_power.c b/kernel/trace/trace_power.c
index 8a30d9874cd4..fe1a00f1445a 100644
--- a/kernel/trace/trace_power.c
+++ b/kernel/trace/trace_power.c
@@ -38,6 +38,7 @@ static void probe_power_end(struct power_trace *it)
38{ 38{
39 struct ftrace_event_call *call = &event_power; 39 struct ftrace_event_call *call = &event_power;
40 struct ring_buffer_event *event; 40 struct ring_buffer_event *event;
41 struct ring_buffer *buffer;
41 struct trace_power *entry; 42 struct trace_power *entry;
42 struct trace_array_cpu *data; 43 struct trace_array_cpu *data;
43 struct trace_array *tr = power_trace; 44 struct trace_array *tr = power_trace;
@@ -45,18 +46,20 @@ static void probe_power_end(struct power_trace *it)
45 if (!trace_power_enabled) 46 if (!trace_power_enabled)
46 return; 47 return;
47 48
49 buffer = tr->buffer;
50
48 preempt_disable(); 51 preempt_disable();
49 it->end = ktime_get(); 52 it->end = ktime_get();
50 data = tr->data[smp_processor_id()]; 53 data = tr->data[smp_processor_id()];
51 54
52 event = trace_buffer_lock_reserve(tr, TRACE_POWER, 55 event = trace_buffer_lock_reserve(buffer, TRACE_POWER,
53 sizeof(*entry), 0, 0); 56 sizeof(*entry), 0, 0);
54 if (!event) 57 if (!event)
55 goto out; 58 goto out;
56 entry = ring_buffer_event_data(event); 59 entry = ring_buffer_event_data(event);
57 entry->state_data = *it; 60 entry->state_data = *it;
58 if (!filter_check_discard(call, entry, tr->buffer, event)) 61 if (!filter_check_discard(call, entry, buffer, event))
59 trace_buffer_unlock_commit(tr, event, 0, 0); 62 trace_buffer_unlock_commit(buffer, event, 0, 0);
60 out: 63 out:
61 preempt_enable(); 64 preempt_enable();
62} 65}
@@ -66,6 +69,7 @@ static void probe_power_mark(struct power_trace *it, unsigned int type,
66{ 69{
67 struct ftrace_event_call *call = &event_power; 70 struct ftrace_event_call *call = &event_power;
68 struct ring_buffer_event *event; 71 struct ring_buffer_event *event;
72 struct ring_buffer *buffer;
69 struct trace_power *entry; 73 struct trace_power *entry;
70 struct trace_array_cpu *data; 74 struct trace_array_cpu *data;
71 struct trace_array *tr = power_trace; 75 struct trace_array *tr = power_trace;
@@ -73,6 +77,8 @@ static void probe_power_mark(struct power_trace *it, unsigned int type,
73 if (!trace_power_enabled) 77 if (!trace_power_enabled)
74 return; 78 return;
75 79
80 buffer = tr->buffer;
81
76 memset(it, 0, sizeof(struct power_trace)); 82 memset(it, 0, sizeof(struct power_trace));
77 it->state = level; 83 it->state = level;
78 it->type = type; 84 it->type = type;
@@ -81,14 +87,14 @@ static void probe_power_mark(struct power_trace *it, unsigned int type,
81 it->end = it->stamp; 87 it->end = it->stamp;
82 data = tr->data[smp_processor_id()]; 88 data = tr->data[smp_processor_id()];
83 89
84 event = trace_buffer_lock_reserve(tr, TRACE_POWER, 90 event = trace_buffer_lock_reserve(buffer, TRACE_POWER,
85 sizeof(*entry), 0, 0); 91 sizeof(*entry), 0, 0);
86 if (!event) 92 if (!event)
87 goto out; 93 goto out;
88 entry = ring_buffer_event_data(event); 94 entry = ring_buffer_event_data(event);
89 entry->state_data = *it; 95 entry->state_data = *it;
90 if (!filter_check_discard(call, entry, tr->buffer, event)) 96 if (!filter_check_discard(call, entry, buffer, event))
91 trace_buffer_unlock_commit(tr, event, 0, 0); 97 trace_buffer_unlock_commit(buffer, event, 0, 0);
92 out: 98 out:
93 preempt_enable(); 99 preempt_enable();
94} 100}
@@ -144,14 +150,12 @@ static void power_trace_reset(struct trace_array *tr)
144 150
145static int power_trace_init(struct trace_array *tr) 151static int power_trace_init(struct trace_array *tr)
146{ 152{
147 int cpu;
148 power_trace = tr; 153 power_trace = tr;
149 154
150 trace_power_enabled = 1; 155 trace_power_enabled = 1;
151 tracing_power_register(); 156 tracing_power_register();
152 157
153 for_each_cpu(cpu, cpu_possible_mask) 158 tracing_reset_online_cpus(tr);
154 tracing_reset(tr, cpu);
155 return 0; 159 return 0;
156} 160}
157 161
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index a98106dd979c..5fca0f51fde4 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -20,6 +20,35 @@ static int sched_ref;
20static DEFINE_MUTEX(sched_register_mutex); 20static DEFINE_MUTEX(sched_register_mutex);
21static int sched_stopped; 21static int sched_stopped;
22 22
23
24void
25tracing_sched_switch_trace(struct trace_array *tr,
26 struct task_struct *prev,
27 struct task_struct *next,
28 unsigned long flags, int pc)
29{
30 struct ftrace_event_call *call = &event_context_switch;
31 struct ring_buffer *buffer = tr->buffer;
32 struct ring_buffer_event *event;
33 struct ctx_switch_entry *entry;
34
35 event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
36 sizeof(*entry), flags, pc);
37 if (!event)
38 return;
39 entry = ring_buffer_event_data(event);
40 entry->prev_pid = prev->pid;
41 entry->prev_prio = prev->prio;
42 entry->prev_state = prev->state;
43 entry->next_pid = next->pid;
44 entry->next_prio = next->prio;
45 entry->next_state = next->state;
46 entry->next_cpu = task_cpu(next);
47
48 if (!filter_check_discard(call, entry, buffer, event))
49 trace_buffer_unlock_commit(buffer, event, flags, pc);
50}
51
23static void 52static void
24probe_sched_switch(struct rq *__rq, struct task_struct *prev, 53probe_sched_switch(struct rq *__rq, struct task_struct *prev,
25 struct task_struct *next) 54 struct task_struct *next)
@@ -49,6 +78,36 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev,
49 local_irq_restore(flags); 78 local_irq_restore(flags);
50} 79}
51 80
81void
82tracing_sched_wakeup_trace(struct trace_array *tr,
83 struct task_struct *wakee,
84 struct task_struct *curr,
85 unsigned long flags, int pc)
86{
87 struct ftrace_event_call *call = &event_wakeup;
88 struct ring_buffer_event *event;
89 struct ctx_switch_entry *entry;
90 struct ring_buffer *buffer = tr->buffer;
91
92 event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
93 sizeof(*entry), flags, pc);
94 if (!event)
95 return;
96 entry = ring_buffer_event_data(event);
97 entry->prev_pid = curr->pid;
98 entry->prev_prio = curr->prio;
99 entry->prev_state = curr->state;
100 entry->next_pid = wakee->pid;
101 entry->next_prio = wakee->prio;
102 entry->next_state = wakee->state;
103 entry->next_cpu = task_cpu(wakee);
104
105 if (!filter_check_discard(call, entry, buffer, event))
106 ring_buffer_unlock_commit(buffer, event);
107 ftrace_trace_stack(tr->buffer, flags, 6, pc);
108 ftrace_trace_userstack(tr->buffer, flags, pc);
109}
110
52static void 111static void
53probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee, int success) 112probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee, int success)
54{ 113{
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index eacb27225173..ad69f105a7c6 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -186,11 +186,6 @@ out:
186 186
187static void __wakeup_reset(struct trace_array *tr) 187static void __wakeup_reset(struct trace_array *tr)
188{ 188{
189 int cpu;
190
191 for_each_possible_cpu(cpu)
192 tracing_reset(tr, cpu);
193
194 wakeup_cpu = -1; 189 wakeup_cpu = -1;
195 wakeup_prio = -1; 190 wakeup_prio = -1;
196 191
@@ -204,6 +199,8 @@ static void wakeup_reset(struct trace_array *tr)
204{ 199{
205 unsigned long flags; 200 unsigned long flags;
206 201
202 tracing_reset_online_cpus(tr);
203
207 local_irq_save(flags); 204 local_irq_save(flags);
208 __raw_spin_lock(&wakeup_lock); 205 __raw_spin_lock(&wakeup_lock);
209 __wakeup_reset(tr); 206 __wakeup_reset(tr);
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 00dd6485bdd7..d2cdbabb4ead 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -288,6 +288,7 @@ trace_selftest_startup_function_graph(struct tracer *trace,
288 * to detect and recover from possible hangs 288 * to detect and recover from possible hangs
289 */ 289 */
290 tracing_reset_online_cpus(tr); 290 tracing_reset_online_cpus(tr);
291 set_graph_array(tr);
291 ret = register_ftrace_graph(&trace_graph_return, 292 ret = register_ftrace_graph(&trace_graph_return,
292 &trace_graph_entry_watchdog); 293 &trace_graph_entry_watchdog);
293 if (ret) { 294 if (ret) {
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 6a2a9d484cd6..0f6facb050a1 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -186,43 +186,33 @@ static const struct file_operations stack_max_size_fops = {
186}; 186};
187 187
188static void * 188static void *
189t_next(struct seq_file *m, void *v, loff_t *pos) 189__next(struct seq_file *m, loff_t *pos)
190{ 190{
191 long i; 191 long n = *pos - 1;
192 192
193 (*pos)++; 193 if (n >= max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
194
195 if (v == SEQ_START_TOKEN)
196 i = 0;
197 else {
198 i = *(long *)v;
199 i++;
200 }
201
202 if (i >= max_stack_trace.nr_entries ||
203 stack_dump_trace[i] == ULONG_MAX)
204 return NULL; 194 return NULL;
205 195
206 m->private = (void *)i; 196 m->private = (void *)n;
207
208 return &m->private; 197 return &m->private;
209} 198}
210 199
211static void *t_start(struct seq_file *m, loff_t *pos) 200static void *
201t_next(struct seq_file *m, void *v, loff_t *pos)
212{ 202{
213 void *t = SEQ_START_TOKEN; 203 (*pos)++;
214 loff_t l = 0; 204 return __next(m, pos);
205}
215 206
207static void *t_start(struct seq_file *m, loff_t *pos)
208{
216 local_irq_disable(); 209 local_irq_disable();
217 __raw_spin_lock(&max_stack_lock); 210 __raw_spin_lock(&max_stack_lock);
218 211
219 if (*pos == 0) 212 if (*pos == 0)
220 return SEQ_START_TOKEN; 213 return SEQ_START_TOKEN;
221 214
222 for (; t && l < *pos; t = t_next(m, t, &l)) 215 return __next(m, pos);
223 ;
224
225 return t;
226} 216}
227 217
228static void t_stop(struct seq_file *m, void *p) 218static void t_stop(struct seq_file *m, void *p)
@@ -234,15 +224,8 @@ static void t_stop(struct seq_file *m, void *p)
234static int trace_lookup_stack(struct seq_file *m, long i) 224static int trace_lookup_stack(struct seq_file *m, long i)
235{ 225{
236 unsigned long addr = stack_dump_trace[i]; 226 unsigned long addr = stack_dump_trace[i];
237#ifdef CONFIG_KALLSYMS
238 char str[KSYM_SYMBOL_LEN];
239
240 sprint_symbol(str, addr);
241 227
242 return seq_printf(m, "%s\n", str); 228 return seq_printf(m, "%pF\n", (void *)addr);
243#else
244 return seq_printf(m, "%p\n", (void*)addr);
245#endif
246} 229}
247 230
248static void print_disabled(struct seq_file *m) 231static void print_disabled(struct seq_file *m)
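
The trace_stack.c rewrite collapses the iterator onto one __next() helper, so t_start() no longer has to loop over t_next() to reach *pos and t_next() just advances the position. A minimal userspace sketch of that seq_file-style start/next shape over a plain array (toy types; the real callbacks hand void * cookies back to seq_read()):

#include <stdio.h>

#define START_TOKEN ((void *)1)		/* stands in for SEQ_START_TOKEN */

static long entries[] = { 10, 20, 30 };
#define NR_ENTRIES (sizeof(entries) / sizeof(entries[0]))

/* Shared helper: position 1..N maps to entries[0..N-1], NULL past the end. */
static void *__next(long *pos)
{
	long n = *pos - 1;

	if (n < 0 || (size_t)n >= NR_ENTRIES)
		return NULL;
	return &entries[n];
}

static void *t_start(long *pos)
{
	if (*pos == 0)
		return START_TOKEN;	/* position 0 prints the header */
	return __next(pos);
}

static void *t_next(long *pos)
{
	(*pos)++;
	return __next(pos);
}

int main(void)
{
	long pos = 0;
	void *v;

	for (v = t_start(&pos); v; v = t_next(&pos)) {
		if (v == START_TOKEN)
			printf("header\n");
		else
			printf("%ld\n", *(long *)v);
	}
	return 0;
}
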
diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c
index aea321c82fa0..a4bb239eb987 100644
--- a/kernel/trace/trace_stat.c
+++ b/kernel/trace/trace_stat.c
@@ -49,7 +49,8 @@ static struct dentry *stat_dir;
49 * but it will at least advance closer to the next one 49 * but it will at least advance closer to the next one
50 * to be released. 50 * to be released.
51 */ 51 */
52static struct rb_node *release_next(struct rb_node *node) 52static struct rb_node *release_next(struct tracer_stat *ts,
53 struct rb_node *node)
53{ 54{
54 struct stat_node *snode; 55 struct stat_node *snode;
55 struct rb_node *parent = rb_parent(node); 56 struct rb_node *parent = rb_parent(node);
@@ -67,6 +68,8 @@ static struct rb_node *release_next(struct rb_node *node)
67 parent->rb_right = NULL; 68 parent->rb_right = NULL;
68 69
69 snode = container_of(node, struct stat_node, node); 70 snode = container_of(node, struct stat_node, node);
71 if (ts->stat_release)
72 ts->stat_release(snode->stat);
70 kfree(snode); 73 kfree(snode);
71 74
72 return parent; 75 return parent;
@@ -78,7 +81,7 @@ static void __reset_stat_session(struct stat_session *session)
78 struct rb_node *node = session->stat_root.rb_node; 81 struct rb_node *node = session->stat_root.rb_node;
79 82
80 while (node) 83 while (node)
81 node = release_next(node); 84 node = release_next(session->ts, node);
82 85
83 session->stat_root = RB_ROOT; 86 session->stat_root = RB_ROOT;
84} 87}
@@ -200,17 +203,21 @@ static void *stat_seq_start(struct seq_file *s, loff_t *pos)
200{ 203{
201 struct stat_session *session = s->private; 204 struct stat_session *session = s->private;
202 struct rb_node *node; 205 struct rb_node *node;
206 int n = *pos;
203 int i; 207 int i;
204 208
205 /* Prevent from tracer switch or rbtree modification */ 209 /* Prevent from tracer switch or rbtree modification */
206 mutex_lock(&session->stat_mutex); 210 mutex_lock(&session->stat_mutex);
207 211
208 /* If we are in the beginning of the file, print the headers */ 212 /* If we are in the beginning of the file, print the headers */
209 if (!*pos && session->ts->stat_headers) 213 if (session->ts->stat_headers) {
210 return SEQ_START_TOKEN; 214 if (n == 0)
215 return SEQ_START_TOKEN;
216 n--;
217 }
211 218
212 node = rb_first(&session->stat_root); 219 node = rb_first(&session->stat_root);
213 for (i = 0; node && i < *pos; i++) 220 for (i = 0; node && i < n; i++)
214 node = rb_next(node); 221 node = rb_next(node);
215 222
216 return node; 223 return node;
diff --git a/kernel/trace/trace_stat.h b/kernel/trace/trace_stat.h
index f3546a2cd826..8f03914b9a6a 100644
--- a/kernel/trace/trace_stat.h
+++ b/kernel/trace/trace_stat.h
@@ -18,6 +18,8 @@ struct tracer_stat {
18 int (*stat_cmp)(void *p1, void *p2); 18 int (*stat_cmp)(void *p1, void *p2);
19 /* Print a stat entry */ 19 /* Print a stat entry */
20 int (*stat_show)(struct seq_file *s, void *p); 20 int (*stat_show)(struct seq_file *s, void *p);
21 /* Release an entry */
22 void (*stat_release)(void *stat);
21 /* Print the headers of your stat entries */ 23 /* Print the headers of your stat entries */
22 int (*stat_headers)(struct seq_file *s); 24 int (*stat_headers)(struct seq_file *s);
23}; 25};
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 5e579645ac86..8712ce3c6a0e 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -1,30 +1,18 @@
1#include <trace/syscall.h> 1#include <trace/syscall.h>
2#include <trace/events/syscalls.h>
2#include <linux/kernel.h> 3#include <linux/kernel.h>
4#include <linux/ftrace.h>
5#include <linux/perf_counter.h>
3#include <asm/syscall.h> 6#include <asm/syscall.h>
4 7
5#include "trace_output.h" 8#include "trace_output.h"
6#include "trace.h" 9#include "trace.h"
7 10
8/* Keep a counter of the syscall tracing users */
9static int refcount;
10
11/* Prevent from races on thread flags toggling */
12static DEFINE_MUTEX(syscall_trace_lock); 11static DEFINE_MUTEX(syscall_trace_lock);
13 12static int sys_refcount_enter;
14/* Option to display the parameters types */ 13static int sys_refcount_exit;
15enum { 14static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
16 TRACE_SYSCALLS_OPT_TYPES = 0x1, 15static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);
17};
18
19static struct tracer_opt syscalls_opts[] = {
20 { TRACER_OPT(syscall_arg_type, TRACE_SYSCALLS_OPT_TYPES) },
21 { }
22};
23
24static struct tracer_flags syscalls_flags = {
25 .val = 0, /* By default: no parameters types */
26 .opts = syscalls_opts
27};
28 16
29enum print_line_t 17enum print_line_t
30print_syscall_enter(struct trace_iterator *iter, int flags) 18print_syscall_enter(struct trace_iterator *iter, int flags)
@@ -35,35 +23,46 @@ print_syscall_enter(struct trace_iterator *iter, int flags)
35 struct syscall_metadata *entry; 23 struct syscall_metadata *entry;
36 int i, ret, syscall; 24 int i, ret, syscall;
37 25
38 trace_assign_type(trace, ent); 26 trace = (typeof(trace))ent;
39
40 syscall = trace->nr; 27 syscall = trace->nr;
41
42 entry = syscall_nr_to_meta(syscall); 28 entry = syscall_nr_to_meta(syscall);
29
43 if (!entry) 30 if (!entry)
44 goto end; 31 goto end;
45 32
33 if (entry->enter_id != ent->type) {
34 WARN_ON_ONCE(1);
35 goto end;
36 }
37
46 ret = trace_seq_printf(s, "%s(", entry->name); 38 ret = trace_seq_printf(s, "%s(", entry->name);
47 if (!ret) 39 if (!ret)
48 return TRACE_TYPE_PARTIAL_LINE; 40 return TRACE_TYPE_PARTIAL_LINE;
49 41
50 for (i = 0; i < entry->nb_args; i++) { 42 for (i = 0; i < entry->nb_args; i++) {
51 /* parameter types */ 43 /* parameter types */
52 if (syscalls_flags.val & TRACE_SYSCALLS_OPT_TYPES) { 44 if (trace_flags & TRACE_ITER_VERBOSE) {
53 ret = trace_seq_printf(s, "%s ", entry->types[i]); 45 ret = trace_seq_printf(s, "%s ", entry->types[i]);
54 if (!ret) 46 if (!ret)
55 return TRACE_TYPE_PARTIAL_LINE; 47 return TRACE_TYPE_PARTIAL_LINE;
56 } 48 }
57 /* parameter values */ 49 /* parameter values */
58 ret = trace_seq_printf(s, "%s: %lx%s ", entry->args[i], 50 ret = trace_seq_printf(s, "%s: %lx%s", entry->args[i],
59 trace->args[i], 51 trace->args[i],
60 i == entry->nb_args - 1 ? ")" : ","); 52 i == entry->nb_args - 1 ? "" : ", ");
61 if (!ret) 53 if (!ret)
62 return TRACE_TYPE_PARTIAL_LINE; 54 return TRACE_TYPE_PARTIAL_LINE;
63 } 55 }
64 56
57 ret = trace_seq_putc(s, ')');
58 if (!ret)
59 return TRACE_TYPE_PARTIAL_LINE;
60
65end: 61end:
66 trace_seq_printf(s, "\n"); 62 ret = trace_seq_putc(s, '\n');
63 if (!ret)
64 return TRACE_TYPE_PARTIAL_LINE;
65
67 return TRACE_TYPE_HANDLED; 66 return TRACE_TYPE_HANDLED;
68} 67}
69 68
@@ -77,16 +76,20 @@ print_syscall_exit(struct trace_iterator *iter, int flags)
77 struct syscall_metadata *entry; 76 struct syscall_metadata *entry;
78 int ret; 77 int ret;
79 78
80 trace_assign_type(trace, ent); 79 trace = (typeof(trace))ent;
81
82 syscall = trace->nr; 80 syscall = trace->nr;
83
84 entry = syscall_nr_to_meta(syscall); 81 entry = syscall_nr_to_meta(syscall);
82
85 if (!entry) { 83 if (!entry) {
86 trace_seq_printf(s, "\n"); 84 trace_seq_printf(s, "\n");
87 return TRACE_TYPE_HANDLED; 85 return TRACE_TYPE_HANDLED;
88 } 86 }
89 87
88 if (entry->exit_id != ent->type) {
89 WARN_ON_ONCE(1);
90 return TRACE_TYPE_UNHANDLED;
91 }
92
90 ret = trace_seq_printf(s, "%s -> 0x%lx\n", entry->name, 93 ret = trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
91 trace->ret); 94 trace->ret);
92 if (!ret) 95 if (!ret)
@@ -95,62 +98,140 @@ print_syscall_exit(struct trace_iterator *iter, int flags)
95 return TRACE_TYPE_HANDLED; 98 return TRACE_TYPE_HANDLED;
96} 99}
97 100
98void start_ftrace_syscalls(void) 101extern char *__bad_type_size(void);
102
103#define SYSCALL_FIELD(type, name) \
104 sizeof(type) != sizeof(trace.name) ? \
105 __bad_type_size() : \
106 #type, #name, offsetof(typeof(trace), name), sizeof(trace.name)
107
108int syscall_enter_format(struct ftrace_event_call *call, struct trace_seq *s)
99{ 109{
100 unsigned long flags; 110 int i;
101 struct task_struct *g, *t; 111 int nr;
112 int ret;
113 struct syscall_metadata *entry;
114 struct syscall_trace_enter trace;
115 int offset = offsetof(struct syscall_trace_enter, args);
102 116
103 mutex_lock(&syscall_trace_lock); 117 nr = syscall_name_to_nr(call->data);
118 entry = syscall_nr_to_meta(nr);
104 119
105 /* Don't enable the flag on the tasks twice */ 120 if (!entry)
106 if (++refcount != 1) 121 return 0;
107 goto unlock;
108 122
109 arch_init_ftrace_syscalls(); 123 ret = trace_seq_printf(s, "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n",
110 read_lock_irqsave(&tasklist_lock, flags); 124 SYSCALL_FIELD(int, nr));
125 if (!ret)
126 return 0;
111 127
112 do_each_thread(g, t) { 128 for (i = 0; i < entry->nb_args; i++) {
113 set_tsk_thread_flag(t, TIF_SYSCALL_FTRACE); 129 ret = trace_seq_printf(s, "\tfield:%s %s;", entry->types[i],
114 } while_each_thread(g, t); 130 entry->args[i]);
131 if (!ret)
132 return 0;
133 ret = trace_seq_printf(s, "\toffset:%d;\tsize:%zu;\n", offset,
134 sizeof(unsigned long));
135 if (!ret)
136 return 0;
137 offset += sizeof(unsigned long);
138 }
115 139
116 read_unlock_irqrestore(&tasklist_lock, flags); 140 trace_seq_puts(s, "\nprint fmt: \"");
141 for (i = 0; i < entry->nb_args; i++) {
142 ret = trace_seq_printf(s, "%s: 0x%%0%zulx%s", entry->args[i],
143 sizeof(unsigned long),
144 i == entry->nb_args - 1 ? "" : ", ");
145 if (!ret)
146 return 0;
147 }
148 trace_seq_putc(s, '"');
117 149
118unlock: 150 for (i = 0; i < entry->nb_args; i++) {
119 mutex_unlock(&syscall_trace_lock); 151 ret = trace_seq_printf(s, ", ((unsigned long)(REC->%s))",
152 entry->args[i]);
153 if (!ret)
154 return 0;
155 }
156
157 return trace_seq_putc(s, '\n');
120} 158}
121 159
122void stop_ftrace_syscalls(void) 160int syscall_exit_format(struct ftrace_event_call *call, struct trace_seq *s)
123{ 161{
124 unsigned long flags; 162 int ret;
125 struct task_struct *g, *t; 163 struct syscall_trace_exit trace;
126 164
127 mutex_lock(&syscall_trace_lock); 165 ret = trace_seq_printf(s,
166 "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
167 "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n",
168 SYSCALL_FIELD(int, nr),
169 SYSCALL_FIELD(unsigned long, ret));
170 if (!ret)
171 return 0;
128 172
129 /* There are perhaps still some users */ 173 return trace_seq_printf(s, "\nprint fmt: \"0x%%lx\", REC->ret\n");
130 if (--refcount) 174}
131 goto unlock;
132 175
133 read_lock_irqsave(&tasklist_lock, flags); 176int syscall_enter_define_fields(struct ftrace_event_call *call)
177{
178 struct syscall_trace_enter trace;
179 struct syscall_metadata *meta;
180 int ret;
181 int nr;
182 int i;
183 int offset = offsetof(typeof(trace), args);
184
185 nr = syscall_name_to_nr(call->data);
186 meta = syscall_nr_to_meta(nr);
187
188 if (!meta)
189 return 0;
190
191 ret = trace_define_common_fields(call);
192 if (ret)
193 return ret;
194
195 for (i = 0; i < meta->nb_args; i++) {
196 ret = trace_define_field(call, meta->types[i],
197 meta->args[i], offset,
198 sizeof(unsigned long), 0,
199 FILTER_OTHER);
200 offset += sizeof(unsigned long);
201 }
134 202
135 do_each_thread(g, t) { 203 return ret;
136 clear_tsk_thread_flag(t, TIF_SYSCALL_FTRACE); 204}
137 } while_each_thread(g, t);
138 205
139 read_unlock_irqrestore(&tasklist_lock, flags); 206int syscall_exit_define_fields(struct ftrace_event_call *call)
207{
208 struct syscall_trace_exit trace;
209 int ret;
140 210
141unlock: 211 ret = trace_define_common_fields(call);
142 mutex_unlock(&syscall_trace_lock); 212 if (ret)
213 return ret;
214
215 ret = trace_define_field(call, SYSCALL_FIELD(unsigned long, ret), 0,
216 FILTER_OTHER);
217
218 return ret;
143} 219}
144 220
145void ftrace_syscall_enter(struct pt_regs *regs) 221void ftrace_syscall_enter(struct pt_regs *regs, long id)
146{ 222{
147 struct syscall_trace_enter *entry; 223 struct syscall_trace_enter *entry;
148 struct syscall_metadata *sys_data; 224 struct syscall_metadata *sys_data;
149 struct ring_buffer_event *event; 225 struct ring_buffer_event *event;
226 struct ring_buffer *buffer;
150 int size; 227 int size;
151 int syscall_nr; 228 int syscall_nr;
152 229
153 syscall_nr = syscall_get_nr(current, regs); 230 syscall_nr = syscall_get_nr(current, regs);
231 if (syscall_nr < 0)
232 return;
233 if (!test_bit(syscall_nr, enabled_enter_syscalls))
234 return;
154 235
155 sys_data = syscall_nr_to_meta(syscall_nr); 236 sys_data = syscall_nr_to_meta(syscall_nr);
156 if (!sys_data) 237 if (!sys_data)
@@ -158,8 +239,8 @@ void ftrace_syscall_enter(struct pt_regs *regs)
158 239
159 size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args; 240 size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;
160 241
161 event = trace_current_buffer_lock_reserve(TRACE_SYSCALL_ENTER, size, 242 event = trace_current_buffer_lock_reserve(&buffer, sys_data->enter_id,
162 0, 0); 243 size, 0, 0);
163 if (!event) 244 if (!event)
164 return; 245 return;
165 246
@@ -167,24 +248,30 @@ void ftrace_syscall_enter(struct pt_regs *regs)
167 entry->nr = syscall_nr; 248 entry->nr = syscall_nr;
168 syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args); 249 syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);
169 250
170 trace_current_buffer_unlock_commit(event, 0, 0); 251 if (!filter_current_check_discard(buffer, sys_data->enter_event,
171 trace_wake_up(); 252 entry, event))
253 trace_current_buffer_unlock_commit(buffer, event, 0, 0);
172} 254}
173 255
174void ftrace_syscall_exit(struct pt_regs *regs) 256void ftrace_syscall_exit(struct pt_regs *regs, long ret)
175{ 257{
176 struct syscall_trace_exit *entry; 258 struct syscall_trace_exit *entry;
177 struct syscall_metadata *sys_data; 259 struct syscall_metadata *sys_data;
178 struct ring_buffer_event *event; 260 struct ring_buffer_event *event;
261 struct ring_buffer *buffer;
179 int syscall_nr; 262 int syscall_nr;
180 263
181 syscall_nr = syscall_get_nr(current, regs); 264 syscall_nr = syscall_get_nr(current, regs);
265 if (syscall_nr < 0)
266 return;
267 if (!test_bit(syscall_nr, enabled_exit_syscalls))
268 return;
182 269
183 sys_data = syscall_nr_to_meta(syscall_nr); 270 sys_data = syscall_nr_to_meta(syscall_nr);
184 if (!sys_data) 271 if (!sys_data)
185 return; 272 return;
186 273
187 event = trace_current_buffer_lock_reserve(TRACE_SYSCALL_EXIT, 274 event = trace_current_buffer_lock_reserve(&buffer, sys_data->exit_id,
188 sizeof(*entry), 0, 0); 275 sizeof(*entry), 0, 0);
189 if (!event) 276 if (!event)
190 return; 277 return;
@@ -193,58 +280,244 @@ void ftrace_syscall_exit(struct pt_regs *regs)
193 entry->nr = syscall_nr; 280 entry->nr = syscall_nr;
194 entry->ret = syscall_get_return_value(current, regs); 281 entry->ret = syscall_get_return_value(current, regs);
195 282
196 trace_current_buffer_unlock_commit(event, 0, 0); 283 if (!filter_current_check_discard(buffer, sys_data->exit_event,
197 trace_wake_up(); 284 entry, event))
285 trace_current_buffer_unlock_commit(buffer, event, 0, 0);
198} 286}
199 287
200static int init_syscall_tracer(struct trace_array *tr) 288int reg_event_syscall_enter(void *ptr)
201{ 289{
202 start_ftrace_syscalls(); 290 int ret = 0;
291 int num;
292 char *name;
293
294 name = (char *)ptr;
295 num = syscall_name_to_nr(name);
296 if (num < 0 || num >= NR_syscalls)
297 return -ENOSYS;
298 mutex_lock(&syscall_trace_lock);
299 if (!sys_refcount_enter)
300 ret = register_trace_sys_enter(ftrace_syscall_enter);
301 if (ret) {
 302		pr_info("event trace: Could not activate "
 303			"syscall entry trace point");
304 } else {
305 set_bit(num, enabled_enter_syscalls);
306 sys_refcount_enter++;
307 }
308 mutex_unlock(&syscall_trace_lock);
309 return ret;
310}
311
312void unreg_event_syscall_enter(void *ptr)
313{
314 int num;
315 char *name;
203 316
204 return 0; 317 name = (char *)ptr;
318 num = syscall_name_to_nr(name);
319 if (num < 0 || num >= NR_syscalls)
320 return;
321 mutex_lock(&syscall_trace_lock);
322 sys_refcount_enter--;
323 clear_bit(num, enabled_enter_syscalls);
324 if (!sys_refcount_enter)
325 unregister_trace_sys_enter(ftrace_syscall_enter);
326 mutex_unlock(&syscall_trace_lock);
205} 327}
206 328
207static void reset_syscall_tracer(struct trace_array *tr) 329int reg_event_syscall_exit(void *ptr)
208{ 330{
209 stop_ftrace_syscalls(); 331 int ret = 0;
210 tracing_reset_online_cpus(tr); 332 int num;
333 char *name;
334
335 name = (char *)ptr;
336 num = syscall_name_to_nr(name);
337 if (num < 0 || num >= NR_syscalls)
338 return -ENOSYS;
339 mutex_lock(&syscall_trace_lock);
340 if (!sys_refcount_exit)
341 ret = register_trace_sys_exit(ftrace_syscall_exit);
342 if (ret) {
 343		pr_info("event trace: Could not activate "
 344			"syscall exit trace point");
345 } else {
346 set_bit(num, enabled_exit_syscalls);
347 sys_refcount_exit++;
348 }
349 mutex_unlock(&syscall_trace_lock);
350 return ret;
211} 351}
212 352
213static struct trace_event syscall_enter_event = { 353void unreg_event_syscall_exit(void *ptr)
214 .type = TRACE_SYSCALL_ENTER, 354{
215 .trace = print_syscall_enter, 355 int num;
216}; 356 char *name;
357
358 name = (char *)ptr;
359 num = syscall_name_to_nr(name);
360 if (num < 0 || num >= NR_syscalls)
361 return;
362 mutex_lock(&syscall_trace_lock);
363 sys_refcount_exit--;
364 clear_bit(num, enabled_exit_syscalls);
365 if (!sys_refcount_exit)
366 unregister_trace_sys_exit(ftrace_syscall_exit);
367 mutex_unlock(&syscall_trace_lock);
368}
217 369
218static struct trace_event syscall_exit_event = { 370struct trace_event event_syscall_enter = {
219 .type = TRACE_SYSCALL_EXIT, 371 .trace = print_syscall_enter,
220 .trace = print_syscall_exit,
221}; 372};
222 373
223static struct tracer syscall_tracer __read_mostly = { 374struct trace_event event_syscall_exit = {
224 .name = "syscall", 375 .trace = print_syscall_exit,
225 .init = init_syscall_tracer,
226 .reset = reset_syscall_tracer,
227 .flags = &syscalls_flags,
228}; 376};
229 377
230__init int register_ftrace_syscalls(void) 378#ifdef CONFIG_EVENT_PROFILE
379
380static DECLARE_BITMAP(enabled_prof_enter_syscalls, NR_syscalls);
381static DECLARE_BITMAP(enabled_prof_exit_syscalls, NR_syscalls);
382static int sys_prof_refcount_enter;
383static int sys_prof_refcount_exit;
384
385static void prof_syscall_enter(struct pt_regs *regs, long id)
231{ 386{
232 int ret; 387 struct syscall_trace_enter *rec;
388 struct syscall_metadata *sys_data;
389 int syscall_nr;
390 int size;
233 391
234 ret = register_ftrace_event(&syscall_enter_event); 392 syscall_nr = syscall_get_nr(current, regs);
235 if (!ret) { 393 if (!test_bit(syscall_nr, enabled_prof_enter_syscalls))
236 printk(KERN_WARNING "event %d failed to register\n", 394 return;
237 syscall_enter_event.type); 395
238 WARN_ON_ONCE(1); 396 sys_data = syscall_nr_to_meta(syscall_nr);
397 if (!sys_data)
398 return;
399
400 /* get the size after alignment with the u32 buffer size field */
401 size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
402 size = ALIGN(size + sizeof(u32), sizeof(u64));
403 size -= sizeof(u32);
404
405 do {
406 char raw_data[size];
407
 408		/* zero the dead bytes left by alignment so no stack data leaks to userspace */
409 *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
410
411 rec = (struct syscall_trace_enter *) raw_data;
412 tracing_generic_entry_update(&rec->ent, 0, 0);
413 rec->ent.type = sys_data->enter_id;
414 rec->nr = syscall_nr;
415 syscall_get_arguments(current, regs, 0, sys_data->nb_args,
416 (unsigned long *)&rec->args);
417 perf_tpcounter_event(sys_data->enter_id, 0, 1, rec, size);
418 } while(0);
419}
420
421int reg_prof_syscall_enter(char *name)
422{
423 int ret = 0;
424 int num;
425
426 num = syscall_name_to_nr(name);
427 if (num < 0 || num >= NR_syscalls)
428 return -ENOSYS;
429
430 mutex_lock(&syscall_trace_lock);
431 if (!sys_prof_refcount_enter)
432 ret = register_trace_sys_enter(prof_syscall_enter);
433 if (ret) {
 434		pr_info("event trace: Could not activate "
 435			"syscall entry trace point");
436 } else {
437 set_bit(num, enabled_prof_enter_syscalls);
438 sys_prof_refcount_enter++;
239 } 439 }
440 mutex_unlock(&syscall_trace_lock);
441 return ret;
442}
240 443
241 ret = register_ftrace_event(&syscall_exit_event); 444void unreg_prof_syscall_enter(char *name)
242 if (!ret) { 445{
243 printk(KERN_WARNING "event %d failed to register\n", 446 int num;
244 syscall_exit_event.type); 447
245 WARN_ON_ONCE(1); 448 num = syscall_name_to_nr(name);
449 if (num < 0 || num >= NR_syscalls)
450 return;
451
452 mutex_lock(&syscall_trace_lock);
453 sys_prof_refcount_enter--;
454 clear_bit(num, enabled_prof_enter_syscalls);
455 if (!sys_prof_refcount_enter)
456 unregister_trace_sys_enter(prof_syscall_enter);
457 mutex_unlock(&syscall_trace_lock);
458}
459
460static void prof_syscall_exit(struct pt_regs *regs, long ret)
461{
462 struct syscall_metadata *sys_data;
463 struct syscall_trace_exit rec;
464 int syscall_nr;
465
466 syscall_nr = syscall_get_nr(current, regs);
467 if (!test_bit(syscall_nr, enabled_prof_exit_syscalls))
468 return;
469
470 sys_data = syscall_nr_to_meta(syscall_nr);
471 if (!sys_data)
472 return;
473
474 tracing_generic_entry_update(&rec.ent, 0, 0);
475 rec.ent.type = sys_data->exit_id;
476 rec.nr = syscall_nr;
477 rec.ret = syscall_get_return_value(current, regs);
478
479 perf_tpcounter_event(sys_data->exit_id, 0, 1, &rec, sizeof(rec));
480}
481
482int reg_prof_syscall_exit(char *name)
483{
484 int ret = 0;
485 int num;
486
487 num = syscall_name_to_nr(name);
488 if (num < 0 || num >= NR_syscalls)
489 return -ENOSYS;
490
491 mutex_lock(&syscall_trace_lock);
492 if (!sys_prof_refcount_exit)
493 ret = register_trace_sys_exit(prof_syscall_exit);
494 if (ret) {
 495		pr_info("event trace: Could not activate "
 496			"syscall exit trace point");
497 } else {
498 set_bit(num, enabled_prof_exit_syscalls);
499 sys_prof_refcount_exit++;
246 } 500 }
501 mutex_unlock(&syscall_trace_lock);
502 return ret;
503}
247 504
248 return register_tracer(&syscall_tracer); 505void unreg_prof_syscall_exit(char *name)
506{
507 int num;
508
509 num = syscall_name_to_nr(name);
510 if (num < 0 || num >= NR_syscalls)
511 return;
512
513 mutex_lock(&syscall_trace_lock);
514 sys_prof_refcount_exit--;
515 clear_bit(num, enabled_prof_exit_syscalls);
516 if (!sys_prof_refcount_exit)
517 unregister_trace_sys_exit(prof_syscall_exit);
518 mutex_unlock(&syscall_trace_lock);
249} 519}
250device_initcall(register_ftrace_syscalls); 520
521#endif
522
523
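As a worked example of the size rounding in prof_syscall_enter() above (assuming, purely for illustration, sizeof(*rec) == 16 and a three-argument syscall on a 64-bit kernel): size starts at 3 * 8 + 16 = 40; ALIGN(40 + 4, 8) gives 48, and subtracting the u32 again leaves 44. The on-stack raw_data[44] plus the 4-byte size field perf accounts for therefore ends exactly on a u64 boundary, and zeroing the last 8 bytes of raw_data up front guarantees the trailing padding bytes never carry stale stack contents into the perf buffer.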
diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
index 97fcea4acce1..40cafb07dffd 100644
--- a/kernel/trace/trace_workqueue.c
+++ b/kernel/trace/trace_workqueue.c
@@ -9,6 +9,7 @@
9#include <trace/events/workqueue.h> 9#include <trace/events/workqueue.h>
10#include <linux/list.h> 10#include <linux/list.h>
11#include <linux/percpu.h> 11#include <linux/percpu.h>
12#include <linux/kref.h>
12#include "trace_stat.h" 13#include "trace_stat.h"
13#include "trace.h" 14#include "trace.h"
14 15
@@ -16,6 +17,7 @@
16/* A cpu workqueue thread */ 17/* A cpu workqueue thread */
17struct cpu_workqueue_stats { 18struct cpu_workqueue_stats {
18 struct list_head list; 19 struct list_head list;
20 struct kref kref;
19 int cpu; 21 int cpu;
20 pid_t pid; 22 pid_t pid;
21/* Can be inserted from interrupt or user context, need to be atomic */ 23/* Can be inserted from interrupt or user context, need to be atomic */
@@ -39,6 +41,11 @@ struct workqueue_global_stats {
39static DEFINE_PER_CPU(struct workqueue_global_stats, all_workqueue_stat); 41static DEFINE_PER_CPU(struct workqueue_global_stats, all_workqueue_stat);
40#define workqueue_cpu_stat(cpu) (&per_cpu(all_workqueue_stat, cpu)) 42#define workqueue_cpu_stat(cpu) (&per_cpu(all_workqueue_stat, cpu))
41 43
44static void cpu_workqueue_stat_free(struct kref *kref)
45{
46 kfree(container_of(kref, struct cpu_workqueue_stats, kref));
47}
48
42/* Insertion of a work */ 49/* Insertion of a work */
43static void 50static void
44probe_workqueue_insertion(struct task_struct *wq_thread, 51probe_workqueue_insertion(struct task_struct *wq_thread,
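The hunks that follow apply the standard kref lifecycle to cpu_workqueue_stats so an entry handed out to the stat iterator cannot be freed underneath it. As a reminder of the pattern (demo_node and its helpers are hypothetical), every lookup takes a reference and the release callback only runs when the last holder drops it:

	#include <linux/kernel.h>
	#include <linux/kref.h>
	#include <linux/slab.h>

	struct demo_node {
		struct kref	kref;
		int		payload;
	};

	static void demo_node_free(struct kref *kref)
	{
		kfree(container_of(kref, struct demo_node, kref));
	}

	static struct demo_node *demo_node_alloc(void)
	{
		struct demo_node *n = kzalloc(sizeof(*n), GFP_KERNEL);

		if (n)
			kref_init(&n->kref);	/* reference count starts at 1 */
		return n;
	}

	static void demo_node_get(struct demo_node *n)
	{
		kref_get(&n->kref);		/* taken by every lookup */
	}

	static void demo_node_put(struct demo_node *n)
	{
		kref_put(&n->kref, demo_node_free);	/* frees on the last put */
	}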
@@ -96,8 +103,8 @@ static void probe_workqueue_creation(struct task_struct *wq_thread, int cpu)
96 return; 103 return;
97 } 104 }
98 INIT_LIST_HEAD(&cws->list); 105 INIT_LIST_HEAD(&cws->list);
106 kref_init(&cws->kref);
99 cws->cpu = cpu; 107 cws->cpu = cpu;
100
101 cws->pid = wq_thread->pid; 108 cws->pid = wq_thread->pid;
102 109
103 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags); 110 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
@@ -118,7 +125,7 @@ static void probe_workqueue_destruction(struct task_struct *wq_thread)
118 list) { 125 list) {
119 if (node->pid == wq_thread->pid) { 126 if (node->pid == wq_thread->pid) {
120 list_del(&node->list); 127 list_del(&node->list);
121 kfree(node); 128 kref_put(&node->kref, cpu_workqueue_stat_free);
122 goto found; 129 goto found;
123 } 130 }
124 } 131 }
@@ -137,9 +144,11 @@ static struct cpu_workqueue_stats *workqueue_stat_start_cpu(int cpu)
137 144
138 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags); 145 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
139 146
140 if (!list_empty(&workqueue_cpu_stat(cpu)->list)) 147 if (!list_empty(&workqueue_cpu_stat(cpu)->list)) {
141 ret = list_entry(workqueue_cpu_stat(cpu)->list.next, 148 ret = list_entry(workqueue_cpu_stat(cpu)->list.next,
142 struct cpu_workqueue_stats, list); 149 struct cpu_workqueue_stats, list);
150 kref_get(&ret->kref);
151 }
143 152
144 spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags); 153 spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
145 154
@@ -162,9 +171,9 @@ static void *workqueue_stat_start(struct tracer_stat *trace)
162static void *workqueue_stat_next(void *prev, int idx) 171static void *workqueue_stat_next(void *prev, int idx)
163{ 172{
164 struct cpu_workqueue_stats *prev_cws = prev; 173 struct cpu_workqueue_stats *prev_cws = prev;
174 struct cpu_workqueue_stats *ret;
165 int cpu = prev_cws->cpu; 175 int cpu = prev_cws->cpu;
166 unsigned long flags; 176 unsigned long flags;
167 void *ret = NULL;
168 177
169 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags); 178 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
170 if (list_is_last(&prev_cws->list, &workqueue_cpu_stat(cpu)->list)) { 179 if (list_is_last(&prev_cws->list, &workqueue_cpu_stat(cpu)->list)) {
@@ -175,11 +184,14 @@ static void *workqueue_stat_next(void *prev, int idx)
175 return NULL; 184 return NULL;
176 } while (!(ret = workqueue_stat_start_cpu(cpu))); 185 } while (!(ret = workqueue_stat_start_cpu(cpu)));
177 return ret; 186 return ret;
187 } else {
188 ret = list_entry(prev_cws->list.next,
189 struct cpu_workqueue_stats, list);
190 kref_get(&ret->kref);
178 } 191 }
179 spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags); 192 spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
180 193
181 return list_entry(prev_cws->list.next, struct cpu_workqueue_stats, 194 return ret;
182 list);
183} 195}
184 196
185static int workqueue_stat_show(struct seq_file *s, void *p) 197static int workqueue_stat_show(struct seq_file *s, void *p)
@@ -203,6 +215,13 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
203 return 0; 215 return 0;
204} 216}
205 217
218static void workqueue_stat_release(void *stat)
219{
220 struct cpu_workqueue_stats *node = stat;
221
222 kref_put(&node->kref, cpu_workqueue_stat_free);
223}
224
206static int workqueue_stat_headers(struct seq_file *s) 225static int workqueue_stat_headers(struct seq_file *s)
207{ 226{
208 seq_printf(s, "# CPU INSERTED EXECUTED NAME\n"); 227 seq_printf(s, "# CPU INSERTED EXECUTED NAME\n");
@@ -215,6 +234,7 @@ struct tracer_stat workqueue_stats __read_mostly = {
215 .stat_start = workqueue_stat_start, 234 .stat_start = workqueue_stat_start,
216 .stat_next = workqueue_stat_next, 235 .stat_next = workqueue_stat_next,
217 .stat_show = workqueue_stat_show, 236 .stat_show = workqueue_stat_show,
237 .stat_release = workqueue_stat_release,
218 .stat_headers = workqueue_stat_headers 238 .stat_headers = workqueue_stat_headers
219}; 239};
220 240
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index 1ef5d3a601c7..9489a0a9b1be 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -24,6 +24,7 @@
24#include <linux/tracepoint.h> 24#include <linux/tracepoint.h>
25#include <linux/err.h> 25#include <linux/err.h>
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/sched.h>
27 28
28extern struct tracepoint __start___tracepoints[]; 29extern struct tracepoint __start___tracepoints[];
29extern struct tracepoint __stop___tracepoints[]; 30extern struct tracepoint __stop___tracepoints[];
@@ -242,6 +243,11 @@ static void set_tracepoint(struct tracepoint_entry **entry,
242{ 243{
243 WARN_ON(strcmp((*entry)->name, elem->name) != 0); 244 WARN_ON(strcmp((*entry)->name, elem->name) != 0);
244 245
246 if (elem->regfunc && !elem->state && active)
247 elem->regfunc();
248 else if (elem->unregfunc && elem->state && !active)
249 elem->unregfunc();
250
245 /* 251 /*
246 * rcu_assign_pointer has a smp_wmb() which makes sure that the new 252 * rcu_assign_pointer has a smp_wmb() which makes sure that the new
247 * probe callbacks array is consistent before setting a pointer to it. 253 * probe callbacks array is consistent before setting a pointer to it.
@@ -261,6 +267,9 @@ static void set_tracepoint(struct tracepoint_entry **entry,
261 */ 267 */
262static void disable_tracepoint(struct tracepoint *elem) 268static void disable_tracepoint(struct tracepoint *elem)
263{ 269{
270 if (elem->unregfunc && elem->state)
271 elem->unregfunc();
272
264 elem->state = 0; 273 elem->state = 0;
265 rcu_assign_pointer(elem->funcs, NULL); 274 rcu_assign_pointer(elem->funcs, NULL);
266} 275}
@@ -554,9 +563,6 @@ int tracepoint_module_notify(struct notifier_block *self,
554 563
555 switch (val) { 564 switch (val) {
556 case MODULE_STATE_COMING: 565 case MODULE_STATE_COMING:
557 tracepoint_update_probe_range(mod->tracepoints,
558 mod->tracepoints + mod->num_tracepoints);
559 break;
560 case MODULE_STATE_GOING: 566 case MODULE_STATE_GOING:
561 tracepoint_update_probe_range(mod->tracepoints, 567 tracepoint_update_probe_range(mod->tracepoints,
562 mod->tracepoints + mod->num_tracepoints); 568 mod->tracepoints + mod->num_tracepoints);
@@ -577,3 +583,41 @@ static int init_tracepoints(void)
577__initcall(init_tracepoints); 583__initcall(init_tracepoints);
578 584
579#endif /* CONFIG_MODULES */ 585#endif /* CONFIG_MODULES */
586
587#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
588
589/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
590static int sys_tracepoint_refcount;
591
592void syscall_regfunc(void)
593{
594 unsigned long flags;
595 struct task_struct *g, *t;
596
597 if (!sys_tracepoint_refcount) {
598 read_lock_irqsave(&tasklist_lock, flags);
599 do_each_thread(g, t) {
600 /* Skip kernel threads. */
601 if (t->mm)
602 set_tsk_thread_flag(t, TIF_SYSCALL_TRACEPOINT);
603 } while_each_thread(g, t);
604 read_unlock_irqrestore(&tasklist_lock, flags);
605 }
606 sys_tracepoint_refcount++;
607}
608
609void syscall_unregfunc(void)
610{
611 unsigned long flags;
612 struct task_struct *g, *t;
613
614 sys_tracepoint_refcount--;
615 if (!sys_tracepoint_refcount) {
616 read_lock_irqsave(&tasklist_lock, flags);
617 do_each_thread(g, t) {
618 clear_tsk_thread_flag(t, TIF_SYSCALL_TRACEPOINT);
619 } while_each_thread(g, t);
620 read_unlock_irqrestore(&tasklist_lock, flags);
621 }
622}
623#endif
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 0668795d8818..addfe2df93b1 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -317,8 +317,6 @@ static int worker_thread(void *__cwq)
317 if (cwq->wq->freezeable) 317 if (cwq->wq->freezeable)
318 set_freezable(); 318 set_freezable();
319 319
320 set_user_nice(current, -5);
321
322 for (;;) { 320 for (;;) {
323 prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE); 321 prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
324 if (!freezing(current) && 322 if (!freezing(current) &&
@@ -600,7 +598,12 @@ static struct workqueue_struct *keventd_wq __read_mostly;
600 * schedule_work - put work task in global workqueue 598 * schedule_work - put work task in global workqueue
601 * @work: job to be done 599 * @work: job to be done
602 * 600 *
603 * This puts a job in the kernel-global workqueue. 601 * Returns zero if @work was already on the kernel-global workqueue and
602 * non-zero otherwise.
603 *
604 * This puts a job in the kernel-global workqueue if it was not already
605 * queued and leaves it in the same position on the kernel-global
606 * workqueue otherwise.
604 */ 607 */
605int schedule_work(struct work_struct *work) 608int schedule_work(struct work_struct *work)
606{ 609{
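With the return value now documented above, here is a hedged usage sketch (my_work, my_deferred_fn and kick_deferred_work are made-up names) of how a caller can act on it:

	#include <linux/kernel.h>
	#include <linux/workqueue.h>

	static void my_deferred_fn(struct work_struct *work)
	{
		pr_info("deferred work ran\n");
	}

	static DECLARE_WORK(my_work, my_deferred_fn);

	static void kick_deferred_work(void)
	{
		/*
		 * Non-zero: the work was not pending and has just been queued
		 * on the kernel-global workqueue.  Zero: it was already queued,
		 * and its position there is left unchanged.
		 */
		if (!schedule_work(&my_work))
			pr_debug("work already pending, not re-queued\n");
	}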
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 12327b2bb785..7dbd5d9c29a4 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -653,6 +653,21 @@ config DEBUG_NOTIFIERS
653 This is a relatively cheap check but if you care about maximum 653 This is a relatively cheap check but if you care about maximum
654 performance, say N. 654 performance, say N.
655 655
656config DEBUG_CREDENTIALS
657 bool "Debug credential management"
658 depends on DEBUG_KERNEL
659 help
660 Enable this to turn on some debug checking for credential
661 management. The additional code keeps track of the number of
662 pointers from task_structs to any given cred struct, and checks to
663 see that this number never exceeds the usage count of the cred
664 struct.
665
666 Furthermore, if SELinux is enabled, this also checks that the
667 security pointer in the cred struct is never seen to be invalid.
668
669 If unsure, say N.
670
656# 671#
657# Select this config option from the architecture Kconfig, if it 672# Select this config option from the architecture Kconfig, if it
658# it is preferred to always offer frame pointers as a config 673# it is preferred to always offer frame pointers as a config
@@ -725,7 +740,7 @@ config RCU_TORTURE_TEST_RUNNABLE
725 740
726config RCU_CPU_STALL_DETECTOR 741config RCU_CPU_STALL_DETECTOR
727 bool "Check for stalled CPUs delaying RCU grace periods" 742 bool "Check for stalled CPUs delaying RCU grace periods"
728 depends on CLASSIC_RCU || TREE_RCU 743 depends on TREE_RCU || TREE_PREEMPT_RCU
729 default n 744 default n
730 help 745 help
731 This option causes RCU to printk information on which 746 This option causes RCU to printk information on which
diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
index f1ed2fe76c65..bd2bea963364 100644
--- a/lib/is_single_threaded.c
+++ b/lib/is_single_threaded.c
@@ -12,34 +12,47 @@
12 12
13#include <linux/sched.h> 13#include <linux/sched.h>
14 14
15/** 15/*
16 * is_single_threaded - Determine if a thread group is single-threaded or not 16 * Returns true if the task does not share ->mm with another thread/process.
17 * @p: A task in the thread group in question
18 *
19 * This returns true if the thread group to which a task belongs is single
20 * threaded, false if it is not.
21 */ 17 */
22bool is_single_threaded(struct task_struct *p) 18bool current_is_single_threaded(void)
23{ 19{
24 struct task_struct *g, *t; 20 struct task_struct *task = current;
25 struct mm_struct *mm = p->mm; 21 struct mm_struct *mm = task->mm;
22 struct task_struct *p, *t;
23 bool ret;
26 24
27 if (atomic_read(&p->signal->count) != 1) 25 if (atomic_read(&task->signal->live) != 1)
28 goto no; 26 return false;
29 27
30 if (atomic_read(&p->mm->mm_users) != 1) { 28 if (atomic_read(&mm->mm_users) == 1)
31 read_lock(&tasklist_lock); 29 return true;
32 do_each_thread(g, t) {
33 if (t->mm == mm && t != p)
34 goto no_unlock;
35 } while_each_thread(g, t);
36 read_unlock(&tasklist_lock);
37 }
38 30
39 return true; 31 ret = false;
32 rcu_read_lock();
33 for_each_process(p) {
34 if (unlikely(p->flags & PF_KTHREAD))
35 continue;
36 if (unlikely(p == task->group_leader))
37 continue;
38
39 t = p;
40 do {
41 if (unlikely(t->mm == mm))
42 goto found;
43 if (likely(t->mm))
44 break;
45 /*
46 * t->mm == NULL. Make sure next_thread/next_task
47 * will see other CLONE_VM tasks which might be
48 * forked before exiting.
49 */
50 smp_rmb();
51 } while_each_thread(p, t);
52 }
53 ret = true;
54found:
55 rcu_read_unlock();
40 56
41no_unlock: 57 return ret;
42 read_unlock(&tasklist_lock);
43no:
44 return false;
45} 58}
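A short usage sketch of the renamed helper (demo_exec_check() is invented, and it assumes the declaration stays in <linux/sched.h> as before); the point of the rewrite is that the answer is only meaningful for the calling task, hence the argument-less form:

	#include <linux/sched.h>
	#include <linux/errno.h>

	static int demo_exec_check(void)
	{
		/*
		 * Only safe to rewrite per-mm state when no other thread or
		 * CLONE_VM task can observe the mm concurrently.
		 */
		if (!current_is_single_threaded())
			return -EAGAIN;

		return 0;
	}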
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index bffe6d7ef9d9..ac25cd28e807 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -114,46 +114,11 @@ setup_io_tlb_npages(char *str)
114__setup("swiotlb=", setup_io_tlb_npages); 114__setup("swiotlb=", setup_io_tlb_npages);
115/* make io_tlb_overflow tunable too? */ 115/* make io_tlb_overflow tunable too? */
116 116
117void * __weak __init swiotlb_alloc_boot(size_t size, unsigned long nslabs) 117/* Note that this doesn't work with highmem page */
118{
119 return alloc_bootmem_low_pages(size);
120}
121
122void * __weak swiotlb_alloc(unsigned order, unsigned long nslabs)
123{
124 return (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order);
125}
126
127dma_addr_t __weak swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr)
128{
129 return paddr;
130}
131
132phys_addr_t __weak swiotlb_bus_to_phys(struct device *hwdev, dma_addr_t baddr)
133{
134 return baddr;
135}
136
137static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev, 118static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
138 volatile void *address) 119 volatile void *address)
139{ 120{
140 return swiotlb_phys_to_bus(hwdev, virt_to_phys(address)); 121 return phys_to_dma(hwdev, virt_to_phys(address));
141}
142
143void * __weak swiotlb_bus_to_virt(struct device *hwdev, dma_addr_t address)
144{
145 return phys_to_virt(swiotlb_bus_to_phys(hwdev, address));
146}
147
148int __weak swiotlb_arch_address_needs_mapping(struct device *hwdev,
149 dma_addr_t addr, size_t size)
150{
151 return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
152}
153
154int __weak swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size)
155{
156 return 0;
157} 122}
158 123
159static void swiotlb_print_info(unsigned long bytes) 124static void swiotlb_print_info(unsigned long bytes)
@@ -189,7 +154,7 @@ swiotlb_init_with_default_size(size_t default_size)
189 /* 154 /*
190 * Get IO TLB memory from the low pages 155 * Get IO TLB memory from the low pages
191 */ 156 */
192 io_tlb_start = swiotlb_alloc_boot(bytes, io_tlb_nslabs); 157 io_tlb_start = alloc_bootmem_low_pages(bytes);
193 if (!io_tlb_start) 158 if (!io_tlb_start)
194 panic("Cannot allocate SWIOTLB buffer"); 159 panic("Cannot allocate SWIOTLB buffer");
195 io_tlb_end = io_tlb_start + bytes; 160 io_tlb_end = io_tlb_start + bytes;
@@ -245,7 +210,8 @@ swiotlb_late_init_with_default_size(size_t default_size)
245 bytes = io_tlb_nslabs << IO_TLB_SHIFT; 210 bytes = io_tlb_nslabs << IO_TLB_SHIFT;
246 211
247 while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) { 212 while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
248 io_tlb_start = swiotlb_alloc(order, io_tlb_nslabs); 213 io_tlb_start = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
214 order);
249 if (io_tlb_start) 215 if (io_tlb_start)
250 break; 216 break;
251 order--; 217 order--;
@@ -315,20 +281,10 @@ cleanup1:
315 return -ENOMEM; 281 return -ENOMEM;
316} 282}
317 283
318static inline int 284static int is_swiotlb_buffer(phys_addr_t paddr)
319address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size)
320{ 285{
321 return swiotlb_arch_address_needs_mapping(hwdev, addr, size); 286 return paddr >= virt_to_phys(io_tlb_start) &&
322} 287 paddr < virt_to_phys(io_tlb_end);
323
324static inline int range_needs_mapping(phys_addr_t paddr, size_t size)
325{
326 return swiotlb_force || swiotlb_arch_range_needs_mapping(paddr, size);
327}
328
329static int is_swiotlb_buffer(char *addr)
330{
331 return addr >= io_tlb_start && addr < io_tlb_end;
332} 288}
333 289
334/* 290/*
@@ -561,9 +517,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
561 dma_mask = hwdev->coherent_dma_mask; 517 dma_mask = hwdev->coherent_dma_mask;
562 518
563 ret = (void *)__get_free_pages(flags, order); 519 ret = (void *)__get_free_pages(flags, order);
564 if (ret && 520 if (ret && swiotlb_virt_to_bus(hwdev, ret) + size > dma_mask) {
565 !is_buffer_dma_capable(dma_mask, swiotlb_virt_to_bus(hwdev, ret),
566 size)) {
567 /* 521 /*
568 * The allocated memory isn't reachable by the device. 522 * The allocated memory isn't reachable by the device.
569 */ 523 */
@@ -585,7 +539,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
585 dev_addr = swiotlb_virt_to_bus(hwdev, ret); 539 dev_addr = swiotlb_virt_to_bus(hwdev, ret);
586 540
587 /* Confirm address can be DMA'd by device */ 541 /* Confirm address can be DMA'd by device */
588 if (!is_buffer_dma_capable(dma_mask, dev_addr, size)) { 542 if (dev_addr + size > dma_mask) {
589 printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n", 543 printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
590 (unsigned long long)dma_mask, 544 (unsigned long long)dma_mask,
591 (unsigned long long)dev_addr); 545 (unsigned long long)dev_addr);
@@ -601,11 +555,13 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent);
601 555
602void 556void
603swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, 557swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
604 dma_addr_t dma_handle) 558 dma_addr_t dev_addr)
605{ 559{
560 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
561
606 WARN_ON(irqs_disabled()); 562 WARN_ON(irqs_disabled());
607 if (!is_swiotlb_buffer(vaddr)) 563 if (!is_swiotlb_buffer(paddr))
608 free_pages((unsigned long) vaddr, get_order(size)); 564 free_pages((unsigned long)vaddr, get_order(size));
609 else 565 else
610 /* DMA_TO_DEVICE to avoid memcpy in unmap_single */ 566 /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
611 do_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE); 567 do_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
@@ -625,12 +581,15 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
625 printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at " 581 printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at "
626 "device %s\n", size, dev ? dev_name(dev) : "?"); 582 "device %s\n", size, dev ? dev_name(dev) : "?");
627 583
628 if (size > io_tlb_overflow && do_panic) { 584 if (size <= io_tlb_overflow || !do_panic)
629 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) 585 return;
630 panic("DMA: Memory would be corrupted\n"); 586
631 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) 587 if (dir == DMA_BIDIRECTIONAL)
632 panic("DMA: Random memory would be DMAed\n"); 588 panic("DMA: Random memory could be DMA accessed\n");
633 } 589 if (dir == DMA_FROM_DEVICE)
590 panic("DMA: Random memory could be DMA written\n");
591 if (dir == DMA_TO_DEVICE)
592 panic("DMA: Random memory could be DMA read\n");
634} 593}
635 594
636/* 595/*
@@ -646,7 +605,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
646 struct dma_attrs *attrs) 605 struct dma_attrs *attrs)
647{ 606{
648 phys_addr_t phys = page_to_phys(page) + offset; 607 phys_addr_t phys = page_to_phys(page) + offset;
649 dma_addr_t dev_addr = swiotlb_phys_to_bus(dev, phys); 608 dma_addr_t dev_addr = phys_to_dma(dev, phys);
650 void *map; 609 void *map;
651 610
652 BUG_ON(dir == DMA_NONE); 611 BUG_ON(dir == DMA_NONE);
@@ -655,8 +614,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
655 * we can safely return the device addr and not worry about bounce 614 * we can safely return the device addr and not worry about bounce
656 * buffering it. 615 * buffering it.
657 */ 616 */
658 if (!address_needs_mapping(dev, dev_addr, size) && 617 if (dma_capable(dev, dev_addr, size) && !swiotlb_force)
659 !range_needs_mapping(phys, size))
660 return dev_addr; 618 return dev_addr;
661 619
662 /* 620 /*
@@ -673,7 +631,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
673 /* 631 /*
674 * Ensure that the address returned is DMA'ble 632 * Ensure that the address returned is DMA'ble
675 */ 633 */
676 if (address_needs_mapping(dev, dev_addr, size)) 634 if (!dma_capable(dev, dev_addr, size))
677 panic("map_single: bounce buffer is not DMA'ble"); 635 panic("map_single: bounce buffer is not DMA'ble");
678 636
679 return dev_addr; 637 return dev_addr;
@@ -691,19 +649,25 @@ EXPORT_SYMBOL_GPL(swiotlb_map_page);
691static void unmap_single(struct device *hwdev, dma_addr_t dev_addr, 649static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
692 size_t size, int dir) 650 size_t size, int dir)
693{ 651{
694 char *dma_addr = swiotlb_bus_to_virt(hwdev, dev_addr); 652 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
695 653
696 BUG_ON(dir == DMA_NONE); 654 BUG_ON(dir == DMA_NONE);
697 655
698 if (is_swiotlb_buffer(dma_addr)) { 656 if (is_swiotlb_buffer(paddr)) {
699 do_unmap_single(hwdev, dma_addr, size, dir); 657 do_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
700 return; 658 return;
701 } 659 }
702 660
703 if (dir != DMA_FROM_DEVICE) 661 if (dir != DMA_FROM_DEVICE)
704 return; 662 return;
705 663
706 dma_mark_clean(dma_addr, size); 664 /*
 665	 * phys_to_virt doesn't work with highmem pages, but we could
 666	 * call dma_mark_clean() with a highmem page here. However, we
667 * are fine since dma_mark_clean() is null on POWERPC. We can
668 * make dma_mark_clean() take a physical address if necessary.
669 */
670 dma_mark_clean(phys_to_virt(paddr), size);
707} 671}
708 672
709void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr, 673void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
@@ -728,19 +692,19 @@ static void
728swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr, 692swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
729 size_t size, int dir, int target) 693 size_t size, int dir, int target)
730{ 694{
731 char *dma_addr = swiotlb_bus_to_virt(hwdev, dev_addr); 695 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
732 696
733 BUG_ON(dir == DMA_NONE); 697 BUG_ON(dir == DMA_NONE);
734 698
735 if (is_swiotlb_buffer(dma_addr)) { 699 if (is_swiotlb_buffer(paddr)) {
736 sync_single(hwdev, dma_addr, size, dir, target); 700 sync_single(hwdev, phys_to_virt(paddr), size, dir, target);
737 return; 701 return;
738 } 702 }
739 703
740 if (dir != DMA_FROM_DEVICE) 704 if (dir != DMA_FROM_DEVICE)
741 return; 705 return;
742 706
743 dma_mark_clean(dma_addr, size); 707 dma_mark_clean(phys_to_virt(paddr), size);
744} 708}
745 709
746void 710void
@@ -817,10 +781,10 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
817 781
818 for_each_sg(sgl, sg, nelems, i) { 782 for_each_sg(sgl, sg, nelems, i) {
819 phys_addr_t paddr = sg_phys(sg); 783 phys_addr_t paddr = sg_phys(sg);
820 dma_addr_t dev_addr = swiotlb_phys_to_bus(hwdev, paddr); 784 dma_addr_t dev_addr = phys_to_dma(hwdev, paddr);
821 785
822 if (range_needs_mapping(paddr, sg->length) || 786 if (swiotlb_force ||
823 address_needs_mapping(hwdev, dev_addr, sg->length)) { 787 !dma_capable(hwdev, dev_addr, sg->length)) {
824 void *map = map_single(hwdev, sg_phys(sg), 788 void *map = map_single(hwdev, sg_phys(sg),
825 sg->length, dir); 789 sg->length, dir);
826 if (!map) { 790 if (!map) {
diff --git a/mm/Makefile b/mm/Makefile
index 5e0bd6426693..147a7a7873c4 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -8,7 +8,7 @@ mmu-$(CONFIG_MMU) := fremap.o highmem.o madvise.o memory.o mincore.o \
8 vmalloc.o 8 vmalloc.o
9 9
10obj-y := bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \ 10obj-y := bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \
11 maccess.o page_alloc.o page-writeback.o pdflush.o \ 11 maccess.o page_alloc.o page-writeback.o \
12 readahead.o swap.o truncate.o vmscan.o shmem.o \ 12 readahead.o swap.o truncate.o vmscan.o shmem.o \
13 prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \ 13 prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \
14 page_isolation.o mm_init.o $(mmu-y) 14 page_isolation.o mm_init.o $(mmu-y)
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index c86edd244294..d3ca0dac1111 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -1,8 +1,11 @@
1 1
2#include <linux/wait.h> 2#include <linux/wait.h>
3#include <linux/backing-dev.h> 3#include <linux/backing-dev.h>
4#include <linux/kthread.h>
5#include <linux/freezer.h>
4#include <linux/fs.h> 6#include <linux/fs.h>
5#include <linux/pagemap.h> 7#include <linux/pagemap.h>
8#include <linux/mm.h>
6#include <linux/sched.h> 9#include <linux/sched.h>
7#include <linux/module.h> 10#include <linux/module.h>
8#include <linux/writeback.h> 11#include <linux/writeback.h>
@@ -14,6 +17,7 @@ void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
14EXPORT_SYMBOL(default_unplug_io_fn); 17EXPORT_SYMBOL(default_unplug_io_fn);
15 18
16struct backing_dev_info default_backing_dev_info = { 19struct backing_dev_info default_backing_dev_info = {
20 .name = "default",
17 .ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE, 21 .ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
18 .state = 0, 22 .state = 0,
19 .capabilities = BDI_CAP_MAP_COPY, 23 .capabilities = BDI_CAP_MAP_COPY,
@@ -22,6 +26,18 @@ struct backing_dev_info default_backing_dev_info = {
22EXPORT_SYMBOL_GPL(default_backing_dev_info); 26EXPORT_SYMBOL_GPL(default_backing_dev_info);
23 27
24static struct class *bdi_class; 28static struct class *bdi_class;
29DEFINE_SPINLOCK(bdi_lock);
30LIST_HEAD(bdi_list);
31LIST_HEAD(bdi_pending_list);
32
33static struct task_struct *sync_supers_tsk;
34static struct timer_list sync_supers_timer;
35
36static int bdi_sync_supers(void *);
37static void sync_supers_timer_fn(unsigned long);
38static void arm_supers_timer(void);
39
40static void bdi_add_default_flusher_task(struct backing_dev_info *bdi);
25 41
26#ifdef CONFIG_DEBUG_FS 42#ifdef CONFIG_DEBUG_FS
27#include <linux/debugfs.h> 43#include <linux/debugfs.h>
@@ -37,9 +53,29 @@ static void bdi_debug_init(void)
37static int bdi_debug_stats_show(struct seq_file *m, void *v) 53static int bdi_debug_stats_show(struct seq_file *m, void *v)
38{ 54{
39 struct backing_dev_info *bdi = m->private; 55 struct backing_dev_info *bdi = m->private;
56 struct bdi_writeback *wb;
40 unsigned long background_thresh; 57 unsigned long background_thresh;
41 unsigned long dirty_thresh; 58 unsigned long dirty_thresh;
42 unsigned long bdi_thresh; 59 unsigned long bdi_thresh;
60 unsigned long nr_dirty, nr_io, nr_more_io, nr_wb;
61 struct inode *inode;
62
63 /*
64 * inode lock is enough here, the bdi->wb_list is protected by
65 * RCU on the reader side
66 */
67 nr_wb = nr_dirty = nr_io = nr_more_io = 0;
68 spin_lock(&inode_lock);
69 list_for_each_entry(wb, &bdi->wb_list, list) {
70 nr_wb++;
71 list_for_each_entry(inode, &wb->b_dirty, i_list)
72 nr_dirty++;
73 list_for_each_entry(inode, &wb->b_io, i_list)
74 nr_io++;
75 list_for_each_entry(inode, &wb->b_more_io, i_list)
76 nr_more_io++;
77 }
78 spin_unlock(&inode_lock);
43 79
44 get_dirty_limits(&background_thresh, &dirty_thresh, &bdi_thresh, bdi); 80 get_dirty_limits(&background_thresh, &dirty_thresh, &bdi_thresh, bdi);
45 81
@@ -49,12 +85,22 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v)
49 "BdiReclaimable: %8lu kB\n" 85 "BdiReclaimable: %8lu kB\n"
50 "BdiDirtyThresh: %8lu kB\n" 86 "BdiDirtyThresh: %8lu kB\n"
51 "DirtyThresh: %8lu kB\n" 87 "DirtyThresh: %8lu kB\n"
52 "BackgroundThresh: %8lu kB\n", 88 "BackgroundThresh: %8lu kB\n"
89 "WriteBack threads:%8lu\n"
90 "b_dirty: %8lu\n"
91 "b_io: %8lu\n"
92 "b_more_io: %8lu\n"
93 "bdi_list: %8u\n"
94 "state: %8lx\n"
95 "wb_mask: %8lx\n"
96 "wb_list: %8u\n"
97 "wb_cnt: %8u\n",
53 (unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)), 98 (unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)),
54 (unsigned long) K(bdi_stat(bdi, BDI_RECLAIMABLE)), 99 (unsigned long) K(bdi_stat(bdi, BDI_RECLAIMABLE)),
55 K(bdi_thresh), 100 K(bdi_thresh), K(dirty_thresh),
56 K(dirty_thresh), 101 K(background_thresh), nr_wb, nr_dirty, nr_io, nr_more_io,
57 K(background_thresh)); 102 !list_empty(&bdi->bdi_list), bdi->state, bdi->wb_mask,
103 !list_empty(&bdi->wb_list), bdi->wb_cnt);
58#undef K 104#undef K
59 105
60 return 0; 106 return 0;
@@ -185,6 +231,13 @@ static int __init default_bdi_init(void)
185{ 231{
186 int err; 232 int err;
187 233
234 sync_supers_tsk = kthread_run(bdi_sync_supers, NULL, "sync_supers");
235 BUG_ON(IS_ERR(sync_supers_tsk));
236
237 init_timer(&sync_supers_timer);
238 setup_timer(&sync_supers_timer, sync_supers_timer_fn, 0);
239 arm_supers_timer();
240
188 err = bdi_init(&default_backing_dev_info); 241 err = bdi_init(&default_backing_dev_info);
189 if (!err) 242 if (!err)
190 bdi_register(&default_backing_dev_info, NULL, "default"); 243 bdi_register(&default_backing_dev_info, NULL, "default");
@@ -193,6 +246,248 @@ static int __init default_bdi_init(void)
193} 246}
194subsys_initcall(default_bdi_init); 247subsys_initcall(default_bdi_init);
195 248
249static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
250{
251 memset(wb, 0, sizeof(*wb));
252
253 wb->bdi = bdi;
254 wb->last_old_flush = jiffies;
255 INIT_LIST_HEAD(&wb->b_dirty);
256 INIT_LIST_HEAD(&wb->b_io);
257 INIT_LIST_HEAD(&wb->b_more_io);
258}
259
260static void bdi_task_init(struct backing_dev_info *bdi,
261 struct bdi_writeback *wb)
262{
263 struct task_struct *tsk = current;
264
265 spin_lock(&bdi->wb_lock);
266 list_add_tail_rcu(&wb->list, &bdi->wb_list);
267 spin_unlock(&bdi->wb_lock);
268
269 tsk->flags |= PF_FLUSHER | PF_SWAPWRITE;
270 set_freezable();
271
272 /*
273 * Our parent may run at a different priority, just set us to normal
274 */
275 set_user_nice(tsk, 0);
276}
277
278static int bdi_start_fn(void *ptr)
279{
280 struct bdi_writeback *wb = ptr;
281 struct backing_dev_info *bdi = wb->bdi;
282 int ret;
283
284 /*
285 * Add us to the active bdi_list
286 */
287 spin_lock(&bdi_lock);
288 list_add(&bdi->bdi_list, &bdi_list);
289 spin_unlock(&bdi_lock);
290
291 bdi_task_init(bdi, wb);
292
293 /*
294 * Clear pending bit and wakeup anybody waiting to tear us down
295 */
296 clear_bit(BDI_pending, &bdi->state);
297 smp_mb__after_clear_bit();
298 wake_up_bit(&bdi->state, BDI_pending);
299
300 ret = bdi_writeback_task(wb);
301
302 /*
303 * Remove us from the list
304 */
305 spin_lock(&bdi->wb_lock);
306 list_del_rcu(&wb->list);
307 spin_unlock(&bdi->wb_lock);
308
309 /*
310 * Flush any work that raced with us exiting. No new work
311 * will be added, since this bdi isn't discoverable anymore.
312 */
313 if (!list_empty(&bdi->work_list))
314 wb_do_writeback(wb, 1);
315
316 wb->task = NULL;
317 return ret;
318}
319
320int bdi_has_dirty_io(struct backing_dev_info *bdi)
321{
322 return wb_has_dirty_io(&bdi->wb);
323}
324
325static void bdi_flush_io(struct backing_dev_info *bdi)
326{
327 struct writeback_control wbc = {
328 .bdi = bdi,
329 .sync_mode = WB_SYNC_NONE,
330 .older_than_this = NULL,
331 .range_cyclic = 1,
332 .nr_to_write = 1024,
333 };
334
335 writeback_inodes_wbc(&wbc);
336}
337
338/*
339 * kupdated() used to do this. We cannot do it from the bdi_forker_task()
340 * or we risk deadlocking on ->s_umount. The longer term solution would be
341 * to implement sync_supers_bdi() or similar and simply do it from the
342 * bdi writeback tasks individually.
343 */
344static int bdi_sync_supers(void *unused)
345{
346 set_user_nice(current, 0);
347
348 while (!kthread_should_stop()) {
349 set_current_state(TASK_INTERRUPTIBLE);
350 schedule();
351
352 /*
353 * Do this periodically, like kupdated() did before.
354 */
355 sync_supers();
356 }
357
358 return 0;
359}
360
361static void arm_supers_timer(void)
362{
363 unsigned long next;
364
365 next = msecs_to_jiffies(dirty_writeback_interval * 10) + jiffies;
366 mod_timer(&sync_supers_timer, round_jiffies_up(next));
367}
368
369static void sync_supers_timer_fn(unsigned long unused)
370{
371 wake_up_process(sync_supers_tsk);
372 arm_supers_timer();
373}
374
375static int bdi_forker_task(void *ptr)
376{
377 struct bdi_writeback *me = ptr;
378
379 bdi_task_init(me->bdi, me);
380
381 for (;;) {
382 struct backing_dev_info *bdi, *tmp;
383 struct bdi_writeback *wb;
384
385 /*
386 * Temporary measure, we want to make sure we don't see
387 * dirty data on the default backing_dev_info
388 */
389 if (wb_has_dirty_io(me) || !list_empty(&me->bdi->work_list))
390 wb_do_writeback(me, 0);
391
392 spin_lock(&bdi_lock);
393
394 /*
395 * Check if any existing bdi's have dirty data without
396 * a thread registered. If so, set that up.
397 */
398 list_for_each_entry_safe(bdi, tmp, &bdi_list, bdi_list) {
399 if (bdi->wb.task)
400 continue;
401 if (list_empty(&bdi->work_list) &&
402 !bdi_has_dirty_io(bdi))
403 continue;
404
405 bdi_add_default_flusher_task(bdi);
406 }
407
408 set_current_state(TASK_INTERRUPTIBLE);
409
410 if (list_empty(&bdi_pending_list)) {
411 unsigned long wait;
412
413 spin_unlock(&bdi_lock);
414 wait = msecs_to_jiffies(dirty_writeback_interval * 10);
415 schedule_timeout(wait);
416 try_to_freeze();
417 continue;
418 }
419
420 __set_current_state(TASK_RUNNING);
421
422 /*
423 * This is our real job - check for pending entries in
424 * bdi_pending_list, and create the tasks that got added
425 */
426 bdi = list_entry(bdi_pending_list.next, struct backing_dev_info,
427 bdi_list);
428 list_del_init(&bdi->bdi_list);
429 spin_unlock(&bdi_lock);
430
431 wb = &bdi->wb;
432 wb->task = kthread_run(bdi_start_fn, wb, "flush-%s",
433 dev_name(bdi->dev));
434 /*
435 * If task creation fails, then readd the bdi to
436 * the pending list and force writeout of the bdi
437 * from this forker thread. That will free some memory
438 * and we can try again.
439 */
440 if (IS_ERR(wb->task)) {
441 wb->task = NULL;
442
443 /*
444 * Add this 'bdi' to the back, so we get
445 * a chance to flush other bdi's to free
446 * memory.
447 */
448 spin_lock(&bdi_lock);
449 list_add_tail(&bdi->bdi_list, &bdi_pending_list);
450 spin_unlock(&bdi_lock);
451
452 bdi_flush_io(bdi);
453 }
454 }
455
456 return 0;
457}
458
459/*
460 * Add the default flusher task that gets created for any bdi
461 * that has dirty data pending writeout
462 */
463static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
464{
465 if (!bdi_cap_writeback_dirty(bdi))
466 return;
467
468 if (WARN_ON(!test_bit(BDI_registered, &bdi->state))) {
469 printk(KERN_ERR "bdi %p/%s is not registered!\n",
470 bdi, bdi->name);
471 return;
472 }
473
474 /*
475 * Check with the helper whether to proceed adding a task. Will only
 476	 * abort if two or more simultaneous calls to
 477	 * bdi_add_default_flusher_task() occurred; further additions will block
478 * waiting for previous additions to finish.
479 */
480 if (!test_and_set_bit(BDI_pending, &bdi->state)) {
481 list_move_tail(&bdi->bdi_list, &bdi_pending_list);
482
483 /*
484 * We are now on the pending list, wake up bdi_forker_task()
485 * to finish the job and add us back to the active bdi_list
486 */
487 wake_up_process(default_backing_dev_info.wb.task);
488 }
489}
490
196int bdi_register(struct backing_dev_info *bdi, struct device *parent, 491int bdi_register(struct backing_dev_info *bdi, struct device *parent,
197 const char *fmt, ...) 492 const char *fmt, ...)
198{ 493{
@@ -211,9 +506,35 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
211 goto exit; 506 goto exit;
212 } 507 }
213 508
509 spin_lock(&bdi_lock);
510 list_add_tail(&bdi->bdi_list, &bdi_list);
511 spin_unlock(&bdi_lock);
512
214 bdi->dev = dev; 513 bdi->dev = dev;
215 bdi_debug_register(bdi, dev_name(dev));
216 514
515 /*
516 * Just start the forker thread for our default backing_dev_info,
517 * and add other bdi's to the list. They will get a thread created
518 * on-demand when they need it.
519 */
520 if (bdi_cap_flush_forker(bdi)) {
521 struct bdi_writeback *wb = &bdi->wb;
522
523 wb->task = kthread_run(bdi_forker_task, wb, "bdi-%s",
524 dev_name(dev));
525 if (IS_ERR(wb->task)) {
526 wb->task = NULL;
527 ret = -ENOMEM;
528
529 spin_lock(&bdi_lock);
530 list_del(&bdi->bdi_list);
531 spin_unlock(&bdi_lock);
532 goto exit;
533 }
534 }
535
536 bdi_debug_register(bdi, dev_name(dev));
537 set_bit(BDI_registered, &bdi->state);
217exit: 538exit:
218 return ret; 539 return ret;
219} 540}
@@ -225,9 +546,42 @@ int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
225} 546}
226EXPORT_SYMBOL(bdi_register_dev); 547EXPORT_SYMBOL(bdi_register_dev);
227 548
549/*
550 * Remove bdi from the global list and shutdown any threads we have running
551 */
552static void bdi_wb_shutdown(struct backing_dev_info *bdi)
553{
554 struct bdi_writeback *wb;
555
556 if (!bdi_cap_writeback_dirty(bdi))
557 return;
558
559 /*
560 * If setup is pending, wait for that to complete first
561 */
562 wait_on_bit(&bdi->state, BDI_pending, bdi_sched_wait,
563 TASK_UNINTERRUPTIBLE);
564
565 /*
566 * Make sure nobody finds us on the bdi_list anymore
567 */
568 spin_lock(&bdi_lock);
569 list_del(&bdi->bdi_list);
570 spin_unlock(&bdi_lock);
571
572 /*
573 * Finally, kill the kernel threads. We don't need to be RCU
574 * safe anymore, since the bdi is gone from visibility.
575 */
576 list_for_each_entry(wb, &bdi->wb_list, list)
577 kthread_stop(wb->task);
578}
579
228void bdi_unregister(struct backing_dev_info *bdi) 580void bdi_unregister(struct backing_dev_info *bdi)
229{ 581{
230 if (bdi->dev) { 582 if (bdi->dev) {
583 if (!bdi_cap_flush_forker(bdi))
584 bdi_wb_shutdown(bdi);
231 bdi_debug_unregister(bdi); 585 bdi_debug_unregister(bdi);
232 device_unregister(bdi->dev); 586 device_unregister(bdi->dev);
233 bdi->dev = NULL; 587 bdi->dev = NULL;
@@ -237,14 +591,25 @@ EXPORT_SYMBOL(bdi_unregister);
237 591
238int bdi_init(struct backing_dev_info *bdi) 592int bdi_init(struct backing_dev_info *bdi)
239{ 593{
240 int i; 594 int i, err;
241 int err;
242 595
243 bdi->dev = NULL; 596 bdi->dev = NULL;
244 597
245 bdi->min_ratio = 0; 598 bdi->min_ratio = 0;
246 bdi->max_ratio = 100; 599 bdi->max_ratio = 100;
247 bdi->max_prop_frac = PROP_FRAC_BASE; 600 bdi->max_prop_frac = PROP_FRAC_BASE;
601 spin_lock_init(&bdi->wb_lock);
602 INIT_LIST_HEAD(&bdi->bdi_list);
603 INIT_LIST_HEAD(&bdi->wb_list);
604 INIT_LIST_HEAD(&bdi->work_list);
605
606 bdi_wb_init(&bdi->wb, bdi);
607
608 /*
609 * Just one thread support for now, hard code mask and count
610 */
611 bdi->wb_mask = 1;
612 bdi->wb_cnt = 1;
248 613
249 for (i = 0; i < NR_BDI_STAT_ITEMS; i++) { 614 for (i = 0; i < NR_BDI_STAT_ITEMS; i++) {
250 err = percpu_counter_init(&bdi->bdi_stat[i], 0); 615 err = percpu_counter_init(&bdi->bdi_stat[i], 0);
@@ -269,6 +634,8 @@ void bdi_destroy(struct backing_dev_info *bdi)
269{ 634{
270 int i; 635 int i;
271 636
637 WARN_ON(bdi_has_dirty_io(bdi));
638
272 bdi_unregister(bdi); 639 bdi_unregister(bdi);
273 640
274 for (i = 0; i < NR_BDI_STAT_ITEMS; i++) 641 for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 701740c9e81b..555d5d2731c6 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -521,7 +521,11 @@ find_block:
521 region = phys_to_virt(PFN_PHYS(bdata->node_min_pfn) + 521 region = phys_to_virt(PFN_PHYS(bdata->node_min_pfn) +
522 start_off); 522 start_off);
523 memset(region, 0, size); 523 memset(region, 0, size);
524 kmemleak_alloc(region, size, 1, 0); 524 /*
525 * The min_count is set to 0 so that bootmem allocated blocks
526 * are never reported as leaks.
527 */
528 kmemleak_alloc(region, size, 0, 0);
525 return region; 529 return region;
526 } 530 }
527 531
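For orientation, not part of the patch: the min_count argument of kmemleak_alloc() is what selects the behaviour relied on above. A hypothetical caller could annotate blocks along these lines ("buf" and "size" are placeholders):

	kmemleak_alloc(buf, size, 1, GFP_KERNEL);  /* tracked normally: reported if unreferenced */
	kmemleak_alloc(buf, size, 0, GFP_KERNEL);  /* grey: still scanned, never reported as a leak */
	kmemleak_ignore(buf);                      /* black: excluded from leak reports altogether */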
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 487267310a84..4ea4510e2996 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -92,11 +92,13 @@
92#include <linux/string.h> 92#include <linux/string.h>
93#include <linux/nodemask.h> 93#include <linux/nodemask.h>
94#include <linux/mm.h> 94#include <linux/mm.h>
95#include <linux/workqueue.h>
95 96
96#include <asm/sections.h> 97#include <asm/sections.h>
97#include <asm/processor.h> 98#include <asm/processor.h>
98#include <asm/atomic.h> 99#include <asm/atomic.h>
99 100
101#include <linux/kmemcheck.h>
100#include <linux/kmemleak.h> 102#include <linux/kmemleak.h>
101 103
102/* 104/*
@@ -107,6 +109,7 @@
107#define SECS_FIRST_SCAN 60 /* delay before the first scan */ 109#define SECS_FIRST_SCAN 60 /* delay before the first scan */
108#define SECS_SCAN_WAIT 600 /* subsequent auto scanning delay */ 110#define SECS_SCAN_WAIT 600 /* subsequent auto scanning delay */
109#define GRAY_LIST_PASSES 25 /* maximum number of gray list scans */ 111#define GRAY_LIST_PASSES 25 /* maximum number of gray list scans */
112#define MAX_SCAN_SIZE 4096 /* maximum size of a scanned block */
110 113
111#define BYTES_PER_POINTER sizeof(void *) 114#define BYTES_PER_POINTER sizeof(void *)
112 115
@@ -120,6 +123,9 @@ struct kmemleak_scan_area {
120 size_t length; 123 size_t length;
121}; 124};
122 125
126#define KMEMLEAK_GREY 0
127#define KMEMLEAK_BLACK -1
128
123/* 129/*
124 * Structure holding the metadata for each allocated memory block. 130 * Structure holding the metadata for each allocated memory block.
125 * Modifications to such objects should be made while holding the 131 * Modifications to such objects should be made while holding the
@@ -161,6 +167,15 @@ struct kmemleak_object {
161/* flag set on newly allocated objects */ 167/* flag set on newly allocated objects */
162#define OBJECT_NEW (1 << 3) 168#define OBJECT_NEW (1 << 3)
163 169
170/* number of bytes to print per line; must be 16 or 32 */
171#define HEX_ROW_SIZE 16
172/* number of bytes to print at a time (1, 2, 4, 8) */
173#define HEX_GROUP_SIZE 1
174/* include ASCII after the hex output */
175#define HEX_ASCII 1
176/* max number of lines to be printed */
177#define HEX_MAX_LINES 2
178
164/* the list of all allocated objects */ 179/* the list of all allocated objects */
165static LIST_HEAD(object_list); 180static LIST_HEAD(object_list);
166/* the list of gray-colored objects (see color_gray comment below) */ 181/* the list of gray-colored objects (see color_gray comment below) */
@@ -228,11 +243,14 @@ struct early_log {
228 int min_count; /* minimum reference count */ 243 int min_count; /* minimum reference count */
229 unsigned long offset; /* scan area offset */ 244 unsigned long offset; /* scan area offset */
230 size_t length; /* scan area length */ 245 size_t length; /* scan area length */
246 unsigned long trace[MAX_TRACE]; /* stack trace */
247 unsigned int trace_len; /* stack trace length */
231}; 248};
232 249
233/* early logging buffer and current position */ 250/* early logging buffer and current position */
234static struct early_log early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE]; 251static struct early_log
235static int crt_early_log; 252 early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE] __initdata;
253static int crt_early_log __initdata;
236 254
237static void kmemleak_disable(void); 255static void kmemleak_disable(void);
238 256
@@ -255,6 +273,35 @@ static void kmemleak_disable(void);
255} while (0) 273} while (0)
256 274
257/* 275/*
276 * Printing of the object's hex dump to the seq file. The number of lines to be
277 * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
278 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
279 * with the object->lock held.
280 */
281static void hex_dump_object(struct seq_file *seq,
282 struct kmemleak_object *object)
283{
284 const u8 *ptr = (const u8 *)object->pointer;
285 int i, len, remaining;
286 unsigned char linebuf[HEX_ROW_SIZE * 5];
287
288 /* limit the number of lines to HEX_MAX_LINES */
289 remaining = len =
290 min(object->size, (size_t)(HEX_MAX_LINES * HEX_ROW_SIZE));
291
292 seq_printf(seq, " hex dump (first %d bytes):\n", len);
293 for (i = 0; i < len; i += HEX_ROW_SIZE) {
294 int linelen = min(remaining, HEX_ROW_SIZE);
295
296 remaining -= HEX_ROW_SIZE;
297 hex_dump_to_buffer(ptr + i, linelen, HEX_ROW_SIZE,
298 HEX_GROUP_SIZE, linebuf, sizeof(linebuf),
299 HEX_ASCII);
300 seq_printf(seq, " %s\n", linebuf);
301 }
302}
303
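As an aside, not part of the patch: hex_dump_to_buffer() is assumed here to be the stock lib/hexdump.c helper. A minimal standalone use of it, mirroring the sizing above, might look like this (names are illustrative):

	static void example_dump_row(const void *buf, size_t len)
	{
		char line[16 * 5];	/* same sizing idea as linebuf[HEX_ROW_SIZE * 5] */

		/* render at most one 16-byte row, one byte per group, with ASCII */
		hex_dump_to_buffer(buf, min(len, (size_t)16), 16, 1,
				   line, sizeof(line), true);
		pr_info("  %s\n", line);
	}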
304/*
258 * Object colors, encoded with count and min_count: 305 * Object colors, encoded with count and min_count:
259 * - white - orphan object, not enough references to it (count < min_count) 306 * - white - orphan object, not enough references to it (count < min_count)
260 * - gray - not orphan, not marked as false positive (min_count == 0) or 307 * - gray - not orphan, not marked as false positive (min_count == 0) or
@@ -264,19 +311,21 @@ static void kmemleak_disable(void);
264 * Newly created objects don't have any color assigned (object->count == -1) 311 * Newly created objects don't have any color assigned (object->count == -1)
265 * before the next memory scan when they become white. 312 * before the next memory scan when they become white.
266 */ 313 */
267static int color_white(const struct kmemleak_object *object) 314static bool color_white(const struct kmemleak_object *object)
268{ 315{
269 return object->count != -1 && object->count < object->min_count; 316 return object->count != KMEMLEAK_BLACK &&
317 object->count < object->min_count;
270} 318}
271 319
272static int color_gray(const struct kmemleak_object *object) 320static bool color_gray(const struct kmemleak_object *object)
273{ 321{
274 return object->min_count != -1 && object->count >= object->min_count; 322 return object->min_count != KMEMLEAK_BLACK &&
323 object->count >= object->min_count;
275} 324}
276 325
277static int color_black(const struct kmemleak_object *object) 326static bool color_black(const struct kmemleak_object *object)
278{ 327{
279 return object->min_count == -1; 328 return object->min_count == KMEMLEAK_BLACK;
280} 329}
281 330
282/* 331/*
@@ -284,7 +333,7 @@ static int color_black(const struct kmemleak_object *object)
284 * not be deleted and have a minimum age to avoid false positives caused by 333 * not be deleted and have a minimum age to avoid false positives caused by
285 * pointers temporarily stored in CPU registers. 334 * pointers temporarily stored in CPU registers.
286 */ 335 */
287static int unreferenced_object(struct kmemleak_object *object) 336static bool unreferenced_object(struct kmemleak_object *object)
288{ 337{
289 return (object->flags & OBJECT_ALLOCATED) && color_white(object) && 338 return (object->flags & OBJECT_ALLOCATED) && color_white(object) &&
290 time_before_eq(object->jiffies + jiffies_min_age, 339 time_before_eq(object->jiffies + jiffies_min_age,
@@ -304,6 +353,7 @@ static void print_unreferenced(struct seq_file *seq,
304 object->pointer, object->size); 353 object->pointer, object->size);
305 seq_printf(seq, " comm \"%s\", pid %d, jiffies %lu\n", 354 seq_printf(seq, " comm \"%s\", pid %d, jiffies %lu\n",
306 object->comm, object->pid, object->jiffies); 355 object->comm, object->pid, object->jiffies);
356 hex_dump_object(seq, object);
307 seq_printf(seq, " backtrace:\n"); 357 seq_printf(seq, " backtrace:\n");
308 358
309 for (i = 0; i < object->trace_len; i++) { 359 for (i = 0; i < object->trace_len; i++) {
@@ -330,6 +380,7 @@ static void dump_object_info(struct kmemleak_object *object)
330 object->comm, object->pid, object->jiffies); 380 object->comm, object->pid, object->jiffies);
331 pr_notice(" min_count = %d\n", object->min_count); 381 pr_notice(" min_count = %d\n", object->min_count);
332 pr_notice(" count = %d\n", object->count); 382 pr_notice(" count = %d\n", object->count);
383 pr_notice(" flags = 0x%lx\n", object->flags);
333 pr_notice(" backtrace:\n"); 384 pr_notice(" backtrace:\n");
334 print_stack_trace(&trace, 4); 385 print_stack_trace(&trace, 4);
335} 386}
@@ -434,21 +485,36 @@ static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
434} 485}
435 486
436/* 487/*
488 * Save stack trace to the given array of MAX_TRACE size.
489 */
490static int __save_stack_trace(unsigned long *trace)
491{
492 struct stack_trace stack_trace;
493
494 stack_trace.max_entries = MAX_TRACE;
495 stack_trace.nr_entries = 0;
496 stack_trace.entries = trace;
497 stack_trace.skip = 2;
498 save_stack_trace(&stack_trace);
499
500 return stack_trace.nr_entries;
501}
502
503/*
437 * Create the metadata (struct kmemleak_object) corresponding to an allocated 504 * Create the metadata (struct kmemleak_object) corresponding to an allocated
438 * memory block and add it to the object_list and object_tree_root. 505 * memory block and add it to the object_list and object_tree_root.
439 */ 506 */
440static void create_object(unsigned long ptr, size_t size, int min_count, 507static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
441 gfp_t gfp) 508 int min_count, gfp_t gfp)
442{ 509{
443 unsigned long flags; 510 unsigned long flags;
444 struct kmemleak_object *object; 511 struct kmemleak_object *object;
445 struct prio_tree_node *node; 512 struct prio_tree_node *node;
446 struct stack_trace trace;
447 513
448 object = kmem_cache_alloc(object_cache, gfp & GFP_KMEMLEAK_MASK); 514 object = kmem_cache_alloc(object_cache, gfp & GFP_KMEMLEAK_MASK);
449 if (!object) { 515 if (!object) {
450 kmemleak_stop("Cannot allocate a kmemleak_object structure\n"); 516 kmemleak_stop("Cannot allocate a kmemleak_object structure\n");
451 return; 517 return NULL;
452 } 518 }
453 519
454 INIT_LIST_HEAD(&object->object_list); 520 INIT_LIST_HEAD(&object->object_list);
@@ -482,18 +548,14 @@ static void create_object(unsigned long ptr, size_t size, int min_count,
482 } 548 }
483 549
484 /* kernel backtrace */ 550 /* kernel backtrace */
485 trace.max_entries = MAX_TRACE; 551 object->trace_len = __save_stack_trace(object->trace);
486 trace.nr_entries = 0;
487 trace.entries = object->trace;
488 trace.skip = 1;
489 save_stack_trace(&trace);
490 object->trace_len = trace.nr_entries;
491 552
492 INIT_PRIO_TREE_NODE(&object->tree_node); 553 INIT_PRIO_TREE_NODE(&object->tree_node);
493 object->tree_node.start = ptr; 554 object->tree_node.start = ptr;
494 object->tree_node.last = ptr + size - 1; 555 object->tree_node.last = ptr + size - 1;
495 556
496 write_lock_irqsave(&kmemleak_lock, flags); 557 write_lock_irqsave(&kmemleak_lock, flags);
558
497 min_addr = min(min_addr, ptr); 559 min_addr = min(min_addr, ptr);
498 max_addr = max(max_addr, ptr + size); 560 max_addr = max(max_addr, ptr + size);
499 node = prio_tree_insert(&object_tree_root, &object->tree_node); 561 node = prio_tree_insert(&object_tree_root, &object->tree_node);
@@ -504,20 +566,19 @@ static void create_object(unsigned long ptr, size_t size, int min_count,
504 * random memory blocks. 566 * random memory blocks.
505 */ 567 */
506 if (node != &object->tree_node) { 568 if (node != &object->tree_node) {
507 unsigned long flags;
508
509 kmemleak_stop("Cannot insert 0x%lx into the object search tree " 569 kmemleak_stop("Cannot insert 0x%lx into the object search tree "
510 "(already existing)\n", ptr); 570 "(already existing)\n", ptr);
511 object = lookup_object(ptr, 1); 571 object = lookup_object(ptr, 1);
512 spin_lock_irqsave(&object->lock, flags); 572 spin_lock(&object->lock);
513 dump_object_info(object); 573 dump_object_info(object);
514 spin_unlock_irqrestore(&object->lock, flags); 574 spin_unlock(&object->lock);
515 575
516 goto out; 576 goto out;
517 } 577 }
518 list_add_tail_rcu(&object->object_list, &object_list); 578 list_add_tail_rcu(&object->object_list, &object_list);
519out: 579out:
520 write_unlock_irqrestore(&kmemleak_lock, flags); 580 write_unlock_irqrestore(&kmemleak_lock, flags);
581 return object;
521} 582}
522 583
523/* 584/*
@@ -604,46 +665,55 @@ static void delete_object_part(unsigned long ptr, size_t size)
604 665
605 put_object(object); 666 put_object(object);
606} 667}
607/* 668
608 * Make a object permanently as gray-colored so that it can no longer be 669static void __paint_it(struct kmemleak_object *object, int color)
609 * reported as a leak. This is used in general to mark a false positive. 670{
610 */ 671 object->min_count = color;
611static void make_gray_object(unsigned long ptr) 672 if (color == KMEMLEAK_BLACK)
673 object->flags |= OBJECT_NO_SCAN;
674}
675
676static void paint_it(struct kmemleak_object *object, int color)
612{ 677{
613 unsigned long flags; 678 unsigned long flags;
679
680 spin_lock_irqsave(&object->lock, flags);
681 __paint_it(object, color);
682 spin_unlock_irqrestore(&object->lock, flags);
683}
684
685static void paint_ptr(unsigned long ptr, int color)
686{
614 struct kmemleak_object *object; 687 struct kmemleak_object *object;
615 688
616 object = find_and_get_object(ptr, 0); 689 object = find_and_get_object(ptr, 0);
617 if (!object) { 690 if (!object) {
618 kmemleak_warn("Graying unknown object at 0x%08lx\n", ptr); 691 kmemleak_warn("Trying to color unknown object "
692 "at 0x%08lx as %s\n", ptr,
693 (color == KMEMLEAK_GREY) ? "Grey" :
694 (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
619 return; 695 return;
620 } 696 }
621 697 paint_it(object, color);
622 spin_lock_irqsave(&object->lock, flags);
623 object->min_count = 0;
624 spin_unlock_irqrestore(&object->lock, flags);
625 put_object(object); 698 put_object(object);
626} 699}
627 700
628/* 701/*
702 * Mark an object permanently as gray-colored so that it can no longer be
703 * reported as a leak. This is generally used to mark a false positive.
704 */
705static void make_gray_object(unsigned long ptr)
706{
707 paint_ptr(ptr, KMEMLEAK_GREY);
708}
709
710/*
629 * Mark the object as black-colored so that it is ignored from scans and 711 * Mark the object as black-colored so that it is ignored from scans and
630 * reporting. 712 * reporting.
631 */ 713 */
632static void make_black_object(unsigned long ptr) 714static void make_black_object(unsigned long ptr)
633{ 715{
634 unsigned long flags; 716 paint_ptr(ptr, KMEMLEAK_BLACK);
635 struct kmemleak_object *object;
636
637 object = find_and_get_object(ptr, 0);
638 if (!object) {
639 kmemleak_warn("Blacking unknown object at 0x%08lx\n", ptr);
640 return;
641 }
642
643 spin_lock_irqsave(&object->lock, flags);
644 object->min_count = -1;
645 spin_unlock_irqrestore(&object->lock, flags);
646 put_object(object);
647} 717}
648 718
649/* 719/*
@@ -715,14 +785,15 @@ static void object_no_scan(unsigned long ptr)
715 * Log an early kmemleak_* call to the early_log buffer. These calls will be 785 * Log an early kmemleak_* call to the early_log buffer. These calls will be
716 * processed later once kmemleak is fully initialized. 786 * processed later once kmemleak is fully initialized.
717 */ 787 */
718static void log_early(int op_type, const void *ptr, size_t size, 788static void __init log_early(int op_type, const void *ptr, size_t size,
719 int min_count, unsigned long offset, size_t length) 789 int min_count, unsigned long offset, size_t length)
720{ 790{
721 unsigned long flags; 791 unsigned long flags;
722 struct early_log *log; 792 struct early_log *log;
723 793
724 if (crt_early_log >= ARRAY_SIZE(early_log)) { 794 if (crt_early_log >= ARRAY_SIZE(early_log)) {
725 pr_warning("Early log buffer exceeded\n"); 795 pr_warning("Early log buffer exceeded, "
796 "please increase DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n");
726 kmemleak_disable(); 797 kmemleak_disable();
727 return; 798 return;
728 } 799 }
@@ -739,16 +810,45 @@ static void log_early(int op_type, const void *ptr, size_t size,
739 log->min_count = min_count; 810 log->min_count = min_count;
740 log->offset = offset; 811 log->offset = offset;
741 log->length = length; 812 log->length = length;
813 if (op_type == KMEMLEAK_ALLOC)
814 log->trace_len = __save_stack_trace(log->trace);
742 crt_early_log++; 815 crt_early_log++;
743 local_irq_restore(flags); 816 local_irq_restore(flags);
744} 817}
745 818
746/* 819/*
820 * Log an early allocated block and populate the stack trace.
821 */
822static void early_alloc(struct early_log *log)
823{
824 struct kmemleak_object *object;
825 unsigned long flags;
826 int i;
827
828 if (!atomic_read(&kmemleak_enabled) || !log->ptr || IS_ERR(log->ptr))
829 return;
830
831 /*
832 * RCU locking needed to ensure object is not freed via put_object().
833 */
834 rcu_read_lock();
835 object = create_object((unsigned long)log->ptr, log->size,
836 log->min_count, GFP_KERNEL);
837 spin_lock_irqsave(&object->lock, flags);
838 for (i = 0; i < log->trace_len; i++)
839 object->trace[i] = log->trace[i];
840 object->trace_len = log->trace_len;
841 spin_unlock_irqrestore(&object->lock, flags);
842 rcu_read_unlock();
843}
844
845/*
747 * Memory allocation function callback. This function is called from the 846 * Memory allocation function callback. This function is called from the
748 * kernel allocators when a new block is allocated (kmem_cache_alloc, kmalloc, 847 * kernel allocators when a new block is allocated (kmem_cache_alloc, kmalloc,
749 * vmalloc etc.). 848 * vmalloc etc.).
750 */ 849 */
751void kmemleak_alloc(const void *ptr, size_t size, int min_count, gfp_t gfp) 850void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
851 gfp_t gfp)
752{ 852{
753 pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count); 853 pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);
754 854
@@ -763,7 +863,7 @@ EXPORT_SYMBOL_GPL(kmemleak_alloc);
763 * Memory freeing function callback. This function is called from the kernel 863 * Memory freeing function callback. This function is called from the kernel
764 * allocators when a block is freed (kmem_cache_free, kfree, vfree etc.). 864 * allocators when a block is freed (kmem_cache_free, kfree, vfree etc.).
765 */ 865 */
766void kmemleak_free(const void *ptr) 866void __ref kmemleak_free(const void *ptr)
767{ 867{
768 pr_debug("%s(0x%p)\n", __func__, ptr); 868 pr_debug("%s(0x%p)\n", __func__, ptr);
769 869
@@ -778,7 +878,7 @@ EXPORT_SYMBOL_GPL(kmemleak_free);
778 * Partial memory freeing function callback. This function is usually called 878 * Partial memory freeing function callback. This function is usually called
779 * from bootmem allocator when (part of) a memory block is freed. 879 * from bootmem allocator when (part of) a memory block is freed.
780 */ 880 */
781void kmemleak_free_part(const void *ptr, size_t size) 881void __ref kmemleak_free_part(const void *ptr, size_t size)
782{ 882{
783 pr_debug("%s(0x%p)\n", __func__, ptr); 883 pr_debug("%s(0x%p)\n", __func__, ptr);
784 884
@@ -793,7 +893,7 @@ EXPORT_SYMBOL_GPL(kmemleak_free_part);
793 * Mark an already allocated memory block as a false positive. This will cause 893 * Mark an already allocated memory block as a false positive. This will cause
794 * the block to no longer be reported as leak and always be scanned. 894 * the block to no longer be reported as leak and always be scanned.
795 */ 895 */
796void kmemleak_not_leak(const void *ptr) 896void __ref kmemleak_not_leak(const void *ptr)
797{ 897{
798 pr_debug("%s(0x%p)\n", __func__, ptr); 898 pr_debug("%s(0x%p)\n", __func__, ptr);
799 899
@@ -809,7 +909,7 @@ EXPORT_SYMBOL(kmemleak_not_leak);
809 * corresponding block is not a leak and does not contain any references to 909 * corresponding block is not a leak and does not contain any references to
810 * other allocated memory blocks. 910 * other allocated memory blocks.
811 */ 911 */
812void kmemleak_ignore(const void *ptr) 912void __ref kmemleak_ignore(const void *ptr)
813{ 913{
814 pr_debug("%s(0x%p)\n", __func__, ptr); 914 pr_debug("%s(0x%p)\n", __func__, ptr);
815 915
@@ -823,8 +923,8 @@ EXPORT_SYMBOL(kmemleak_ignore);
823/* 923/*
824 * Limit the range to be scanned in an allocated memory block. 924 * Limit the range to be scanned in an allocated memory block.
825 */ 925 */
826void kmemleak_scan_area(const void *ptr, unsigned long offset, size_t length, 926void __ref kmemleak_scan_area(const void *ptr, unsigned long offset,
827 gfp_t gfp) 927 size_t length, gfp_t gfp)
828{ 928{
829 pr_debug("%s(0x%p)\n", __func__, ptr); 929 pr_debug("%s(0x%p)\n", __func__, ptr);
830 930
@@ -838,7 +938,7 @@ EXPORT_SYMBOL(kmemleak_scan_area);
838/* 938/*
839 * Inform kmemleak not to scan the given memory block. 939 * Inform kmemleak not to scan the given memory block.
840 */ 940 */
841void kmemleak_no_scan(const void *ptr) 941void __ref kmemleak_no_scan(const void *ptr)
842{ 942{
843 pr_debug("%s(0x%p)\n", __func__, ptr); 943 pr_debug("%s(0x%p)\n", __func__, ptr);
844 944
@@ -882,15 +982,22 @@ static void scan_block(void *_start, void *_end,
882 unsigned long *end = _end - (BYTES_PER_POINTER - 1); 982 unsigned long *end = _end - (BYTES_PER_POINTER - 1);
883 983
884 for (ptr = start; ptr < end; ptr++) { 984 for (ptr = start; ptr < end; ptr++) {
885 unsigned long flags;
886 unsigned long pointer = *ptr;
887 struct kmemleak_object *object; 985 struct kmemleak_object *object;
986 unsigned long flags;
987 unsigned long pointer;
888 988
889 if (allow_resched) 989 if (allow_resched)
890 cond_resched(); 990 cond_resched();
891 if (scan_should_stop()) 991 if (scan_should_stop())
892 break; 992 break;
893 993
994 /* don't scan uninitialized memory */
995 if (!kmemcheck_is_obj_initialized((unsigned long)ptr,
996 BYTES_PER_POINTER))
997 continue;
998
999 pointer = *ptr;
1000
894 object = find_and_get_object(pointer, 1); 1001 object = find_and_get_object(pointer, 1);
895 if (!object) 1002 if (!object)
896 continue; 1003 continue;
@@ -949,10 +1056,21 @@ static void scan_object(struct kmemleak_object *object)
949 if (!(object->flags & OBJECT_ALLOCATED)) 1056 if (!(object->flags & OBJECT_ALLOCATED))
950 /* already freed object */ 1057 /* already freed object */
951 goto out; 1058 goto out;
952 if (hlist_empty(&object->area_list)) 1059 if (hlist_empty(&object->area_list)) {
953 scan_block((void *)object->pointer, 1060 void *start = (void *)object->pointer;
954 (void *)(object->pointer + object->size), object, 0); 1061 void *end = (void *)(object->pointer + object->size);
955 else 1062
1063 while (start < end && (object->flags & OBJECT_ALLOCATED) &&
1064 !(object->flags & OBJECT_NO_SCAN)) {
1065 scan_block(start, min(start + MAX_SCAN_SIZE, end),
1066 object, 0);
1067 start += MAX_SCAN_SIZE;
1068
1069 spin_unlock_irqrestore(&object->lock, flags);
1070 cond_resched();
1071 spin_lock_irqsave(&object->lock, flags);
1072 }
1073 } else
956 hlist_for_each_entry(area, elem, &object->area_list, node) 1074 hlist_for_each_entry(area, elem, &object->area_list, node)
957 scan_block((void *)(object->pointer + area->offset), 1075 scan_block((void *)(object->pointer + area->offset),
958 (void *)(object->pointer + area->offset 1076 (void *)(object->pointer + area->offset
@@ -970,7 +1088,6 @@ static void kmemleak_scan(void)
970{ 1088{
971 unsigned long flags; 1089 unsigned long flags;
972 struct kmemleak_object *object, *tmp; 1090 struct kmemleak_object *object, *tmp;
973 struct task_struct *task;
974 int i; 1091 int i;
975 int new_leaks = 0; 1092 int new_leaks = 0;
976 int gray_list_pass = 0; 1093 int gray_list_pass = 0;
@@ -1037,15 +1154,16 @@ static void kmemleak_scan(void)
1037 } 1154 }
1038 1155
1039 /* 1156 /*
1040 * Scanning the task stacks may introduce false negatives and it is 1157 * Scanning the task stacks (may introduce false negatives).
1041 * not enabled by default.
1042 */ 1158 */
1043 if (kmemleak_stack_scan) { 1159 if (kmemleak_stack_scan) {
1160 struct task_struct *p, *g;
1161
1044 read_lock(&tasklist_lock); 1162 read_lock(&tasklist_lock);
1045 for_each_process(task) 1163 do_each_thread(g, p) {
1046 scan_block(task_stack_page(task), 1164 scan_block(task_stack_page(p), task_stack_page(p) +
1047 task_stack_page(task) + THREAD_SIZE, 1165 THREAD_SIZE, NULL, 0);
1048 NULL, 0); 1166 } while_each_thread(g, p);
1049 read_unlock(&tasklist_lock); 1167 read_unlock(&tasklist_lock);
1050 } 1168 }
1051 1169
@@ -1170,7 +1288,7 @@ static int kmemleak_scan_thread(void *arg)
1170 * Start the automatic memory scanning thread. This function must be called 1288 * Start the automatic memory scanning thread. This function must be called
1171 * with the scan_mutex held. 1289 * with the scan_mutex held.
1172 */ 1290 */
1173void start_scan_thread(void) 1291static void start_scan_thread(void)
1174{ 1292{
1175 if (scan_thread) 1293 if (scan_thread)
1176 return; 1294 return;
@@ -1185,7 +1303,7 @@ void start_scan_thread(void)
1185 * Stop the automatic memory scanning thread. This function must be called 1303 * Stop the automatic memory scanning thread. This function must be called
1186 * with the scan_mutex held. 1304 * with the scan_mutex held.
1187 */ 1305 */
1188void stop_scan_thread(void) 1306static void stop_scan_thread(void)
1189{ 1307{
1190 if (scan_thread) { 1308 if (scan_thread) {
1191 kthread_stop(scan_thread); 1309 kthread_stop(scan_thread);
@@ -1294,6 +1412,49 @@ static int kmemleak_release(struct inode *inode, struct file *file)
1294 return seq_release(inode, file); 1412 return seq_release(inode, file);
1295} 1413}
1296 1414
1415static int dump_str_object_info(const char *str)
1416{
1417 unsigned long flags;
1418 struct kmemleak_object *object;
1419 unsigned long addr;
1420
1421	addr = simple_strtoul(str, NULL, 0);
1422 object = find_and_get_object(addr, 0);
1423 if (!object) {
1424 pr_info("Unknown object at 0x%08lx\n", addr);
1425 return -EINVAL;
1426 }
1427
1428 spin_lock_irqsave(&object->lock, flags);
1429 dump_object_info(object);
1430 spin_unlock_irqrestore(&object->lock, flags);
1431
1432 put_object(object);
1433 return 0;
1434}
1435
1436/*
1437 * We use grey instead of black to ensure we can do future scans on the same
1438 * objects. If we did not do future scans, these black objects could
1439 * potentially contain references to newly allocated objects in the future and
1440 * we'd end up with false positives.
1441 */
1442static void kmemleak_clear(void)
1443{
1444 struct kmemleak_object *object;
1445 unsigned long flags;
1446
1447 rcu_read_lock();
1448 list_for_each_entry_rcu(object, &object_list, object_list) {
1449 spin_lock_irqsave(&object->lock, flags);
1450 if ((object->flags & OBJECT_REPORTED) &&
1451 unreferenced_object(object))
1452 __paint_it(object, KMEMLEAK_GREY);
1453 spin_unlock_irqrestore(&object->lock, flags);
1454 }
1455 rcu_read_unlock();
1456}
1457
1297/* 1458/*
1298 * File write operation to configure kmemleak at run-time. The following 1459 * File write operation to configure kmemleak at run-time. The following
1299 * commands can be written to the /sys/kernel/debug/kmemleak file: 1460 * commands can be written to the /sys/kernel/debug/kmemleak file:
@@ -1305,6 +1466,9 @@ static int kmemleak_release(struct inode *inode, struct file *file)
1305 * scan=... - set the automatic memory scanning period in seconds (0 to 1466 * scan=... - set the automatic memory scanning period in seconds (0 to
1306 * disable it) 1467 * disable it)
1307 * scan - trigger a memory scan 1468 * scan - trigger a memory scan
1469 * clear - mark all currently reported unreferenced kmemleak objects as
1470 * grey so that they are no longer printed
1471 * dump=... - dump information about the object found at the given address
1308 */ 1472 */
1309static ssize_t kmemleak_write(struct file *file, const char __user *user_buf, 1473static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
1310 size_t size, loff_t *ppos) 1474 size_t size, loff_t *ppos)
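Usage note, not part of the patch: with the two new commands wired in below, a leak-hunting session against the debugfs file named in the comment above might look like this (the address is illustrative):

	# echo scan > /sys/kernel/debug/kmemleak
	# cat /sys/kernel/debug/kmemleak
	# echo dump=0xf0123456 > /sys/kernel/debug/kmemleak
	# echo clear > /sys/kernel/debug/kmemleak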
@@ -1345,6 +1509,10 @@ static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
1345 } 1509 }
1346 } else if (strncmp(buf, "scan", 4) == 0) 1510 } else if (strncmp(buf, "scan", 4) == 0)
1347 kmemleak_scan(); 1511 kmemleak_scan();
1512 else if (strncmp(buf, "clear", 5) == 0)
1513 kmemleak_clear();
1514 else if (strncmp(buf, "dump=", 5) == 0)
1515 ret = dump_str_object_info(buf + 5);
1348 else 1516 else
1349 ret = -EINVAL; 1517 ret = -EINVAL;
1350 1518
@@ -1371,7 +1539,7 @@ static const struct file_operations kmemleak_fops = {
1371 * Perform the freeing of the kmemleak internal objects after waiting for any 1539 * Perform the freeing of the kmemleak internal objects after waiting for any
1372 * current memory scan to complete. 1540 * current memory scan to complete.
1373 */ 1541 */
1374static int kmemleak_cleanup_thread(void *arg) 1542static void kmemleak_do_cleanup(struct work_struct *work)
1375{ 1543{
1376 struct kmemleak_object *object; 1544 struct kmemleak_object *object;
1377 1545
@@ -1383,22 +1551,9 @@ static int kmemleak_cleanup_thread(void *arg)
1383 delete_object_full(object->pointer); 1551 delete_object_full(object->pointer);
1384 rcu_read_unlock(); 1552 rcu_read_unlock();
1385 mutex_unlock(&scan_mutex); 1553 mutex_unlock(&scan_mutex);
1386
1387 return 0;
1388} 1554}
1389 1555
1390/* 1556static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);
1391 * Start the clean-up thread.
1392 */
1393static void kmemleak_cleanup(void)
1394{
1395 struct task_struct *cleanup_thread;
1396
1397 cleanup_thread = kthread_run(kmemleak_cleanup_thread, NULL,
1398 "kmemleak-clean");
1399 if (IS_ERR(cleanup_thread))
1400 pr_warning("Failed to create the clean-up thread\n");
1401}
1402 1557
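For orientation, not part of the patch: the replacement relies on the standard static work item idiom, which in isolation looks roughly like this (names are illustrative):

	static void example_fn(struct work_struct *work)
	{
		/* runs later in process context on the shared kernel workqueue */
	}

	static DECLARE_WORK(example_work, example_fn);

	static void example_trigger(void)
	{
		schedule_work(&example_work);	/* callable from contexts that must not block */
	}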
1403/* 1558/*
1404 * Disable kmemleak. No memory allocation/freeing will be traced once this 1559 * Disable kmemleak. No memory allocation/freeing will be traced once this
@@ -1416,7 +1571,7 @@ static void kmemleak_disable(void)
1416 1571
1417 /* check whether it is too early for a kernel thread */ 1572 /* check whether it is too early for a kernel thread */
1418 if (atomic_read(&kmemleak_initialized)) 1573 if (atomic_read(&kmemleak_initialized))
1419 kmemleak_cleanup(); 1574 schedule_work(&cleanup_work);
1420 1575
1421 pr_info("Kernel memory leak detector disabled\n"); 1576 pr_info("Kernel memory leak detector disabled\n");
1422} 1577}
@@ -1469,8 +1624,7 @@ void __init kmemleak_init(void)
1469 1624
1470 switch (log->op_type) { 1625 switch (log->op_type) {
1471 case KMEMLEAK_ALLOC: 1626 case KMEMLEAK_ALLOC:
1472 kmemleak_alloc(log->ptr, log->size, log->min_count, 1627 early_alloc(log);
1473 GFP_KERNEL);
1474 break; 1628 break;
1475 case KMEMLEAK_FREE: 1629 case KMEMLEAK_FREE:
1476 kmemleak_free(log->ptr); 1630 kmemleak_free(log->ptr);
@@ -1513,7 +1667,7 @@ static int __init kmemleak_late_init(void)
1513 * after setting kmemleak_initialized and we may end up with 1667 * after setting kmemleak_initialized and we may end up with
1514 * two clean-up threads but serialized by scan_mutex. 1668 * two clean-up threads but serialized by scan_mutex.
1515 */ 1669 */
1516 kmemleak_cleanup(); 1670 schedule_work(&cleanup_work);
1517 return -ENOMEM; 1671 return -ENOMEM;
1518 } 1672 }
1519 1673
diff --git a/mm/nommu.c b/mm/nommu.c
index 4bde489ec431..66e81e7e9fe9 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1352,6 +1352,7 @@ unsigned long do_mmap_pgoff(struct file *file,
1352 } 1352 }
1353 1353
1354 vma->vm_region = region; 1354 vma->vm_region = region;
1355 add_nommu_region(region);
1355 1356
1356 /* set up the mapping */ 1357 /* set up the mapping */
1357 if (file && vma->vm_flags & VM_SHARED) 1358 if (file && vma->vm_flags & VM_SHARED)
@@ -1361,8 +1362,6 @@ unsigned long do_mmap_pgoff(struct file *file,
1361 if (ret < 0) 1362 if (ret < 0)
1362 goto error_put_region; 1363 goto error_put_region;
1363 1364
1364 add_nommu_region(region);
1365
1366 /* okay... we have a mapping; now we have to register it */ 1365 /* okay... we have a mapping; now we have to register it */
1367 result = vma->vm_start; 1366 result = vma->vm_start;
1368 1367
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 81627ebcd313..25e7770309b8 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -36,15 +36,6 @@
36#include <linux/pagevec.h> 36#include <linux/pagevec.h>
37 37
38/* 38/*
39 * The maximum number of pages to writeout in a single bdflush/kupdate
40 * operation. We do this so we don't hold I_SYNC against an inode for
41 * enormous amounts of time, which would block a userspace task which has
42 * been forced to throttle against that inode. Also, the code reevaluates
43 * the dirty each time it has written this many pages.
44 */
45#define MAX_WRITEBACK_PAGES 1024
46
47/*
48 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited 39 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
49 * will look to see if it needs to force writeback or throttling. 40 * will look to see if it needs to force writeback or throttling.
50 */ 41 */
@@ -117,8 +108,6 @@ EXPORT_SYMBOL(laptop_mode);
117/* End of sysctl-exported parameters */ 108/* End of sysctl-exported parameters */
118 109
119 110
120static void background_writeout(unsigned long _min_pages);
121
122/* 111/*
123 * Scale the writeback cache size proportional to the relative writeout speeds. 112 * Scale the writeback cache size proportional to the relative writeout speeds.
124 * 113 *
@@ -320,15 +309,13 @@ static void task_dirty_limit(struct task_struct *tsk, unsigned long *pdirty)
320/* 309/*
321 * 310 *
322 */ 311 */
323static DEFINE_SPINLOCK(bdi_lock);
324static unsigned int bdi_min_ratio; 312static unsigned int bdi_min_ratio;
325 313
326int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio) 314int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
327{ 315{
328 int ret = 0; 316 int ret = 0;
329 unsigned long flags;
330 317
331 spin_lock_irqsave(&bdi_lock, flags); 318 spin_lock(&bdi_lock);
332 if (min_ratio > bdi->max_ratio) { 319 if (min_ratio > bdi->max_ratio) {
333 ret = -EINVAL; 320 ret = -EINVAL;
334 } else { 321 } else {
@@ -340,27 +327,26 @@ int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
340 ret = -EINVAL; 327 ret = -EINVAL;
341 } 328 }
342 } 329 }
343 spin_unlock_irqrestore(&bdi_lock, flags); 330 spin_unlock(&bdi_lock);
344 331
345 return ret; 332 return ret;
346} 333}
347 334
348int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio) 335int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
349{ 336{
350 unsigned long flags;
351 int ret = 0; 337 int ret = 0;
352 338
353 if (max_ratio > 100) 339 if (max_ratio > 100)
354 return -EINVAL; 340 return -EINVAL;
355 341
356 spin_lock_irqsave(&bdi_lock, flags); 342 spin_lock(&bdi_lock);
357 if (bdi->min_ratio > max_ratio) { 343 if (bdi->min_ratio > max_ratio) {
358 ret = -EINVAL; 344 ret = -EINVAL;
359 } else { 345 } else {
360 bdi->max_ratio = max_ratio; 346 bdi->max_ratio = max_ratio;
361 bdi->max_prop_frac = (PROP_FRAC_BASE * max_ratio) / 100; 347 bdi->max_prop_frac = (PROP_FRAC_BASE * max_ratio) / 100;
362 } 348 }
363 spin_unlock_irqrestore(&bdi_lock, flags); 349 spin_unlock(&bdi_lock);
364 350
365 return ret; 351 return ret;
366} 352}
@@ -546,7 +532,7 @@ static void balance_dirty_pages(struct address_space *mapping)
546 * up. 532 * up.
547 */ 533 */
548 if (bdi_nr_reclaimable > bdi_thresh) { 534 if (bdi_nr_reclaimable > bdi_thresh) {
549 writeback_inodes(&wbc); 535 writeback_inodes_wbc(&wbc);
550 pages_written += write_chunk - wbc.nr_to_write; 536 pages_written += write_chunk - wbc.nr_to_write;
551 get_dirty_limits(&background_thresh, &dirty_thresh, 537 get_dirty_limits(&background_thresh, &dirty_thresh,
552 &bdi_thresh, bdi); 538 &bdi_thresh, bdi);
@@ -575,7 +561,7 @@ static void balance_dirty_pages(struct address_space *mapping)
575 if (pages_written >= write_chunk) 561 if (pages_written >= write_chunk)
576 break; /* We've done our duty */ 562 break; /* We've done our duty */
577 563
578 congestion_wait(BLK_RW_ASYNC, HZ/10); 564 schedule_timeout(1);
579 } 565 }
580 566
581 if (bdi_nr_reclaimable + bdi_nr_writeback < bdi_thresh && 567 if (bdi_nr_reclaimable + bdi_nr_writeback < bdi_thresh &&
@@ -594,10 +580,18 @@ static void balance_dirty_pages(struct address_space *mapping)
594 * background_thresh, to keep the amount of dirty memory low. 580 * background_thresh, to keep the amount of dirty memory low.
595 */ 581 */
596 if ((laptop_mode && pages_written) || 582 if ((laptop_mode && pages_written) ||
597 (!laptop_mode && (global_page_state(NR_FILE_DIRTY) 583 (!laptop_mode && ((nr_writeback = global_page_state(NR_FILE_DIRTY)
598 + global_page_state(NR_UNSTABLE_NFS) 584 + global_page_state(NR_UNSTABLE_NFS))
599 > background_thresh))) 585 > background_thresh))) {
600 pdflush_operation(background_writeout, 0); 586 struct writeback_control wbc = {
587 .bdi = bdi,
588 .sync_mode = WB_SYNC_NONE,
589 .nr_to_write = nr_writeback,
590 };
591
592
593 bdi_start_writeback(&wbc);
594 }
601} 595}
602 596
603void set_page_dirty_balance(struct page *page, int page_mkwrite) 597void set_page_dirty_balance(struct page *page, int page_mkwrite)
@@ -681,153 +675,35 @@ void throttle_vm_writeout(gfp_t gfp_mask)
681 } 675 }
682} 676}
683 677
684/*
685 * writeback at least _min_pages, and keep writing until the amount of dirty
686 * memory is less than the background threshold, or until we're all clean.
687 */
688static void background_writeout(unsigned long _min_pages)
689{
690 long min_pages = _min_pages;
691 struct writeback_control wbc = {
692 .bdi = NULL,
693 .sync_mode = WB_SYNC_NONE,
694 .older_than_this = NULL,
695 .nr_to_write = 0,
696 .nonblocking = 1,
697 .range_cyclic = 1,
698 };
699
700 for ( ; ; ) {
701 unsigned long background_thresh;
702 unsigned long dirty_thresh;
703
704 get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);
705 if (global_page_state(NR_FILE_DIRTY) +
706 global_page_state(NR_UNSTABLE_NFS) < background_thresh
707 && min_pages <= 0)
708 break;
709 wbc.more_io = 0;
710 wbc.encountered_congestion = 0;
711 wbc.nr_to_write = MAX_WRITEBACK_PAGES;
712 wbc.pages_skipped = 0;
713 writeback_inodes(&wbc);
714 min_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
715 if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
716 /* Wrote less than expected */
717 if (wbc.encountered_congestion || wbc.more_io)
718 congestion_wait(BLK_RW_ASYNC, HZ/10);
719 else
720 break;
721 }
722 }
723}
724
725/*
726 * Start writeback of `nr_pages' pages. If `nr_pages' is zero, write back
727 * the whole world. Returns 0 if a pdflush thread was dispatched. Returns
728 * -1 if all pdflush threads were busy.
729 */
730int wakeup_pdflush(long nr_pages)
731{
732 if (nr_pages == 0)
733 nr_pages = global_page_state(NR_FILE_DIRTY) +
734 global_page_state(NR_UNSTABLE_NFS);
735 return pdflush_operation(background_writeout, nr_pages);
736}
737
738static void wb_timer_fn(unsigned long unused);
739static void laptop_timer_fn(unsigned long unused); 678static void laptop_timer_fn(unsigned long unused);
740 679
741static DEFINE_TIMER(wb_timer, wb_timer_fn, 0, 0);
742static DEFINE_TIMER(laptop_mode_wb_timer, laptop_timer_fn, 0, 0); 680static DEFINE_TIMER(laptop_mode_wb_timer, laptop_timer_fn, 0, 0);
743 681
744/* 682/*
745 * Periodic writeback of "old" data.
746 *
747 * Define "old": the first time one of an inode's pages is dirtied, we mark the
748 * dirtying-time in the inode's address_space. So this periodic writeback code
749 * just walks the superblock inode list, writing back any inodes which are
750 * older than a specific point in time.
751 *
752 * Try to run once per dirty_writeback_interval. But if a writeback event
753 * takes longer than a dirty_writeback_interval interval, then leave a
754 * one-second gap.
755 *
756 * older_than_this takes precedence over nr_to_write. So we'll only write back
757 * all dirty pages if they are all attached to "old" mappings.
758 */
759static void wb_kupdate(unsigned long arg)
760{
761 unsigned long oldest_jif;
762 unsigned long start_jif;
763 unsigned long next_jif;
764 long nr_to_write;
765 struct writeback_control wbc = {
766 .bdi = NULL,
767 .sync_mode = WB_SYNC_NONE,
768 .older_than_this = &oldest_jif,
769 .nr_to_write = 0,
770 .nonblocking = 1,
771 .for_kupdate = 1,
772 .range_cyclic = 1,
773 };
774
775 sync_supers();
776
777 oldest_jif = jiffies - msecs_to_jiffies(dirty_expire_interval * 10);
778 start_jif = jiffies;
779 next_jif = start_jif + msecs_to_jiffies(dirty_writeback_interval * 10);
780 nr_to_write = global_page_state(NR_FILE_DIRTY) +
781 global_page_state(NR_UNSTABLE_NFS) +
782 (inodes_stat.nr_inodes - inodes_stat.nr_unused);
783 while (nr_to_write > 0) {
784 wbc.more_io = 0;
785 wbc.encountered_congestion = 0;
786 wbc.nr_to_write = MAX_WRITEBACK_PAGES;
787 writeback_inodes(&wbc);
788 if (wbc.nr_to_write > 0) {
789 if (wbc.encountered_congestion || wbc.more_io)
790 congestion_wait(BLK_RW_ASYNC, HZ/10);
791 else
792 break; /* All the old data is written */
793 }
794 nr_to_write -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
795 }
796 if (time_before(next_jif, jiffies + HZ))
797 next_jif = jiffies + HZ;
798 if (dirty_writeback_interval)
799 mod_timer(&wb_timer, next_jif);
800}
801
802/*
803 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs 683 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
804 */ 684 */
805int dirty_writeback_centisecs_handler(ctl_table *table, int write, 685int dirty_writeback_centisecs_handler(ctl_table *table, int write,
806 struct file *file, void __user *buffer, size_t *length, loff_t *ppos) 686 struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
807{ 687{
808 proc_dointvec(table, write, file, buffer, length, ppos); 688 proc_dointvec(table, write, file, buffer, length, ppos);
809 if (dirty_writeback_interval)
810 mod_timer(&wb_timer, jiffies +
811 msecs_to_jiffies(dirty_writeback_interval * 10));
812 else
813 del_timer(&wb_timer);
814 return 0; 689 return 0;
815} 690}
816 691
817static void wb_timer_fn(unsigned long unused) 692static void do_laptop_sync(struct work_struct *work)
818{
819 if (pdflush_operation(wb_kupdate, 0) < 0)
820 mod_timer(&wb_timer, jiffies + HZ); /* delay 1 second */
821}
822
823static void laptop_flush(unsigned long unused)
824{ 693{
825 sys_sync(); 694 wakeup_flusher_threads(0);
695 kfree(work);
826} 696}
827 697
828static void laptop_timer_fn(unsigned long unused) 698static void laptop_timer_fn(unsigned long unused)
829{ 699{
830 pdflush_operation(laptop_flush, 0); 700 struct work_struct *work;
701
702 work = kmalloc(sizeof(*work), GFP_ATOMIC);
703 if (work) {
704 INIT_WORK(work, do_laptop_sync);
705 schedule_work(work);
706 }
831} 707}
832 708
833/* 709/*
@@ -910,8 +786,6 @@ void __init page_writeback_init(void)
910{ 786{
911 int shift; 787 int shift;
912 788
913 mod_timer(&wb_timer,
914 jiffies + msecs_to_jiffies(dirty_writeback_interval * 10));
915 writeback_set_ratelimit(); 789 writeback_set_ratelimit();
916 register_cpu_notifier(&ratelimit_nb); 790 register_cpu_notifier(&ratelimit_nb);
917 791
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5cc986eb9f6f..a0de15f46987 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -817,13 +817,15 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
817 * agressive about taking ownership of free pages 817 * agressive about taking ownership of free pages
818 */ 818 */
819 if (unlikely(current_order >= (pageblock_order >> 1)) || 819 if (unlikely(current_order >= (pageblock_order >> 1)) ||
820 start_migratetype == MIGRATE_RECLAIMABLE) { 820 start_migratetype == MIGRATE_RECLAIMABLE ||
821 page_group_by_mobility_disabled) {
821 unsigned long pages; 822 unsigned long pages;
822 pages = move_freepages_block(zone, page, 823 pages = move_freepages_block(zone, page,
823 start_migratetype); 824 start_migratetype);
824 825
825 /* Claim the whole block if over half of it is free */ 826 /* Claim the whole block if over half of it is free */
826 if (pages >= (1 << (pageblock_order-1))) 827 if (pages >= (1 << (pageblock_order-1)) ||
828 page_group_by_mobility_disabled)
827 set_pageblock_migratetype(page, 829 set_pageblock_migratetype(page,
828 start_migratetype); 830 start_migratetype);
829 831
diff --git a/mm/pdflush.c b/mm/pdflush.c
deleted file mode 100644
index 235ac440c44e..000000000000
--- a/mm/pdflush.c
+++ /dev/null
@@ -1,269 +0,0 @@
1/*
2 * mm/pdflush.c - worker threads for writing back filesystem data
3 *
4 * Copyright (C) 2002, Linus Torvalds.
5 *
6 * 09Apr2002 Andrew Morton
7 * Initial version
8 * 29Feb2004 kaos@sgi.com
9 * Move worker thread creation to kthread to avoid chewing
10 * up stack space with nested calls to kernel_thread.
11 */
12
13#include <linux/sched.h>
14#include <linux/list.h>
15#include <linux/signal.h>
16#include <linux/spinlock.h>
17#include <linux/gfp.h>
18#include <linux/init.h>
19#include <linux/module.h>
20#include <linux/fs.h> /* Needed by writeback.h */
21#include <linux/writeback.h> /* Prototypes pdflush_operation() */
22#include <linux/kthread.h>
23#include <linux/cpuset.h>
24#include <linux/freezer.h>
25
26
27/*
28 * Minimum and maximum number of pdflush instances
29 */
30#define MIN_PDFLUSH_THREADS 2
31#define MAX_PDFLUSH_THREADS 8
32
33static void start_one_pdflush_thread(void);
34
35
36/*
37 * The pdflush threads are worker threads for writing back dirty data.
38 * Ideally, we'd like one thread per active disk spindle. But the disk
39 * topology is very hard to divine at this level. Instead, we take
40 * care in various places to prevent more than one pdflush thread from
41 * performing writeback against a single filesystem. pdflush threads
42 * have the PF_FLUSHER flag set in current->flags to aid in this.
43 */
44
45/*
46 * All the pdflush threads. Protected by pdflush_lock
47 */
48static LIST_HEAD(pdflush_list);
49static DEFINE_SPINLOCK(pdflush_lock);
50
51/*
52 * The count of currently-running pdflush threads. Protected
53 * by pdflush_lock.
54 *
55 * Readable by sysctl, but not writable. Published to userspace at
56 * /proc/sys/vm/nr_pdflush_threads.
57 */
58int nr_pdflush_threads = 0;
59
60/*
61 * The time at which the pdflush thread pool last went empty
62 */
63static unsigned long last_empty_jifs;
64
65/*
66 * The pdflush thread.
67 *
68 * Thread pool management algorithm:
69 *
70 * - The minimum and maximum number of pdflush instances are bound
71 * by MIN_PDFLUSH_THREADS and MAX_PDFLUSH_THREADS.
72 *
73 * - If there have been no idle pdflush instances for 1 second, create
74 * a new one.
75 *
76 * - If the least-recently-went-to-sleep pdflush thread has been asleep
77 * for more than one second, terminate a thread.
78 */
79
80/*
81 * A structure for passing work to a pdflush thread. Also for passing
82 * state information between pdflush threads. Protected by pdflush_lock.
83 */
84struct pdflush_work {
85 struct task_struct *who; /* The thread */
86 void (*fn)(unsigned long); /* A callback function */
87 unsigned long arg0; /* An argument to the callback */
88 struct list_head list; /* On pdflush_list, when idle */
89 unsigned long when_i_went_to_sleep;
90};
91
92static int __pdflush(struct pdflush_work *my_work)
93{
94 current->flags |= PF_FLUSHER | PF_SWAPWRITE;
95 set_freezable();
96 my_work->fn = NULL;
97 my_work->who = current;
98 INIT_LIST_HEAD(&my_work->list);
99
100 spin_lock_irq(&pdflush_lock);
101 for ( ; ; ) {
102 struct pdflush_work *pdf;
103
104 set_current_state(TASK_INTERRUPTIBLE);
105 list_move(&my_work->list, &pdflush_list);
106 my_work->when_i_went_to_sleep = jiffies;
107 spin_unlock_irq(&pdflush_lock);
108 schedule();
109 try_to_freeze();
110 spin_lock_irq(&pdflush_lock);
111 if (!list_empty(&my_work->list)) {
112 /*
113 * Someone woke us up, but without removing our control
114 * structure from the global list. swsusp will do this
115 * in try_to_freeze()->refrigerator(). Handle it.
116 */
117 my_work->fn = NULL;
118 continue;
119 }
120 if (my_work->fn == NULL) {
121 printk("pdflush: bogus wakeup\n");
122 continue;
123 }
124 spin_unlock_irq(&pdflush_lock);
125
126 (*my_work->fn)(my_work->arg0);
127
128 spin_lock_irq(&pdflush_lock);
129
130 /*
131 * Thread creation: For how long have there been zero
132 * available threads?
133 *
134 * To throttle creation, we reset last_empty_jifs.
135 */
136 if (time_after(jiffies, last_empty_jifs + 1 * HZ)) {
137 if (list_empty(&pdflush_list)) {
138 if (nr_pdflush_threads < MAX_PDFLUSH_THREADS) {
139 last_empty_jifs = jiffies;
140 nr_pdflush_threads++;
141 spin_unlock_irq(&pdflush_lock);
142 start_one_pdflush_thread();
143 spin_lock_irq(&pdflush_lock);
144 }
145 }
146 }
147
148 my_work->fn = NULL;
149
150 /*
151 * Thread destruction: For how long has the sleepiest
152 * thread slept?
153 */
154 if (list_empty(&pdflush_list))
155 continue;
156 if (nr_pdflush_threads <= MIN_PDFLUSH_THREADS)
157 continue;
158 pdf = list_entry(pdflush_list.prev, struct pdflush_work, list);
159 if (time_after(jiffies, pdf->when_i_went_to_sleep + 1 * HZ)) {
160 /* Limit exit rate */
161 pdf->when_i_went_to_sleep = jiffies;
162 break; /* exeunt */
163 }
164 }
165 nr_pdflush_threads--;
166 spin_unlock_irq(&pdflush_lock);
167 return 0;
168}
169
170/*
171 * Of course, my_work wants to be just a local in __pdflush(). It is
172 * separated out in this manner to hopefully prevent the compiler from
173 * performing unfortunate optimisations against the auto variables. Because
174 * these are visible to other tasks and CPUs. (No problem has actually
175 * been observed. This is just paranoia).
176 */
177static int pdflush(void *dummy)
178{
179 struct pdflush_work my_work;
180 cpumask_var_t cpus_allowed;
181
182 /*
183 * Since the caller doesn't even check kthread_run() worked, let's not
184 * freak out too much if this fails.
185 */
186 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
187 printk(KERN_WARNING "pdflush failed to allocate cpumask\n");
188 return 0;
189 }
190
191 /*
192 * pdflush can spend a lot of time doing encryption via dm-crypt. We
193 * don't want to do that at keventd's priority.
194 */
195 set_user_nice(current, 0);
196
197 /*
198 * Some configs put our parent kthread in a limited cpuset,
199 * which kthread() overrides, forcing cpus_allowed == cpu_all_mask.
200 * Our needs are more modest - cut back to our cpusets cpus_allowed.
201 * This is needed as pdflush's are dynamically created and destroyed.
202 * The boottime pdflush's are easily placed w/o these 2 lines.
203 */
204 cpuset_cpus_allowed(current, cpus_allowed);
205 set_cpus_allowed_ptr(current, cpus_allowed);
206 free_cpumask_var(cpus_allowed);
207
208 return __pdflush(&my_work);
209}
210
211/*
212 * Attempt to wake up a pdflush thread, and get it to do some work for you.
213 * Returns zero if it indeed managed to find a worker thread, and passed your
214 * payload to it.
215 */
216int pdflush_operation(void (*fn)(unsigned long), unsigned long arg0)
217{
218 unsigned long flags;
219 int ret = 0;
220
221 BUG_ON(fn == NULL); /* Hard to diagnose if it's deferred */
222
223 spin_lock_irqsave(&pdflush_lock, flags);
224 if (list_empty(&pdflush_list)) {
225 ret = -1;
226 } else {
227 struct pdflush_work *pdf;
228
229 pdf = list_entry(pdflush_list.next, struct pdflush_work, list);
230 list_del_init(&pdf->list);
231 if (list_empty(&pdflush_list))
232 last_empty_jifs = jiffies;
233 pdf->fn = fn;
234 pdf->arg0 = arg0;
235 wake_up_process(pdf->who);
236 }
237 spin_unlock_irqrestore(&pdflush_lock, flags);
238
239 return ret;
240}
241
242static void start_one_pdflush_thread(void)
243{
244 struct task_struct *k;
245
246 k = kthread_run(pdflush, NULL, "pdflush");
247 if (unlikely(IS_ERR(k))) {
248 spin_lock_irq(&pdflush_lock);
249 nr_pdflush_threads--;
250 spin_unlock_irq(&pdflush_lock);
251 }
252}
253
254static int __init pdflush_init(void)
255{
256 int i;
257
258 /*
259 * Pre-set nr_pdflush_threads... If we fail to create,
260 * the count will be decremented.
261 */
262 nr_pdflush_threads = MIN_PDFLUSH_THREADS;
263
264 for (i = 0; i < MIN_PDFLUSH_THREADS; i++)
265 start_one_pdflush_thread();
266 return 0;
267}
268
269module_init(pdflush_init);
diff --git a/mm/percpu.c b/mm/percpu.c
index 5fe37842e0ea..3311c8919f37 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -197,7 +197,12 @@ static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
197static bool pcpu_chunk_page_occupied(struct pcpu_chunk *chunk, 197static bool pcpu_chunk_page_occupied(struct pcpu_chunk *chunk,
198 int page_idx) 198 int page_idx)
199{ 199{
200 return *pcpu_chunk_pagep(chunk, 0, page_idx) != NULL; 200 /*
201 * Any possible cpu id can be used here, so there's no need to
202 * worry about preemption or cpu hotplug.
203 */
204 return *pcpu_chunk_pagep(chunk, raw_smp_processor_id(),
205 page_idx) != NULL;
201} 206}
202 207
203/* set the pointer to a chunk in a page struct */ 208/* set the pointer to a chunk in a page struct */
@@ -297,6 +302,14 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
297 return pcpu_first_chunk; 302 return pcpu_first_chunk;
298 } 303 }
299 304
305 /*
306 * The address is relative to unit0 which might be unused and
307 * thus unmapped. Offset the address to the unit space of the
308 * current processor before looking it up in the vmalloc
309 * space. Note that any possible cpu id can be used here, so
310 * there's no need to worry about preemption or cpu hotplug.
311 */
312 addr += raw_smp_processor_id() * pcpu_unit_size;
300 return pcpu_get_page_chunk(vmalloc_to_page(addr)); 313 return pcpu_get_page_chunk(vmalloc_to_page(addr));
301} 314}
302 315
diff --git a/mm/shmem.c b/mm/shmem.c
index d713239ce2ce..5a0b3d4055f3 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2446,7 +2446,7 @@ static const struct inode_operations shmem_inode_operations = {
2446 .getxattr = generic_getxattr, 2446 .getxattr = generic_getxattr,
2447 .listxattr = generic_listxattr, 2447 .listxattr = generic_listxattr,
2448 .removexattr = generic_removexattr, 2448 .removexattr = generic_removexattr,
2449 .permission = shmem_permission, 2449 .check_acl = shmem_check_acl,
2450#endif 2450#endif
2451 2451
2452}; 2452};
@@ -2469,7 +2469,7 @@ static const struct inode_operations shmem_dir_inode_operations = {
2469 .getxattr = generic_getxattr, 2469 .getxattr = generic_getxattr,
2470 .listxattr = generic_listxattr, 2470 .listxattr = generic_listxattr,
2471 .removexattr = generic_removexattr, 2471 .removexattr = generic_removexattr,
2472 .permission = shmem_permission, 2472 .check_acl = shmem_check_acl,
2473#endif 2473#endif
2474}; 2474};
2475 2475
@@ -2480,7 +2480,7 @@ static const struct inode_operations shmem_special_inode_operations = {
2480 .getxattr = generic_getxattr, 2480 .getxattr = generic_getxattr,
2481 .listxattr = generic_listxattr, 2481 .listxattr = generic_listxattr,
2482 .removexattr = generic_removexattr, 2482 .removexattr = generic_removexattr,
2483 .permission = shmem_permission, 2483 .check_acl = shmem_check_acl,
2484#endif 2484#endif
2485}; 2485};
2486 2486
diff --git a/mm/shmem_acl.c b/mm/shmem_acl.c
index 606a8e757a42..df2c87fdae50 100644
--- a/mm/shmem_acl.c
+++ b/mm/shmem_acl.c
@@ -157,7 +157,7 @@ shmem_acl_init(struct inode *inode, struct inode *dir)
157/** 157/**
158 * shmem_check_acl - check_acl() callback for generic_permission() 158 * shmem_check_acl - check_acl() callback for generic_permission()
159 */ 159 */
160static int 160int
161shmem_check_acl(struct inode *inode, int mask) 161shmem_check_acl(struct inode *inode, int mask)
162{ 162{
163 struct posix_acl *acl = shmem_get_acl(inode, ACL_TYPE_ACCESS); 163 struct posix_acl *acl = shmem_get_acl(inode, ACL_TYPE_ACCESS);
@@ -169,12 +169,3 @@ shmem_check_acl(struct inode *inode, int mask)
169 } 169 }
170 return -EAGAIN; 170 return -EAGAIN;
171} 171}
172
173/**
174 * shmem_permission - permission() inode operation
175 */
176int
177shmem_permission(struct inode *inode, int mask)
178{
179 return generic_permission(inode, mask, shmem_check_acl);
180}
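
The shmem hunks above drop the shmem_permission() wrapper and plug shmem_check_acl() straight into the inode operations as the check_acl() callback which generic_permission() can call to consult POSIX ACLs. Below is a toy userspace sketch of that callback arrangement; the names and types are invented stand-ins, not the VFS interfaces.

/*
 * Sketch: the filesystem exposes only an ACL check; the generic
 * permission routine calls it when the simplified mode bits do not
 * already grant the request.
 */
#include <stdio.h>

struct inode {
	unsigned int mode;	/* owner permission bits, simplified */
	int acl_allows_write;	/* stand-in for a POSIX ACL lookup */
};

#define MAY_WRITE 0x2

/* Filesystem-specific ACL check, analogous to shmem_check_acl(). */
static int demo_check_acl(struct inode *inode, int mask)
{
	if ((mask & MAY_WRITE) && inode->acl_allows_write)
		return 0;
	return -1;		/* -EACCES stand-in */
}

/* Generic permission check that falls back to the ACL callback. */
static int demo_generic_permission(struct inode *inode, int mask,
				   int (*check_acl)(struct inode *, int))
{
	if ((inode->mode & mask) == mask)
		return 0;	/* mode bits already grant it */
	if (check_acl)
		return check_acl(inode, mask);
	return -1;
}

int main(void)
{
	struct inode ino = { .mode = 0, .acl_allows_write = 1 };

	printf("write allowed: %s\n",
	       demo_generic_permission(&ino, MAY_WRITE, demo_check_acl) == 0 ?
	       "yes" : "no");
	return 0;
}
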
diff --git a/mm/slub.c b/mm/slub.c
index b9f1491a58a1..b6276753626e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2594,8 +2594,6 @@ static inline int kmem_cache_close(struct kmem_cache *s)
2594 */ 2594 */
2595void kmem_cache_destroy(struct kmem_cache *s) 2595void kmem_cache_destroy(struct kmem_cache *s)
2596{ 2596{
2597 if (s->flags & SLAB_DESTROY_BY_RCU)
2598 rcu_barrier();
2599 down_write(&slub_lock); 2597 down_write(&slub_lock);
2600 s->refcount--; 2598 s->refcount--;
2601 if (!s->refcount) { 2599 if (!s->refcount) {
@@ -2606,6 +2604,8 @@ void kmem_cache_destroy(struct kmem_cache *s)
2606 "still has objects.\n", s->name, __func__); 2604 "still has objects.\n", s->name, __func__);
2607 dump_stack(); 2605 dump_stack();
2608 } 2606 }
2607 if (s->flags & SLAB_DESTROY_BY_RCU)
2608 rcu_barrier();
2609 sysfs_slab_remove(s); 2609 sysfs_slab_remove(s);
2610 } else 2610 } else
2611 up_write(&slub_lock); 2611 up_write(&slub_lock);
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 42cd38eba79f..5ae6b8b78c80 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -34,6 +34,7 @@ static const struct address_space_operations swap_aops = {
34}; 34};
35 35
36static struct backing_dev_info swap_backing_dev_info = { 36static struct backing_dev_info swap_backing_dev_info = {
37 .name = "swap",
37 .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED, 38 .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
38 .unplug_io_fn = swap_unplug_io_fn, 39 .unplug_io_fn = swap_unplug_io_fn,
39}; 40};
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 94e86dd6954c..ba8228e0a806 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1720,7 +1720,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
1720 */ 1720 */
1721 if (total_scanned > sc->swap_cluster_max + 1721 if (total_scanned > sc->swap_cluster_max +
1722 sc->swap_cluster_max / 2) { 1722 sc->swap_cluster_max / 2) {
1723 wakeup_pdflush(laptop_mode ? 0 : total_scanned); 1723 wakeup_flusher_threads(laptop_mode ? 0 : total_scanned);
1724 sc->may_writepage = 1; 1724 sc->may_writepage = 1;
1725 } 1725 }
1726 1726
diff --git a/net/core/dev.c b/net/core/dev.c
index 6a94475aee85..278d489aad3b 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1031,7 +1031,7 @@ void dev_load(struct net *net, const char *name)
1031 dev = __dev_get_by_name(net, name); 1031 dev = __dev_get_by_name(net, name);
1032 read_unlock(&dev_base_lock); 1032 read_unlock(&dev_base_lock);
1033 1033
1034 if (!dev && capable(CAP_SYS_MODULE)) 1034 if (!dev && capable(CAP_NET_ADMIN))
1035 request_module("%s", name); 1035 request_module("%s", name);
1036} 1036}
1037 1037
diff --git a/net/core/sock.c b/net/core/sock.c
index bbb25be7ddfe..76334228ed1c 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1025,6 +1025,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
1025 sk->sk_prot = sk->sk_prot_creator = prot; 1025 sk->sk_prot = sk->sk_prot_creator = prot;
1026 sock_lock_init(sk); 1026 sock_lock_init(sk);
1027 sock_net_set(sk, get_net(net)); 1027 sock_net_set(sk, get_net(net));
1028 atomic_set(&sk->sk_wmem_alloc, 1);
1028 } 1029 }
1029 1030
1030 return sk; 1031 return sk;
@@ -1872,7 +1873,6 @@ void sock_init_data(struct socket *sock, struct sock *sk)
1872 */ 1873 */
1873 smp_wmb(); 1874 smp_wmb();
1874 atomic_set(&sk->sk_refcnt, 1); 1875 atomic_set(&sk->sk_refcnt, 1);
1875 atomic_set(&sk->sk_wmem_alloc, 1);
1876 atomic_set(&sk->sk_drops, 0); 1876 atomic_set(&sk->sk_drops, 0);
1877} 1877}
1878EXPORT_SYMBOL(sock_init_data); 1878EXPORT_SYMBOL(sock_init_data);
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index e92beb9e55e0..6428b342b164 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -116,7 +116,7 @@ int tcp_set_default_congestion_control(const char *name)
116 spin_lock(&tcp_cong_list_lock); 116 spin_lock(&tcp_cong_list_lock);
117 ca = tcp_ca_find(name); 117 ca = tcp_ca_find(name);
118#ifdef CONFIG_MODULES 118#ifdef CONFIG_MODULES
119 if (!ca && capable(CAP_SYS_MODULE)) { 119 if (!ca && capable(CAP_NET_ADMIN)) {
120 spin_unlock(&tcp_cong_list_lock); 120 spin_unlock(&tcp_cong_list_lock);
121 121
122 request_module("tcp_%s", name); 122 request_module("tcp_%s", name);
@@ -246,7 +246,7 @@ int tcp_set_congestion_control(struct sock *sk, const char *name)
246 246
247#ifdef CONFIG_MODULES 247#ifdef CONFIG_MODULES
248 /* not found attempt to autoload module */ 248 /* not found attempt to autoload module */
249 if (!ca && capable(CAP_SYS_MODULE)) { 249 if (!ca && capable(CAP_NET_ADMIN)) {
250 rcu_read_unlock(); 250 rcu_read_unlock();
251 request_module("tcp_%s", name); 251 request_module("tcp_%s", name);
252 rcu_read_lock(); 252 rcu_read_lock();
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 92e6f3a52c13..fdb694e9f759 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -458,7 +458,7 @@ EXPORT_SYMBOL(qdisc_warn_nonwc);
458static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer) 458static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
459{ 459{
460 struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog, 460 struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
461 timer.timer); 461 timer);
462 462
463 wd->qdisc->flags &= ~TCQ_F_THROTTLED; 463 wd->qdisc->flags &= ~TCQ_F_THROTTLED;
464 __netif_schedule(qdisc_root(wd->qdisc)); 464 __netif_schedule(qdisc_root(wd->qdisc));
@@ -468,8 +468,8 @@ static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
468 468
469void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc) 469void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
470{ 470{
471 tasklet_hrtimer_init(&wd->timer, qdisc_watchdog, 471 hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
472 CLOCK_MONOTONIC, HRTIMER_MODE_ABS); 472 wd->timer.function = qdisc_watchdog;
473 wd->qdisc = qdisc; 473 wd->qdisc = qdisc;
474} 474}
475EXPORT_SYMBOL(qdisc_watchdog_init); 475EXPORT_SYMBOL(qdisc_watchdog_init);
@@ -485,13 +485,13 @@ void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires)
485 wd->qdisc->flags |= TCQ_F_THROTTLED; 485 wd->qdisc->flags |= TCQ_F_THROTTLED;
486 time = ktime_set(0, 0); 486 time = ktime_set(0, 0);
487 time = ktime_add_ns(time, PSCHED_TICKS2NS(expires)); 487 time = ktime_add_ns(time, PSCHED_TICKS2NS(expires));
488 tasklet_hrtimer_start(&wd->timer, time, HRTIMER_MODE_ABS); 488 hrtimer_start(&wd->timer, time, HRTIMER_MODE_ABS);
489} 489}
490EXPORT_SYMBOL(qdisc_watchdog_schedule); 490EXPORT_SYMBOL(qdisc_watchdog_schedule);
491 491
492void qdisc_watchdog_cancel(struct qdisc_watchdog *wd) 492void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
493{ 493{
494 tasklet_hrtimer_cancel(&wd->timer); 494 hrtimer_cancel(&wd->timer);
495 wd->qdisc->flags &= ~TCQ_F_THROTTLED; 495 wd->qdisc->flags &= ~TCQ_F_THROTTLED;
496} 496}
497EXPORT_SYMBOL(qdisc_watchdog_cancel); 497EXPORT_SYMBOL(qdisc_watchdog_cancel);
@@ -1456,6 +1456,8 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
1456 nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags); 1456 nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
1457 tcm = NLMSG_DATA(nlh); 1457 tcm = NLMSG_DATA(nlh);
1458 tcm->tcm_family = AF_UNSPEC; 1458 tcm->tcm_family = AF_UNSPEC;
1459 tcm->tcm__pad1 = 0;
1460 tcm->tcm__pad2 = 0;
1459 tcm->tcm_ifindex = qdisc_dev(q)->ifindex; 1461 tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1460 tcm->tcm_parent = q->handle; 1462 tcm->tcm_parent = q->handle;
1461 tcm->tcm_handle = q->handle; 1463 tcm->tcm_handle = q->handle;
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 149b0405c5ec..d5798e17a832 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -163,7 +163,7 @@ struct cbq_sched_data
163 psched_time_t now_rt; /* Cached real time */ 163 psched_time_t now_rt; /* Cached real time */
164 unsigned pmask; 164 unsigned pmask;
165 165
166 struct tasklet_hrtimer delay_timer; 166 struct hrtimer delay_timer;
167 struct qdisc_watchdog watchdog; /* Watchdog timer, 167 struct qdisc_watchdog watchdog; /* Watchdog timer,
168 started when CBQ has 168 started when CBQ has
169 backlog, but cannot 169 backlog, but cannot
@@ -503,8 +503,6 @@ static void cbq_ovl_delay(struct cbq_class *cl)
503 cl->undertime = q->now + delay; 503 cl->undertime = q->now + delay;
504 504
505 if (delay > 0) { 505 if (delay > 0) {
506 struct hrtimer *ht;
507
508 sched += delay + cl->penalty; 506 sched += delay + cl->penalty;
509 cl->penalized = sched; 507 cl->penalized = sched;
510 cl->cpriority = TC_CBQ_MAXPRIO; 508 cl->cpriority = TC_CBQ_MAXPRIO;
@@ -512,12 +510,12 @@ static void cbq_ovl_delay(struct cbq_class *cl)
512 510
513 expires = ktime_set(0, 0); 511 expires = ktime_set(0, 0);
514 expires = ktime_add_ns(expires, PSCHED_TICKS2NS(sched)); 512 expires = ktime_add_ns(expires, PSCHED_TICKS2NS(sched));
515 ht = &q->delay_timer.timer; 513 if (hrtimer_try_to_cancel(&q->delay_timer) &&
516 if (hrtimer_try_to_cancel(ht) && 514 ktime_to_ns(ktime_sub(
517 ktime_to_ns(ktime_sub(hrtimer_get_expires(ht), 515 hrtimer_get_expires(&q->delay_timer),
518 expires)) > 0) 516 expires)) > 0)
519 hrtimer_set_expires(ht, expires); 517 hrtimer_set_expires(&q->delay_timer, expires);
520 hrtimer_restart(ht); 518 hrtimer_restart(&q->delay_timer);
521 cl->delayed = 1; 519 cl->delayed = 1;
522 cl->xstats.overactions++; 520 cl->xstats.overactions++;
523 return; 521 return;
@@ -593,7 +591,7 @@ static psched_tdiff_t cbq_undelay_prio(struct cbq_sched_data *q, int prio,
593static enum hrtimer_restart cbq_undelay(struct hrtimer *timer) 591static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
594{ 592{
595 struct cbq_sched_data *q = container_of(timer, struct cbq_sched_data, 593 struct cbq_sched_data *q = container_of(timer, struct cbq_sched_data,
596 delay_timer.timer); 594 delay_timer);
597 struct Qdisc *sch = q->watchdog.qdisc; 595 struct Qdisc *sch = q->watchdog.qdisc;
598 psched_time_t now; 596 psched_time_t now;
599 psched_tdiff_t delay = 0; 597 psched_tdiff_t delay = 0;
@@ -623,7 +621,7 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
623 621
624 time = ktime_set(0, 0); 622 time = ktime_set(0, 0);
625 time = ktime_add_ns(time, PSCHED_TICKS2NS(now + delay)); 623 time = ktime_add_ns(time, PSCHED_TICKS2NS(now + delay));
626 tasklet_hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS); 624 hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS);
627 } 625 }
628 626
629 sch->flags &= ~TCQ_F_THROTTLED; 627 sch->flags &= ~TCQ_F_THROTTLED;
@@ -1216,7 +1214,7 @@ cbq_reset(struct Qdisc* sch)
1216 q->tx_class = NULL; 1214 q->tx_class = NULL;
1217 q->tx_borrowed = NULL; 1215 q->tx_borrowed = NULL;
1218 qdisc_watchdog_cancel(&q->watchdog); 1216 qdisc_watchdog_cancel(&q->watchdog);
1219 tasklet_hrtimer_cancel(&q->delay_timer); 1217 hrtimer_cancel(&q->delay_timer);
1220 q->toplevel = TC_CBQ_MAXLEVEL; 1218 q->toplevel = TC_CBQ_MAXLEVEL;
1221 q->now = psched_get_time(); 1219 q->now = psched_get_time();
1222 q->now_rt = q->now; 1220 q->now_rt = q->now;
@@ -1399,8 +1397,7 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
1399 q->link.minidle = -0x7FFFFFFF; 1397 q->link.minidle = -0x7FFFFFFF;
1400 1398
1401 qdisc_watchdog_init(&q->watchdog, sch); 1399 qdisc_watchdog_init(&q->watchdog, sch);
1402 tasklet_hrtimer_init(&q->delay_timer, cbq_undelay, 1400 hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
1403 CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
1404 q->delay_timer.function = cbq_undelay; 1401 q->delay_timer.function = cbq_undelay;
1405 q->toplevel = TC_CBQ_MAXLEVEL; 1402 q->toplevel = TC_CBQ_MAXLEVEL;
1406 q->now = psched_get_time(); 1403 q->now = psched_get_time();
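
The sch_api.c and sch_cbq.c hunks above convert tasklet_hrtimer users to plain hrtimers: the timer stays embedded in the owning object, its .function is set directly, and the callback recovers the owner with container_of(). Below is a minimal userspace sketch of that embed-and-recover pattern; the struct and function names are invented for illustration and the container_of definition simply mirrors the kernel macro.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct timer {
	long expires;
};

struct my_watchdog {
	struct timer timer;	/* embedded, like wd->timer above */
	const char *owner;	/* per-object state the callback needs */
};

/* The callback receives only a pointer to the embedded member... */
static void watchdog_fired(struct timer *t)
{
	/* ...and walks back to the containing object. */
	struct my_watchdog *wd = container_of(t, struct my_watchdog, timer);

	printf("watchdog for %s fired at %ld\n", wd->owner, t->expires);
}

int main(void)
{
	struct my_watchdog wd = { .timer = { .expires = 100 }, .owner = "eth0" };

	watchdog_fired(&wd.timer);
	return 0;
}
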
diff --git a/net/sunrpc/Makefile b/net/sunrpc/Makefile
index db73fd2a3f0e..9d2fca5ad14a 100644
--- a/net/sunrpc/Makefile
+++ b/net/sunrpc/Makefile
@@ -10,7 +10,7 @@ obj-$(CONFIG_SUNRPC_XPRT_RDMA) += xprtrdma/
10sunrpc-y := clnt.o xprt.o socklib.o xprtsock.o sched.o \ 10sunrpc-y := clnt.o xprt.o socklib.o xprtsock.o sched.o \
11 auth.o auth_null.o auth_unix.o auth_generic.o \ 11 auth.o auth_null.o auth_unix.o auth_generic.o \
12 svc.o svcsock.o svcauth.o svcauth_unix.o \ 12 svc.o svcsock.o svcauth.o svcauth_unix.o \
13 rpcb_clnt.o timer.o xdr.o \ 13 addr.o rpcb_clnt.o timer.o xdr.o \
14 sunrpc_syms.o cache.o rpc_pipe.o \ 14 sunrpc_syms.o cache.o rpc_pipe.o \
15 svc_xprt.o 15 svc_xprt.o
16sunrpc-$(CONFIG_NFS_V4_1) += backchannel_rqst.o bc_svc.o 16sunrpc-$(CONFIG_NFS_V4_1) += backchannel_rqst.o bc_svc.o
diff --git a/net/sunrpc/addr.c b/net/sunrpc/addr.c
new file mode 100644
index 000000000000..22e8fd89477f
--- /dev/null
+++ b/net/sunrpc/addr.c
@@ -0,0 +1,364 @@
1/*
2 * Copyright 2009, Oracle. All rights reserved.
3 *
4 * Convert socket addresses to presentation addresses and universal
5 * addresses, and vice versa.
6 *
7 * Universal addresses are introduced by RFC 1833 and further refined by
8 * recent RFCs describing NFSv4. The universal address format is part
9 * of the external (network) interface provided by rpcbind version 3
10 * and 4, and by NFSv4. Such an address is a string containing a
11 * presentation format IP address followed by a port number in
12 * "hibyte.lobyte" format.
13 *
14 * IPv6 addresses can also include a scope ID, typically denoted by
15 * a '%' followed by a device name or a non-negative integer. Refer to
16 * RFC 4291, Section 2.2 for details on IPv6 presentation formats.
17 */
18
19#include <net/ipv6.h>
20#include <linux/sunrpc/clnt.h>
21
22#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
23
24static size_t rpc_ntop6_noscopeid(const struct sockaddr *sap,
25 char *buf, const int buflen)
26{
27 const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
28 const struct in6_addr *addr = &sin6->sin6_addr;
29
30 /*
31 * RFC 4291, Section 2.2.2
32 *
33 * Shorthanded ANY address
34 */
35 if (ipv6_addr_any(addr))
36 return snprintf(buf, buflen, "::");
37
38 /*
39 * RFC 4291, Section 2.2.2
40 *
41 * Shorthanded loopback address
42 */
43 if (ipv6_addr_loopback(addr))
44 return snprintf(buf, buflen, "::1");
45
46 /*
47 * RFC 4291, Section 2.2.3
48 *
49 * Special presentation address format for mapped v4
50 * addresses.
51 */
52 if (ipv6_addr_v4mapped(addr))
53 return snprintf(buf, buflen, "::ffff:%pI4",
54 &addr->s6_addr32[3]);
55
56 /*
57 * RFC 4291, Section 2.2.1
58 *
59 * To keep the result as short as possible, especially
60 * since we don't shorthand, we don't want leading zeros
61 * in each halfword, so avoid %pI6.
62 */
63 return snprintf(buf, buflen, "%x:%x:%x:%x:%x:%x:%x:%x",
64 ntohs(addr->s6_addr16[0]), ntohs(addr->s6_addr16[1]),
65 ntohs(addr->s6_addr16[2]), ntohs(addr->s6_addr16[3]),
66 ntohs(addr->s6_addr16[4]), ntohs(addr->s6_addr16[5]),
67 ntohs(addr->s6_addr16[6]), ntohs(addr->s6_addr16[7]));
68}
69
70static size_t rpc_ntop6(const struct sockaddr *sap,
71 char *buf, const size_t buflen)
72{
73 const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
74 char scopebuf[IPV6_SCOPE_ID_LEN];
75 size_t len;
76 int rc;
77
78 len = rpc_ntop6_noscopeid(sap, buf, buflen);
79 if (unlikely(len == 0))
80 return len;
81
82 if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) &&
83 !(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_SITELOCAL))
84 return len;
85
86 rc = snprintf(scopebuf, sizeof(scopebuf), "%c%u",
87 IPV6_SCOPE_DELIMITER, sin6->sin6_scope_id);
88 if (unlikely((size_t)rc > sizeof(scopebuf)))
89 return 0;
90
91 len += rc;
92 if (unlikely(len > buflen))
93 return 0;
94
95 strcat(buf, scopebuf);
96 return len;
97}
98
99#else /* !(defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)) */
100
101static size_t rpc_ntop6_noscopeid(const struct sockaddr *sap,
102 char *buf, const int buflen)
103{
104 return 0;
105}
106
107static size_t rpc_ntop6(const struct sockaddr *sap,
108 char *buf, const size_t buflen)
109{
110 return 0;
111}
112
113#endif /* !(defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)) */
114
115static int rpc_ntop4(const struct sockaddr *sap,
116 char *buf, const size_t buflen)
117{
118 const struct sockaddr_in *sin = (struct sockaddr_in *)sap;
119
120 return snprintf(buf, buflen, "%pI4", &sin->sin_addr);
121}
122
123/**
124 * rpc_ntop - construct a presentation address in @buf
125 * @sap: socket address
126 * @buf: construction area
127 * @buflen: size of @buf, in bytes
128 *
129 * Plants a %NUL-terminated string in @buf and returns the length
 130 * of the string, excluding the %NUL. Zero is returned on error.
131 */
132size_t rpc_ntop(const struct sockaddr *sap, char *buf, const size_t buflen)
133{
134 switch (sap->sa_family) {
135 case AF_INET:
136 return rpc_ntop4(sap, buf, buflen);
137 case AF_INET6:
138 return rpc_ntop6(sap, buf, buflen);
139 }
140
141 return 0;
142}
143EXPORT_SYMBOL_GPL(rpc_ntop);
144
145static size_t rpc_pton4(const char *buf, const size_t buflen,
146 struct sockaddr *sap, const size_t salen)
147{
148 struct sockaddr_in *sin = (struct sockaddr_in *)sap;
149 u8 *addr = (u8 *)&sin->sin_addr.s_addr;
150
151 if (buflen > INET_ADDRSTRLEN || salen < sizeof(struct sockaddr_in))
152 return 0;
153
154 memset(sap, 0, sizeof(struct sockaddr_in));
155
156 if (in4_pton(buf, buflen, addr, '\0', NULL) == 0)
157 return 0;
158
159 sin->sin_family = AF_INET;
 160 return sizeof(struct sockaddr_in);
161}
162
163#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
164static int rpc_parse_scope_id(const char *buf, const size_t buflen,
165 const char *delim, struct sockaddr_in6 *sin6)
166{
167 char *p;
168 size_t len;
169
170 if ((buf + buflen) == delim)
171 return 1;
172
173 if (*delim != IPV6_SCOPE_DELIMITER)
174 return 0;
175
176 if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) &&
177 !(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_SITELOCAL))
178 return 0;
179
180 len = (buf + buflen) - delim - 1;
181 p = kstrndup(delim + 1, len, GFP_KERNEL);
182 if (p) {
183 unsigned long scope_id = 0;
184 struct net_device *dev;
185
186 dev = dev_get_by_name(&init_net, p);
187 if (dev != NULL) {
188 scope_id = dev->ifindex;
189 dev_put(dev);
190 } else {
 191 if (strict_strtoul(p, 10, &scope_id) != 0) {
192 kfree(p);
193 return 0;
194 }
195 }
196
197 kfree(p);
198
199 sin6->sin6_scope_id = scope_id;
200 return 1;
201 }
202
203 return 0;
204}
205
206static size_t rpc_pton6(const char *buf, const size_t buflen,
207 struct sockaddr *sap, const size_t salen)
208{
209 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
210 u8 *addr = (u8 *)&sin6->sin6_addr.in6_u;
211 const char *delim;
212
213 if (buflen > (INET6_ADDRSTRLEN + IPV6_SCOPE_ID_LEN) ||
214 salen < sizeof(struct sockaddr_in6))
215 return 0;
216
217 memset(sap, 0, sizeof(struct sockaddr_in6));
218
219 if (in6_pton(buf, buflen, addr, IPV6_SCOPE_DELIMITER, &delim) == 0)
220 return 0;
221
222 if (!rpc_parse_scope_id(buf, buflen, delim, sin6))
223 return 0;
224
225 sin6->sin6_family = AF_INET6;
226 return sizeof(struct sockaddr_in6);
227}
228#else
229static size_t rpc_pton6(const char *buf, const size_t buflen,
230 struct sockaddr *sap, const size_t salen)
231{
232 return 0;
233}
234#endif
235
236/**
237 * rpc_pton - Construct a sockaddr in @sap
238 * @buf: C string containing presentation format IP address
239 * @buflen: length of presentation address in bytes
240 * @sap: buffer into which to plant socket address
241 * @salen: size of buffer in bytes
242 *
 243 * Plants a socket address in @sap and returns the size of the
 244 * socket address, if successful. Zero is returned if an error
 245 * occurred.
 246 *
 247 * Both IPv4 and IPv6 presentation formats are handled; an IPv6
 248 * address may include a trailing scope ID.
249 */
250size_t rpc_pton(const char *buf, const size_t buflen,
251 struct sockaddr *sap, const size_t salen)
252{
253 unsigned int i;
254
255 for (i = 0; i < buflen; i++)
256 if (buf[i] == ':')
257 return rpc_pton6(buf, buflen, sap, salen);
258 return rpc_pton4(buf, buflen, sap, salen);
259}
260EXPORT_SYMBOL_GPL(rpc_pton);
261
262/**
263 * rpc_sockaddr2uaddr - Construct a universal address string from @sap.
264 * @sap: socket address
265 *
 266 * Returns a %NUL-terminated string in dynamically allocated memory,
 267 * or NULL if an error occurred. The caller must free the returned
 268 * string.
269 */
270char *rpc_sockaddr2uaddr(const struct sockaddr *sap)
271{
272 char portbuf[RPCBIND_MAXUADDRPLEN];
273 char addrbuf[RPCBIND_MAXUADDRLEN];
274 unsigned short port;
275
276 switch (sap->sa_family) {
277 case AF_INET:
278 if (rpc_ntop4(sap, addrbuf, sizeof(addrbuf)) == 0)
279 return NULL;
280 port = ntohs(((struct sockaddr_in *)sap)->sin_port);
281 break;
282 case AF_INET6:
283 if (rpc_ntop6_noscopeid(sap, addrbuf, sizeof(addrbuf)) == 0)
284 return NULL;
285 port = ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
286 break;
287 default:
288 return NULL;
289 }
290
291 if (snprintf(portbuf, sizeof(portbuf),
292 ".%u.%u", port >> 8, port & 0xff) > (int)sizeof(portbuf))
293 return NULL;
294
295 if (strlcat(addrbuf, portbuf, sizeof(addrbuf)) > sizeof(addrbuf))
296 return NULL;
297
298 return kstrdup(addrbuf, GFP_KERNEL);
299}
300EXPORT_SYMBOL_GPL(rpc_sockaddr2uaddr);
301
302/**
303 * rpc_uaddr2sockaddr - convert a universal address to a socket address.
304 * @uaddr: C string containing universal address to convert
305 * @uaddr_len: length of universal address string
306 * @sap: buffer into which to plant socket address
307 * @salen: size of buffer
308 *
309 * Returns the size of the socket address if successful; otherwise
310 * zero is returned.
311 */
312size_t rpc_uaddr2sockaddr(const char *uaddr, const size_t uaddr_len,
313 struct sockaddr *sap, const size_t salen)
314{
315 char *c, buf[RPCBIND_MAXUADDRLEN];
316 unsigned long portlo, porthi;
317 unsigned short port;
318
 319 if (uaddr_len + 2 > sizeof(buf))
320 return 0;
321
322 memcpy(buf, uaddr, uaddr_len);
323
324 buf[uaddr_len] = '\n';
325 buf[uaddr_len + 1] = '\0';
326
327 c = strrchr(buf, '.');
328 if (unlikely(c == NULL))
329 return 0;
330 if (unlikely(strict_strtoul(c + 1, 10, &portlo) != 0))
331 return 0;
332 if (unlikely(portlo > 255))
333 return 0;
334
335 c[0] = '\n';
336 c[1] = '\0';
337
338 c = strrchr(buf, '.');
339 if (unlikely(c == NULL))
340 return 0;
341 if (unlikely(strict_strtoul(c + 1, 10, &porthi) != 0))
342 return 0;
343 if (unlikely(porthi > 255))
344 return 0;
345
346 port = (unsigned short)((porthi << 8) | portlo);
347
348 c[0] = '\0';
349
350 if (rpc_pton(buf, strlen(buf), sap, salen) == 0)
351 return 0;
352
353 switch (sap->sa_family) {
354 case AF_INET:
355 ((struct sockaddr_in *)sap)->sin_port = htons(port);
356 return sizeof(struct sockaddr_in);
357 case AF_INET6:
358 ((struct sockaddr_in6 *)sap)->sin6_port = htons(port);
359 return sizeof(struct sockaddr_in6);
360 }
361
362 return 0;
363}
364EXPORT_SYMBOL_GPL(rpc_uaddr2sockaddr);
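
The new net/sunrpc/addr.c above documents the universal address format: a presentation IP address with the port appended as two decimal octets in "hibyte.lobyte" form (rpc_sockaddr2uaddr builds that suffix, rpc_uaddr2sockaddr strips and reassembles it). The sketch below shows just the port encoding and decoding in ordinary userspace C; the helper names are invented for illustration and are not part of the sunrpc API.

/*
 * Sketch of the "hibyte.lobyte" port encoding used by universal
 * addresses: 2049 = 0x0801 becomes ".8.1", so NFS on 192.0.2.10
 * would be written "192.0.2.10.8.1".
 */
#include <stddef.h>
#include <stdio.h>

static int port_to_uaddr_suffix(unsigned short port, char *buf, size_t len)
{
	/* Split the port into its high and low bytes. */
	return snprintf(buf, len, ".%u.%u", port >> 8, port & 0xff);
}

static int uaddr_suffix_to_port(unsigned int hibyte, unsigned int lobyte,
				unsigned short *port)
{
	if (hibyte > 255 || lobyte > 255)
		return -1;	/* each field must fit in one octet */
	*port = (unsigned short)((hibyte << 8) | lobyte);
	return 0;
}

int main(void)
{
	char suffix[16];
	unsigned short port;

	port_to_uaddr_suffix(2049, suffix, sizeof(suffix));
	printf("192.0.2.10%s\n", suffix);	/* 192.0.2.10.8.1 */

	uaddr_suffix_to_port(8, 1, &port);
	printf("port %u\n", port);		/* 2049 */
	return 0;
}
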
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 66d458fc6920..fc6a43ccd950 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -89,8 +89,8 @@ static struct rpc_wait_queue pipe_version_rpc_waitqueue;
89static DECLARE_WAIT_QUEUE_HEAD(pipe_version_waitqueue); 89static DECLARE_WAIT_QUEUE_HEAD(pipe_version_waitqueue);
90 90
91static void gss_free_ctx(struct gss_cl_ctx *); 91static void gss_free_ctx(struct gss_cl_ctx *);
92static struct rpc_pipe_ops gss_upcall_ops_v0; 92static const struct rpc_pipe_ops gss_upcall_ops_v0;
93static struct rpc_pipe_ops gss_upcall_ops_v1; 93static const struct rpc_pipe_ops gss_upcall_ops_v1;
94 94
95static inline struct gss_cl_ctx * 95static inline struct gss_cl_ctx *
96gss_get_ctx(struct gss_cl_ctx *ctx) 96gss_get_ctx(struct gss_cl_ctx *ctx)
@@ -777,7 +777,7 @@ gss_create(struct rpc_clnt *clnt, rpc_authflavor_t flavor)
777 * that we supported only the old pipe. So we instead create 777 * that we supported only the old pipe. So we instead create
778 * the new pipe first. 778 * the new pipe first.
779 */ 779 */
780 gss_auth->dentry[1] = rpc_mkpipe(clnt->cl_dentry, 780 gss_auth->dentry[1] = rpc_mkpipe(clnt->cl_path.dentry,
781 "gssd", 781 "gssd",
782 clnt, &gss_upcall_ops_v1, 782 clnt, &gss_upcall_ops_v1,
783 RPC_PIPE_WAIT_FOR_OPEN); 783 RPC_PIPE_WAIT_FOR_OPEN);
@@ -786,7 +786,7 @@ gss_create(struct rpc_clnt *clnt, rpc_authflavor_t flavor)
786 goto err_put_mech; 786 goto err_put_mech;
787 } 787 }
788 788
789 gss_auth->dentry[0] = rpc_mkpipe(clnt->cl_dentry, 789 gss_auth->dentry[0] = rpc_mkpipe(clnt->cl_path.dentry,
790 gss_auth->mech->gm_name, 790 gss_auth->mech->gm_name,
791 clnt, &gss_upcall_ops_v0, 791 clnt, &gss_upcall_ops_v0,
792 RPC_PIPE_WAIT_FOR_OPEN); 792 RPC_PIPE_WAIT_FOR_OPEN);
@@ -1507,7 +1507,7 @@ static const struct rpc_credops gss_nullops = {
1507 .crunwrap_resp = gss_unwrap_resp, 1507 .crunwrap_resp = gss_unwrap_resp,
1508}; 1508};
1509 1509
1510static struct rpc_pipe_ops gss_upcall_ops_v0 = { 1510static const struct rpc_pipe_ops gss_upcall_ops_v0 = {
1511 .upcall = gss_pipe_upcall, 1511 .upcall = gss_pipe_upcall,
1512 .downcall = gss_pipe_downcall, 1512 .downcall = gss_pipe_downcall,
1513 .destroy_msg = gss_pipe_destroy_msg, 1513 .destroy_msg = gss_pipe_destroy_msg,
@@ -1515,7 +1515,7 @@ static struct rpc_pipe_ops gss_upcall_ops_v0 = {
1515 .release_pipe = gss_pipe_release, 1515 .release_pipe = gss_pipe_release,
1516}; 1516};
1517 1517
1518static struct rpc_pipe_ops gss_upcall_ops_v1 = { 1518static const struct rpc_pipe_ops gss_upcall_ops_v1 = {
1519 .upcall = gss_pipe_upcall, 1519 .upcall = gss_pipe_upcall,
1520 .downcall = gss_pipe_downcall, 1520 .downcall = gss_pipe_downcall,
1521 .destroy_msg = gss_pipe_destroy_msg, 1521 .destroy_msg = gss_pipe_destroy_msg,
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 2278a50c6444..2e6a148d277c 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -181,6 +181,11 @@ static void rsi_request(struct cache_detail *cd,
181 (*bpp)[-1] = '\n'; 181 (*bpp)[-1] = '\n';
182} 182}
183 183
184static int rsi_upcall(struct cache_detail *cd, struct cache_head *h)
185{
186 return sunrpc_cache_pipe_upcall(cd, h, rsi_request);
187}
188
184 189
185static int rsi_parse(struct cache_detail *cd, 190static int rsi_parse(struct cache_detail *cd,
186 char *mesg, int mlen) 191 char *mesg, int mlen)
@@ -270,7 +275,7 @@ static struct cache_detail rsi_cache = {
270 .hash_table = rsi_table, 275 .hash_table = rsi_table,
271 .name = "auth.rpcsec.init", 276 .name = "auth.rpcsec.init",
272 .cache_put = rsi_put, 277 .cache_put = rsi_put,
273 .cache_request = rsi_request, 278 .cache_upcall = rsi_upcall,
274 .cache_parse = rsi_parse, 279 .cache_parse = rsi_parse,
275 .match = rsi_match, 280 .match = rsi_match,
276 .init = rsi_init, 281 .init = rsi_init,
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index ff0c23053d2f..45cdaff9b361 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -27,10 +27,12 @@
27#include <linux/net.h> 27#include <linux/net.h>
28#include <linux/workqueue.h> 28#include <linux/workqueue.h>
29#include <linux/mutex.h> 29#include <linux/mutex.h>
30#include <linux/pagemap.h>
30#include <asm/ioctls.h> 31#include <asm/ioctls.h>
31#include <linux/sunrpc/types.h> 32#include <linux/sunrpc/types.h>
32#include <linux/sunrpc/cache.h> 33#include <linux/sunrpc/cache.h>
33#include <linux/sunrpc/stats.h> 34#include <linux/sunrpc/stats.h>
35#include <linux/sunrpc/rpc_pipe_fs.h>
34 36
35#define RPCDBG_FACILITY RPCDBG_CACHE 37#define RPCDBG_FACILITY RPCDBG_CACHE
36 38
@@ -175,7 +177,13 @@ struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
175} 177}
176EXPORT_SYMBOL_GPL(sunrpc_cache_update); 178EXPORT_SYMBOL_GPL(sunrpc_cache_update);
177 179
178static int cache_make_upcall(struct cache_detail *detail, struct cache_head *h); 180static int cache_make_upcall(struct cache_detail *cd, struct cache_head *h)
181{
182 if (!cd->cache_upcall)
183 return -EINVAL;
184 return cd->cache_upcall(cd, h);
185}
186
179/* 187/*
180 * This is the generic cache management routine for all 188 * This is the generic cache management routine for all
181 * the authentication caches. 189 * the authentication caches.
@@ -284,76 +292,11 @@ static DEFINE_SPINLOCK(cache_list_lock);
284static struct cache_detail *current_detail; 292static struct cache_detail *current_detail;
285static int current_index; 293static int current_index;
286 294
287static const struct file_operations cache_file_operations;
288static const struct file_operations content_file_operations;
289static const struct file_operations cache_flush_operations;
290
291static void do_cache_clean(struct work_struct *work); 295static void do_cache_clean(struct work_struct *work);
292static DECLARE_DELAYED_WORK(cache_cleaner, do_cache_clean); 296static DECLARE_DELAYED_WORK(cache_cleaner, do_cache_clean);
293 297
294static void remove_cache_proc_entries(struct cache_detail *cd) 298static void sunrpc_init_cache_detail(struct cache_detail *cd)
295{
296 if (cd->proc_ent == NULL)
297 return;
298 if (cd->flush_ent)
299 remove_proc_entry("flush", cd->proc_ent);
300 if (cd->channel_ent)
301 remove_proc_entry("channel", cd->proc_ent);
302 if (cd->content_ent)
303 remove_proc_entry("content", cd->proc_ent);
304 cd->proc_ent = NULL;
305 remove_proc_entry(cd->name, proc_net_rpc);
306}
307
308#ifdef CONFIG_PROC_FS
309static int create_cache_proc_entries(struct cache_detail *cd)
310{
311 struct proc_dir_entry *p;
312
313 cd->proc_ent = proc_mkdir(cd->name, proc_net_rpc);
314 if (cd->proc_ent == NULL)
315 goto out_nomem;
316 cd->channel_ent = cd->content_ent = NULL;
317
318 p = proc_create_data("flush", S_IFREG|S_IRUSR|S_IWUSR,
319 cd->proc_ent, &cache_flush_operations, cd);
320 cd->flush_ent = p;
321 if (p == NULL)
322 goto out_nomem;
323
324 if (cd->cache_request || cd->cache_parse) {
325 p = proc_create_data("channel", S_IFREG|S_IRUSR|S_IWUSR,
326 cd->proc_ent, &cache_file_operations, cd);
327 cd->channel_ent = p;
328 if (p == NULL)
329 goto out_nomem;
330 }
331 if (cd->cache_show) {
332 p = proc_create_data("content", S_IFREG|S_IRUSR|S_IWUSR,
333 cd->proc_ent, &content_file_operations, cd);
334 cd->content_ent = p;
335 if (p == NULL)
336 goto out_nomem;
337 }
338 return 0;
339out_nomem:
340 remove_cache_proc_entries(cd);
341 return -ENOMEM;
342}
343#else /* CONFIG_PROC_FS */
344static int create_cache_proc_entries(struct cache_detail *cd)
345{
346 return 0;
347}
348#endif
349
350int cache_register(struct cache_detail *cd)
351{ 299{
352 int ret;
353
354 ret = create_cache_proc_entries(cd);
355 if (ret)
356 return ret;
357 rwlock_init(&cd->hash_lock); 300 rwlock_init(&cd->hash_lock);
358 INIT_LIST_HEAD(&cd->queue); 301 INIT_LIST_HEAD(&cd->queue);
359 spin_lock(&cache_list_lock); 302 spin_lock(&cache_list_lock);
@@ -367,11 +310,9 @@ int cache_register(struct cache_detail *cd)
367 310
368 /* start the cleaning process */ 311 /* start the cleaning process */
369 schedule_delayed_work(&cache_cleaner, 0); 312 schedule_delayed_work(&cache_cleaner, 0);
370 return 0;
371} 313}
372EXPORT_SYMBOL_GPL(cache_register);
373 314
374void cache_unregister(struct cache_detail *cd) 315static void sunrpc_destroy_cache_detail(struct cache_detail *cd)
375{ 316{
376 cache_purge(cd); 317 cache_purge(cd);
377 spin_lock(&cache_list_lock); 318 spin_lock(&cache_list_lock);
@@ -386,7 +327,6 @@ void cache_unregister(struct cache_detail *cd)
386 list_del_init(&cd->others); 327 list_del_init(&cd->others);
387 write_unlock(&cd->hash_lock); 328 write_unlock(&cd->hash_lock);
388 spin_unlock(&cache_list_lock); 329 spin_unlock(&cache_list_lock);
389 remove_cache_proc_entries(cd);
390 if (list_empty(&cache_list)) { 330 if (list_empty(&cache_list)) {
391 /* module must be being unloaded so its safe to kill the worker */ 331 /* module must be being unloaded so its safe to kill the worker */
392 cancel_delayed_work_sync(&cache_cleaner); 332 cancel_delayed_work_sync(&cache_cleaner);
@@ -395,7 +335,6 @@ void cache_unregister(struct cache_detail *cd)
395out: 335out:
396 printk(KERN_ERR "nfsd: failed to unregister %s cache\n", cd->name); 336 printk(KERN_ERR "nfsd: failed to unregister %s cache\n", cd->name);
397} 337}
398EXPORT_SYMBOL_GPL(cache_unregister);
399 338
400/* clean cache tries to find something to clean 339/* clean cache tries to find something to clean
401 * and cleans it. 340 * and cleans it.
@@ -687,18 +626,18 @@ struct cache_reader {
687 int offset; /* if non-0, we have a refcnt on next request */ 626 int offset; /* if non-0, we have a refcnt on next request */
688}; 627};
689 628
690static ssize_t 629static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
691cache_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos) 630 loff_t *ppos, struct cache_detail *cd)
692{ 631{
693 struct cache_reader *rp = filp->private_data; 632 struct cache_reader *rp = filp->private_data;
694 struct cache_request *rq; 633 struct cache_request *rq;
695 struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data; 634 struct inode *inode = filp->f_path.dentry->d_inode;
696 int err; 635 int err;
697 636
698 if (count == 0) 637 if (count == 0)
699 return 0; 638 return 0;
700 639
701 mutex_lock(&queue_io_mutex); /* protect against multiple concurrent 640 mutex_lock(&inode->i_mutex); /* protect against multiple concurrent
702 * readers on this file */ 641 * readers on this file */
703 again: 642 again:
704 spin_lock(&queue_lock); 643 spin_lock(&queue_lock);
@@ -711,7 +650,7 @@ cache_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
711 } 650 }
712 if (rp->q.list.next == &cd->queue) { 651 if (rp->q.list.next == &cd->queue) {
713 spin_unlock(&queue_lock); 652 spin_unlock(&queue_lock);
714 mutex_unlock(&queue_io_mutex); 653 mutex_unlock(&inode->i_mutex);
715 BUG_ON(rp->offset); 654 BUG_ON(rp->offset);
716 return 0; 655 return 0;
717 } 656 }
@@ -758,49 +697,90 @@ cache_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
758 } 697 }
759 if (err == -EAGAIN) 698 if (err == -EAGAIN)
760 goto again; 699 goto again;
761 mutex_unlock(&queue_io_mutex); 700 mutex_unlock(&inode->i_mutex);
762 return err ? err : count; 701 return err ? err : count;
763} 702}
764 703
765static char write_buf[8192]; /* protected by queue_io_mutex */ 704static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
705 size_t count, struct cache_detail *cd)
706{
707 ssize_t ret;
766 708
767static ssize_t 709 if (copy_from_user(kaddr, buf, count))
768cache_write(struct file *filp, const char __user *buf, size_t count, 710 return -EFAULT;
769 loff_t *ppos) 711 kaddr[count] = '\0';
712 ret = cd->cache_parse(cd, kaddr, count);
713 if (!ret)
714 ret = count;
715 return ret;
716}
717
718static ssize_t cache_slow_downcall(const char __user *buf,
719 size_t count, struct cache_detail *cd)
770{ 720{
771 int err; 721 static char write_buf[8192]; /* protected by queue_io_mutex */
772 struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data; 722 ssize_t ret = -EINVAL;
773 723
774 if (count == 0)
775 return 0;
776 if (count >= sizeof(write_buf)) 724 if (count >= sizeof(write_buf))
777 return -EINVAL; 725 goto out;
778
779 mutex_lock(&queue_io_mutex); 726 mutex_lock(&queue_io_mutex);
727 ret = cache_do_downcall(write_buf, buf, count, cd);
728 mutex_unlock(&queue_io_mutex);
729out:
730 return ret;
731}
780 732
781 if (copy_from_user(write_buf, buf, count)) { 733static ssize_t cache_downcall(struct address_space *mapping,
782 mutex_unlock(&queue_io_mutex); 734 const char __user *buf,
783 return -EFAULT; 735 size_t count, struct cache_detail *cd)
784 } 736{
785 write_buf[count] = '\0'; 737 struct page *page;
786 if (cd->cache_parse) 738 char *kaddr;
787 err = cd->cache_parse(cd, write_buf, count); 739 ssize_t ret = -ENOMEM;
788 else 740
789 err = -EINVAL; 741 if (count >= PAGE_CACHE_SIZE)
742 goto out_slow;
743
744 page = find_or_create_page(mapping, 0, GFP_KERNEL);
745 if (!page)
746 goto out_slow;
747
748 kaddr = kmap(page);
749 ret = cache_do_downcall(kaddr, buf, count, cd);
750 kunmap(page);
751 unlock_page(page);
752 page_cache_release(page);
753 return ret;
754out_slow:
755 return cache_slow_downcall(buf, count, cd);
756}
790 757
791 mutex_unlock(&queue_io_mutex); 758static ssize_t cache_write(struct file *filp, const char __user *buf,
792 return err ? err : count; 759 size_t count, loff_t *ppos,
760 struct cache_detail *cd)
761{
762 struct address_space *mapping = filp->f_mapping;
763 struct inode *inode = filp->f_path.dentry->d_inode;
764 ssize_t ret = -EINVAL;
765
766 if (!cd->cache_parse)
767 goto out;
768
769 mutex_lock(&inode->i_mutex);
770 ret = cache_downcall(mapping, buf, count, cd);
771 mutex_unlock(&inode->i_mutex);
772out:
773 return ret;
793} 774}
794 775
795static DECLARE_WAIT_QUEUE_HEAD(queue_wait); 776static DECLARE_WAIT_QUEUE_HEAD(queue_wait);
796 777
797static unsigned int 778static unsigned int cache_poll(struct file *filp, poll_table *wait,
798cache_poll(struct file *filp, poll_table *wait) 779 struct cache_detail *cd)
799{ 780{
800 unsigned int mask; 781 unsigned int mask;
801 struct cache_reader *rp = filp->private_data; 782 struct cache_reader *rp = filp->private_data;
802 struct cache_queue *cq; 783 struct cache_queue *cq;
803 struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;
804 784
805 poll_wait(filp, &queue_wait, wait); 785 poll_wait(filp, &queue_wait, wait);
806 786
@@ -822,14 +802,13 @@ cache_poll(struct file *filp, poll_table *wait)
822 return mask; 802 return mask;
823} 803}
824 804
825static int 805static int cache_ioctl(struct inode *ino, struct file *filp,
826cache_ioctl(struct inode *ino, struct file *filp, 806 unsigned int cmd, unsigned long arg,
827 unsigned int cmd, unsigned long arg) 807 struct cache_detail *cd)
828{ 808{
829 int len = 0; 809 int len = 0;
830 struct cache_reader *rp = filp->private_data; 810 struct cache_reader *rp = filp->private_data;
831 struct cache_queue *cq; 811 struct cache_queue *cq;
832 struct cache_detail *cd = PDE(ino)->data;
833 812
834 if (cmd != FIONREAD || !rp) 813 if (cmd != FIONREAD || !rp)
835 return -EINVAL; 814 return -EINVAL;
@@ -852,15 +831,15 @@ cache_ioctl(struct inode *ino, struct file *filp,
852 return put_user(len, (int __user *)arg); 831 return put_user(len, (int __user *)arg);
853} 832}
854 833
855static int 834static int cache_open(struct inode *inode, struct file *filp,
856cache_open(struct inode *inode, struct file *filp) 835 struct cache_detail *cd)
857{ 836{
858 struct cache_reader *rp = NULL; 837 struct cache_reader *rp = NULL;
859 838
839 if (!cd || !try_module_get(cd->owner))
840 return -EACCES;
860 nonseekable_open(inode, filp); 841 nonseekable_open(inode, filp);
861 if (filp->f_mode & FMODE_READ) { 842 if (filp->f_mode & FMODE_READ) {
862 struct cache_detail *cd = PDE(inode)->data;
863
864 rp = kmalloc(sizeof(*rp), GFP_KERNEL); 843 rp = kmalloc(sizeof(*rp), GFP_KERNEL);
865 if (!rp) 844 if (!rp)
866 return -ENOMEM; 845 return -ENOMEM;
@@ -875,11 +854,10 @@ cache_open(struct inode *inode, struct file *filp)
875 return 0; 854 return 0;
876} 855}
877 856
878static int 857static int cache_release(struct inode *inode, struct file *filp,
879cache_release(struct inode *inode, struct file *filp) 858 struct cache_detail *cd)
880{ 859{
881 struct cache_reader *rp = filp->private_data; 860 struct cache_reader *rp = filp->private_data;
882 struct cache_detail *cd = PDE(inode)->data;
883 861
884 if (rp) { 862 if (rp) {
885 spin_lock(&queue_lock); 863 spin_lock(&queue_lock);
@@ -903,23 +881,12 @@ cache_release(struct inode *inode, struct file *filp)
903 cd->last_close = get_seconds(); 881 cd->last_close = get_seconds();
904 atomic_dec(&cd->readers); 882 atomic_dec(&cd->readers);
905 } 883 }
884 module_put(cd->owner);
906 return 0; 885 return 0;
907} 886}
908 887
909 888
910 889
911static const struct file_operations cache_file_operations = {
912 .owner = THIS_MODULE,
913 .llseek = no_llseek,
914 .read = cache_read,
915 .write = cache_write,
916 .poll = cache_poll,
917 .ioctl = cache_ioctl, /* for FIONREAD */
918 .open = cache_open,
919 .release = cache_release,
920};
921
922
923static void queue_loose(struct cache_detail *detail, struct cache_head *ch) 890static void queue_loose(struct cache_detail *detail, struct cache_head *ch)
924{ 891{
925 struct cache_queue *cq; 892 struct cache_queue *cq;
@@ -1020,15 +987,21 @@ static void warn_no_listener(struct cache_detail *detail)
1020 if (detail->last_warn != detail->last_close) { 987 if (detail->last_warn != detail->last_close) {
1021 detail->last_warn = detail->last_close; 988 detail->last_warn = detail->last_close;
1022 if (detail->warn_no_listener) 989 if (detail->warn_no_listener)
1023 detail->warn_no_listener(detail); 990 detail->warn_no_listener(detail, detail->last_close != 0);
1024 } 991 }
1025} 992}
1026 993
1027/* 994/*
1028 * register an upcall request to user-space. 995 * register an upcall request to user-space and queue it up for read() by the
996 * upcall daemon.
997 *
1029 * Each request is at most one page long. 998 * Each request is at most one page long.
1030 */ 999 */
1031static int cache_make_upcall(struct cache_detail *detail, struct cache_head *h) 1000int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h,
1001 void (*cache_request)(struct cache_detail *,
1002 struct cache_head *,
1003 char **,
1004 int *))
1032{ 1005{
1033 1006
1034 char *buf; 1007 char *buf;
@@ -1036,9 +1009,6 @@ static int cache_make_upcall(struct cache_detail *detail, struct cache_head *h)
1036 char *bp; 1009 char *bp;
1037 int len; 1010 int len;
1038 1011
1039 if (detail->cache_request == NULL)
1040 return -EINVAL;
1041
1042 if (atomic_read(&detail->readers) == 0 && 1012 if (atomic_read(&detail->readers) == 0 &&
1043 detail->last_close < get_seconds() - 30) { 1013 detail->last_close < get_seconds() - 30) {
1044 warn_no_listener(detail); 1014 warn_no_listener(detail);
@@ -1057,7 +1027,7 @@ static int cache_make_upcall(struct cache_detail *detail, struct cache_head *h)
1057 1027
1058 bp = buf; len = PAGE_SIZE; 1028 bp = buf; len = PAGE_SIZE;
1059 1029
1060 detail->cache_request(detail, h, &bp, &len); 1030 cache_request(detail, h, &bp, &len);
1061 1031
1062 if (len < 0) { 1032 if (len < 0) {
1063 kfree(buf); 1033 kfree(buf);
@@ -1075,6 +1045,7 @@ static int cache_make_upcall(struct cache_detail *detail, struct cache_head *h)
1075 wake_up(&queue_wait); 1045 wake_up(&queue_wait);
1076 return 0; 1046 return 0;
1077} 1047}
1048EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall);
1078 1049
1079/* 1050/*
1080 * parse a message from user-space and pass it 1051 * parse a message from user-space and pass it
@@ -1242,11 +1213,13 @@ static const struct seq_operations cache_content_op = {
1242 .show = c_show, 1213 .show = c_show,
1243}; 1214};
1244 1215
1245static int content_open(struct inode *inode, struct file *file) 1216static int content_open(struct inode *inode, struct file *file,
1217 struct cache_detail *cd)
1246{ 1218{
1247 struct handle *han; 1219 struct handle *han;
1248 struct cache_detail *cd = PDE(inode)->data;
1249 1220
1221 if (!cd || !try_module_get(cd->owner))
1222 return -EACCES;
1250 han = __seq_open_private(file, &cache_content_op, sizeof(*han)); 1223 han = __seq_open_private(file, &cache_content_op, sizeof(*han));
1251 if (han == NULL) 1224 if (han == NULL)
1252 return -ENOMEM; 1225 return -ENOMEM;
@@ -1255,17 +1228,33 @@ static int content_open(struct inode *inode, struct file *file)
1255 return 0; 1228 return 0;
1256} 1229}
1257 1230
1258static const struct file_operations content_file_operations = { 1231static int content_release(struct inode *inode, struct file *file,
1259 .open = content_open, 1232 struct cache_detail *cd)
1260 .read = seq_read, 1233{
1261 .llseek = seq_lseek, 1234 int ret = seq_release_private(inode, file);
1262 .release = seq_release_private, 1235 module_put(cd->owner);
1263}; 1236 return ret;
1237}
1238
1239static int open_flush(struct inode *inode, struct file *file,
1240 struct cache_detail *cd)
1241{
1242 if (!cd || !try_module_get(cd->owner))
1243 return -EACCES;
1244 return nonseekable_open(inode, file);
1245}
1246
1247static int release_flush(struct inode *inode, struct file *file,
1248 struct cache_detail *cd)
1249{
1250 module_put(cd->owner);
1251 return 0;
1252}
1264 1253
1265static ssize_t read_flush(struct file *file, char __user *buf, 1254static ssize_t read_flush(struct file *file, char __user *buf,
1266 size_t count, loff_t *ppos) 1255 size_t count, loff_t *ppos,
1256 struct cache_detail *cd)
1267{ 1257{
1268 struct cache_detail *cd = PDE(file->f_path.dentry->d_inode)->data;
1269 char tbuf[20]; 1258 char tbuf[20];
1270 unsigned long p = *ppos; 1259 unsigned long p = *ppos;
1271 size_t len; 1260 size_t len;
@@ -1283,10 +1272,10 @@ static ssize_t read_flush(struct file *file, char __user *buf,
1283 return len; 1272 return len;
1284} 1273}
1285 1274
1286static ssize_t write_flush(struct file * file, const char __user * buf, 1275static ssize_t write_flush(struct file *file, const char __user *buf,
1287 size_t count, loff_t *ppos) 1276 size_t count, loff_t *ppos,
1277 struct cache_detail *cd)
1288{ 1278{
1289 struct cache_detail *cd = PDE(file->f_path.dentry->d_inode)->data;
1290 char tbuf[20]; 1279 char tbuf[20];
1291 char *ep; 1280 char *ep;
1292 long flushtime; 1281 long flushtime;
@@ -1307,8 +1296,343 @@ static ssize_t write_flush(struct file * file, const char __user * buf,
1307 return count; 1296 return count;
1308} 1297}
1309 1298
1310static const struct file_operations cache_flush_operations = { 1299static ssize_t cache_read_procfs(struct file *filp, char __user *buf,
1311 .open = nonseekable_open, 1300 size_t count, loff_t *ppos)
1312 .read = read_flush, 1301{
1313 .write = write_flush, 1302 struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;
1303
1304 return cache_read(filp, buf, count, ppos, cd);
1305}
1306
1307static ssize_t cache_write_procfs(struct file *filp, const char __user *buf,
1308 size_t count, loff_t *ppos)
1309{
1310 struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;
1311
1312 return cache_write(filp, buf, count, ppos, cd);
1313}
1314
1315static unsigned int cache_poll_procfs(struct file *filp, poll_table *wait)
1316{
1317 struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;
1318
1319 return cache_poll(filp, wait, cd);
1320}
1321
1322static int cache_ioctl_procfs(struct inode *inode, struct file *filp,
1323 unsigned int cmd, unsigned long arg)
1324{
1325 struct cache_detail *cd = PDE(inode)->data;
1326
1327 return cache_ioctl(inode, filp, cmd, arg, cd);
1328}
1329
1330static int cache_open_procfs(struct inode *inode, struct file *filp)
1331{
1332 struct cache_detail *cd = PDE(inode)->data;
1333
1334 return cache_open(inode, filp, cd);
1335}
1336
1337static int cache_release_procfs(struct inode *inode, struct file *filp)
1338{
1339 struct cache_detail *cd = PDE(inode)->data;
1340
1341 return cache_release(inode, filp, cd);
1342}
1343
1344static const struct file_operations cache_file_operations_procfs = {
1345 .owner = THIS_MODULE,
1346 .llseek = no_llseek,
1347 .read = cache_read_procfs,
1348 .write = cache_write_procfs,
1349 .poll = cache_poll_procfs,
1350 .ioctl = cache_ioctl_procfs, /* for FIONREAD */
1351 .open = cache_open_procfs,
1352 .release = cache_release_procfs,
1314}; 1353};
1354
1355static int content_open_procfs(struct inode *inode, struct file *filp)
1356{
1357 struct cache_detail *cd = PDE(inode)->data;
1358
1359 return content_open(inode, filp, cd);
1360}
1361
1362static int content_release_procfs(struct inode *inode, struct file *filp)
1363{
1364 struct cache_detail *cd = PDE(inode)->data;
1365
1366 return content_release(inode, filp, cd);
1367}
1368
1369static const struct file_operations content_file_operations_procfs = {
1370 .open = content_open_procfs,
1371 .read = seq_read,
1372 .llseek = seq_lseek,
1373 .release = content_release_procfs,
1374};
1375
1376static int open_flush_procfs(struct inode *inode, struct file *filp)
1377{
1378 struct cache_detail *cd = PDE(inode)->data;
1379
1380 return open_flush(inode, filp, cd);
1381}
1382
1383static int release_flush_procfs(struct inode *inode, struct file *filp)
1384{
1385 struct cache_detail *cd = PDE(inode)->data;
1386
1387 return release_flush(inode, filp, cd);
1388}
1389
1390static ssize_t read_flush_procfs(struct file *filp, char __user *buf,
1391 size_t count, loff_t *ppos)
1392{
1393 struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;
1394
1395 return read_flush(filp, buf, count, ppos, cd);
1396}
1397
1398static ssize_t write_flush_procfs(struct file *filp,
1399 const char __user *buf,
1400 size_t count, loff_t *ppos)
1401{
1402 struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;
1403
1404 return write_flush(filp, buf, count, ppos, cd);
1405}
1406
1407static const struct file_operations cache_flush_operations_procfs = {
1408 .open = open_flush_procfs,
1409 .read = read_flush_procfs,
1410 .write = write_flush_procfs,
1411 .release = release_flush_procfs,
1412};
1413
1414static void remove_cache_proc_entries(struct cache_detail *cd)
1415{
1416 if (cd->u.procfs.proc_ent == NULL)
1417 return;
1418 if (cd->u.procfs.flush_ent)
1419 remove_proc_entry("flush", cd->u.procfs.proc_ent);
1420 if (cd->u.procfs.channel_ent)
1421 remove_proc_entry("channel", cd->u.procfs.proc_ent);
1422 if (cd->u.procfs.content_ent)
1423 remove_proc_entry("content", cd->u.procfs.proc_ent);
1424 cd->u.procfs.proc_ent = NULL;
1425 remove_proc_entry(cd->name, proc_net_rpc);
1426}
1427
1428#ifdef CONFIG_PROC_FS
1429static int create_cache_proc_entries(struct cache_detail *cd)
1430{
1431 struct proc_dir_entry *p;
1432
1433 cd->u.procfs.proc_ent = proc_mkdir(cd->name, proc_net_rpc);
1434 if (cd->u.procfs.proc_ent == NULL)
1435 goto out_nomem;
1436 cd->u.procfs.channel_ent = NULL;
1437 cd->u.procfs.content_ent = NULL;
1438
1439 p = proc_create_data("flush", S_IFREG|S_IRUSR|S_IWUSR,
1440 cd->u.procfs.proc_ent,
1441 &cache_flush_operations_procfs, cd);
1442 cd->u.procfs.flush_ent = p;
1443 if (p == NULL)
1444 goto out_nomem;
1445
1446 if (cd->cache_upcall || cd->cache_parse) {
1447 p = proc_create_data("channel", S_IFREG|S_IRUSR|S_IWUSR,
1448 cd->u.procfs.proc_ent,
1449 &cache_file_operations_procfs, cd);
1450 cd->u.procfs.channel_ent = p;
1451 if (p == NULL)
1452 goto out_nomem;
1453 }
1454 if (cd->cache_show) {
1455 p = proc_create_data("content", S_IFREG|S_IRUSR|S_IWUSR,
1456 cd->u.procfs.proc_ent,
1457 &content_file_operations_procfs, cd);
1458 cd->u.procfs.content_ent = p;
1459 if (p == NULL)
1460 goto out_nomem;
1461 }
1462 return 0;
1463out_nomem:
1464 remove_cache_proc_entries(cd);
1465 return -ENOMEM;
1466}
1467#else /* CONFIG_PROC_FS */
1468static int create_cache_proc_entries(struct cache_detail *cd)
1469{
1470 return 0;
1471}
1472#endif
1473
1474int cache_register(struct cache_detail *cd)
1475{
1476 int ret;
1477
1478 sunrpc_init_cache_detail(cd);
1479 ret = create_cache_proc_entries(cd);
1480 if (ret)
1481 sunrpc_destroy_cache_detail(cd);
1482 return ret;
1483}
1484EXPORT_SYMBOL_GPL(cache_register);
1485
1486void cache_unregister(struct cache_detail *cd)
1487{
1488 remove_cache_proc_entries(cd);
1489 sunrpc_destroy_cache_detail(cd);
1490}
1491EXPORT_SYMBOL_GPL(cache_unregister);
1492
1493static ssize_t cache_read_pipefs(struct file *filp, char __user *buf,
1494 size_t count, loff_t *ppos)
1495{
1496 struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;
1497
1498 return cache_read(filp, buf, count, ppos, cd);
1499}
1500
1501static ssize_t cache_write_pipefs(struct file *filp, const char __user *buf,
1502 size_t count, loff_t *ppos)
1503{
1504 struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;
1505
1506 return cache_write(filp, buf, count, ppos, cd);
1507}
1508
1509static unsigned int cache_poll_pipefs(struct file *filp, poll_table *wait)
1510{
1511 struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;
1512
1513 return cache_poll(filp, wait, cd);
1514}
1515
1516static int cache_ioctl_pipefs(struct inode *inode, struct file *filp,
1517 unsigned int cmd, unsigned long arg)
1518{
1519 struct cache_detail *cd = RPC_I(inode)->private;
1520
1521 return cache_ioctl(inode, filp, cmd, arg, cd);
1522}
1523
1524static int cache_open_pipefs(struct inode *inode, struct file *filp)
1525{
1526 struct cache_detail *cd = RPC_I(inode)->private;
1527
1528 return cache_open(inode, filp, cd);
1529}
1530
1531static int cache_release_pipefs(struct inode *inode, struct file *filp)
1532{
1533 struct cache_detail *cd = RPC_I(inode)->private;
1534
1535 return cache_release(inode, filp, cd);
1536}
1537
1538const struct file_operations cache_file_operations_pipefs = {
1539 .owner = THIS_MODULE,
1540 .llseek = no_llseek,
1541 .read = cache_read_pipefs,
1542 .write = cache_write_pipefs,
1543 .poll = cache_poll_pipefs,
1544 .ioctl = cache_ioctl_pipefs, /* for FIONREAD */
1545 .open = cache_open_pipefs,
1546 .release = cache_release_pipefs,
1547};
1548
1549static int content_open_pipefs(struct inode *inode, struct file *filp)
1550{
1551 struct cache_detail *cd = RPC_I(inode)->private;
1552
1553 return content_open(inode, filp, cd);
1554}
1555
1556static int content_release_pipefs(struct inode *inode, struct file *filp)
1557{
1558 struct cache_detail *cd = RPC_I(inode)->private;
1559
1560 return content_release(inode, filp, cd);
1561}
1562
1563const struct file_operations content_file_operations_pipefs = {
1564 .open = content_open_pipefs,
1565 .read = seq_read,
1566 .llseek = seq_lseek,
1567 .release = content_release_pipefs,
1568};
1569
1570static int open_flush_pipefs(struct inode *inode, struct file *filp)
1571{
1572 struct cache_detail *cd = RPC_I(inode)->private;
1573
1574 return open_flush(inode, filp, cd);
1575}
1576
1577static int release_flush_pipefs(struct inode *inode, struct file *filp)
1578{
1579 struct cache_detail *cd = RPC_I(inode)->private;
1580
1581 return release_flush(inode, filp, cd);
1582}
1583
1584static ssize_t read_flush_pipefs(struct file *filp, char __user *buf,
1585 size_t count, loff_t *ppos)
1586{
1587 struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;
1588
1589 return read_flush(filp, buf, count, ppos, cd);
1590}
1591
1592static ssize_t write_flush_pipefs(struct file *filp,
1593 const char __user *buf,
1594 size_t count, loff_t *ppos)
1595{
1596 struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;
1597
1598 return write_flush(filp, buf, count, ppos, cd);
1599}
1600
1601const struct file_operations cache_flush_operations_pipefs = {
1602 .open = open_flush_pipefs,
1603 .read = read_flush_pipefs,
1604 .write = write_flush_pipefs,
1605 .release = release_flush_pipefs,
1606};
1607
1608int sunrpc_cache_register_pipefs(struct dentry *parent,
1609 const char *name, mode_t umode,
1610 struct cache_detail *cd)
1611{
1612 struct qstr q;
1613 struct dentry *dir;
1614 int ret = 0;
1615
1616 sunrpc_init_cache_detail(cd);
1617 q.name = name;
1618 q.len = strlen(name);
1619 q.hash = full_name_hash(q.name, q.len);
1620 dir = rpc_create_cache_dir(parent, &q, umode, cd);
1621 if (!IS_ERR(dir))
1622 cd->u.pipefs.dir = dir;
1623 else {
1624 sunrpc_destroy_cache_detail(cd);
1625 ret = PTR_ERR(dir);
1626 }
1627 return ret;
1628}
1629EXPORT_SYMBOL_GPL(sunrpc_cache_register_pipefs);
1630
1631void sunrpc_cache_unregister_pipefs(struct cache_detail *cd)
1632{
1633 rpc_remove_cache_dir(cd->u.pipefs.dir);
1634 cd->u.pipefs.dir = NULL;
1635 sunrpc_destroy_cache_detail(cd);
1636}
1637EXPORT_SYMBOL_GPL(sunrpc_cache_unregister_pipefs);
1638
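
The cache.c rework above splits each file operation into a shared implementation that takes the cache_detail explicitly, plus thin _procfs and _pipefs wrappers that differ only in how they dig that cache_detail out of the inode (PDE(inode)->data versus RPC_I(inode)->private). Below is a compact userspace sketch of the same delegation pattern, with invented types standing in for the two inode layouts.

/*
 * Sketch: one common routine works on an explicit cache_detail; each
 * front end supplies a wrapper that only knows its own lookup.
 */
#include <stdio.h>

struct cache_detail { const char *name; };

/* Shared implementation, independent of the backing filesystem. */
static int cache_show(struct cache_detail *cd)
{
	printf("cache: %s\n", cd->name);
	return 0;
}

/* "procfs"-style front end: detail hangs off its own private field. */
struct proc_inode { struct cache_detail *data; };
static int cache_show_procfs(struct proc_inode *inode)
{
	return cache_show(inode->data);
}

/* "pipefs"-style front end: same operation, different lookup. */
struct rpc_inode { struct cache_detail *private; };
static int cache_show_pipefs(struct rpc_inode *inode)
{
	return cache_show(inode->private);
}

int main(void)
{
	struct cache_detail cd = { .name = "auth.rpcsec.init" };
	struct proc_inode pi = { .data = &cd };
	struct rpc_inode ri = { .private = &cd };

	cache_show_procfs(&pi);
	cache_show_pipefs(&ri);
	return 0;
}
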
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index df1039f077c2..fac0ca93f06b 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -27,6 +27,8 @@
27#include <linux/types.h> 27#include <linux/types.h>
28#include <linux/kallsyms.h> 28#include <linux/kallsyms.h>
29#include <linux/mm.h> 29#include <linux/mm.h>
30#include <linux/namei.h>
31#include <linux/mount.h>
30#include <linux/slab.h> 32#include <linux/slab.h>
31#include <linux/utsname.h> 33#include <linux/utsname.h>
32#include <linux/workqueue.h> 34#include <linux/workqueue.h>
@@ -97,33 +99,49 @@ static int
97rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name) 99rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name)
98{ 100{
99 static uint32_t clntid; 101 static uint32_t clntid;
102 struct nameidata nd;
103 struct path path;
104 char name[15];
105 struct qstr q = {
106 .name = name,
107 };
100 int error; 108 int error;
101 109
102 clnt->cl_vfsmnt = ERR_PTR(-ENOENT); 110 clnt->cl_path.mnt = ERR_PTR(-ENOENT);
103 clnt->cl_dentry = ERR_PTR(-ENOENT); 111 clnt->cl_path.dentry = ERR_PTR(-ENOENT);
104 if (dir_name == NULL) 112 if (dir_name == NULL)
105 return 0; 113 return 0;
106 114
107 clnt->cl_vfsmnt = rpc_get_mount(); 115 path.mnt = rpc_get_mount();
108 if (IS_ERR(clnt->cl_vfsmnt)) 116 if (IS_ERR(path.mnt))
109 return PTR_ERR(clnt->cl_vfsmnt); 117 return PTR_ERR(path.mnt);
118 error = vfs_path_lookup(path.mnt->mnt_root, path.mnt, dir_name, 0, &nd);
119 if (error)
120 goto err;
110 121
111 for (;;) { 122 for (;;) {
112 snprintf(clnt->cl_pathname, sizeof(clnt->cl_pathname), 123 q.len = snprintf(name, sizeof(name), "clnt%x", (unsigned int)clntid++);
113 "%s/clnt%x", dir_name, 124 name[sizeof(name) - 1] = '\0';
114 (unsigned int)clntid++); 125 q.hash = full_name_hash(q.name, q.len);
115 clnt->cl_pathname[sizeof(clnt->cl_pathname) - 1] = '\0'; 126 path.dentry = rpc_create_client_dir(nd.path.dentry, &q, clnt);
116 clnt->cl_dentry = rpc_mkdir(clnt->cl_pathname, clnt); 127 if (!IS_ERR(path.dentry))
117 if (!IS_ERR(clnt->cl_dentry)) 128 break;
118 return 0; 129 error = PTR_ERR(path.dentry);
119 error = PTR_ERR(clnt->cl_dentry);
120 if (error != -EEXIST) { 130 if (error != -EEXIST) {
121 printk(KERN_INFO "RPC: Couldn't create pipefs entry %s, error %d\n", 131 printk(KERN_INFO "RPC: Couldn't create pipefs entry"
122 clnt->cl_pathname, error); 132 " %s/%s, error %d\n",
123 rpc_put_mount(); 133 dir_name, name, error);
124 return error; 134 goto err_path_put;
125 } 135 }
126 } 136 }
137 path_put(&nd.path);
138 clnt->cl_path = path;
139 return 0;
140err_path_put:
141 path_put(&nd.path);
142err:
143 rpc_put_mount();
144 return error;
127} 145}
128 146
129static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, struct rpc_xprt *xprt) 147static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, struct rpc_xprt *xprt)
@@ -231,8 +249,8 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, stru
231 return clnt; 249 return clnt;
232 250
233out_no_auth: 251out_no_auth:
234 if (!IS_ERR(clnt->cl_dentry)) { 252 if (!IS_ERR(clnt->cl_path.dentry)) {
235 rpc_rmdir(clnt->cl_dentry); 253 rpc_remove_client_dir(clnt->cl_path.dentry);
236 rpc_put_mount(); 254 rpc_put_mount();
237 } 255 }
238out_no_path: 256out_no_path:
@@ -423,8 +441,8 @@ rpc_free_client(struct kref *kref)
423 441
424 dprintk("RPC: destroying %s client for %s\n", 442 dprintk("RPC: destroying %s client for %s\n",
425 clnt->cl_protname, clnt->cl_server); 443 clnt->cl_protname, clnt->cl_server);
426 if (!IS_ERR(clnt->cl_dentry)) { 444 if (!IS_ERR(clnt->cl_path.dentry)) {
427 rpc_rmdir(clnt->cl_dentry); 445 rpc_remove_client_dir(clnt->cl_path.dentry);
428 rpc_put_mount(); 446 rpc_put_mount();
429 } 447 }
430 if (clnt->cl_parent != clnt) { 448 if (clnt->cl_parent != clnt) {
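The hunks above hinge on one change of ownership: the pipefs mount reference taken by rpc_get_mount() and the per-client directory dentry now travel together in clnt->cl_path (a struct path) instead of the old cl_vfsmnt/cl_dentry pair, and they are dropped together on teardown. A sketch of the release side, as rpc_free_client() now does it:

	/* Sketch only: drop the client directory and the pipefs mount
	 * reference as a pair. */
	static void example_teardown(struct rpc_clnt *clnt)
	{
		if (!IS_ERR(clnt->cl_path.dentry)) {
			rpc_remove_client_dir(clnt->cl_path.dentry);
			rpc_put_mount();
		}
	}
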
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 9ced0628d69c..7f676bdf70d3 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -26,6 +26,7 @@
26#include <linux/sunrpc/clnt.h> 26#include <linux/sunrpc/clnt.h>
27#include <linux/workqueue.h> 27#include <linux/workqueue.h>
28#include <linux/sunrpc/rpc_pipe_fs.h> 28#include <linux/sunrpc/rpc_pipe_fs.h>
29#include <linux/sunrpc/cache.h>
29 30
30static struct vfsmount *rpc_mount __read_mostly; 31static struct vfsmount *rpc_mount __read_mostly;
31static int rpc_mount_count; 32static int rpc_mount_count;
@@ -125,7 +126,7 @@ static void
125rpc_close_pipes(struct inode *inode) 126rpc_close_pipes(struct inode *inode)
126{ 127{
127 struct rpc_inode *rpci = RPC_I(inode); 128 struct rpc_inode *rpci = RPC_I(inode);
128 struct rpc_pipe_ops *ops; 129 const struct rpc_pipe_ops *ops;
129 int need_release; 130 int need_release;
130 131
131 mutex_lock(&inode->i_mutex); 132 mutex_lock(&inode->i_mutex);
@@ -398,66 +399,12 @@ static const struct file_operations rpc_info_operations = {
398 399
399 400
400/* 401/*
401 * We have a single directory with 1 node in it.
402 */
403enum {
404 RPCAUTH_Root = 1,
405 RPCAUTH_lockd,
406 RPCAUTH_mount,
407 RPCAUTH_nfs,
408 RPCAUTH_portmap,
409 RPCAUTH_statd,
410 RPCAUTH_nfsd4_cb,
411 RPCAUTH_RootEOF
412};
413
414/*
415 * Description of fs contents. 402 * Description of fs contents.
416 */ 403 */
417struct rpc_filelist { 404struct rpc_filelist {
418 char *name; 405 const char *name;
419 const struct file_operations *i_fop; 406 const struct file_operations *i_fop;
420 int mode; 407 umode_t mode;
421};
422
423static struct rpc_filelist files[] = {
424 [RPCAUTH_lockd] = {
425 .name = "lockd",
426 .mode = S_IFDIR | S_IRUGO | S_IXUGO,
427 },
428 [RPCAUTH_mount] = {
429 .name = "mount",
430 .mode = S_IFDIR | S_IRUGO | S_IXUGO,
431 },
432 [RPCAUTH_nfs] = {
433 .name = "nfs",
434 .mode = S_IFDIR | S_IRUGO | S_IXUGO,
435 },
436 [RPCAUTH_portmap] = {
437 .name = "portmap",
438 .mode = S_IFDIR | S_IRUGO | S_IXUGO,
439 },
440 [RPCAUTH_statd] = {
441 .name = "statd",
442 .mode = S_IFDIR | S_IRUGO | S_IXUGO,
443 },
444 [RPCAUTH_nfsd4_cb] = {
445 .name = "nfsd4_cb",
446 .mode = S_IFDIR | S_IRUGO | S_IXUGO,
447 },
448};
449
450enum {
451 RPCAUTH_info = 2,
452 RPCAUTH_EOF
453};
454
455static struct rpc_filelist authfiles[] = {
456 [RPCAUTH_info] = {
457 .name = "info",
458 .i_fop = &rpc_info_operations,
459 .mode = S_IFREG | S_IRUSR,
460 },
461}; 408};
462 409
463struct vfsmount *rpc_get_mount(void) 410struct vfsmount *rpc_get_mount(void)
@@ -469,11 +416,13 @@ struct vfsmount *rpc_get_mount(void)
469 return ERR_PTR(err); 416 return ERR_PTR(err);
470 return rpc_mount; 417 return rpc_mount;
471} 418}
419EXPORT_SYMBOL_GPL(rpc_get_mount);
472 420
473void rpc_put_mount(void) 421void rpc_put_mount(void)
474{ 422{
475 simple_release_fs(&rpc_mount, &rpc_mount_count); 423 simple_release_fs(&rpc_mount, &rpc_mount_count);
476} 424}
425EXPORT_SYMBOL_GPL(rpc_put_mount);
477 426
478static int rpc_delete_dentry(struct dentry *dentry) 427static int rpc_delete_dentry(struct dentry *dentry)
479{ 428{
@@ -484,39 +433,8 @@ static const struct dentry_operations rpc_dentry_operations = {
484 .d_delete = rpc_delete_dentry, 433 .d_delete = rpc_delete_dentry,
485}; 434};
486 435
487static int
488rpc_lookup_parent(char *path, struct nameidata *nd)
489{
490 struct vfsmount *mnt;
491
492 if (path[0] == '\0')
493 return -ENOENT;
494
495 mnt = rpc_get_mount();
496 if (IS_ERR(mnt)) {
497 printk(KERN_WARNING "%s: %s failed to mount "
498 "pseudofilesystem \n", __FILE__, __func__);
499 return PTR_ERR(mnt);
500 }
501
502 if (vfs_path_lookup(mnt->mnt_root, mnt, path, LOOKUP_PARENT, nd)) {
503 printk(KERN_WARNING "%s: %s failed to find path %s\n",
504 __FILE__, __func__, path);
505 rpc_put_mount();
506 return -ENOENT;
507 }
508 return 0;
509}
510
511static void
512rpc_release_path(struct nameidata *nd)
513{
514 path_put(&nd->path);
515 rpc_put_mount();
516}
517
518static struct inode * 436static struct inode *
519rpc_get_inode(struct super_block *sb, int mode) 437rpc_get_inode(struct super_block *sb, umode_t mode)
520{ 438{
521 struct inode *inode = new_inode(sb); 439 struct inode *inode = new_inode(sb);
522 if (!inode) 440 if (!inode)
@@ -534,212 +452,274 @@ rpc_get_inode(struct super_block *sb, int mode)
534 return inode; 452 return inode;
535} 453}
536 454
537/* 455static int __rpc_create_common(struct inode *dir, struct dentry *dentry,
538 * FIXME: This probably has races. 456 umode_t mode,
539 */ 457 const struct file_operations *i_fop,
540static void rpc_depopulate(struct dentry *parent, 458 void *private)
541 unsigned long start, unsigned long eof)
542{ 459{
543 struct inode *dir = parent->d_inode; 460 struct inode *inode;
544 struct list_head *pos, *next;
545 struct dentry *dentry, *dvec[10];
546 int n = 0;
547 461
548 mutex_lock_nested(&dir->i_mutex, I_MUTEX_CHILD); 462 BUG_ON(!d_unhashed(dentry));
549repeat: 463 inode = rpc_get_inode(dir->i_sb, mode);
550 spin_lock(&dcache_lock); 464 if (!inode)
551 list_for_each_safe(pos, next, &parent->d_subdirs) { 465 goto out_err;
552 dentry = list_entry(pos, struct dentry, d_u.d_child); 466 inode->i_ino = iunique(dir->i_sb, 100);
553 if (!dentry->d_inode || 467 if (i_fop)
554 dentry->d_inode->i_ino < start || 468 inode->i_fop = i_fop;
555 dentry->d_inode->i_ino >= eof) 469 if (private)
556 continue; 470 rpc_inode_setowner(inode, private);
557 spin_lock(&dentry->d_lock); 471 d_add(dentry, inode);
558 if (!d_unhashed(dentry)) { 472 return 0;
559 dget_locked(dentry); 473out_err:
560 __d_drop(dentry); 474 printk(KERN_WARNING "%s: %s failed to allocate inode for dentry %s\n",
561 spin_unlock(&dentry->d_lock); 475 __FILE__, __func__, dentry->d_name.name);
562 dvec[n++] = dentry; 476 dput(dentry);
563 if (n == ARRAY_SIZE(dvec)) 477 return -ENOMEM;
564 break;
565 } else
566 spin_unlock(&dentry->d_lock);
567 }
568 spin_unlock(&dcache_lock);
569 if (n) {
570 do {
571 dentry = dvec[--n];
572 if (S_ISREG(dentry->d_inode->i_mode))
573 simple_unlink(dir, dentry);
574 else if (S_ISDIR(dentry->d_inode->i_mode))
575 simple_rmdir(dir, dentry);
576 d_delete(dentry);
577 dput(dentry);
578 } while (n);
579 goto repeat;
580 }
581 mutex_unlock(&dir->i_mutex);
582} 478}
583 479
584static int 480static int __rpc_create(struct inode *dir, struct dentry *dentry,
585rpc_populate(struct dentry *parent, 481 umode_t mode,
586 struct rpc_filelist *files, 482 const struct file_operations *i_fop,
587 int start, int eof) 483 void *private)
588{ 484{
589 struct inode *inode, *dir = parent->d_inode; 485 int err;
590 void *private = RPC_I(dir)->private;
591 struct dentry *dentry;
592 int mode, i;
593 486
594 mutex_lock(&dir->i_mutex); 487 err = __rpc_create_common(dir, dentry, S_IFREG | mode, i_fop, private);
595 for (i = start; i < eof; i++) { 488 if (err)
596 dentry = d_alloc_name(parent, files[i].name); 489 return err;
597 if (!dentry) 490 fsnotify_create(dir, dentry);
598 goto out_bad;
599 dentry->d_op = &rpc_dentry_operations;
600 mode = files[i].mode;
601 inode = rpc_get_inode(dir->i_sb, mode);
602 if (!inode) {
603 dput(dentry);
604 goto out_bad;
605 }
606 inode->i_ino = i;
607 if (files[i].i_fop)
608 inode->i_fop = files[i].i_fop;
609 if (private)
610 rpc_inode_setowner(inode, private);
611 if (S_ISDIR(mode))
612 inc_nlink(dir);
613 d_add(dentry, inode);
614 fsnotify_create(dir, dentry);
615 }
616 mutex_unlock(&dir->i_mutex);
617 return 0; 491 return 0;
618out_bad:
619 mutex_unlock(&dir->i_mutex);
620 printk(KERN_WARNING "%s: %s failed to populate directory %s\n",
621 __FILE__, __func__, parent->d_name.name);
622 return -ENOMEM;
623} 492}
624 493
625static int 494static int __rpc_mkdir(struct inode *dir, struct dentry *dentry,
626__rpc_mkdir(struct inode *dir, struct dentry *dentry) 495 umode_t mode,
496 const struct file_operations *i_fop,
497 void *private)
627{ 498{
628 struct inode *inode; 499 int err;
629 500
630 inode = rpc_get_inode(dir->i_sb, S_IFDIR | S_IRUGO | S_IXUGO); 501 err = __rpc_create_common(dir, dentry, S_IFDIR | mode, i_fop, private);
631 if (!inode) 502 if (err)
632 goto out_err; 503 return err;
633 inode->i_ino = iunique(dir->i_sb, 100);
634 d_instantiate(dentry, inode);
635 inc_nlink(dir); 504 inc_nlink(dir);
636 fsnotify_mkdir(dir, dentry); 505 fsnotify_mkdir(dir, dentry);
637 return 0; 506 return 0;
638out_err:
639 printk(KERN_WARNING "%s: %s failed to allocate inode for dentry %s\n",
640 __FILE__, __func__, dentry->d_name.name);
641 return -ENOMEM;
642} 507}
643 508
644static int 509static int __rpc_mkpipe(struct inode *dir, struct dentry *dentry,
645__rpc_rmdir(struct inode *dir, struct dentry *dentry) 510 umode_t mode,
511 const struct file_operations *i_fop,
512 void *private,
513 const struct rpc_pipe_ops *ops,
514 int flags)
646{ 515{
647 int error; 516 struct rpc_inode *rpci;
648 error = simple_rmdir(dir, dentry); 517 int err;
649 if (!error) 518
650 d_delete(dentry); 519 err = __rpc_create_common(dir, dentry, S_IFIFO | mode, i_fop, private);
651 return error; 520 if (err)
521 return err;
522 rpci = RPC_I(dentry->d_inode);
523 rpci->nkern_readwriters = 1;
524 rpci->private = private;
525 rpci->flags = flags;
526 rpci->ops = ops;
527 fsnotify_create(dir, dentry);
528 return 0;
652} 529}
653 530
654static struct dentry * 531static int __rpc_rmdir(struct inode *dir, struct dentry *dentry)
655rpc_lookup_create(struct dentry *parent, const char *name, int len, int exclusive) 532{
533 int ret;
534
535 dget(dentry);
536 ret = simple_rmdir(dir, dentry);
537 d_delete(dentry);
538 dput(dentry);
539 return ret;
540}
541
542static int __rpc_unlink(struct inode *dir, struct dentry *dentry)
543{
544 int ret;
545
546 dget(dentry);
547 ret = simple_unlink(dir, dentry);
548 d_delete(dentry);
549 dput(dentry);
550 return ret;
551}
552
553static int __rpc_rmpipe(struct inode *dir, struct dentry *dentry)
554{
555 struct inode *inode = dentry->d_inode;
556 struct rpc_inode *rpci = RPC_I(inode);
557
558 rpci->nkern_readwriters--;
559 if (rpci->nkern_readwriters != 0)
560 return 0;
561 rpc_close_pipes(inode);
562 return __rpc_unlink(dir, dentry);
563}
564
565static struct dentry *__rpc_lookup_create(struct dentry *parent,
566 struct qstr *name)
656{ 567{
657 struct inode *dir = parent->d_inode;
658 struct dentry *dentry; 568 struct dentry *dentry;
659 569
660 mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); 570 dentry = d_lookup(parent, name);
661 dentry = lookup_one_len(name, parent, len); 571 if (!dentry) {
662 if (IS_ERR(dentry)) 572 dentry = d_alloc(parent, name);
663 goto out_err; 573 if (!dentry) {
574 dentry = ERR_PTR(-ENOMEM);
575 goto out_err;
576 }
577 }
664 if (!dentry->d_inode) 578 if (!dentry->d_inode)
665 dentry->d_op = &rpc_dentry_operations; 579 dentry->d_op = &rpc_dentry_operations;
666 else if (exclusive) {
667 dput(dentry);
668 dentry = ERR_PTR(-EEXIST);
669 goto out_err;
670 }
671 return dentry;
672out_err: 580out_err:
673 mutex_unlock(&dir->i_mutex);
674 return dentry; 581 return dentry;
675} 582}
676 583
677static struct dentry * 584static struct dentry *__rpc_lookup_create_exclusive(struct dentry *parent,
678rpc_lookup_negative(char *path, struct nameidata *nd) 585 struct qstr *name)
679{ 586{
680 struct dentry *dentry; 587 struct dentry *dentry;
681 int error;
682 588
683 if ((error = rpc_lookup_parent(path, nd)) != 0) 589 dentry = __rpc_lookup_create(parent, name);
684 return ERR_PTR(error); 590 if (dentry->d_inode == NULL)
685 dentry = rpc_lookup_create(nd->path.dentry, nd->last.name, nd->last.len, 591 return dentry;
686 1); 592 dput(dentry);
687 if (IS_ERR(dentry)) 593 return ERR_PTR(-EEXIST);
688 rpc_release_path(nd);
689 return dentry;
690} 594}
691 595
692/** 596/*
693 * rpc_mkdir - Create a new directory in rpc_pipefs 597 * FIXME: This probably has races.
694 * @path: path from the rpc_pipefs root to the new directory
695 * @rpc_client: rpc client to associate with this directory
696 *
697 * This creates a directory at the given @path associated with
698 * @rpc_clnt, which will contain a file named "info" with some basic
699 * information about the client, together with any "pipes" that may
700 * later be created using rpc_mkpipe().
701 */ 598 */
702struct dentry * 599static void __rpc_depopulate(struct dentry *parent,
703rpc_mkdir(char *path, struct rpc_clnt *rpc_client) 600 const struct rpc_filelist *files,
601 int start, int eof)
704{ 602{
705 struct nameidata nd; 603 struct inode *dir = parent->d_inode;
706 struct dentry *dentry; 604 struct dentry *dentry;
707 struct inode *dir; 605 struct qstr name;
606 int i;
607
608 for (i = start; i < eof; i++) {
609 name.name = files[i].name;
610 name.len = strlen(files[i].name);
611 name.hash = full_name_hash(name.name, name.len);
612 dentry = d_lookup(parent, &name);
613
614 if (dentry == NULL)
615 continue;
616 if (dentry->d_inode == NULL)
617 goto next;
618 switch (dentry->d_inode->i_mode & S_IFMT) {
619 default:
620 BUG();
621 case S_IFREG:
622 __rpc_unlink(dir, dentry);
623 break;
624 case S_IFDIR:
625 __rpc_rmdir(dir, dentry);
626 }
627next:
628 dput(dentry);
629 }
630}
631
632static void rpc_depopulate(struct dentry *parent,
633 const struct rpc_filelist *files,
634 int start, int eof)
635{
636 struct inode *dir = parent->d_inode;
637
638 mutex_lock_nested(&dir->i_mutex, I_MUTEX_CHILD);
639 __rpc_depopulate(parent, files, start, eof);
640 mutex_unlock(&dir->i_mutex);
641}
642
643static int rpc_populate(struct dentry *parent,
644 const struct rpc_filelist *files,
645 int start, int eof,
646 void *private)
647{
648 struct inode *dir = parent->d_inode;
649 struct dentry *dentry;
650 int i, err;
651
652 mutex_lock(&dir->i_mutex);
653 for (i = start; i < eof; i++) {
654 struct qstr q;
655
656 q.name = files[i].name;
657 q.len = strlen(files[i].name);
658 q.hash = full_name_hash(q.name, q.len);
659 dentry = __rpc_lookup_create_exclusive(parent, &q);
660 err = PTR_ERR(dentry);
661 if (IS_ERR(dentry))
662 goto out_bad;
663 switch (files[i].mode & S_IFMT) {
664 default:
665 BUG();
666 case S_IFREG:
667 err = __rpc_create(dir, dentry,
668 files[i].mode,
669 files[i].i_fop,
670 private);
671 break;
672 case S_IFDIR:
673 err = __rpc_mkdir(dir, dentry,
674 files[i].mode,
675 NULL,
676 private);
677 }
678 if (err != 0)
679 goto out_bad;
680 }
681 mutex_unlock(&dir->i_mutex);
682 return 0;
683out_bad:
684 __rpc_depopulate(parent, files, start, eof);
685 mutex_unlock(&dir->i_mutex);
686 printk(KERN_WARNING "%s: %s failed to populate directory %s\n",
687 __FILE__, __func__, parent->d_name.name);
688 return err;
689}
690
691static struct dentry *rpc_mkdir_populate(struct dentry *parent,
692 struct qstr *name, umode_t mode, void *private,
693 int (*populate)(struct dentry *, void *), void *args_populate)
694{
695 struct dentry *dentry;
696 struct inode *dir = parent->d_inode;
708 int error; 697 int error;
709 698
710 dentry = rpc_lookup_negative(path, &nd); 699 mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
700 dentry = __rpc_lookup_create_exclusive(parent, name);
711 if (IS_ERR(dentry)) 701 if (IS_ERR(dentry))
712 return dentry; 702 goto out;
713 dir = nd.path.dentry->d_inode; 703 error = __rpc_mkdir(dir, dentry, mode, NULL, private);
714 if ((error = __rpc_mkdir(dir, dentry)) != 0) 704 if (error != 0)
715 goto err_dput; 705 goto out_err;
716 RPC_I(dentry->d_inode)->private = rpc_client; 706 if (populate != NULL) {
717 error = rpc_populate(dentry, authfiles, 707 error = populate(dentry, args_populate);
718 RPCAUTH_info, RPCAUTH_EOF); 708 if (error)
719 if (error) 709 goto err_rmdir;
720 goto err_depopulate; 710 }
721 dget(dentry);
722out: 711out:
723 mutex_unlock(&dir->i_mutex); 712 mutex_unlock(&dir->i_mutex);
724 rpc_release_path(&nd);
725 return dentry; 713 return dentry;
726err_depopulate: 714err_rmdir:
727 rpc_depopulate(dentry, RPCAUTH_info, RPCAUTH_EOF);
728 __rpc_rmdir(dir, dentry); 715 __rpc_rmdir(dir, dentry);
729err_dput: 716out_err:
730 dput(dentry);
731 printk(KERN_WARNING "%s: %s() failed to create directory %s (errno = %d)\n",
732 __FILE__, __func__, path, error);
733 dentry = ERR_PTR(error); 717 dentry = ERR_PTR(error);
734 goto out; 718 goto out;
735} 719}
736 720
737/** 721static int rpc_rmdir_depopulate(struct dentry *dentry,
738 * rpc_rmdir - Remove a directory created with rpc_mkdir() 722 void (*depopulate)(struct dentry *))
739 * @dentry: directory to remove
740 */
741int
742rpc_rmdir(struct dentry *dentry)
743{ 723{
744 struct dentry *parent; 724 struct dentry *parent;
745 struct inode *dir; 725 struct inode *dir;
@@ -748,9 +728,9 @@ rpc_rmdir(struct dentry *dentry)
748 parent = dget_parent(dentry); 728 parent = dget_parent(dentry);
749 dir = parent->d_inode; 729 dir = parent->d_inode;
750 mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); 730 mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
751 rpc_depopulate(dentry, RPCAUTH_info, RPCAUTH_EOF); 731 if (depopulate != NULL)
732 depopulate(dentry);
752 error = __rpc_rmdir(dir, dentry); 733 error = __rpc_rmdir(dir, dentry);
753 dput(dentry);
754 mutex_unlock(&dir->i_mutex); 734 mutex_unlock(&dir->i_mutex);
755 dput(parent); 735 dput(parent);
756 return error; 736 return error;
@@ -776,50 +756,54 @@ rpc_rmdir(struct dentry *dentry)
776 * The @private argument passed here will be available to all these methods 756 * The @private argument passed here will be available to all these methods
777 * from the file pointer, via RPC_I(file->f_dentry->d_inode)->private. 757 * from the file pointer, via RPC_I(file->f_dentry->d_inode)->private.
778 */ 758 */
779struct dentry * 759struct dentry *rpc_mkpipe(struct dentry *parent, const char *name,
780rpc_mkpipe(struct dentry *parent, const char *name, void *private, struct rpc_pipe_ops *ops, int flags) 760 void *private, const struct rpc_pipe_ops *ops,
761 int flags)
781{ 762{
782 struct dentry *dentry; 763 struct dentry *dentry;
783 struct inode *dir, *inode; 764 struct inode *dir = parent->d_inode;
784 struct rpc_inode *rpci; 765 umode_t umode = S_IFIFO | S_IRUSR | S_IWUSR;
766 struct qstr q;
767 int err;
768
769 if (ops->upcall == NULL)
770 umode &= ~S_IRUGO;
771 if (ops->downcall == NULL)
772 umode &= ~S_IWUGO;
773
774 q.name = name;
775 q.len = strlen(name);
776 q.hash = full_name_hash(q.name, q.len),
785 777
786 dentry = rpc_lookup_create(parent, name, strlen(name), 0); 778 mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
779 dentry = __rpc_lookup_create(parent, &q);
787 if (IS_ERR(dentry)) 780 if (IS_ERR(dentry))
788 return dentry; 781 goto out;
789 dir = parent->d_inode;
790 if (dentry->d_inode) { 782 if (dentry->d_inode) {
791 rpci = RPC_I(dentry->d_inode); 783 struct rpc_inode *rpci = RPC_I(dentry->d_inode);
792 if (rpci->private != private || 784 if (rpci->private != private ||
793 rpci->ops != ops || 785 rpci->ops != ops ||
794 rpci->flags != flags) { 786 rpci->flags != flags) {
795 dput (dentry); 787 dput (dentry);
796 dentry = ERR_PTR(-EBUSY); 788 err = -EBUSY;
789 goto out_err;
797 } 790 }
798 rpci->nkern_readwriters++; 791 rpci->nkern_readwriters++;
799 goto out; 792 goto out;
800 } 793 }
801 inode = rpc_get_inode(dir->i_sb, S_IFIFO | S_IRUSR | S_IWUSR); 794
802 if (!inode) 795 err = __rpc_mkpipe(dir, dentry, umode, &rpc_pipe_fops,
803 goto err_dput; 796 private, ops, flags);
804 inode->i_ino = iunique(dir->i_sb, 100); 797 if (err)
805 inode->i_fop = &rpc_pipe_fops; 798 goto out_err;
806 d_instantiate(dentry, inode);
807 rpci = RPC_I(inode);
808 rpci->private = private;
809 rpci->flags = flags;
810 rpci->ops = ops;
811 rpci->nkern_readwriters = 1;
812 fsnotify_create(dir, dentry);
813 dget(dentry);
814out: 799out:
815 mutex_unlock(&dir->i_mutex); 800 mutex_unlock(&dir->i_mutex);
816 return dentry; 801 return dentry;
817err_dput: 802out_err:
818 dput(dentry); 803 dentry = ERR_PTR(err);
819 dentry = ERR_PTR(-ENOMEM);
820 printk(KERN_WARNING "%s: %s() failed to create pipe %s/%s (errno = %d)\n", 804 printk(KERN_WARNING "%s: %s() failed to create pipe %s/%s (errno = %d)\n",
821 __FILE__, __func__, parent->d_name.name, name, 805 __FILE__, __func__, parent->d_name.name, name,
822 -ENOMEM); 806 err);
823 goto out; 807 goto out;
824} 808}
825EXPORT_SYMBOL_GPL(rpc_mkpipe); 809EXPORT_SYMBOL_GPL(rpc_mkpipe);
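One behavioural detail worth noting in the rewritten rpc_mkpipe(): the pipe's permission bits are now derived from the ops table, so a pipe whose ops provide no ->downcall is created without write permission, and one without ->upcall without read permission. A hypothetical caller, with made-up names and the remaining rpc_pipe_ops hooks elided:

	/* Hypothetical pipe: readable by a user-space daemon but not writable,
	 * because ->downcall is left NULL. */
	static const struct rpc_pipe_ops example_pipe_ops = {
		.upcall		= example_upcall,	/* hypothetical handler */
		/* .downcall and the other hooks elided */
	};

	dentry = rpc_mkpipe(parent, "example", clnt, &example_pipe_ops, 0);
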
@@ -842,19 +826,107 @@ rpc_unlink(struct dentry *dentry)
842 parent = dget_parent(dentry); 826 parent = dget_parent(dentry);
843 dir = parent->d_inode; 827 dir = parent->d_inode;
844 mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); 828 mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
845 if (--RPC_I(dentry->d_inode)->nkern_readwriters == 0) { 829 error = __rpc_rmpipe(dir, dentry);
846 rpc_close_pipes(dentry->d_inode);
847 error = simple_unlink(dir, dentry);
848 if (!error)
849 d_delete(dentry);
850 }
851 dput(dentry);
852 mutex_unlock(&dir->i_mutex); 830 mutex_unlock(&dir->i_mutex);
853 dput(parent); 831 dput(parent);
854 return error; 832 return error;
855} 833}
856EXPORT_SYMBOL_GPL(rpc_unlink); 834EXPORT_SYMBOL_GPL(rpc_unlink);
857 835
836enum {
837 RPCAUTH_info,
838 RPCAUTH_EOF
839};
840
841static const struct rpc_filelist authfiles[] = {
842 [RPCAUTH_info] = {
843 .name = "info",
844 .i_fop = &rpc_info_operations,
845 .mode = S_IFREG | S_IRUSR,
846 },
847};
848
849static int rpc_clntdir_populate(struct dentry *dentry, void *private)
850{
851 return rpc_populate(dentry,
852 authfiles, RPCAUTH_info, RPCAUTH_EOF,
853 private);
854}
855
856static void rpc_clntdir_depopulate(struct dentry *dentry)
857{
858 rpc_depopulate(dentry, authfiles, RPCAUTH_info, RPCAUTH_EOF);
859}
860
861/**
862 * rpc_create_client_dir - Create a new rpc_client directory in rpc_pipefs
863 * @path: path from the rpc_pipefs root to the new directory
864 * @rpc_client: rpc client to associate with this directory
865 *
866 * This creates a directory at the given @path associated with
867 * @rpc_clnt, which will contain a file named "info" with some basic
868 * information about the client, together with any "pipes" that may
869 * later be created using rpc_mkpipe().
870 */
871struct dentry *rpc_create_client_dir(struct dentry *dentry,
872 struct qstr *name,
873 struct rpc_clnt *rpc_client)
874{
875 return rpc_mkdir_populate(dentry, name, S_IRUGO | S_IXUGO, NULL,
876 rpc_clntdir_populate, rpc_client);
877}
878
879/**
880 * rpc_remove_client_dir - Remove a directory created with rpc_create_client_dir()
881 * @dentry: directory to remove
882 */
883int rpc_remove_client_dir(struct dentry *dentry)
884{
885 return rpc_rmdir_depopulate(dentry, rpc_clntdir_depopulate);
886}
887
888static const struct rpc_filelist cache_pipefs_files[3] = {
889 [0] = {
890 .name = "channel",
891 .i_fop = &cache_file_operations_pipefs,
892 .mode = S_IFREG|S_IRUSR|S_IWUSR,
893 },
894 [1] = {
895 .name = "content",
896 .i_fop = &content_file_operations_pipefs,
897 .mode = S_IFREG|S_IRUSR,
898 },
899 [2] = {
900 .name = "flush",
901 .i_fop = &cache_flush_operations_pipefs,
902 .mode = S_IFREG|S_IRUSR|S_IWUSR,
903 },
904};
905
906static int rpc_cachedir_populate(struct dentry *dentry, void *private)
907{
908 return rpc_populate(dentry,
909 cache_pipefs_files, 0, 3,
910 private);
911}
912
913static void rpc_cachedir_depopulate(struct dentry *dentry)
914{
915 rpc_depopulate(dentry, cache_pipefs_files, 0, 3);
916}
917
918struct dentry *rpc_create_cache_dir(struct dentry *parent, struct qstr *name,
919 mode_t umode, struct cache_detail *cd)
920{
921 return rpc_mkdir_populate(parent, name, umode, NULL,
922 rpc_cachedir_populate, cd);
923}
924
925void rpc_remove_cache_dir(struct dentry *dentry)
926{
927 rpc_rmdir_depopulate(dentry, rpc_cachedir_depopulate);
928}
929
858/* 930/*
859 * populate the filesystem 931 * populate the filesystem
860 */ 932 */
@@ -866,6 +938,51 @@ static struct super_operations s_ops = {
866 938
867#define RPCAUTH_GSSMAGIC 0x67596969 939#define RPCAUTH_GSSMAGIC 0x67596969
868 940
941/*
942 * We have a single directory with 1 node in it.
943 */
944enum {
945 RPCAUTH_lockd,
946 RPCAUTH_mount,
947 RPCAUTH_nfs,
948 RPCAUTH_portmap,
949 RPCAUTH_statd,
950 RPCAUTH_nfsd4_cb,
951 RPCAUTH_cache,
952 RPCAUTH_RootEOF
953};
954
955static const struct rpc_filelist files[] = {
956 [RPCAUTH_lockd] = {
957 .name = "lockd",
958 .mode = S_IFDIR | S_IRUGO | S_IXUGO,
959 },
960 [RPCAUTH_mount] = {
961 .name = "mount",
962 .mode = S_IFDIR | S_IRUGO | S_IXUGO,
963 },
964 [RPCAUTH_nfs] = {
965 .name = "nfs",
966 .mode = S_IFDIR | S_IRUGO | S_IXUGO,
967 },
968 [RPCAUTH_portmap] = {
969 .name = "portmap",
970 .mode = S_IFDIR | S_IRUGO | S_IXUGO,
971 },
972 [RPCAUTH_statd] = {
973 .name = "statd",
974 .mode = S_IFDIR | S_IRUGO | S_IXUGO,
975 },
976 [RPCAUTH_nfsd4_cb] = {
977 .name = "nfsd4_cb",
978 .mode = S_IFDIR | S_IRUGO | S_IXUGO,
979 },
980 [RPCAUTH_cache] = {
981 .name = "cache",
982 .mode = S_IFDIR | S_IRUGO | S_IXUGO,
983 },
984};
985
869static int 986static int
870rpc_fill_super(struct super_block *sb, void *data, int silent) 987rpc_fill_super(struct super_block *sb, void *data, int silent)
871{ 988{
@@ -886,7 +1003,7 @@ rpc_fill_super(struct super_block *sb, void *data, int silent)
886 iput(inode); 1003 iput(inode);
887 return -ENOMEM; 1004 return -ENOMEM;
888 } 1005 }
889 if (rpc_populate(root, files, RPCAUTH_Root + 1, RPCAUTH_RootEOF)) 1006 if (rpc_populate(root, files, RPCAUTH_lockd, RPCAUTH_RootEOF, NULL))
890 goto out; 1007 goto out;
891 sb->s_root = root; 1008 sb->s_root = root;
892 return 0; 1009 return 0;
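The whole rpc_pipe.c rework funnels directory contents through one table type, struct rpc_filelist, with rpc_populate()/__rpc_depopulate() dispatching on the S_IFMT bits of each entry's mode. A hypothetical table in that style (the names and example_fops are assumptions, not part of the patch):

	/* Regular files carry their file_operations, directories only a mode;
	 * rpc_populate() picks __rpc_create() or __rpc_mkdir() from S_IFMT. */
	static const struct rpc_filelist example_files[] = {
		{
			.name	= "status",
			.i_fop	= &example_fops,	/* hypothetical fops */
			.mode	= S_IFREG | S_IRUSR,
		},
		{
			.name	= "pipes",
			.mode	= S_IFDIR | S_IRUGO | S_IXUGO,
		},
	};
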
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index beee6da33035..830faf4d9997 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -75,6 +75,37 @@ enum {
75#define RPCB_OWNER_STRING "0" 75#define RPCB_OWNER_STRING "0"
76#define RPCB_MAXOWNERLEN sizeof(RPCB_OWNER_STRING) 76#define RPCB_MAXOWNERLEN sizeof(RPCB_OWNER_STRING)
77 77
78/*
79 * XDR data type sizes
80 */
81#define RPCB_program_sz (1)
82#define RPCB_version_sz (1)
83#define RPCB_protocol_sz (1)
84#define RPCB_port_sz (1)
85#define RPCB_boolean_sz (1)
86
87#define RPCB_netid_sz (1 + XDR_QUADLEN(RPCBIND_MAXNETIDLEN))
88#define RPCB_addr_sz (1 + XDR_QUADLEN(RPCBIND_MAXUADDRLEN))
89#define RPCB_ownerstring_sz (1 + XDR_QUADLEN(RPCB_MAXOWNERLEN))
90
91/*
92 * XDR argument and result sizes
93 */
94#define RPCB_mappingargs_sz (RPCB_program_sz + RPCB_version_sz + \
95 RPCB_protocol_sz + RPCB_port_sz)
96#define RPCB_getaddrargs_sz (RPCB_program_sz + RPCB_version_sz + \
97 RPCB_netid_sz + RPCB_addr_sz + \
98 RPCB_ownerstring_sz)
99
100#define RPCB_getportres_sz RPCB_port_sz
101#define RPCB_setres_sz RPCB_boolean_sz
102
103/*
104 * Note that RFC 1833 does not put any size restrictions on the
105 * address string returned by the remote rpcbind database.
106 */
107#define RPCB_getaddrres_sz RPCB_addr_sz
108
78static void rpcb_getport_done(struct rpc_task *, void *); 109static void rpcb_getport_done(struct rpc_task *, void *);
79static void rpcb_map_release(void *data); 110static void rpcb_map_release(void *data);
80static struct rpc_program rpcb_program; 111static struct rpc_program rpcb_program;
@@ -122,6 +153,7 @@ static void rpcb_map_release(void *data)
122 153
123 rpcb_wake_rpcbind_waiters(map->r_xprt, map->r_status); 154 rpcb_wake_rpcbind_waiters(map->r_xprt, map->r_status);
124 xprt_put(map->r_xprt); 155 xprt_put(map->r_xprt);
156 kfree(map->r_addr);
125 kfree(map); 157 kfree(map);
126} 158}
127 159
@@ -268,12 +300,9 @@ static int rpcb_register_inet4(const struct sockaddr *sap,
268 const struct sockaddr_in *sin = (const struct sockaddr_in *)sap; 300 const struct sockaddr_in *sin = (const struct sockaddr_in *)sap;
269 struct rpcbind_args *map = msg->rpc_argp; 301 struct rpcbind_args *map = msg->rpc_argp;
270 unsigned short port = ntohs(sin->sin_port); 302 unsigned short port = ntohs(sin->sin_port);
271 char buf[32]; 303 int result;
272 304
273 /* Construct AF_INET universal address */ 305 map->r_addr = rpc_sockaddr2uaddr(sap);
274 snprintf(buf, sizeof(buf), "%pI4.%u.%u",
275 &sin->sin_addr.s_addr, port >> 8, port & 0xff);
276 map->r_addr = buf;
277 306
278 dprintk("RPC: %sregistering [%u, %u, %s, '%s'] with " 307 dprintk("RPC: %sregistering [%u, %u, %s, '%s'] with "
279 "local rpcbind\n", (port ? "" : "un"), 308 "local rpcbind\n", (port ? "" : "un"),
@@ -284,7 +313,9 @@ static int rpcb_register_inet4(const struct sockaddr *sap,
284 if (port) 313 if (port)
285 msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET]; 314 msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET];
286 315
287 return rpcb_register_call(RPCBVERS_4, msg); 316 result = rpcb_register_call(RPCBVERS_4, msg);
317 kfree(map->r_addr);
318 return result;
288} 319}
289 320
290/* 321/*
@@ -296,16 +327,9 @@ static int rpcb_register_inet6(const struct sockaddr *sap,
296 const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)sap; 327 const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)sap;
297 struct rpcbind_args *map = msg->rpc_argp; 328 struct rpcbind_args *map = msg->rpc_argp;
298 unsigned short port = ntohs(sin6->sin6_port); 329 unsigned short port = ntohs(sin6->sin6_port);
299 char buf[64]; 330 int result;
300 331
301 /* Construct AF_INET6 universal address */ 332 map->r_addr = rpc_sockaddr2uaddr(sap);
302 if (ipv6_addr_any(&sin6->sin6_addr))
303 snprintf(buf, sizeof(buf), "::.%u.%u",
304 port >> 8, port & 0xff);
305 else
306 snprintf(buf, sizeof(buf), "%pI6.%u.%u",
307 &sin6->sin6_addr, port >> 8, port & 0xff);
308 map->r_addr = buf;
309 333
310 dprintk("RPC: %sregistering [%u, %u, %s, '%s'] with " 334 dprintk("RPC: %sregistering [%u, %u, %s, '%s'] with "
311 "local rpcbind\n", (port ? "" : "un"), 335 "local rpcbind\n", (port ? "" : "un"),
@@ -316,7 +340,9 @@ static int rpcb_register_inet6(const struct sockaddr *sap,
316 if (port) 340 if (port)
317 msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET]; 341 msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET];
318 342
319 return rpcb_register_call(RPCBVERS_4, msg); 343 result = rpcb_register_call(RPCBVERS_4, msg);
344 kfree(map->r_addr);
345 return result;
320} 346}
321 347
322static int rpcb_unregister_all_protofamilies(struct rpc_message *msg) 348static int rpcb_unregister_all_protofamilies(struct rpc_message *msg)
@@ -428,7 +454,7 @@ int rpcb_getport_sync(struct sockaddr_in *sin, u32 prog, u32 vers, int prot)
428 struct rpc_message msg = { 454 struct rpc_message msg = {
429 .rpc_proc = &rpcb_procedures2[RPCBPROC_GETPORT], 455 .rpc_proc = &rpcb_procedures2[RPCBPROC_GETPORT],
430 .rpc_argp = &map, 456 .rpc_argp = &map,
431 .rpc_resp = &map.r_port, 457 .rpc_resp = &map,
432 }; 458 };
433 struct rpc_clnt *rpcb_clnt; 459 struct rpc_clnt *rpcb_clnt;
434 int status; 460 int status;
@@ -458,7 +484,7 @@ static struct rpc_task *rpcb_call_async(struct rpc_clnt *rpcb_clnt, struct rpcbi
458 struct rpc_message msg = { 484 struct rpc_message msg = {
459 .rpc_proc = proc, 485 .rpc_proc = proc,
460 .rpc_argp = map, 486 .rpc_argp = map,
461 .rpc_resp = &map->r_port, 487 .rpc_resp = map,
462 }; 488 };
463 struct rpc_task_setup task_setup_data = { 489 struct rpc_task_setup task_setup_data = {
464 .rpc_client = rpcb_clnt, 490 .rpc_client = rpcb_clnt,
@@ -539,6 +565,7 @@ void rpcb_getport_async(struct rpc_task *task)
539 goto bailout_nofree; 565 goto bailout_nofree;
540 } 566 }
541 567
568 /* Parent transport's destination address */
542 salen = rpc_peeraddr(clnt, sap, sizeof(addr)); 569 salen = rpc_peeraddr(clnt, sap, sizeof(addr));
543 570
544 /* Don't ever use rpcbind v2 for AF_INET6 requests */ 571 /* Don't ever use rpcbind v2 for AF_INET6 requests */
@@ -589,11 +616,22 @@ void rpcb_getport_async(struct rpc_task *task)
589 map->r_prot = xprt->prot; 616 map->r_prot = xprt->prot;
590 map->r_port = 0; 617 map->r_port = 0;
591 map->r_xprt = xprt_get(xprt); 618 map->r_xprt = xprt_get(xprt);
592 map->r_netid = rpc_peeraddr2str(clnt, RPC_DISPLAY_NETID);
593 map->r_addr = rpc_peeraddr2str(rpcb_clnt, RPC_DISPLAY_UNIVERSAL_ADDR);
594 map->r_owner = "";
595 map->r_status = -EIO; 619 map->r_status = -EIO;
596 620
621 switch (bind_version) {
622 case RPCBVERS_4:
623 case RPCBVERS_3:
624 map->r_netid = rpc_peeraddr2str(clnt, RPC_DISPLAY_NETID);
625 map->r_addr = rpc_sockaddr2uaddr(sap);
626 map->r_owner = "";
627 break;
628 case RPCBVERS_2:
629 map->r_addr = NULL;
630 break;
631 default:
632 BUG();
633 }
634
597 child = rpcb_call_async(rpcb_clnt, map, proc); 635 child = rpcb_call_async(rpcb_clnt, map, proc);
598 rpc_release_client(rpcb_clnt); 636 rpc_release_client(rpcb_clnt);
599 if (IS_ERR(child)) { 637 if (IS_ERR(child)) {
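The hunks above drop the open-coded "%pI4.%u.%u" formatting and the backwards string walk in favour of rpc_sockaddr2uaddr()/rpc_uaddr2sockaddr(); the on-the-wire layout is unchanged. A stand-alone illustration of that RFC 1833 universal-address layout, with an invented address and port:

	#include <stdio.h>

	int main(void)
	{
		unsigned short port = 2049;		/* example value (NFS) */
		char uaddr[32];

		/* textual address followed by the port's high and low bytes */
		snprintf(uaddr, sizeof(uaddr), "%u.%u.%u.%u.%u.%u",
			 192, 0, 2, 1, port >> 8, port & 0xff);
		printf("%s\n", uaddr);			/* prints "192.0.2.1.8.1" */
		return 0;
	}
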
@@ -656,176 +694,278 @@ static void rpcb_getport_done(struct rpc_task *child, void *data)
656 * XDR functions for rpcbind 694 * XDR functions for rpcbind
657 */ 695 */
658 696
659static int rpcb_encode_mapping(struct rpc_rqst *req, __be32 *p, 697static int rpcb_enc_mapping(struct rpc_rqst *req, __be32 *p,
660 struct rpcbind_args *rpcb) 698 const struct rpcbind_args *rpcb)
661{ 699{
662 dprintk("RPC: encoding rpcb request (%u, %u, %d, %u)\n", 700 struct rpc_task *task = req->rq_task;
701 struct xdr_stream xdr;
702
703 dprintk("RPC: %5u encoding PMAP_%s call (%u, %u, %d, %u)\n",
704 task->tk_pid, task->tk_msg.rpc_proc->p_name,
663 rpcb->r_prog, rpcb->r_vers, rpcb->r_prot, rpcb->r_port); 705 rpcb->r_prog, rpcb->r_vers, rpcb->r_prot, rpcb->r_port);
706
707 xdr_init_encode(&xdr, &req->rq_snd_buf, p);
708
709 p = xdr_reserve_space(&xdr, sizeof(__be32) * RPCB_mappingargs_sz);
710 if (unlikely(p == NULL))
711 return -EIO;
712
664 *p++ = htonl(rpcb->r_prog); 713 *p++ = htonl(rpcb->r_prog);
665 *p++ = htonl(rpcb->r_vers); 714 *p++ = htonl(rpcb->r_vers);
666 *p++ = htonl(rpcb->r_prot); 715 *p++ = htonl(rpcb->r_prot);
667 *p++ = htonl(rpcb->r_port); 716 *p = htonl(rpcb->r_port);
668 717
669 req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
670 return 0; 718 return 0;
671} 719}
672 720
673static int rpcb_decode_getport(struct rpc_rqst *req, __be32 *p, 721static int rpcb_dec_getport(struct rpc_rqst *req, __be32 *p,
674 unsigned short *portp) 722 struct rpcbind_args *rpcb)
675{ 723{
676 *portp = (unsigned short) ntohl(*p++); 724 struct rpc_task *task = req->rq_task;
677 dprintk("RPC: rpcb getport result: %u\n", 725 struct xdr_stream xdr;
678 *portp); 726 unsigned long port;
727
728 xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
729
730 rpcb->r_port = 0;
731
732 p = xdr_inline_decode(&xdr, sizeof(__be32));
733 if (unlikely(p == NULL))
734 return -EIO;
735
736 port = ntohl(*p);
737 dprintk("RPC: %5u PMAP_%s result: %lu\n", task->tk_pid,
738 task->tk_msg.rpc_proc->p_name, port);
739 if (unlikely(port > USHORT_MAX))
740 return -EIO;
741
742 rpcb->r_port = port;
679 return 0; 743 return 0;
680} 744}
681 745
682static int rpcb_decode_set(struct rpc_rqst *req, __be32 *p, 746static int rpcb_dec_set(struct rpc_rqst *req, __be32 *p,
683 unsigned int *boolp) 747 unsigned int *boolp)
684{ 748{
685 *boolp = (unsigned int) ntohl(*p++); 749 struct rpc_task *task = req->rq_task;
686 dprintk("RPC: rpcb set/unset call %s\n", 750 struct xdr_stream xdr;
751
752 xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
753
754 p = xdr_inline_decode(&xdr, sizeof(__be32));
755 if (unlikely(p == NULL))
756 return -EIO;
757
758 *boolp = 0;
759 if (*p)
760 *boolp = 1;
761
762 dprintk("RPC: %5u RPCB_%s call %s\n",
763 task->tk_pid, task->tk_msg.rpc_proc->p_name,
687 (*boolp ? "succeeded" : "failed")); 764 (*boolp ? "succeeded" : "failed"));
688 return 0; 765 return 0;
689} 766}
690 767
691static int rpcb_encode_getaddr(struct rpc_rqst *req, __be32 *p, 768static int encode_rpcb_string(struct xdr_stream *xdr, const char *string,
692 struct rpcbind_args *rpcb) 769 const u32 maxstrlen)
693{ 770{
694 dprintk("RPC: encoding rpcb request (%u, %u, %s)\n", 771 u32 len;
695 rpcb->r_prog, rpcb->r_vers, rpcb->r_addr); 772 __be32 *p;
696 *p++ = htonl(rpcb->r_prog);
697 *p++ = htonl(rpcb->r_vers);
698 773
699 p = xdr_encode_string(p, rpcb->r_netid); 774 if (unlikely(string == NULL))
700 p = xdr_encode_string(p, rpcb->r_addr); 775 return -EIO;
701 p = xdr_encode_string(p, rpcb->r_owner); 776 len = strlen(string);
777 if (unlikely(len > maxstrlen))
778 return -EIO;
702 779
703 req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); 780 p = xdr_reserve_space(xdr, sizeof(__be32) + len);
781 if (unlikely(p == NULL))
782 return -EIO;
783 xdr_encode_opaque(p, string, len);
704 784
705 return 0; 785 return 0;
706} 786}
707 787
708static int rpcb_decode_getaddr(struct rpc_rqst *req, __be32 *p, 788static int rpcb_enc_getaddr(struct rpc_rqst *req, __be32 *p,
709 unsigned short *portp) 789 const struct rpcbind_args *rpcb)
710{ 790{
711 char *addr; 791 struct rpc_task *task = req->rq_task;
712 u32 addr_len; 792 struct xdr_stream xdr;
713 int c, i, f, first, val;
714 793
715 *portp = 0; 794 dprintk("RPC: %5u encoding RPCB_%s call (%u, %u, '%s', '%s')\n",
716 addr_len = ntohl(*p++); 795 task->tk_pid, task->tk_msg.rpc_proc->p_name,
796 rpcb->r_prog, rpcb->r_vers,
797 rpcb->r_netid, rpcb->r_addr);
717 798
718 if (addr_len == 0) { 799 xdr_init_encode(&xdr, &req->rq_snd_buf, p);
719 dprintk("RPC: rpcb_decode_getaddr: "
720 "service is not registered\n");
721 return 0;
722 }
723 800
724 /* 801 p = xdr_reserve_space(&xdr,
725 * Simple sanity check. 802 sizeof(__be32) * (RPCB_program_sz + RPCB_version_sz));
726 */ 803 if (unlikely(p == NULL))
727 if (addr_len > RPCBIND_MAXUADDRLEN) 804 return -EIO;
728 goto out_err; 805 *p++ = htonl(rpcb->r_prog);
729 806 *p = htonl(rpcb->r_vers);
730 /*
731 * Start at the end and walk backwards until the first dot
732 * is encountered. When the second dot is found, we have
733 * both parts of the port number.
734 */
735 addr = (char *)p;
736 val = 0;
737 first = 1;
738 f = 1;
739 for (i = addr_len - 1; i > 0; i--) {
740 c = addr[i];
741 if (c >= '0' && c <= '9') {
742 val += (c - '0') * f;
743 f *= 10;
744 } else if (c == '.') {
745 if (first) {
746 *portp = val;
747 val = first = 0;
748 f = 1;
749 } else {
750 *portp |= (val << 8);
751 break;
752 }
753 }
754 }
755 807
756 /* 808 if (encode_rpcb_string(&xdr, rpcb->r_netid, RPCBIND_MAXNETIDLEN))
757 * Simple sanity check. If we never saw a dot in the reply, 809 return -EIO;
758 * then this was probably just garbage. 810 if (encode_rpcb_string(&xdr, rpcb->r_addr, RPCBIND_MAXUADDRLEN))
759 */ 811 return -EIO;
760 if (first) 812 if (encode_rpcb_string(&xdr, rpcb->r_owner, RPCB_MAXOWNERLEN))
761 goto out_err; 813 return -EIO;
762 814
763 dprintk("RPC: rpcb_decode_getaddr port=%u\n", *portp);
764 return 0; 815 return 0;
765
766out_err:
767 dprintk("RPC: rpcbind server returned malformed reply\n");
768 return -EIO;
769} 816}
770 817
771#define RPCB_program_sz (1u) 818static int rpcb_dec_getaddr(struct rpc_rqst *req, __be32 *p,
772#define RPCB_version_sz (1u) 819 struct rpcbind_args *rpcb)
773#define RPCB_protocol_sz (1u) 820{
774#define RPCB_port_sz (1u) 821 struct sockaddr_storage address;
775#define RPCB_boolean_sz (1u) 822 struct sockaddr *sap = (struct sockaddr *)&address;
823 struct rpc_task *task = req->rq_task;
824 struct xdr_stream xdr;
825 u32 len;
776 826
777#define RPCB_netid_sz (1+XDR_QUADLEN(RPCBIND_MAXNETIDLEN)) 827 rpcb->r_port = 0;
778#define RPCB_addr_sz (1+XDR_QUADLEN(RPCBIND_MAXUADDRLEN))
779#define RPCB_ownerstring_sz (1+XDR_QUADLEN(RPCB_MAXOWNERLEN))
780 828
781#define RPCB_mappingargs_sz RPCB_program_sz+RPCB_version_sz+ \ 829 xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
782 RPCB_protocol_sz+RPCB_port_sz
783#define RPCB_getaddrargs_sz RPCB_program_sz+RPCB_version_sz+ \
784 RPCB_netid_sz+RPCB_addr_sz+ \
785 RPCB_ownerstring_sz
786 830
787#define RPCB_setres_sz RPCB_boolean_sz 831 p = xdr_inline_decode(&xdr, sizeof(__be32));
788#define RPCB_getportres_sz RPCB_port_sz 832 if (unlikely(p == NULL))
789 833 goto out_fail;
790/* 834 len = ntohl(*p);
791 * Note that RFC 1833 does not put any size restrictions on the
792 * address string returned by the remote rpcbind database.
793 */
794#define RPCB_getaddrres_sz RPCB_addr_sz
795 835
796#define PROC(proc, argtype, restype) \ 836 /*
797 [RPCBPROC_##proc] = { \ 837 * If the returned universal address is a null string,
798 .p_proc = RPCBPROC_##proc, \ 838 * the requested RPC service was not registered.
799 .p_encode = (kxdrproc_t) rpcb_encode_##argtype, \ 839 */
800 .p_decode = (kxdrproc_t) rpcb_decode_##restype, \ 840 if (len == 0) {
801 .p_arglen = RPCB_##argtype##args_sz, \ 841 dprintk("RPC: %5u RPCB reply: program not registered\n",
802 .p_replen = RPCB_##restype##res_sz, \ 842 task->tk_pid);
803 .p_statidx = RPCBPROC_##proc, \ 843 return 0;
804 .p_timer = 0, \
805 .p_name = #proc, \
806 } 844 }
807 845
846 if (unlikely(len > RPCBIND_MAXUADDRLEN))
847 goto out_fail;
848
849 p = xdr_inline_decode(&xdr, len);
850 if (unlikely(p == NULL))
851 goto out_fail;
852 dprintk("RPC: %5u RPCB_%s reply: %s\n", task->tk_pid,
853 task->tk_msg.rpc_proc->p_name, (char *)p);
854
855 if (rpc_uaddr2sockaddr((char *)p, len, sap, sizeof(address)) == 0)
856 goto out_fail;
857 rpcb->r_port = rpc_get_port(sap);
858
859 return 0;
860
861out_fail:
862 dprintk("RPC: %5u malformed RPCB_%s reply\n",
863 task->tk_pid, task->tk_msg.rpc_proc->p_name);
864 return -EIO;
865}
866
808/* 867/*
809 * Not all rpcbind procedures described in RFC 1833 are implemented 868 * Not all rpcbind procedures described in RFC 1833 are implemented
810 * since the Linux kernel RPC code requires only these. 869 * since the Linux kernel RPC code requires only these.
811 */ 870 */
871
812static struct rpc_procinfo rpcb_procedures2[] = { 872static struct rpc_procinfo rpcb_procedures2[] = {
813 PROC(SET, mapping, set), 873 [RPCBPROC_SET] = {
814 PROC(UNSET, mapping, set), 874 .p_proc = RPCBPROC_SET,
815 PROC(GETPORT, mapping, getport), 875 .p_encode = (kxdrproc_t)rpcb_enc_mapping,
876 .p_decode = (kxdrproc_t)rpcb_dec_set,
877 .p_arglen = RPCB_mappingargs_sz,
878 .p_replen = RPCB_setres_sz,
879 .p_statidx = RPCBPROC_SET,
880 .p_timer = 0,
881 .p_name = "SET",
882 },
883 [RPCBPROC_UNSET] = {
884 .p_proc = RPCBPROC_UNSET,
885 .p_encode = (kxdrproc_t)rpcb_enc_mapping,
886 .p_decode = (kxdrproc_t)rpcb_dec_set,
887 .p_arglen = RPCB_mappingargs_sz,
888 .p_replen = RPCB_setres_sz,
889 .p_statidx = RPCBPROC_UNSET,
890 .p_timer = 0,
891 .p_name = "UNSET",
892 },
893 [RPCBPROC_GETPORT] = {
894 .p_proc = RPCBPROC_GETPORT,
895 .p_encode = (kxdrproc_t)rpcb_enc_mapping,
896 .p_decode = (kxdrproc_t)rpcb_dec_getport,
897 .p_arglen = RPCB_mappingargs_sz,
898 .p_replen = RPCB_getportres_sz,
899 .p_statidx = RPCBPROC_GETPORT,
900 .p_timer = 0,
901 .p_name = "GETPORT",
902 },
816}; 903};
817 904
818static struct rpc_procinfo rpcb_procedures3[] = { 905static struct rpc_procinfo rpcb_procedures3[] = {
819 PROC(SET, getaddr, set), 906 [RPCBPROC_SET] = {
820 PROC(UNSET, getaddr, set), 907 .p_proc = RPCBPROC_SET,
821 PROC(GETADDR, getaddr, getaddr), 908 .p_encode = (kxdrproc_t)rpcb_enc_getaddr,
909 .p_decode = (kxdrproc_t)rpcb_dec_set,
910 .p_arglen = RPCB_getaddrargs_sz,
911 .p_replen = RPCB_setres_sz,
912 .p_statidx = RPCBPROC_SET,
913 .p_timer = 0,
914 .p_name = "SET",
915 },
916 [RPCBPROC_UNSET] = {
917 .p_proc = RPCBPROC_UNSET,
918 .p_encode = (kxdrproc_t)rpcb_enc_getaddr,
919 .p_decode = (kxdrproc_t)rpcb_dec_set,
920 .p_arglen = RPCB_getaddrargs_sz,
921 .p_replen = RPCB_setres_sz,
922 .p_statidx = RPCBPROC_UNSET,
923 .p_timer = 0,
924 .p_name = "UNSET",
925 },
926 [RPCBPROC_GETADDR] = {
927 .p_proc = RPCBPROC_GETADDR,
928 .p_encode = (kxdrproc_t)rpcb_enc_getaddr,
929 .p_decode = (kxdrproc_t)rpcb_dec_getaddr,
930 .p_arglen = RPCB_getaddrargs_sz,
931 .p_replen = RPCB_getaddrres_sz,
932 .p_statidx = RPCBPROC_GETADDR,
933 .p_timer = 0,
934 .p_name = "GETADDR",
935 },
822}; 936};
823 937
824static struct rpc_procinfo rpcb_procedures4[] = { 938static struct rpc_procinfo rpcb_procedures4[] = {
825 PROC(SET, getaddr, set), 939 [RPCBPROC_SET] = {
826 PROC(UNSET, getaddr, set), 940 .p_proc = RPCBPROC_SET,
827 PROC(GETADDR, getaddr, getaddr), 941 .p_encode = (kxdrproc_t)rpcb_enc_getaddr,
828 PROC(GETVERSADDR, getaddr, getaddr), 942 .p_decode = (kxdrproc_t)rpcb_dec_set,
943 .p_arglen = RPCB_getaddrargs_sz,
944 .p_replen = RPCB_setres_sz,
945 .p_statidx = RPCBPROC_SET,
946 .p_timer = 0,
947 .p_name = "SET",
948 },
949 [RPCBPROC_UNSET] = {
950 .p_proc = RPCBPROC_UNSET,
951 .p_encode = (kxdrproc_t)rpcb_enc_getaddr,
952 .p_decode = (kxdrproc_t)rpcb_dec_set,
953 .p_arglen = RPCB_getaddrargs_sz,
954 .p_replen = RPCB_setres_sz,
955 .p_statidx = RPCBPROC_UNSET,
956 .p_timer = 0,
957 .p_name = "UNSET",
958 },
959 [RPCBPROC_GETADDR] = {
960 .p_proc = RPCBPROC_GETADDR,
961 .p_encode = (kxdrproc_t)rpcb_enc_getaddr,
962 .p_decode = (kxdrproc_t)rpcb_dec_getaddr,
963 .p_arglen = RPCB_getaddrargs_sz,
964 .p_replen = RPCB_getaddrres_sz,
965 .p_statidx = RPCBPROC_GETADDR,
966 .p_timer = 0,
967 .p_name = "GETADDR",
968 },
829}; 969};
830 970
831static struct rpcb_info rpcb_next_version[] = { 971static struct rpcb_info rpcb_next_version[] = {
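The remaining hunks convert the rpcbind XDR helpers to xdr_stream and replace the PROC() macro with explicit rpc_procinfo initialisers. The converted encoders and decoders all follow one shape, sketched below with a hypothetical one-word encoder ('req' and 'p' are as handed in by the RPC layer):

	/* Reserve exactly the words to be written (or decode exactly the words
	 * to be read) and fail with -EIO if the stream is too short. */
	static int example_enc_word(struct rpc_rqst *req, __be32 *p, u32 value)
	{
		struct xdr_stream xdr;

		xdr_init_encode(&xdr, &req->rq_snd_buf, p);
		p = xdr_reserve_space(&xdr, sizeof(__be32));
		if (unlikely(p == NULL))
			return -EIO;
		*p = htonl(value);
		return 0;
	}
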
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c
index adaa81982f74..8cce92189019 100644
--- a/net/sunrpc/sunrpc_syms.c
+++ b/net/sunrpc/sunrpc_syms.c
@@ -69,5 +69,5 @@ cleanup_sunrpc(void)
69 rcu_barrier(); /* Wait for completion of call_rcu()'s */ 69 rcu_barrier(); /* Wait for completion of call_rcu()'s */
70} 70}
71MODULE_LICENSE("GPL"); 71MODULE_LICENSE("GPL");
72module_init(init_sunrpc); 72fs_initcall(init_sunrpc); /* Ensure we're initialised before nfs */
73module_exit(cleanup_sunrpc); 73module_exit(cleanup_sunrpc);
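The initcall change only matters when sunrpc is built in: module_init() then expands to device_initcall(), which runs after the fs-level initcalls, so an NFS client built into the same kernel could otherwise initialise first. fs_initcall() moves init_sunrpc() one level earlier. Ordering sketch (the NFS init function name is an assumption):

	fs_initcall(init_sunrpc);	/* this patch: fs-level initcall, runs first        */
	module_init(init_nfs_fs);	/* built-in NFS: device_initcall, runs afterwards   */
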
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 5c865e2d299e..6caffa34ac01 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -171,6 +171,11 @@ static void ip_map_request(struct cache_detail *cd,
171 (*bpp)[-1] = '\n'; 171 (*bpp)[-1] = '\n';
172} 172}
173 173
174static int ip_map_upcall(struct cache_detail *cd, struct cache_head *h)
175{
176 return sunrpc_cache_pipe_upcall(cd, h, ip_map_request);
177}
178
174static struct ip_map *ip_map_lookup(char *class, struct in6_addr *addr); 179static struct ip_map *ip_map_lookup(char *class, struct in6_addr *addr);
175static int ip_map_update(struct ip_map *ipm, struct unix_domain *udom, time_t expiry); 180static int ip_map_update(struct ip_map *ipm, struct unix_domain *udom, time_t expiry);
176 181
@@ -289,7 +294,7 @@ struct cache_detail ip_map_cache = {
289 .hash_table = ip_table, 294 .hash_table = ip_table,
290 .name = "auth.unix.ip", 295 .name = "auth.unix.ip",
291 .cache_put = ip_map_put, 296 .cache_put = ip_map_put,
292 .cache_request = ip_map_request, 297 .cache_upcall = ip_map_upcall,
293 .cache_parse = ip_map_parse, 298 .cache_parse = ip_map_parse,
294 .cache_show = ip_map_show, 299 .cache_show = ip_map_show,
295 .match = ip_map_match, 300 .match = ip_map_match,
@@ -523,6 +528,11 @@ static void unix_gid_request(struct cache_detail *cd,
523 (*bpp)[-1] = '\n'; 528 (*bpp)[-1] = '\n';
524} 529}
525 530
531static int unix_gid_upcall(struct cache_detail *cd, struct cache_head *h)
532{
533 return sunrpc_cache_pipe_upcall(cd, h, unix_gid_request);
534}
535
526static struct unix_gid *unix_gid_lookup(uid_t uid); 536static struct unix_gid *unix_gid_lookup(uid_t uid);
527extern struct cache_detail unix_gid_cache; 537extern struct cache_detail unix_gid_cache;
528 538
@@ -622,7 +632,7 @@ struct cache_detail unix_gid_cache = {
622 .hash_table = gid_table, 632 .hash_table = gid_table,
623 .name = "auth.unix.gid", 633 .name = "auth.unix.gid",
624 .cache_put = unix_gid_put, 634 .cache_put = unix_gid_put,
625 .cache_request = unix_gid_request, 635 .cache_upcall = unix_gid_upcall,
626 .cache_parse = unix_gid_parse, 636 .cache_parse = unix_gid_parse,
627 .cache_show = unix_gid_show, 637 .cache_show = unix_gid_show,
628 .match = unix_gid_match, 638 .match = unix_gid_match,
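Both caches in this file now publish a ->cache_upcall hook that is nothing more than their old request formatter fed through sunrpc_cache_pipe_upcall(). The same wrapper, sketched for a hypothetical cache (names are made up; the formatter keeps the usual (cd, h, bpp, blen) signature):

	static void example_request(struct cache_detail *cd, struct cache_head *h,
				    char **bpp, int *blen);

	static int example_upcall(struct cache_detail *cd, struct cache_head *h)
	{
		return sunrpc_cache_pipe_upcall(cd, h, example_request);
	}
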
diff --git a/net/sunrpc/timer.c b/net/sunrpc/timer.c
index 31becbf09263..dd824341c349 100644
--- a/net/sunrpc/timer.c
+++ b/net/sunrpc/timer.c
@@ -25,8 +25,13 @@
25#define RPC_RTO_INIT (HZ/5) 25#define RPC_RTO_INIT (HZ/5)
26#define RPC_RTO_MIN (HZ/10) 26#define RPC_RTO_MIN (HZ/10)
27 27
28void 28/**
29rpc_init_rtt(struct rpc_rtt *rt, unsigned long timeo) 29 * rpc_init_rtt - Initialize an RPC RTT estimator context
30 * @rt: context to initialize
31 * @timeo: initial timeout value, in jiffies
32 *
33 */
34void rpc_init_rtt(struct rpc_rtt *rt, unsigned long timeo)
30{ 35{
31 unsigned long init = 0; 36 unsigned long init = 0;
32 unsigned i; 37 unsigned i;
@@ -43,12 +48,16 @@ rpc_init_rtt(struct rpc_rtt *rt, unsigned long timeo)
43} 48}
44EXPORT_SYMBOL_GPL(rpc_init_rtt); 49EXPORT_SYMBOL_GPL(rpc_init_rtt);
45 50
46/* 51/**
52 * rpc_update_rtt - Update an RPC RTT estimator context
53 * @rt: context to update
54 * @timer: timer array index (request type)
55 * @m: recent actual RTT, in jiffies
56 *
47 * NB: When computing the smoothed RTT and standard deviation, 57 * NB: When computing the smoothed RTT and standard deviation,
48 * be careful not to produce negative intermediate results. 58 * be careful not to produce negative intermediate results.
49 */ 59 */
50void 60void rpc_update_rtt(struct rpc_rtt *rt, unsigned timer, long m)
51rpc_update_rtt(struct rpc_rtt *rt, unsigned timer, long m)
52{ 61{
53 long *srtt, *sdrtt; 62 long *srtt, *sdrtt;
54 63
@@ -79,21 +88,25 @@ rpc_update_rtt(struct rpc_rtt *rt, unsigned timer, long m)
79} 88}
80EXPORT_SYMBOL_GPL(rpc_update_rtt); 89EXPORT_SYMBOL_GPL(rpc_update_rtt);
81 90
82/* 91/**
83 * Estimate rto for an nfs rpc sent via. an unreliable datagram. 92 * rpc_calc_rto - Provide an estimated timeout value
84 * Use the mean and mean deviation of rtt for the appropriate type of rpc 93 * @rt: context to use for calculation
85 * for the frequent rpcs and a default for the others. 94 * @timer: timer array index (request type)
86 * The justification for doing "other" this way is that these rpcs 95 *
87 * happen so infrequently that timer est. would probably be stale. 96 * Estimate RTO for an NFS RPC sent via an unreliable datagram. Use
88 * Also, since many of these rpcs are 97 * the mean and mean deviation of RTT for the appropriate type of RPC
89 * non-idempotent, a conservative timeout is desired. 98 * for frequently issued RPCs, and a fixed default for the others.
99 *
100 * The justification for doing "other" this way is that these RPCs
101 * happen so infrequently that timer estimation would probably be
102 * stale. Also, since many of these RPCs are non-idempotent, a
103 * conservative timeout is desired.
104 *
90 * getattr, lookup, 105 * getattr, lookup,
91 * read, write, commit - A+4D 106 * read, write, commit - A+4D
92 * other - timeo 107 * other - timeo
93 */ 108 */
94 109unsigned long rpc_calc_rto(struct rpc_rtt *rt, unsigned timer)
95unsigned long
96rpc_calc_rto(struct rpc_rtt *rt, unsigned timer)
97{ 110{
98 unsigned long res; 111 unsigned long res;
99 112
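The new kernel-doc keeps the "A + 4D" description of rpc_calc_rto(). For readers unfamiliar with that estimator, here is a stand-alone, unscaled sketch of the arithmetic; the kernel keeps srtt and sdrtt in fixed point, so the real constants and rounding differ.

	#include <stdio.h>
	#include <stdlib.h>

	static long srtt, sdev;			/* smoothed RTT and mean deviation */

	static void update_rtt(long m)		/* m: newly measured RTT */
	{
		long err = m - srtt;

		srtt += err / 8;		/* smoothed mean      */
		sdev += (labs(err) - sdev) / 4;	/* smoothed deviation */
	}

	static long calc_rto(void)
	{
		return srtt + 4 * sdev;		/* the "A + 4D" estimate */
	}

	int main(void)
	{
		long samples[] = { 40, 44, 38, 120, 42 };
		unsigned i;

		for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
			update_rtt(samples[i]);
		printf("rto = %ld\n", calc_rto());
		return 0;
	}
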
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 406e26de584e..8bd690c48b69 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -24,7 +24,7 @@ xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
24 unsigned int quadlen = XDR_QUADLEN(obj->len); 24 unsigned int quadlen = XDR_QUADLEN(obj->len);
25 25
26 p[quadlen] = 0; /* zero trailing bytes */ 26 p[quadlen] = 0; /* zero trailing bytes */
27 *p++ = htonl(obj->len); 27 *p++ = cpu_to_be32(obj->len);
28 memcpy(p, obj->data, obj->len); 28 memcpy(p, obj->data, obj->len);
29 return p + XDR_QUADLEN(obj->len); 29 return p + XDR_QUADLEN(obj->len);
30} 30}
@@ -35,7 +35,7 @@ xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
35{ 35{
36 unsigned int len; 36 unsigned int len;
37 37
38 if ((len = ntohl(*p++)) > XDR_MAX_NETOBJ) 38 if ((len = be32_to_cpu(*p++)) > XDR_MAX_NETOBJ)
39 return NULL; 39 return NULL;
40 obj->len = len; 40 obj->len = len;
41 obj->data = (u8 *) p; 41 obj->data = (u8 *) p;
@@ -83,7 +83,7 @@ EXPORT_SYMBOL_GPL(xdr_encode_opaque_fixed);
83 */ 83 */
84__be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes) 84__be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
85{ 85{
86 *p++ = htonl(nbytes); 86 *p++ = cpu_to_be32(nbytes);
87 return xdr_encode_opaque_fixed(p, ptr, nbytes); 87 return xdr_encode_opaque_fixed(p, ptr, nbytes);
88} 88}
89EXPORT_SYMBOL_GPL(xdr_encode_opaque); 89EXPORT_SYMBOL_GPL(xdr_encode_opaque);
@@ -101,7 +101,7 @@ xdr_decode_string_inplace(__be32 *p, char **sp,
101{ 101{
102 u32 len; 102 u32 len;
103 103
104 len = ntohl(*p++); 104 len = be32_to_cpu(*p++);
105 if (len > maxlen) 105 if (len > maxlen)
106 return NULL; 106 return NULL;
107 *lenp = len; 107 *lenp = len;
@@ -771,7 +771,7 @@ xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj)
771 status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj)); 771 status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
772 if (status) 772 if (status)
773 return status; 773 return status;
774 *obj = ntohl(raw); 774 *obj = be32_to_cpu(raw);
775 return 0; 775 return 0;
776} 776}
777EXPORT_SYMBOL_GPL(xdr_decode_word); 777EXPORT_SYMBOL_GPL(xdr_decode_word);
@@ -779,7 +779,7 @@ EXPORT_SYMBOL_GPL(xdr_decode_word);
779int 779int
780xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj) 780xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
781{ 781{
782 __be32 raw = htonl(obj); 782 __be32 raw = cpu_to_be32(obj);
783 783
784 return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj)); 784 return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
785} 785}
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 1dd6123070e9..9a63f669ece4 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -168,47 +168,25 @@ static struct rpc_xprt_ops xprt_rdma_procs; /* forward reference */
168static void 168static void
169xprt_rdma_format_addresses(struct rpc_xprt *xprt) 169xprt_rdma_format_addresses(struct rpc_xprt *xprt)
170{ 170{
171 struct sockaddr_in *addr = (struct sockaddr_in *) 171 struct sockaddr *sap = (struct sockaddr *)
172 &rpcx_to_rdmad(xprt).addr; 172 &rpcx_to_rdmad(xprt).addr;
173 char *buf; 173 struct sockaddr_in *sin = (struct sockaddr_in *)sap;
174 char buf[64];
174 175
175 buf = kzalloc(20, GFP_KERNEL); 176 (void)rpc_ntop(sap, buf, sizeof(buf));
176 if (buf) 177 xprt->address_strings[RPC_DISPLAY_ADDR] = kstrdup(buf, GFP_KERNEL);
177 snprintf(buf, 20, "%pI4", &addr->sin_addr.s_addr);
178 xprt->address_strings[RPC_DISPLAY_ADDR] = buf;
179 178
180 buf = kzalloc(8, GFP_KERNEL); 179 (void)snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
181 if (buf) 180 xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);
182 snprintf(buf, 8, "%u", ntohs(addr->sin_port));
183 xprt->address_strings[RPC_DISPLAY_PORT] = buf;
184 181
185 xprt->address_strings[RPC_DISPLAY_PROTO] = "rdma"; 182 xprt->address_strings[RPC_DISPLAY_PROTO] = "rdma";
186 183
187 buf = kzalloc(48, GFP_KERNEL); 184 (void)snprintf(buf, sizeof(buf), "%02x%02x%02x%02x",
188 if (buf) 185 NIPQUAD(sin->sin_addr.s_addr));
189 snprintf(buf, 48, "addr=%pI4 port=%u proto=%s", 186 xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);
190 &addr->sin_addr.s_addr, 187
191 ntohs(addr->sin_port), "rdma"); 188 (void)snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
192 xprt->address_strings[RPC_DISPLAY_ALL] = buf; 189 xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);
193
194 buf = kzalloc(10, GFP_KERNEL);
195 if (buf)
196 snprintf(buf, 10, "%02x%02x%02x%02x",
197 NIPQUAD(addr->sin_addr.s_addr));
198 xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = buf;
199
200 buf = kzalloc(8, GFP_KERNEL);
201 if (buf)
202 snprintf(buf, 8, "%4hx", ntohs(addr->sin_port));
203 xprt->address_strings[RPC_DISPLAY_HEX_PORT] = buf;
204
205 buf = kzalloc(30, GFP_KERNEL);
206 if (buf)
207 snprintf(buf, 30, "%pI4.%u.%u",
208 &addr->sin_addr.s_addr,
209 ntohs(addr->sin_port) >> 8,
210 ntohs(addr->sin_port) & 0xff);
211 xprt->address_strings[RPC_DISPLAY_UNIVERSAL_ADDR] = buf;
212 190
213 /* netid */ 191 /* netid */
214 xprt->address_strings[RPC_DISPLAY_NETID] = "rdma"; 192 xprt->address_strings[RPC_DISPLAY_NETID] = "rdma";
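
The xprt_rdma_format_addresses() rewrite above drops the per-string kzalloc()/snprintf() pairs in favour of the shared sunrpc helpers rpc_ntop() and rpc_get_port(), formatting into one stack buffer and kstrdup()ing the result. A hedged sketch of how those helpers are typically used (print_peer() is a hypothetical wrapper, not part of this patch):

	#include <linux/kernel.h>
	#include <linux/sunrpc/clnt.h>

	static void print_peer(const struct sockaddr *sap)
	{
		char addr[64];

		(void)rpc_ntop(sap, addr, sizeof(addr));	/* "192.0.2.1" or "2001:db8::1" */
		pr_info("peer %s, port %u\n", addr, rpc_get_port(sap));
	}
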
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 83c73c4d017a..62438f3a914d 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -248,8 +248,8 @@ struct sock_xprt {
248 * Connection of transports 248 * Connection of transports
249 */ 249 */
250 struct delayed_work connect_worker; 250 struct delayed_work connect_worker;
251 struct sockaddr_storage addr; 251 struct sockaddr_storage srcaddr;
252 unsigned short port; 252 unsigned short srcport;
253 253
254 /* 254 /*
255 * UDP socket buffer size parameters 255 * UDP socket buffer size parameters
@@ -296,117 +296,60 @@ static inline struct sockaddr_in6 *xs_addr_in6(struct rpc_xprt *xprt)
296 return (struct sockaddr_in6 *) &xprt->addr; 296 return (struct sockaddr_in6 *) &xprt->addr;
297} 297}
298 298
299static void xs_format_ipv4_peer_addresses(struct rpc_xprt *xprt, 299static void xs_format_common_peer_addresses(struct rpc_xprt *xprt)
300 const char *protocol,
301 const char *netid)
302{ 300{
303 struct sockaddr_in *addr = xs_addr_in(xprt); 301 struct sockaddr *sap = xs_addr(xprt);
304 char *buf; 302 struct sockaddr_in6 *sin6;
303 struct sockaddr_in *sin;
304 char buf[128];
305 305
306 buf = kzalloc(20, GFP_KERNEL); 306 (void)rpc_ntop(sap, buf, sizeof(buf));
307 if (buf) { 307 xprt->address_strings[RPC_DISPLAY_ADDR] = kstrdup(buf, GFP_KERNEL);
308 snprintf(buf, 20, "%pI4", &addr->sin_addr.s_addr);
309 }
310 xprt->address_strings[RPC_DISPLAY_ADDR] = buf;
311
312 buf = kzalloc(8, GFP_KERNEL);
313 if (buf) {
314 snprintf(buf, 8, "%u",
315 ntohs(addr->sin_port));
316 }
317 xprt->address_strings[RPC_DISPLAY_PORT] = buf;
318
319 xprt->address_strings[RPC_DISPLAY_PROTO] = protocol;
320
321 buf = kzalloc(48, GFP_KERNEL);
322 if (buf) {
323 snprintf(buf, 48, "addr=%pI4 port=%u proto=%s",
324 &addr->sin_addr.s_addr,
325 ntohs(addr->sin_port),
326 protocol);
327 }
328 xprt->address_strings[RPC_DISPLAY_ALL] = buf;
329
330 buf = kzalloc(10, GFP_KERNEL);
331 if (buf) {
332 snprintf(buf, 10, "%02x%02x%02x%02x",
333 NIPQUAD(addr->sin_addr.s_addr));
334 }
335 xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = buf;
336 308
337 buf = kzalloc(8, GFP_KERNEL); 309 switch (sap->sa_family) {
338 if (buf) { 310 case AF_INET:
339 snprintf(buf, 8, "%4hx", 311 sin = xs_addr_in(xprt);
340 ntohs(addr->sin_port)); 312 (void)snprintf(buf, sizeof(buf), "%02x%02x%02x%02x",
341 } 313 NIPQUAD(sin->sin_addr.s_addr));
342 xprt->address_strings[RPC_DISPLAY_HEX_PORT] = buf; 314 break;
343 315 case AF_INET6:
344 buf = kzalloc(30, GFP_KERNEL); 316 sin6 = xs_addr_in6(xprt);
345 if (buf) { 317 (void)snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr);
346 snprintf(buf, 30, "%pI4.%u.%u", 318 break;
347 &addr->sin_addr.s_addr, 319 default:
348 ntohs(addr->sin_port) >> 8, 320 BUG();
349 ntohs(addr->sin_port) & 0xff);
350 } 321 }
351 xprt->address_strings[RPC_DISPLAY_UNIVERSAL_ADDR] = buf; 322 xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);
352
353 xprt->address_strings[RPC_DISPLAY_NETID] = netid;
354} 323}
355 324
356static void xs_format_ipv6_peer_addresses(struct rpc_xprt *xprt, 325static void xs_format_common_peer_ports(struct rpc_xprt *xprt)
357 const char *protocol,
358 const char *netid)
359{ 326{
360 struct sockaddr_in6 *addr = xs_addr_in6(xprt); 327 struct sockaddr *sap = xs_addr(xprt);
361 char *buf; 328 char buf[128];
362 329
363 buf = kzalloc(40, GFP_KERNEL); 330 (void)snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
364 if (buf) { 331 xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);
365 snprintf(buf, 40, "%pI6",&addr->sin6_addr);
366 }
367 xprt->address_strings[RPC_DISPLAY_ADDR] = buf;
368 332
369 buf = kzalloc(8, GFP_KERNEL); 333 (void)snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
370 if (buf) { 334 xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);
371 snprintf(buf, 8, "%u", 335}
372 ntohs(addr->sin6_port));
373 }
374 xprt->address_strings[RPC_DISPLAY_PORT] = buf;
375 336
337static void xs_format_peer_addresses(struct rpc_xprt *xprt,
338 const char *protocol,
339 const char *netid)
340{
376 xprt->address_strings[RPC_DISPLAY_PROTO] = protocol; 341 xprt->address_strings[RPC_DISPLAY_PROTO] = protocol;
342 xprt->address_strings[RPC_DISPLAY_NETID] = netid;
343 xs_format_common_peer_addresses(xprt);
344 xs_format_common_peer_ports(xprt);
345}
377 346
378 buf = kzalloc(64, GFP_KERNEL); 347static void xs_update_peer_port(struct rpc_xprt *xprt)
379 if (buf) { 348{
380 snprintf(buf, 64, "addr=%pI6 port=%u proto=%s", 349 kfree(xprt->address_strings[RPC_DISPLAY_HEX_PORT]);
381 &addr->sin6_addr, 350 kfree(xprt->address_strings[RPC_DISPLAY_PORT]);
382 ntohs(addr->sin6_port),
383 protocol);
384 }
385 xprt->address_strings[RPC_DISPLAY_ALL] = buf;
386
387 buf = kzalloc(36, GFP_KERNEL);
388 if (buf)
389 snprintf(buf, 36, "%pi6", &addr->sin6_addr);
390
391 xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = buf;
392
393 buf = kzalloc(8, GFP_KERNEL);
394 if (buf) {
395 snprintf(buf, 8, "%4hx",
396 ntohs(addr->sin6_port));
397 }
398 xprt->address_strings[RPC_DISPLAY_HEX_PORT] = buf;
399
400 buf = kzalloc(50, GFP_KERNEL);
401 if (buf) {
402 snprintf(buf, 50, "%pI6.%u.%u",
403 &addr->sin6_addr,
404 ntohs(addr->sin6_port) >> 8,
405 ntohs(addr->sin6_port) & 0xff);
406 }
407 xprt->address_strings[RPC_DISPLAY_UNIVERSAL_ADDR] = buf;
408 351
409 xprt->address_strings[RPC_DISPLAY_NETID] = netid; 352 xs_format_common_peer_ports(xprt);
410} 353}
411 354
412static void xs_free_peer_addresses(struct rpc_xprt *xprt) 355static void xs_free_peer_addresses(struct rpc_xprt *xprt)
@@ -1587,25 +1530,15 @@ static unsigned short xs_get_random_port(void)
1587 */ 1530 */
1588static void xs_set_port(struct rpc_xprt *xprt, unsigned short port) 1531static void xs_set_port(struct rpc_xprt *xprt, unsigned short port)
1589{ 1532{
1590 struct sockaddr *addr = xs_addr(xprt);
1591
1592 dprintk("RPC: setting port for xprt %p to %u\n", xprt, port); 1533 dprintk("RPC: setting port for xprt %p to %u\n", xprt, port);
1593 1534
1594 switch (addr->sa_family) { 1535 rpc_set_port(xs_addr(xprt), port);
1595 case AF_INET: 1536 xs_update_peer_port(xprt);
1596 ((struct sockaddr_in *)addr)->sin_port = htons(port);
1597 break;
1598 case AF_INET6:
1599 ((struct sockaddr_in6 *)addr)->sin6_port = htons(port);
1600 break;
1601 default:
1602 BUG();
1603 }
1604} 1537}
1605 1538
1606static unsigned short xs_get_srcport(struct sock_xprt *transport, struct socket *sock) 1539static unsigned short xs_get_srcport(struct sock_xprt *transport, struct socket *sock)
1607{ 1540{
1608 unsigned short port = transport->port; 1541 unsigned short port = transport->srcport;
1609 1542
1610 if (port == 0 && transport->xprt.resvport) 1543 if (port == 0 && transport->xprt.resvport)
1611 port = xs_get_random_port(); 1544 port = xs_get_random_port();
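
With rpc_set_port() doing the address-family dispatch, xs_set_port() above shrinks to two calls, and xs_update_peer_port() regenerates only the two port strings that can change after a rebind. A small illustrative sketch of the setter/getter pair this relies on (set_peer_port() is hypothetical):

	static void set_peer_port(struct rpc_xprt *xprt, unsigned short port)
	{
		struct sockaddr *sap = (struct sockaddr *)&xprt->addr;

		rpc_set_port(sap, port);	/* stores htons(port) in sin_port or sin6_port */
		pr_debug("peer port now %u\n", rpc_get_port(sap));
	}
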
@@ -1614,8 +1547,8 @@ static unsigned short xs_get_srcport(struct sock_xprt *transport, struct socket
1614 1547
1615static unsigned short xs_next_srcport(struct sock_xprt *transport, struct socket *sock, unsigned short port) 1548static unsigned short xs_next_srcport(struct sock_xprt *transport, struct socket *sock, unsigned short port)
1616{ 1549{
1617 if (transport->port != 0) 1550 if (transport->srcport != 0)
1618 transport->port = 0; 1551 transport->srcport = 0;
1619 if (!transport->xprt.resvport) 1552 if (!transport->xprt.resvport)
1620 return 0; 1553 return 0;
1621 if (port <= xprt_min_resvport || port > xprt_max_resvport) 1554 if (port <= xprt_min_resvport || port > xprt_max_resvport)
@@ -1633,7 +1566,7 @@ static int xs_bind4(struct sock_xprt *transport, struct socket *sock)
1633 unsigned short port = xs_get_srcport(transport, sock); 1566 unsigned short port = xs_get_srcport(transport, sock);
1634 unsigned short last; 1567 unsigned short last;
1635 1568
1636 sa = (struct sockaddr_in *)&transport->addr; 1569 sa = (struct sockaddr_in *)&transport->srcaddr;
1637 myaddr.sin_addr = sa->sin_addr; 1570 myaddr.sin_addr = sa->sin_addr;
1638 do { 1571 do {
1639 myaddr.sin_port = htons(port); 1572 myaddr.sin_port = htons(port);
@@ -1642,7 +1575,7 @@ static int xs_bind4(struct sock_xprt *transport, struct socket *sock)
1642 if (port == 0) 1575 if (port == 0)
1643 break; 1576 break;
1644 if (err == 0) { 1577 if (err == 0) {
1645 transport->port = port; 1578 transport->srcport = port;
1646 break; 1579 break;
1647 } 1580 }
1648 last = port; 1581 last = port;
@@ -1666,7 +1599,7 @@ static int xs_bind6(struct sock_xprt *transport, struct socket *sock)
1666 unsigned short port = xs_get_srcport(transport, sock); 1599 unsigned short port = xs_get_srcport(transport, sock);
1667 unsigned short last; 1600 unsigned short last;
1668 1601
1669 sa = (struct sockaddr_in6 *)&transport->addr; 1602 sa = (struct sockaddr_in6 *)&transport->srcaddr;
1670 myaddr.sin6_addr = sa->sin6_addr; 1603 myaddr.sin6_addr = sa->sin6_addr;
1671 do { 1604 do {
1672 myaddr.sin6_port = htons(port); 1605 myaddr.sin6_port = htons(port);
@@ -1675,7 +1608,7 @@ static int xs_bind6(struct sock_xprt *transport, struct socket *sock)
1675 if (port == 0) 1608 if (port == 0)
1676 break; 1609 break;
1677 if (err == 0) { 1610 if (err == 0) {
1678 transport->port = port; 1611 transport->srcport = port;
1679 break; 1612 break;
1680 } 1613 }
1681 last = port; 1614 last = port;
@@ -1780,8 +1713,11 @@ static void xs_udp_connect_worker4(struct work_struct *work)
1780 goto out; 1713 goto out;
1781 } 1714 }
1782 1715
1783 dprintk("RPC: worker connecting xprt %p to address: %s\n", 1716 dprintk("RPC: worker connecting xprt %p via %s to "
1784 xprt, xprt->address_strings[RPC_DISPLAY_ALL]); 1717 "%s (port %s)\n", xprt,
1718 xprt->address_strings[RPC_DISPLAY_PROTO],
1719 xprt->address_strings[RPC_DISPLAY_ADDR],
1720 xprt->address_strings[RPC_DISPLAY_PORT]);
1785 1721
1786 xs_udp_finish_connecting(xprt, sock); 1722 xs_udp_finish_connecting(xprt, sock);
1787 status = 0; 1723 status = 0;
@@ -1822,8 +1758,11 @@ static void xs_udp_connect_worker6(struct work_struct *work)
1822 goto out; 1758 goto out;
1823 } 1759 }
1824 1760
1825 dprintk("RPC: worker connecting xprt %p to address: %s\n", 1761 dprintk("RPC: worker connecting xprt %p via %s to "
1826 xprt, xprt->address_strings[RPC_DISPLAY_ALL]); 1762 "%s (port %s)\n", xprt,
1763 xprt->address_strings[RPC_DISPLAY_PROTO],
1764 xprt->address_strings[RPC_DISPLAY_ADDR],
1765 xprt->address_strings[RPC_DISPLAY_PORT]);
1827 1766
1828 xs_udp_finish_connecting(xprt, sock); 1767 xs_udp_finish_connecting(xprt, sock);
1829 status = 0; 1768 status = 0;
@@ -1948,8 +1887,11 @@ static void xs_tcp_setup_socket(struct rpc_xprt *xprt,
1948 goto out_eagain; 1887 goto out_eagain;
1949 } 1888 }
1950 1889
1951 dprintk("RPC: worker connecting xprt %p to address: %s\n", 1890 dprintk("RPC: worker connecting xprt %p via %s to "
1952 xprt, xprt->address_strings[RPC_DISPLAY_ALL]); 1891 "%s (port %s)\n", xprt,
1892 xprt->address_strings[RPC_DISPLAY_PROTO],
1893 xprt->address_strings[RPC_DISPLAY_ADDR],
1894 xprt->address_strings[RPC_DISPLAY_PORT]);
1953 1895
1954 status = xs_tcp_finish_connecting(xprt, sock); 1896 status = xs_tcp_finish_connecting(xprt, sock);
1955 dprintk("RPC: %p connect status %d connected %d sock state %d\n", 1897 dprintk("RPC: %p connect status %d connected %d sock state %d\n",
@@ -2120,7 +2062,7 @@ static void xs_udp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2120 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 2062 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2121 2063
2122 seq_printf(seq, "\txprt:\tudp %u %lu %lu %lu %lu %Lu %Lu\n", 2064 seq_printf(seq, "\txprt:\tudp %u %lu %lu %lu %lu %Lu %Lu\n",
2123 transport->port, 2065 transport->srcport,
2124 xprt->stat.bind_count, 2066 xprt->stat.bind_count,
2125 xprt->stat.sends, 2067 xprt->stat.sends,
2126 xprt->stat.recvs, 2068 xprt->stat.recvs,
@@ -2144,7 +2086,7 @@ static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2144 idle_time = (long)(jiffies - xprt->last_used) / HZ; 2086 idle_time = (long)(jiffies - xprt->last_used) / HZ;
2145 2087
2146 seq_printf(seq, "\txprt:\ttcp %u %lu %lu %lu %ld %lu %lu %lu %Lu %Lu\n", 2088 seq_printf(seq, "\txprt:\ttcp %u %lu %lu %lu %ld %lu %lu %lu %Lu %Lu\n",
2147 transport->port, 2089 transport->srcport,
2148 xprt->stat.bind_count, 2090 xprt->stat.bind_count,
2149 xprt->stat.connect_count, 2091 xprt->stat.connect_count,
2150 xprt->stat.connect_time, 2092 xprt->stat.connect_time,
@@ -2223,7 +2165,7 @@ static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args,
2223 memcpy(&xprt->addr, args->dstaddr, args->addrlen); 2165 memcpy(&xprt->addr, args->dstaddr, args->addrlen);
2224 xprt->addrlen = args->addrlen; 2166 xprt->addrlen = args->addrlen;
2225 if (args->srcaddr) 2167 if (args->srcaddr)
2226 memcpy(&new->addr, args->srcaddr, args->addrlen); 2168 memcpy(&new->srcaddr, args->srcaddr, args->addrlen);
2227 2169
2228 return xprt; 2170 return xprt;
2229} 2171}
@@ -2272,7 +2214,7 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
2272 2214
2273 INIT_DELAYED_WORK(&transport->connect_worker, 2215 INIT_DELAYED_WORK(&transport->connect_worker,
2274 xs_udp_connect_worker4); 2216 xs_udp_connect_worker4);
2275 xs_format_ipv4_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP); 2217 xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP);
2276 break; 2218 break;
2277 case AF_INET6: 2219 case AF_INET6:
2278 if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0)) 2220 if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
@@ -2280,15 +2222,22 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
2280 2222
2281 INIT_DELAYED_WORK(&transport->connect_worker, 2223 INIT_DELAYED_WORK(&transport->connect_worker,
2282 xs_udp_connect_worker6); 2224 xs_udp_connect_worker6);
2283 xs_format_ipv6_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP6); 2225 xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP6);
2284 break; 2226 break;
2285 default: 2227 default:
2286 kfree(xprt); 2228 kfree(xprt);
2287 return ERR_PTR(-EAFNOSUPPORT); 2229 return ERR_PTR(-EAFNOSUPPORT);
2288 } 2230 }
2289 2231
2290 dprintk("RPC: set up transport to address %s\n", 2232 if (xprt_bound(xprt))
2291 xprt->address_strings[RPC_DISPLAY_ALL]); 2233 dprintk("RPC: set up xprt to %s (port %s) via %s\n",
2234 xprt->address_strings[RPC_DISPLAY_ADDR],
2235 xprt->address_strings[RPC_DISPLAY_PORT],
2236 xprt->address_strings[RPC_DISPLAY_PROTO]);
2237 else
2238 dprintk("RPC: set up xprt to %s (autobind) via %s\n",
2239 xprt->address_strings[RPC_DISPLAY_ADDR],
2240 xprt->address_strings[RPC_DISPLAY_PROTO]);
2292 2241
2293 if (try_module_get(THIS_MODULE)) 2242 if (try_module_get(THIS_MODULE))
2294 return xprt; 2243 return xprt;
@@ -2337,23 +2286,33 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
2337 if (((struct sockaddr_in *)addr)->sin_port != htons(0)) 2286 if (((struct sockaddr_in *)addr)->sin_port != htons(0))
2338 xprt_set_bound(xprt); 2287 xprt_set_bound(xprt);
2339 2288
2340 INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_connect_worker4); 2289 INIT_DELAYED_WORK(&transport->connect_worker,
2341 xs_format_ipv4_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP); 2290 xs_tcp_connect_worker4);
2291 xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP);
2342 break; 2292 break;
2343 case AF_INET6: 2293 case AF_INET6:
2344 if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0)) 2294 if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
2345 xprt_set_bound(xprt); 2295 xprt_set_bound(xprt);
2346 2296
2347 INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_connect_worker6); 2297 INIT_DELAYED_WORK(&transport->connect_worker,
2348 xs_format_ipv6_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP6); 2298 xs_tcp_connect_worker6);
2299 xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP6);
2349 break; 2300 break;
2350 default: 2301 default:
2351 kfree(xprt); 2302 kfree(xprt);
2352 return ERR_PTR(-EAFNOSUPPORT); 2303 return ERR_PTR(-EAFNOSUPPORT);
2353 } 2304 }
2354 2305
2355 dprintk("RPC: set up transport to address %s\n", 2306 if (xprt_bound(xprt))
2356 xprt->address_strings[RPC_DISPLAY_ALL]); 2307 dprintk("RPC: set up xprt to %s (port %s) via %s\n",
2308 xprt->address_strings[RPC_DISPLAY_ADDR],
2309 xprt->address_strings[RPC_DISPLAY_PORT],
2310 xprt->address_strings[RPC_DISPLAY_PROTO]);
2311 else
2312 dprintk("RPC: set up xprt to %s (autobind) via %s\n",
2313 xprt->address_strings[RPC_DISPLAY_ADDR],
2314 xprt->address_strings[RPC_DISPLAY_PROTO]);
2315
2357 2316
2358 if (try_module_get(THIS_MODULE)) 2317 if (try_module_get(THIS_MODULE))
2359 return xprt; 2318 return xprt;
@@ -2412,3 +2371,55 @@ void cleanup_socket_xprt(void)
2412 xprt_unregister_transport(&xs_udp_transport); 2371 xprt_unregister_transport(&xs_udp_transport);
2413 xprt_unregister_transport(&xs_tcp_transport); 2372 xprt_unregister_transport(&xs_tcp_transport);
2414} 2373}
2374
2375static int param_set_uint_minmax(const char *val, struct kernel_param *kp,
2376 unsigned int min, unsigned int max)
2377{
2378 unsigned long num;
2379 int ret;
2380
2381 if (!val)
2382 return -EINVAL;
2383 ret = strict_strtoul(val, 0, &num);
2384 if (ret == -EINVAL || num < min || num > max)
2385 return -EINVAL;
2386 *((unsigned int *)kp->arg) = num;
2387 return 0;
2388}
2389
2390static int param_set_portnr(const char *val, struct kernel_param *kp)
2391{
2392 return param_set_uint_minmax(val, kp,
2393 RPC_MIN_RESVPORT,
2394 RPC_MAX_RESVPORT);
2395}
2396
2397static int param_get_portnr(char *buffer, struct kernel_param *kp)
2398{
2399 return param_get_uint(buffer, kp);
2400}
2401#define param_check_portnr(name, p) \
2402 __param_check(name, p, unsigned int);
2403
2404module_param_named(min_resvport, xprt_min_resvport, portnr, 0644);
2405module_param_named(max_resvport, xprt_max_resvport, portnr, 0644);
2406
2407static int param_set_slot_table_size(const char *val, struct kernel_param *kp)
2408{
2409 return param_set_uint_minmax(val, kp,
2410 RPC_MIN_SLOT_TABLE,
2411 RPC_MAX_SLOT_TABLE);
2412}
2413
2414static int param_get_slot_table_size(char *buffer, struct kernel_param *kp)
2415{
2416 return param_get_uint(buffer, kp);
2417}
2418#define param_check_slot_table_size(name, p) \
2419 __param_check(name, p, unsigned int);
2420
2421module_param_named(tcp_slot_table_entries, xprt_tcp_slot_table_entries,
2422 slot_table_size, 0644);
2423module_param_named(udp_slot_table_entries, xprt_udp_slot_table_entries,
2424 slot_table_size, 0644);
2425
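
The block above adds range checking to the sunrpc module parameters: param_set_uint_minmax() rejects writes outside the legal window, and the portnr/slot_table_size "types" wire that checker into module_param_named(), so min_resvport, max_resvport and the slot-table sizes appear as writable (0644) files under the module's parameter directory (typically /sys/module/sunrpc/parameters/). A sketch of defining one more bounded parameter with the same pattern, all names hypothetical:

	static unsigned int xprt_example_timeo = 30;

	static int param_set_example_timeo(const char *val, struct kernel_param *kp)
	{
		return param_set_uint_minmax(val, kp, 1, 600);	/* accept 1..600 only */
	}

	static int param_get_example_timeo(char *buffer, struct kernel_param *kp)
	{
		return param_get_uint(buffer, kp);
	}

	#define param_check_example_timeo(name, p) \
		__param_check(name, p, unsigned int);

	module_param_named(example_timeo, xprt_example_timeo, example_timeo, 0644);
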
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
index 911ba7ffab84..090d300d7394 100755
--- a/scripts/recordmcount.pl
+++ b/scripts/recordmcount.pl
@@ -57,7 +57,6 @@
57# call mcount (offset: 0x5) 57# call mcount (offset: 0x5)
58# [...] 58# [...]
59# ret 59# ret
60# .globl my_func
61# other_func: 60# other_func:
62# [...] 61# [...]
63# call mcount (offset: 0x1b) 62# call mcount (offset: 0x1b)
diff --git a/security/Makefile b/security/Makefile
index b56e7f9ecbc2..95ecc06392d7 100644
--- a/security/Makefile
+++ b/security/Makefile
@@ -16,9 +16,7 @@ obj-$(CONFIG_SECURITYFS) += inode.o
16# Must precede capability.o in order to stack properly. 16# Must precede capability.o in order to stack properly.
17obj-$(CONFIG_SECURITY_SELINUX) += selinux/built-in.o 17obj-$(CONFIG_SECURITY_SELINUX) += selinux/built-in.o
18obj-$(CONFIG_SECURITY_SMACK) += smack/built-in.o 18obj-$(CONFIG_SECURITY_SMACK) += smack/built-in.o
19ifeq ($(CONFIG_AUDIT),y) 19obj-$(CONFIG_AUDIT) += lsm_audit.o
20obj-$(CONFIG_SECURITY_SMACK) += lsm_audit.o
21endif
22obj-$(CONFIG_SECURITY_TOMOYO) += tomoyo/built-in.o 20obj-$(CONFIG_SECURITY_TOMOYO) += tomoyo/built-in.o
23obj-$(CONFIG_SECURITY_ROOTPLUG) += root_plug.o 21obj-$(CONFIG_SECURITY_ROOTPLUG) += root_plug.o
24obj-$(CONFIG_CGROUP_DEVICE) += device_cgroup.o 22obj-$(CONFIG_CGROUP_DEVICE) += device_cgroup.o
diff --git a/security/capability.c b/security/capability.c
index 88f752e8152c..fce07a7bc825 100644
--- a/security/capability.c
+++ b/security/capability.c
@@ -373,6 +373,11 @@ static int cap_task_create(unsigned long clone_flags)
373 return 0; 373 return 0;
374} 374}
375 375
376static int cap_cred_alloc_blank(struct cred *cred, gfp_t gfp)
377{
378 return 0;
379}
380
376static void cap_cred_free(struct cred *cred) 381static void cap_cred_free(struct cred *cred)
377{ 382{
378} 383}
@@ -386,6 +391,10 @@ static void cap_cred_commit(struct cred *new, const struct cred *old)
386{ 391{
387} 392}
388 393
394static void cap_cred_transfer(struct cred *new, const struct cred *old)
395{
396}
397
389static int cap_kernel_act_as(struct cred *new, u32 secid) 398static int cap_kernel_act_as(struct cred *new, u32 secid)
390{ 399{
391 return 0; 400 return 0;
@@ -396,6 +405,11 @@ static int cap_kernel_create_files_as(struct cred *new, struct inode *inode)
396 return 0; 405 return 0;
397} 406}
398 407
408static int cap_kernel_module_request(void)
409{
410 return 0;
411}
412
399static int cap_task_setuid(uid_t id0, uid_t id1, uid_t id2, int flags) 413static int cap_task_setuid(uid_t id0, uid_t id1, uid_t id2, int flags)
400{ 414{
401 return 0; 415 return 0;
@@ -701,10 +715,26 @@ static void cap_inet_conn_established(struct sock *sk, struct sk_buff *skb)
701{ 715{
702} 716}
703 717
718
719
704static void cap_req_classify_flow(const struct request_sock *req, 720static void cap_req_classify_flow(const struct request_sock *req,
705 struct flowi *fl) 721 struct flowi *fl)
706{ 722{
707} 723}
724
725static int cap_tun_dev_create(void)
726{
727 return 0;
728}
729
730static void cap_tun_dev_post_create(struct sock *sk)
731{
732}
733
734static int cap_tun_dev_attach(struct sock *sk)
735{
736 return 0;
737}
708#endif /* CONFIG_SECURITY_NETWORK */ 738#endif /* CONFIG_SECURITY_NETWORK */
709 739
710#ifdef CONFIG_SECURITY_NETWORK_XFRM 740#ifdef CONFIG_SECURITY_NETWORK_XFRM
@@ -792,6 +822,20 @@ static void cap_release_secctx(char *secdata, u32 seclen)
792{ 822{
793} 823}
794 824
825static int cap_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen)
826{
827 return 0;
828}
829
830static int cap_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen)
831{
832 return 0;
833}
834
835static int cap_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
836{
837 return 0;
838}
795#ifdef CONFIG_KEYS 839#ifdef CONFIG_KEYS
796static int cap_key_alloc(struct key *key, const struct cred *cred, 840static int cap_key_alloc(struct key *key, const struct cred *cred,
797 unsigned long flags) 841 unsigned long flags)
@@ -815,6 +859,13 @@ static int cap_key_getsecurity(struct key *key, char **_buffer)
815 return 0; 859 return 0;
816} 860}
817 861
862static int cap_key_session_to_parent(const struct cred *cred,
863 const struct cred *parent_cred,
864 struct key *key)
865{
866 return 0;
867}
868
818#endif /* CONFIG_KEYS */ 869#endif /* CONFIG_KEYS */
819 870
820#ifdef CONFIG_AUDIT 871#ifdef CONFIG_AUDIT
@@ -854,7 +905,7 @@ struct security_operations default_security_ops = {
854 905
855void security_fixup_ops(struct security_operations *ops) 906void security_fixup_ops(struct security_operations *ops)
856{ 907{
857 set_to_cap_if_null(ops, ptrace_may_access); 908 set_to_cap_if_null(ops, ptrace_access_check);
858 set_to_cap_if_null(ops, ptrace_traceme); 909 set_to_cap_if_null(ops, ptrace_traceme);
859 set_to_cap_if_null(ops, capget); 910 set_to_cap_if_null(ops, capget);
860 set_to_cap_if_null(ops, capset); 911 set_to_cap_if_null(ops, capset);
@@ -940,11 +991,14 @@ void security_fixup_ops(struct security_operations *ops)
940 set_to_cap_if_null(ops, file_receive); 991 set_to_cap_if_null(ops, file_receive);
941 set_to_cap_if_null(ops, dentry_open); 992 set_to_cap_if_null(ops, dentry_open);
942 set_to_cap_if_null(ops, task_create); 993 set_to_cap_if_null(ops, task_create);
994 set_to_cap_if_null(ops, cred_alloc_blank);
943 set_to_cap_if_null(ops, cred_free); 995 set_to_cap_if_null(ops, cred_free);
944 set_to_cap_if_null(ops, cred_prepare); 996 set_to_cap_if_null(ops, cred_prepare);
945 set_to_cap_if_null(ops, cred_commit); 997 set_to_cap_if_null(ops, cred_commit);
998 set_to_cap_if_null(ops, cred_transfer);
946 set_to_cap_if_null(ops, kernel_act_as); 999 set_to_cap_if_null(ops, kernel_act_as);
947 set_to_cap_if_null(ops, kernel_create_files_as); 1000 set_to_cap_if_null(ops, kernel_create_files_as);
1001 set_to_cap_if_null(ops, kernel_module_request);
948 set_to_cap_if_null(ops, task_setuid); 1002 set_to_cap_if_null(ops, task_setuid);
949 set_to_cap_if_null(ops, task_fix_setuid); 1003 set_to_cap_if_null(ops, task_fix_setuid);
950 set_to_cap_if_null(ops, task_setgid); 1004 set_to_cap_if_null(ops, task_setgid);
@@ -992,6 +1046,9 @@ void security_fixup_ops(struct security_operations *ops)
992 set_to_cap_if_null(ops, secid_to_secctx); 1046 set_to_cap_if_null(ops, secid_to_secctx);
993 set_to_cap_if_null(ops, secctx_to_secid); 1047 set_to_cap_if_null(ops, secctx_to_secid);
994 set_to_cap_if_null(ops, release_secctx); 1048 set_to_cap_if_null(ops, release_secctx);
1049 set_to_cap_if_null(ops, inode_notifysecctx);
1050 set_to_cap_if_null(ops, inode_setsecctx);
1051 set_to_cap_if_null(ops, inode_getsecctx);
995#ifdef CONFIG_SECURITY_NETWORK 1052#ifdef CONFIG_SECURITY_NETWORK
996 set_to_cap_if_null(ops, unix_stream_connect); 1053 set_to_cap_if_null(ops, unix_stream_connect);
997 set_to_cap_if_null(ops, unix_may_send); 1054 set_to_cap_if_null(ops, unix_may_send);
@@ -1020,6 +1077,9 @@ void security_fixup_ops(struct security_operations *ops)
1020 set_to_cap_if_null(ops, inet_csk_clone); 1077 set_to_cap_if_null(ops, inet_csk_clone);
1021 set_to_cap_if_null(ops, inet_conn_established); 1078 set_to_cap_if_null(ops, inet_conn_established);
1022 set_to_cap_if_null(ops, req_classify_flow); 1079 set_to_cap_if_null(ops, req_classify_flow);
1080 set_to_cap_if_null(ops, tun_dev_create);
1081 set_to_cap_if_null(ops, tun_dev_post_create);
1082 set_to_cap_if_null(ops, tun_dev_attach);
1023#endif /* CONFIG_SECURITY_NETWORK */ 1083#endif /* CONFIG_SECURITY_NETWORK */
1024#ifdef CONFIG_SECURITY_NETWORK_XFRM 1084#ifdef CONFIG_SECURITY_NETWORK_XFRM
1025 set_to_cap_if_null(ops, xfrm_policy_alloc_security); 1085 set_to_cap_if_null(ops, xfrm_policy_alloc_security);
@@ -1038,6 +1098,7 @@ void security_fixup_ops(struct security_operations *ops)
1038 set_to_cap_if_null(ops, key_free); 1098 set_to_cap_if_null(ops, key_free);
1039 set_to_cap_if_null(ops, key_permission); 1099 set_to_cap_if_null(ops, key_permission);
1040 set_to_cap_if_null(ops, key_getsecurity); 1100 set_to_cap_if_null(ops, key_getsecurity);
1101 set_to_cap_if_null(ops, key_session_to_parent);
1041#endif /* CONFIG_KEYS */ 1102#endif /* CONFIG_KEYS */
1042#ifdef CONFIG_AUDIT 1103#ifdef CONFIG_AUDIT
1043 set_to_cap_if_null(ops, audit_rule_init); 1104 set_to_cap_if_null(ops, audit_rule_init);
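
Each hook introduced above (cred_alloc_blank, cred_transfer, kernel_module_request, the tun_dev_* and inode_*secctx callbacks, key_session_to_parent) comes as a pair: a do-nothing capability stub plus a set_to_cap_if_null() line, so an LSM that does not implement the hook falls back to the default. Roughly, that macro behaves like the sketch below (paraphrased, not part of this diff):

	#define set_to_cap_if_null(ops, function)			\
		do {							\
			if (!ops->function)				\
				ops->function = cap_##function;		\
		} while (0)
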
diff --git a/security/commoncap.c b/security/commoncap.c
index e3097c0a1311..fe30751a6cd9 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -101,7 +101,7 @@ int cap_settime(struct timespec *ts, struct timezone *tz)
101} 101}
102 102
103/** 103/**
104 * cap_ptrace_may_access - Determine whether the current process may access 104 * cap_ptrace_access_check - Determine whether the current process may access
105 * another 105 * another
106 * @child: The process to be accessed 106 * @child: The process to be accessed
107 * @mode: The mode of attachment. 107 * @mode: The mode of attachment.
@@ -109,7 +109,7 @@ int cap_settime(struct timespec *ts, struct timezone *tz)
109 * Determine whether a process may access another, returning 0 if permission 109 * Determine whether a process may access another, returning 0 if permission
110 * granted, -ve if denied. 110 * granted, -ve if denied.
111 */ 111 */
112int cap_ptrace_may_access(struct task_struct *child, unsigned int mode) 112int cap_ptrace_access_check(struct task_struct *child, unsigned int mode)
113{ 113{
114 int ret = 0; 114 int ret = 0;
115 115
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index 4732f5e5d127..b85e61bcf246 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -249,7 +249,11 @@ void ima_counts_put(struct path *path, int mask)
249 struct inode *inode = path->dentry->d_inode; 249 struct inode *inode = path->dentry->d_inode;
250 struct ima_iint_cache *iint; 250 struct ima_iint_cache *iint;
251 251
252 if (!ima_initialized || !S_ISREG(inode->i_mode)) 252 /* The inode may already have been freed, freeing the iint
253 * with it. Verify the inode is not NULL before dereferencing
254 * it.
255 */
256 if (!ima_initialized || !inode || !S_ISREG(inode->i_mode))
253 return; 257 return;
254 iint = ima_iint_find_insert_get(inode); 258 iint = ima_iint_find_insert_get(inode);
255 if (!iint) 259 if (!iint)
diff --git a/security/keys/Makefile b/security/keys/Makefile
index 747a464943af..74d5447d7df7 100644
--- a/security/keys/Makefile
+++ b/security/keys/Makefile
@@ -3,6 +3,7 @@
3# 3#
4 4
5obj-y := \ 5obj-y := \
6 gc.o \
6 key.o \ 7 key.o \
7 keyring.o \ 8 keyring.o \
8 keyctl.o \ 9 keyctl.o \
diff --git a/security/keys/compat.c b/security/keys/compat.c
index c766c68a63bc..792c0a611a6d 100644
--- a/security/keys/compat.c
+++ b/security/keys/compat.c
@@ -82,6 +82,9 @@ asmlinkage long compat_sys_keyctl(u32 option,
82 case KEYCTL_GET_SECURITY: 82 case KEYCTL_GET_SECURITY:
83 return keyctl_get_security(arg2, compat_ptr(arg3), arg4); 83 return keyctl_get_security(arg2, compat_ptr(arg3), arg4);
84 84
85 case KEYCTL_SESSION_TO_PARENT:
86 return keyctl_session_to_parent();
87
85 default: 88 default:
86 return -EOPNOTSUPP; 89 return -EOPNOTSUPP;
87 } 90 }
diff --git a/security/keys/gc.c b/security/keys/gc.c
new file mode 100644
index 000000000000..1e616aef55fd
--- /dev/null
+++ b/security/keys/gc.c
@@ -0,0 +1,194 @@
1/* Key garbage collector
2 *
3 * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <keys/keyring-type.h>
14#include "internal.h"
15
16/*
17 * Delay between key revocation/expiry in seconds
18 */
19unsigned key_gc_delay = 5 * 60;
20
21/*
22 * Reaper
23 */
24static void key_gc_timer_func(unsigned long);
25static void key_garbage_collector(struct work_struct *);
26static DEFINE_TIMER(key_gc_timer, key_gc_timer_func, 0, 0);
27static DECLARE_WORK(key_gc_work, key_garbage_collector);
28static key_serial_t key_gc_cursor; /* the last key the gc considered */
29static unsigned long key_gc_executing;
30static time_t key_gc_next_run = LONG_MAX;
31
32/*
33 * Schedule a garbage collection run
34 * - precision isn't particularly important
35 */
36void key_schedule_gc(time_t gc_at)
37{
38 unsigned long expires;
39 time_t now = current_kernel_time().tv_sec;
40
41 kenter("%ld", gc_at - now);
42
43 gc_at += key_gc_delay;
44
45 if (now >= gc_at) {
46 schedule_work(&key_gc_work);
47 } else if (gc_at < key_gc_next_run) {
48 expires = jiffies + (gc_at - now) * HZ;
49 mod_timer(&key_gc_timer, expires);
50 }
51}
52
53/*
54 * The garbage collector timer kicked off
55 */
56static void key_gc_timer_func(unsigned long data)
57{
58 kenter("");
59 key_gc_next_run = LONG_MAX;
60 schedule_work(&key_gc_work);
61}
62
63/*
64 * Garbage collect pointers from a keyring
65 * - return true if we altered the keyring
66 */
67static bool key_gc_keyring(struct key *keyring, time_t limit)
68 __releases(key_serial_lock)
69{
70 struct keyring_list *klist;
71 struct key *key;
72 int loop;
73
74 kenter("%x", key_serial(keyring));
75
76 if (test_bit(KEY_FLAG_REVOKED, &keyring->flags))
77 goto dont_gc;
78
79 /* scan the keyring looking for dead keys */
80 klist = rcu_dereference(keyring->payload.subscriptions);
81 if (!klist)
82 goto dont_gc;
83
84 for (loop = klist->nkeys - 1; loop >= 0; loop--) {
85 key = klist->keys[loop];
86 if (test_bit(KEY_FLAG_DEAD, &key->flags) ||
87 (key->expiry > 0 && key->expiry <= limit))
88 goto do_gc;
89 }
90
91dont_gc:
92 kleave(" = false");
93 return false;
94
95do_gc:
96 key_gc_cursor = keyring->serial;
97 key_get(keyring);
98 spin_unlock(&key_serial_lock);
99 keyring_gc(keyring, limit);
100 key_put(keyring);
101 kleave(" = true");
102 return true;
103}
104
105/*
106 * Garbage collector for keys
107 * - this involves scanning the keyrings for dead, expired and revoked keys
108 * that have overstayed their welcome
109 */
110static void key_garbage_collector(struct work_struct *work)
111{
112 struct rb_node *rb;
113 key_serial_t cursor;
114 struct key *key, *xkey;
115 time_t new_timer = LONG_MAX, limit;
116
117 kenter("");
118
119 if (test_and_set_bit(0, &key_gc_executing)) {
120 key_schedule_gc(current_kernel_time().tv_sec);
121 return;
122 }
123
124 limit = current_kernel_time().tv_sec;
125 if (limit > key_gc_delay)
126 limit -= key_gc_delay;
127 else
128 limit = key_gc_delay;
129
130 spin_lock(&key_serial_lock);
131
132 if (RB_EMPTY_ROOT(&key_serial_tree))
133 goto reached_the_end;
134
135 cursor = key_gc_cursor;
136 if (cursor < 0)
137 cursor = 0;
138
139 /* find the first key above the cursor */
140 key = NULL;
141 rb = key_serial_tree.rb_node;
142 while (rb) {
143 xkey = rb_entry(rb, struct key, serial_node);
144 if (cursor < xkey->serial) {
145 key = xkey;
146 rb = rb->rb_left;
147 } else if (cursor > xkey->serial) {
148 rb = rb->rb_right;
149 } else {
150 rb = rb_next(rb);
151 if (!rb)
152 goto reached_the_end;
153 key = rb_entry(rb, struct key, serial_node);
154 break;
155 }
156 }
157
158 if (!key)
159 goto reached_the_end;
160
161 /* trawl through the keys looking for keyrings */
162 for (;;) {
163 if (key->expiry > 0 && key->expiry < new_timer)
164 new_timer = key->expiry;
165
166 if (key->type == &key_type_keyring &&
167 key_gc_keyring(key, limit)) {
168 /* the gc ate our lock */
169 schedule_work(&key_gc_work);
170 goto no_unlock;
171 }
172
173 rb = rb_next(&key->serial_node);
174 if (!rb) {
175 key_gc_cursor = 0;
176 break;
177 }
178 key = rb_entry(rb, struct key, serial_node);
179 }
180
181out:
182 spin_unlock(&key_serial_lock);
183no_unlock:
184 clear_bit(0, &key_gc_executing);
185 if (new_timer < LONG_MAX)
186 key_schedule_gc(new_timer);
187
188 kleave("");
189 return;
190
191reached_the_end:
192 key_gc_cursor = 0;
193 goto out;
194}
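
The collector above keeps a persistent cursor (key_gc_cursor) into the key serial rbtree so that a run which drops key_serial_lock to reap a keyring can be rescheduled and resume where it stopped, and it re-arms its timer only for the earliest expiry it encountered. The net effect is that a revoked or expired key keeps its keyring links for key_gc_delay seconds (five minutes by default) before they are collected; a trivial illustrative sketch of that relationship:

	/* Earliest time at which the collector will treat the key's links as dead. */
	static time_t key_gc_eligible_at(time_t revoked_or_expired_at)
	{
		return revoked_or_expired_at + key_gc_delay;	/* key_gc_delay defaults to 5 * 60 */
	}
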
diff --git a/security/keys/internal.h b/security/keys/internal.h
index 9fb679c66b8a..24ba0307b7ad 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -124,11 +124,18 @@ extern struct key *request_key_and_link(struct key_type *type,
124 struct key *dest_keyring, 124 struct key *dest_keyring,
125 unsigned long flags); 125 unsigned long flags);
126 126
127extern key_ref_t lookup_user_key(key_serial_t id, int create, int partial, 127extern key_ref_t lookup_user_key(key_serial_t id, unsigned long flags,
128 key_perm_t perm); 128 key_perm_t perm);
129#define KEY_LOOKUP_CREATE 0x01
130#define KEY_LOOKUP_PARTIAL 0x02
131#define KEY_LOOKUP_FOR_UNLINK 0x04
129 132
130extern long join_session_keyring(const char *name); 133extern long join_session_keyring(const char *name);
131 134
135extern unsigned key_gc_delay;
136extern void keyring_gc(struct key *keyring, time_t limit);
137extern void key_schedule_gc(time_t expiry_at);
138
132/* 139/*
133 * check to see whether permission is granted to use a key in the desired way 140 * check to see whether permission is granted to use a key in the desired way
134 */ 141 */
@@ -194,6 +201,7 @@ extern long keyctl_set_timeout(key_serial_t, unsigned);
194extern long keyctl_assume_authority(key_serial_t); 201extern long keyctl_assume_authority(key_serial_t);
195extern long keyctl_get_security(key_serial_t keyid, char __user *buffer, 202extern long keyctl_get_security(key_serial_t keyid, char __user *buffer,
196 size_t buflen); 203 size_t buflen);
204extern long keyctl_session_to_parent(void);
197 205
198/* 206/*
199 * debugging key validation 207 * debugging key validation
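
lookup_user_key() above trades its two boolean arguments for a flags word, which also gains a state the old interface could not express (KEY_LOOKUP_FOR_UNLINK). The keyctl.c conversions that follow all apply the mapping sketched here (the compat wrapper is hypothetical, shown only to make the correspondence explicit):

	static key_ref_t lookup_user_key_compat(key_serial_t id, int create,
						int partial, key_perm_t perm)
	{
		unsigned long lflags = 0;

		if (create)
			lflags |= KEY_LOOKUP_CREATE;
		if (partial)
			lflags |= KEY_LOOKUP_PARTIAL;
		return lookup_user_key(id, lflags, perm);
	}
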
diff --git a/security/keys/key.c b/security/keys/key.c
index 4a1297d1ada4..08531ad0f252 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -500,6 +500,7 @@ int key_negate_and_link(struct key *key,
500 set_bit(KEY_FLAG_INSTANTIATED, &key->flags); 500 set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
501 now = current_kernel_time(); 501 now = current_kernel_time();
502 key->expiry = now.tv_sec + timeout; 502 key->expiry = now.tv_sec + timeout;
503 key_schedule_gc(key->expiry);
503 504
504 if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags)) 505 if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
505 awaken = 1; 506 awaken = 1;
@@ -642,10 +643,8 @@ struct key *key_lookup(key_serial_t id)
642 goto error; 643 goto error;
643 644
644 found: 645 found:
645 /* pretend it doesn't exist if it's dead */ 646 /* pretend it doesn't exist if it is awaiting deletion */
646 if (atomic_read(&key->usage) == 0 || 647 if (atomic_read(&key->usage) == 0)
647 test_bit(KEY_FLAG_DEAD, &key->flags) ||
648 key->type == &key_type_dead)
649 goto not_found; 648 goto not_found;
650 649
651 /* this races with key_put(), but that doesn't matter since key_put() 650 /* this races with key_put(), but that doesn't matter since key_put()
@@ -890,6 +889,9 @@ EXPORT_SYMBOL(key_update);
890 */ 889 */
891void key_revoke(struct key *key) 890void key_revoke(struct key *key)
892{ 891{
892 struct timespec now;
893 time_t time;
894
893 key_check(key); 895 key_check(key);
894 896
895 /* make sure no one's trying to change or use the key when we mark it 897 /* make sure no one's trying to change or use the key when we mark it
@@ -902,6 +904,14 @@ void key_revoke(struct key *key)
902 key->type->revoke) 904 key->type->revoke)
903 key->type->revoke(key); 905 key->type->revoke(key);
904 906
907 /* set the death time to no more than the expiry time */
908 now = current_kernel_time();
909 time = now.tv_sec;
910 if (key->revoked_at == 0 || key->revoked_at > time) {
911 key->revoked_at = time;
912 key_schedule_gc(key->revoked_at);
913 }
914
905 up_write(&key->sem); 915 up_write(&key->sem);
906 916
907} /* end key_revoke() */ 917} /* end key_revoke() */
@@ -958,8 +968,10 @@ void unregister_key_type(struct key_type *ktype)
958 for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) { 968 for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
959 key = rb_entry(_n, struct key, serial_node); 969 key = rb_entry(_n, struct key, serial_node);
960 970
961 if (key->type == ktype) 971 if (key->type == ktype) {
962 key->type = &key_type_dead; 972 key->type = &key_type_dead;
973 set_bit(KEY_FLAG_DEAD, &key->flags);
974 }
963 } 975 }
964 976
965 spin_unlock(&key_serial_lock); 977 spin_unlock(&key_serial_lock);
@@ -984,6 +996,8 @@ void unregister_key_type(struct key_type *ktype)
984 spin_unlock(&key_serial_lock); 996 spin_unlock(&key_serial_lock);
985 up_write(&key_types_sem); 997 up_write(&key_types_sem);
986 998
999 key_schedule_gc(0);
1000
987} /* end unregister_key_type() */ 1001} /* end unregister_key_type() */
988 1002
989EXPORT_SYMBOL(unregister_key_type); 1003EXPORT_SYMBOL(unregister_key_type);
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index 7f09fb897d2b..74c968524592 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -103,7 +103,7 @@ SYSCALL_DEFINE5(add_key, const char __user *, _type,
103 } 103 }
104 104
105 /* find the target keyring (which must be writable) */ 105 /* find the target keyring (which must be writable) */
106 keyring_ref = lookup_user_key(ringid, 1, 0, KEY_WRITE); 106 keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_WRITE);
107 if (IS_ERR(keyring_ref)) { 107 if (IS_ERR(keyring_ref)) {
108 ret = PTR_ERR(keyring_ref); 108 ret = PTR_ERR(keyring_ref);
109 goto error3; 109 goto error3;
@@ -185,7 +185,8 @@ SYSCALL_DEFINE4(request_key, const char __user *, _type,
185 /* get the destination keyring if specified */ 185 /* get the destination keyring if specified */
186 dest_ref = NULL; 186 dest_ref = NULL;
187 if (destringid) { 187 if (destringid) {
188 dest_ref = lookup_user_key(destringid, 1, 0, KEY_WRITE); 188 dest_ref = lookup_user_key(destringid, KEY_LOOKUP_CREATE,
189 KEY_WRITE);
189 if (IS_ERR(dest_ref)) { 190 if (IS_ERR(dest_ref)) {
190 ret = PTR_ERR(dest_ref); 191 ret = PTR_ERR(dest_ref);
191 goto error3; 192 goto error3;
@@ -233,9 +234,11 @@ SYSCALL_DEFINE4(request_key, const char __user *, _type,
233long keyctl_get_keyring_ID(key_serial_t id, int create) 234long keyctl_get_keyring_ID(key_serial_t id, int create)
234{ 235{
235 key_ref_t key_ref; 236 key_ref_t key_ref;
237 unsigned long lflags;
236 long ret; 238 long ret;
237 239
238 key_ref = lookup_user_key(id, create, 0, KEY_SEARCH); 240 lflags = create ? KEY_LOOKUP_CREATE : 0;
241 key_ref = lookup_user_key(id, lflags, KEY_SEARCH);
239 if (IS_ERR(key_ref)) { 242 if (IS_ERR(key_ref)) {
240 ret = PTR_ERR(key_ref); 243 ret = PTR_ERR(key_ref);
241 goto error; 244 goto error;
@@ -309,7 +312,7 @@ long keyctl_update_key(key_serial_t id,
309 } 312 }
310 313
311 /* find the target key (which must be writable) */ 314 /* find the target key (which must be writable) */
312 key_ref = lookup_user_key(id, 0, 0, KEY_WRITE); 315 key_ref = lookup_user_key(id, 0, KEY_WRITE);
313 if (IS_ERR(key_ref)) { 316 if (IS_ERR(key_ref)) {
314 ret = PTR_ERR(key_ref); 317 ret = PTR_ERR(key_ref);
315 goto error2; 318 goto error2;
@@ -337,10 +340,16 @@ long keyctl_revoke_key(key_serial_t id)
337 key_ref_t key_ref; 340 key_ref_t key_ref;
338 long ret; 341 long ret;
339 342
340 key_ref = lookup_user_key(id, 0, 0, KEY_WRITE); 343 key_ref = lookup_user_key(id, 0, KEY_WRITE);
341 if (IS_ERR(key_ref)) { 344 if (IS_ERR(key_ref)) {
342 ret = PTR_ERR(key_ref); 345 ret = PTR_ERR(key_ref);
343 goto error; 346 if (ret != -EACCES)
347 goto error;
348 key_ref = lookup_user_key(id, 0, KEY_SETATTR);
349 if (IS_ERR(key_ref)) {
350 ret = PTR_ERR(key_ref);
351 goto error;
352 }
344 } 353 }
345 354
346 key_revoke(key_ref_to_ptr(key_ref)); 355 key_revoke(key_ref_to_ptr(key_ref));
@@ -363,7 +372,7 @@ long keyctl_keyring_clear(key_serial_t ringid)
363 key_ref_t keyring_ref; 372 key_ref_t keyring_ref;
364 long ret; 373 long ret;
365 374
366 keyring_ref = lookup_user_key(ringid, 1, 0, KEY_WRITE); 375 keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_WRITE);
367 if (IS_ERR(keyring_ref)) { 376 if (IS_ERR(keyring_ref)) {
368 ret = PTR_ERR(keyring_ref); 377 ret = PTR_ERR(keyring_ref);
369 goto error; 378 goto error;
@@ -389,13 +398,13 @@ long keyctl_keyring_link(key_serial_t id, key_serial_t ringid)
389 key_ref_t keyring_ref, key_ref; 398 key_ref_t keyring_ref, key_ref;
390 long ret; 399 long ret;
391 400
392 keyring_ref = lookup_user_key(ringid, 1, 0, KEY_WRITE); 401 keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_WRITE);
393 if (IS_ERR(keyring_ref)) { 402 if (IS_ERR(keyring_ref)) {
394 ret = PTR_ERR(keyring_ref); 403 ret = PTR_ERR(keyring_ref);
395 goto error; 404 goto error;
396 } 405 }
397 406
398 key_ref = lookup_user_key(id, 1, 0, KEY_LINK); 407 key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE, KEY_LINK);
399 if (IS_ERR(key_ref)) { 408 if (IS_ERR(key_ref)) {
400 ret = PTR_ERR(key_ref); 409 ret = PTR_ERR(key_ref);
401 goto error2; 410 goto error2;
@@ -423,13 +432,13 @@ long keyctl_keyring_unlink(key_serial_t id, key_serial_t ringid)
423 key_ref_t keyring_ref, key_ref; 432 key_ref_t keyring_ref, key_ref;
424 long ret; 433 long ret;
425 434
426 keyring_ref = lookup_user_key(ringid, 0, 0, KEY_WRITE); 435 keyring_ref = lookup_user_key(ringid, 0, KEY_WRITE);
427 if (IS_ERR(keyring_ref)) { 436 if (IS_ERR(keyring_ref)) {
428 ret = PTR_ERR(keyring_ref); 437 ret = PTR_ERR(keyring_ref);
429 goto error; 438 goto error;
430 } 439 }
431 440
432 key_ref = lookup_user_key(id, 0, 0, 0); 441 key_ref = lookup_user_key(id, KEY_LOOKUP_FOR_UNLINK, 0);
433 if (IS_ERR(key_ref)) { 442 if (IS_ERR(key_ref)) {
434 ret = PTR_ERR(key_ref); 443 ret = PTR_ERR(key_ref);
435 goto error2; 444 goto error2;
@@ -465,7 +474,7 @@ long keyctl_describe_key(key_serial_t keyid,
465 char *tmpbuf; 474 char *tmpbuf;
466 long ret; 475 long ret;
467 476
468 key_ref = lookup_user_key(keyid, 0, 1, KEY_VIEW); 477 key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, KEY_VIEW);
469 if (IS_ERR(key_ref)) { 478 if (IS_ERR(key_ref)) {
470 /* viewing a key under construction is permitted if we have the 479 /* viewing a key under construction is permitted if we have the
471 * authorisation token handy */ 480 * authorisation token handy */
@@ -474,7 +483,8 @@ long keyctl_describe_key(key_serial_t keyid,
474 if (!IS_ERR(instkey)) { 483 if (!IS_ERR(instkey)) {
475 key_put(instkey); 484 key_put(instkey);
476 key_ref = lookup_user_key(keyid, 485 key_ref = lookup_user_key(keyid,
477 0, 1, 0); 486 KEY_LOOKUP_PARTIAL,
487 0);
478 if (!IS_ERR(key_ref)) 488 if (!IS_ERR(key_ref))
479 goto okay; 489 goto okay;
480 } 490 }
@@ -558,7 +568,7 @@ long keyctl_keyring_search(key_serial_t ringid,
558 } 568 }
559 569
560 /* get the keyring at which to begin the search */ 570 /* get the keyring at which to begin the search */
561 keyring_ref = lookup_user_key(ringid, 0, 0, KEY_SEARCH); 571 keyring_ref = lookup_user_key(ringid, 0, KEY_SEARCH);
562 if (IS_ERR(keyring_ref)) { 572 if (IS_ERR(keyring_ref)) {
563 ret = PTR_ERR(keyring_ref); 573 ret = PTR_ERR(keyring_ref);
564 goto error2; 574 goto error2;
@@ -567,7 +577,8 @@ long keyctl_keyring_search(key_serial_t ringid,
567 /* get the destination keyring if specified */ 577 /* get the destination keyring if specified */
568 dest_ref = NULL; 578 dest_ref = NULL;
569 if (destringid) { 579 if (destringid) {
570 dest_ref = lookup_user_key(destringid, 1, 0, KEY_WRITE); 580 dest_ref = lookup_user_key(destringid, KEY_LOOKUP_CREATE,
581 KEY_WRITE);
571 if (IS_ERR(dest_ref)) { 582 if (IS_ERR(dest_ref)) {
572 ret = PTR_ERR(dest_ref); 583 ret = PTR_ERR(dest_ref);
573 goto error3; 584 goto error3;
@@ -637,7 +648,7 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen)
637 long ret; 648 long ret;
638 649
639 /* find the key first */ 650 /* find the key first */
640 key_ref = lookup_user_key(keyid, 0, 0, 0); 651 key_ref = lookup_user_key(keyid, 0, 0);
641 if (IS_ERR(key_ref)) { 652 if (IS_ERR(key_ref)) {
642 ret = -ENOKEY; 653 ret = -ENOKEY;
643 goto error; 654 goto error;
@@ -700,7 +711,8 @@ long keyctl_chown_key(key_serial_t id, uid_t uid, gid_t gid)
700 if (uid == (uid_t) -1 && gid == (gid_t) -1) 711 if (uid == (uid_t) -1 && gid == (gid_t) -1)
701 goto error; 712 goto error;
702 713
703 key_ref = lookup_user_key(id, 1, 1, KEY_SETATTR); 714 key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL,
715 KEY_SETATTR);
704 if (IS_ERR(key_ref)) { 716 if (IS_ERR(key_ref)) {
705 ret = PTR_ERR(key_ref); 717 ret = PTR_ERR(key_ref);
706 goto error; 718 goto error;
@@ -805,7 +817,8 @@ long keyctl_setperm_key(key_serial_t id, key_perm_t perm)
805 if (perm & ~(KEY_POS_ALL | KEY_USR_ALL | KEY_GRP_ALL | KEY_OTH_ALL)) 817 if (perm & ~(KEY_POS_ALL | KEY_USR_ALL | KEY_GRP_ALL | KEY_OTH_ALL))
806 goto error; 818 goto error;
807 819
808 key_ref = lookup_user_key(id, 1, 1, KEY_SETATTR); 820 key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL,
821 KEY_SETATTR);
809 if (IS_ERR(key_ref)) { 822 if (IS_ERR(key_ref)) {
810 ret = PTR_ERR(key_ref); 823 ret = PTR_ERR(key_ref);
811 goto error; 824 goto error;
@@ -847,7 +860,7 @@ static long get_instantiation_keyring(key_serial_t ringid,
847 860
848 /* if a specific keyring is nominated by ID, then use that */ 861 /* if a specific keyring is nominated by ID, then use that */
849 if (ringid > 0) { 862 if (ringid > 0) {
850 dkref = lookup_user_key(ringid, 1, 0, KEY_WRITE); 863 dkref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_WRITE);
851 if (IS_ERR(dkref)) 864 if (IS_ERR(dkref))
852 return PTR_ERR(dkref); 865 return PTR_ERR(dkref);
853 *_dest_keyring = key_ref_to_ptr(dkref); 866 *_dest_keyring = key_ref_to_ptr(dkref);
@@ -1083,7 +1096,8 @@ long keyctl_set_timeout(key_serial_t id, unsigned timeout)
1083 time_t expiry; 1096 time_t expiry;
1084 long ret; 1097 long ret;
1085 1098
1086 key_ref = lookup_user_key(id, 1, 1, KEY_SETATTR); 1099 key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL,
1100 KEY_SETATTR);
1087 if (IS_ERR(key_ref)) { 1101 if (IS_ERR(key_ref)) {
1088 ret = PTR_ERR(key_ref); 1102 ret = PTR_ERR(key_ref);
1089 goto error; 1103 goto error;
@@ -1101,6 +1115,7 @@ long keyctl_set_timeout(key_serial_t id, unsigned timeout)
1101 } 1115 }
1102 1116
1103 key->expiry = expiry; 1117 key->expiry = expiry;
1118 key_schedule_gc(key->expiry);
1104 1119
1105 up_write(&key->sem); 1120 up_write(&key->sem);
1106 key_put(key); 1121 key_put(key);
@@ -1170,7 +1185,7 @@ long keyctl_get_security(key_serial_t keyid,
1170 char *context; 1185 char *context;
1171 long ret; 1186 long ret;
1172 1187
1173 key_ref = lookup_user_key(keyid, 0, 1, KEY_VIEW); 1188 key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, KEY_VIEW);
1174 if (IS_ERR(key_ref)) { 1189 if (IS_ERR(key_ref)) {
1175 if (PTR_ERR(key_ref) != -EACCES) 1190 if (PTR_ERR(key_ref) != -EACCES)
1176 return PTR_ERR(key_ref); 1191 return PTR_ERR(key_ref);
@@ -1182,7 +1197,7 @@ long keyctl_get_security(key_serial_t keyid,
1182 return PTR_ERR(key_ref); 1197 return PTR_ERR(key_ref);
1183 key_put(instkey); 1198 key_put(instkey);
1184 1199
1185 key_ref = lookup_user_key(keyid, 0, 1, 0); 1200 key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, 0);
1186 if (IS_ERR(key_ref)) 1201 if (IS_ERR(key_ref))
1187 return PTR_ERR(key_ref); 1202 return PTR_ERR(key_ref);
1188 } 1203 }
@@ -1213,6 +1228,105 @@ long keyctl_get_security(key_serial_t keyid,
1213 return ret; 1228 return ret;
1214} 1229}
1215 1230
1231/*
1232 * attempt to install the calling process's session keyring on the process's
1233 * parent process
1234 * - the keyring must exist and must grant us LINK permission
1235 * - implements keyctl(KEYCTL_SESSION_TO_PARENT)
1236 */
1237long keyctl_session_to_parent(void)
1238{
1239 struct task_struct *me, *parent;
1240 const struct cred *mycred, *pcred;
1241 struct cred *cred, *oldcred;
1242 key_ref_t keyring_r;
1243 int ret;
1244
1245 keyring_r = lookup_user_key(KEY_SPEC_SESSION_KEYRING, 0, KEY_LINK);
1246 if (IS_ERR(keyring_r))
1247 return PTR_ERR(keyring_r);
1248
1249 /* our parent is going to need a new cred struct, a new tgcred struct
1250 * and new security data, so we allocate them here to prevent ENOMEM in
1251 * our parent */
1252 ret = -ENOMEM;
1253 cred = cred_alloc_blank();
1254 if (!cred)
1255 goto error_keyring;
1256
1257 cred->tgcred->session_keyring = key_ref_to_ptr(keyring_r);
1258 keyring_r = NULL;
1259
1260 me = current;
1261 write_lock_irq(&tasklist_lock);
1262
1263 parent = me->real_parent;
1264 ret = -EPERM;
1265
1266 /* the parent mustn't be init and mustn't be a kernel thread */
1267 if (parent->pid <= 1 || !parent->mm)
1268 goto not_permitted;
1269
1270 /* the parent must be single threaded */
1271 if (atomic_read(&parent->signal->count) != 1)
1272 goto not_permitted;
1273
1274 /* the parent and the child must have different session keyrings or
1275 * there's no point */
1276 mycred = current_cred();
1277 pcred = __task_cred(parent);
1278 if (mycred == pcred ||
1279 mycred->tgcred->session_keyring == pcred->tgcred->session_keyring)
1280 goto already_same;
1281
1282 /* the parent must have the same effective ownership and mustn't be
1283 * SUID/SGID */
1284 if (pcred-> uid != mycred->euid ||
1285 pcred->euid != mycred->euid ||
1286 pcred->suid != mycred->euid ||
1287 pcred-> gid != mycred->egid ||
1288 pcred->egid != mycred->egid ||
1289 pcred->sgid != mycred->egid)
1290 goto not_permitted;
1291
1292 /* the keyrings must have the same UID */
1293 if (pcred ->tgcred->session_keyring->uid != mycred->euid ||
1294 mycred->tgcred->session_keyring->uid != mycred->euid)
1295 goto not_permitted;
1296
1297 /* the LSM must permit the replacement of the parent's keyring with the
1298 * keyring from this process */
1299 ret = security_key_session_to_parent(mycred, pcred,
1300 key_ref_to_ptr(keyring_r));
1301 if (ret < 0)
1302 goto not_permitted;
1303
1304 /* if there's an already pending keyring replacement, then we replace
1305 * that */
1306 oldcred = parent->replacement_session_keyring;
1307
1308 /* the replacement session keyring is applied just prior to userspace
1309 * restarting */
1310 parent->replacement_session_keyring = cred;
1311 cred = NULL;
1312 set_ti_thread_flag(task_thread_info(parent), TIF_NOTIFY_RESUME);
1313
1314 write_unlock_irq(&tasklist_lock);
1315 if (oldcred)
1316 put_cred(oldcred);
1317 return 0;
1318
1319already_same:
1320 ret = 0;
1321not_permitted:
1322 put_cred(cred);
1323 return ret;
1324
1325error_keyring:
1326 key_ref_put(keyring_r);
1327 return ret;
1328}
1329
1216/*****************************************************************************/ 1330/*****************************************************************************/
1217/* 1331/*
1218 * the key control system call 1332 * the key control system call
@@ -1298,6 +1412,9 @@ SYSCALL_DEFINE5(keyctl, int, option, unsigned long, arg2, unsigned long, arg3,
1298 (char __user *) arg3, 1412 (char __user *) arg3,
1299 (size_t) arg4); 1413 (size_t) arg4);
1300 1414
1415 case KEYCTL_SESSION_TO_PARENT:
1416 return keyctl_session_to_parent();
1417
1301 default: 1418 default:
1302 return -EOPNOTSUPP; 1419 return -EOPNOTSUPP;
1303 } 1420 }
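
KEYCTL_SESSION_TO_PARENT is reachable from both the native and the compat keyctl() multiplexers, so once the ownership and single-thread checks in keyctl_session_to_parent() pass, a process can push its session keyring up to its parent (typically the invoking shell); the replacement is applied when the parent next returns to userspace via TIF_NOTIFY_RESUME. A hedged userspace sketch (assumes a keyutils that defines the KEYCTL_SESSION_TO_PARENT constant):

	#include <keyutils.h>
	#include <stdio.h>

	int main(void)
	{
		/* Ask the kernel to install our session keyring on our parent. */
		if (keyctl(KEYCTL_SESSION_TO_PARENT) < 0) {
			perror("KEYCTL_SESSION_TO_PARENT");
			return 1;
		}
		return 0;
	}
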
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index 3dba81c2eba3..ac977f661a79 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -1000,3 +1000,88 @@ static void keyring_revoke(struct key *keyring)
1000 } 1000 }
1001 1001
1002} /* end keyring_revoke() */ 1002} /* end keyring_revoke() */
1003
1004/*
1005 * Determine whether a key is dead
1006 */
1007static bool key_is_dead(struct key *key, time_t limit)
1008{
1009 return test_bit(KEY_FLAG_DEAD, &key->flags) ||
1010 (key->expiry > 0 && key->expiry <= limit);
1011}
1012
1013/*
1014 * Collect garbage from the contents of a keyring
1015 */
1016void keyring_gc(struct key *keyring, time_t limit)
1017{
1018 struct keyring_list *klist, *new;
1019 struct key *key;
1020 int loop, keep, max;
1021
1022 kenter("%x", key_serial(keyring));
1023
1024 down_write(&keyring->sem);
1025
1026 klist = keyring->payload.subscriptions;
1027 if (!klist)
1028 goto just_return;
1029
1030 /* work out how many subscriptions we're keeping */
1031 keep = 0;
1032 for (loop = klist->nkeys - 1; loop >= 0; loop--)
1033 if (!key_is_dead(klist->keys[loop], limit))
1034 keep++;
1035
1036 if (keep == klist->nkeys)
1037 goto just_return;
1038
1039 /* allocate a new keyring payload */
1040 max = roundup(keep, 4);
1041 new = kmalloc(sizeof(struct keyring_list) + max * sizeof(struct key *),
1042 GFP_KERNEL);
1043 if (!new)
1044 goto just_return;
1045 new->maxkeys = max;
1046 new->nkeys = 0;
1047 new->delkey = 0;
1048
1049 /* install the live keys
1050 * - must take care as expired keys may be updated back to life
1051 */
1052 keep = 0;
1053 for (loop = klist->nkeys - 1; loop >= 0; loop--) {
1054 key = klist->keys[loop];
1055 if (!key_is_dead(key, limit)) {
1056 if (keep >= max)
1057 goto discard_new;
1058 new->keys[keep++] = key_get(key);
1059 }
1060 }
1061 new->nkeys = keep;
1062
1063 /* adjust the quota */
1064 key_payload_reserve(keyring,
1065 sizeof(struct keyring_list) +
1066 KEYQUOTA_LINK_BYTES * keep);
1067
1068 if (keep == 0) {
1069 rcu_assign_pointer(keyring->payload.subscriptions, NULL);
1070 kfree(new);
1071 } else {
1072 rcu_assign_pointer(keyring->payload.subscriptions, new);
1073 }
1074
1075 up_write(&keyring->sem);
1076
1077 call_rcu(&klist->rcu, keyring_clear_rcu_disposal);
1078 kleave(" [yes]");
1079 return;
1080
1081discard_new:
1082 new->nkeys = keep;
1083 keyring_clear_rcu_disposal(&new->rcu);
1084just_return:
1085 up_write(&keyring->sem);
1086 kleave(" [no]");
1087}
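
keyring_gc() works in two passes over the subscription list: a first pass counts the keys that survive the cut-off, then a new payload rounded up to a multiple of four slots is allocated and the survivors copied across, while the old list is handed to RCU for disposal. A small userspace model of the copy-and-swap step (illustrative only; the entry type, ROUNDUP macro and gc_copy helper are stand-ins, not kernel APIs):

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdbool.h>

    #define ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))

    struct entry { int serial; bool dead; };

    static struct entry *gc_copy(const struct entry *old, int nold, int *nnew)
    {
        struct entry *new;
        int keep = 0, i;

        /* pass 1: count the survivors */
        for (i = 0; i < nold; i++)
            if (!old[i].dead)
                keep++;

        /* allocate a payload rounded up to a multiple of four slots */
        new = calloc(ROUNDUP(keep, 4), sizeof(*new));
        if (!new)
            return NULL;

        /* pass 2: copy the live entries into the new payload */
        keep = 0;
        for (i = 0; i < nold; i++)
            if (!old[i].dead)
                new[keep++] = old[i];

        *nnew = keep;
        return new;     /* the old array is disposed of later (call_rcu in the kernel) */
    }

    int main(void)
    {
        struct entry klist[] = { {1, false}, {2, true}, {3, false}, {4, true} };
        int n;
        struct entry *new = gc_copy(klist, 4, &n);

        printf("kept %d of 4 entries\n", n);
        free(new);
        return 0;
    }
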
diff --git a/security/keys/proc.c b/security/keys/proc.c
index 769f9bdfd2b3..9d01021ca0c8 100644
--- a/security/keys/proc.c
+++ b/security/keys/proc.c
@@ -91,59 +91,94 @@ __initcall(key_proc_init);
91 */ 91 */
92#ifdef CONFIG_KEYS_DEBUG_PROC_KEYS 92#ifdef CONFIG_KEYS_DEBUG_PROC_KEYS
93 93
94static struct rb_node *__key_serial_next(struct rb_node *n) 94static struct rb_node *key_serial_next(struct rb_node *n)
95{ 95{
96 struct user_namespace *user_ns = current_user_ns();
97
98 n = rb_next(n);
96 while (n) { 99 while (n) {
97 struct key *key = rb_entry(n, struct key, serial_node); 100 struct key *key = rb_entry(n, struct key, serial_node);
98 if (key->user->user_ns == current_user_ns()) 101 if (key->user->user_ns == user_ns)
99 break; 102 break;
100 n = rb_next(n); 103 n = rb_next(n);
101 } 104 }
102 return n; 105 return n;
103} 106}
104 107
105static struct rb_node *key_serial_next(struct rb_node *n) 108static int proc_keys_open(struct inode *inode, struct file *file)
106{ 109{
107 return __key_serial_next(rb_next(n)); 110 return seq_open(file, &proc_keys_ops);
108} 111}
109 112
110static struct rb_node *key_serial_first(struct rb_root *r) 113static struct key *find_ge_key(key_serial_t id)
111{ 114{
112 struct rb_node *n = rb_first(r); 115 struct user_namespace *user_ns = current_user_ns();
113 return __key_serial_next(n); 116 struct rb_node *n = key_serial_tree.rb_node;
114} 117 struct key *minkey = NULL;
115 118
116static int proc_keys_open(struct inode *inode, struct file *file) 119 while (n) {
117{ 120 struct key *key = rb_entry(n, struct key, serial_node);
118 return seq_open(file, &proc_keys_ops); 121 if (id < key->serial) {
122 if (!minkey || minkey->serial > key->serial)
123 minkey = key;
124 n = n->rb_left;
125 } else if (id > key->serial) {
126 n = n->rb_right;
127 } else {
128 minkey = key;
129 break;
130 }
131 key = NULL;
132 }
119 133
134 if (!minkey)
135 return NULL;
136
137 for (;;) {
138 if (minkey->user->user_ns == user_ns)
139 return minkey;
140 n = rb_next(&minkey->serial_node);
141 if (!n)
142 return NULL;
143 minkey = rb_entry(n, struct key, serial_node);
144 }
120} 145}
121 146
122static void *proc_keys_start(struct seq_file *p, loff_t *_pos) 147static void *proc_keys_start(struct seq_file *p, loff_t *_pos)
148 __acquires(key_serial_lock)
123{ 149{
124 struct rb_node *_p; 150 key_serial_t pos = *_pos;
125 loff_t pos = *_pos; 151 struct key *key;
126 152
127 spin_lock(&key_serial_lock); 153 spin_lock(&key_serial_lock);
128 154
129 _p = key_serial_first(&key_serial_tree); 155 if (*_pos > INT_MAX)
130 while (pos > 0 && _p) { 156 return NULL;
131 pos--; 157 key = find_ge_key(pos);
132 _p = key_serial_next(_p); 158 if (!key)
133 } 159 return NULL;
134 160 *_pos = key->serial;
135 return _p; 161 return &key->serial_node;
162}
136 163
164static inline key_serial_t key_node_serial(struct rb_node *n)
165{
166 struct key *key = rb_entry(n, struct key, serial_node);
167 return key->serial;
137} 168}
138 169
139static void *proc_keys_next(struct seq_file *p, void *v, loff_t *_pos) 170static void *proc_keys_next(struct seq_file *p, void *v, loff_t *_pos)
140{ 171{
141 (*_pos)++; 172 struct rb_node *n;
142 return key_serial_next((struct rb_node *) v);
143 173
174 n = key_serial_next(v);
175 if (n)
176 *_pos = key_node_serial(n);
177 return n;
144} 178}
145 179
146static void proc_keys_stop(struct seq_file *p, void *v) 180static void proc_keys_stop(struct seq_file *p, void *v)
181 __releases(key_serial_lock)
147{ 182{
148 spin_unlock(&key_serial_lock); 183 spin_unlock(&key_serial_lock);
149} 184}
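
The /proc/keys iterator no longer treats *_pos as a simple record count; it stores the serial number of the last key shown, and find_ge_key() restarts at the first key whose serial is greater than or equal to that value, so keys deleted between reads cannot shift the cursor. The rb-tree descent is essentially a lower-bound search, as in this userspace model (illustrative only; the sorted array and find_ge stand in for the serial rb-tree):

    #include <stdio.h>

    /* return index of the first serial >= pos, or n if none */
    static int find_ge(const int *serials, int n, int pos)
    {
        int lo = 0, hi = n;     /* classic lower-bound binary search */

        while (lo < hi) {
            int mid = lo + (hi - lo) / 2;
            if (serials[mid] < pos)
                lo = mid + 1;
            else
                hi = mid;
        }
        return lo;
    }

    int main(void)
    {
        int serials[] = { 3, 7, 12, 19, 25 };
        int idx = find_ge(serials, 5, 13);

        if (idx < 5)
            printf("resume listing at serial %d\n", serials[idx]);
        return 0;
    }
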
@@ -174,11 +209,9 @@ static int proc_keys_show(struct seq_file *m, void *v)
174 /* come up with a suitable timeout value */ 209 /* come up with a suitable timeout value */
175 if (key->expiry == 0) { 210 if (key->expiry == 0) {
176 memcpy(xbuf, "perm", 5); 211 memcpy(xbuf, "perm", 5);
177 } 212 } else if (now.tv_sec >= key->expiry) {
178 else if (now.tv_sec >= key->expiry) {
179 memcpy(xbuf, "expd", 5); 213 memcpy(xbuf, "expd", 5);
180 } 214 } else {
181 else {
182 timo = key->expiry - now.tv_sec; 215 timo = key->expiry - now.tv_sec;
183 216
184 if (timo < 60) 217 if (timo < 60)
@@ -218,9 +251,7 @@ static int proc_keys_show(struct seq_file *m, void *v)
218 seq_putc(m, '\n'); 251 seq_putc(m, '\n');
219 252
220 rcu_read_unlock(); 253 rcu_read_unlock();
221
222 return 0; 254 return 0;
223
224} 255}
225 256
226#endif /* CONFIG_KEYS_DEBUG_PROC_KEYS */ 257#endif /* CONFIG_KEYS_DEBUG_PROC_KEYS */
@@ -246,6 +277,7 @@ static struct rb_node *key_user_first(struct rb_root *r)
246 struct rb_node *n = rb_first(r); 277 struct rb_node *n = rb_first(r);
247 return __key_user_next(n); 278 return __key_user_next(n);
248} 279}
280
249/*****************************************************************************/ 281/*****************************************************************************/
250/* 282/*
 251 * implement "/proc/key-users" to provide a list of the key users      283
@@ -253,10 +285,10 @@ static struct rb_node *key_user_first(struct rb_root *r)
253static int proc_key_users_open(struct inode *inode, struct file *file) 285static int proc_key_users_open(struct inode *inode, struct file *file)
254{ 286{
255 return seq_open(file, &proc_key_users_ops); 287 return seq_open(file, &proc_key_users_ops);
256
257} 288}
258 289
259static void *proc_key_users_start(struct seq_file *p, loff_t *_pos) 290static void *proc_key_users_start(struct seq_file *p, loff_t *_pos)
291 __acquires(key_user_lock)
260{ 292{
261 struct rb_node *_p; 293 struct rb_node *_p;
262 loff_t pos = *_pos; 294 loff_t pos = *_pos;
@@ -270,17 +302,16 @@ static void *proc_key_users_start(struct seq_file *p, loff_t *_pos)
270 } 302 }
271 303
272 return _p; 304 return _p;
273
274} 305}
275 306
276static void *proc_key_users_next(struct seq_file *p, void *v, loff_t *_pos) 307static void *proc_key_users_next(struct seq_file *p, void *v, loff_t *_pos)
277{ 308{
278 (*_pos)++; 309 (*_pos)++;
279 return key_user_next((struct rb_node *) v); 310 return key_user_next((struct rb_node *) v);
280
281} 311}
282 312
283static void proc_key_users_stop(struct seq_file *p, void *v) 313static void proc_key_users_stop(struct seq_file *p, void *v)
314 __releases(key_user_lock)
284{ 315{
285 spin_unlock(&key_user_lock); 316 spin_unlock(&key_user_lock);
286} 317}
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index 276d27882ce8..5c23afb31ece 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -17,6 +17,7 @@
17#include <linux/fs.h> 17#include <linux/fs.h>
18#include <linux/err.h> 18#include <linux/err.h>
19#include <linux/mutex.h> 19#include <linux/mutex.h>
20#include <linux/security.h>
20#include <linux/user_namespace.h> 21#include <linux/user_namespace.h>
21#include <asm/uaccess.h> 22#include <asm/uaccess.h>
22#include "internal.h" 23#include "internal.h"
@@ -487,7 +488,7 @@ static int lookup_user_key_possessed(const struct key *key, const void *target)
487 * - don't create special keyrings unless so requested 488 * - don't create special keyrings unless so requested
488 * - partially constructed keys aren't found unless requested 489 * - partially constructed keys aren't found unless requested
489 */ 490 */
490key_ref_t lookup_user_key(key_serial_t id, int create, int partial, 491key_ref_t lookup_user_key(key_serial_t id, unsigned long lflags,
491 key_perm_t perm) 492 key_perm_t perm)
492{ 493{
493 struct request_key_auth *rka; 494 struct request_key_auth *rka;
@@ -503,7 +504,7 @@ try_again:
503 switch (id) { 504 switch (id) {
504 case KEY_SPEC_THREAD_KEYRING: 505 case KEY_SPEC_THREAD_KEYRING:
505 if (!cred->thread_keyring) { 506 if (!cred->thread_keyring) {
506 if (!create) 507 if (!(lflags & KEY_LOOKUP_CREATE))
507 goto error; 508 goto error;
508 509
509 ret = install_thread_keyring(); 510 ret = install_thread_keyring();
@@ -521,7 +522,7 @@ try_again:
521 522
522 case KEY_SPEC_PROCESS_KEYRING: 523 case KEY_SPEC_PROCESS_KEYRING:
523 if (!cred->tgcred->process_keyring) { 524 if (!cred->tgcred->process_keyring) {
524 if (!create) 525 if (!(lflags & KEY_LOOKUP_CREATE))
525 goto error; 526 goto error;
526 527
527 ret = install_process_keyring(); 528 ret = install_process_keyring();
@@ -642,7 +643,14 @@ try_again:
642 break; 643 break;
643 } 644 }
644 645
645 if (!partial) { 646 /* unlink does not use the nominated key in any way, so can skip all
647 * the permission checks as it is only concerned with the keyring */
648 if (lflags & KEY_LOOKUP_FOR_UNLINK) {
649 ret = 0;
650 goto error;
651 }
652
653 if (!(lflags & KEY_LOOKUP_PARTIAL)) {
646 ret = wait_for_key_construction(key, true); 654 ret = wait_for_key_construction(key, true);
647 switch (ret) { 655 switch (ret) {
648 case -ERESTARTSYS: 656 case -ERESTARTSYS:
@@ -660,7 +668,8 @@ try_again:
660 } 668 }
661 669
662 ret = -EIO; 670 ret = -EIO;
663 if (!partial && !test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) 671 if (!(lflags & KEY_LOOKUP_PARTIAL) &&
672 !test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
664 goto invalid_key; 673 goto invalid_key;
665 674
666 /* check the permissions */ 675 /* check the permissions */
@@ -702,7 +711,7 @@ long join_session_keyring(const char *name)
702 /* only permit this if there's a single thread in the thread group - 711 /* only permit this if there's a single thread in the thread group -
703 * this avoids us having to adjust the creds on all threads and risking 712 * this avoids us having to adjust the creds on all threads and risking
704 * ENOMEM */ 713 * ENOMEM */
705 if (!is_single_threaded(current)) 714 if (!current_is_single_threaded())
706 return -EMLINK; 715 return -EMLINK;
707 716
708 new = prepare_creds(); 717 new = prepare_creds();
@@ -760,3 +769,51 @@ error:
760 abort_creds(new); 769 abort_creds(new);
761 return ret; 770 return ret;
762} 771}
772
773/*
774 * Replace a process's session keyring when that process resumes userspace on
775 * behalf of one of its children
776 */
777void key_replace_session_keyring(void)
778{
779 const struct cred *old;
780 struct cred *new;
781
782 if (!current->replacement_session_keyring)
783 return;
784
785 write_lock_irq(&tasklist_lock);
786 new = current->replacement_session_keyring;
787 current->replacement_session_keyring = NULL;
788 write_unlock_irq(&tasklist_lock);
789
790 if (!new)
791 return;
792
793 old = current_cred();
 794        new->uid   = old->uid;
 795        new->euid  = old->euid;
 796        new->suid  = old->suid;
 797        new->fsuid = old->fsuid;
 798        new->gid   = old->gid;
 799        new->egid  = old->egid;
 800        new->sgid  = old->sgid;
 801        new->fsgid = old->fsgid;
802 new->user = get_uid(old->user);
803 new->group_info = get_group_info(old->group_info);
804
805 new->securebits = old->securebits;
806 new->cap_inheritable = old->cap_inheritable;
807 new->cap_permitted = old->cap_permitted;
808 new->cap_effective = old->cap_effective;
809 new->cap_bset = old->cap_bset;
810
811 new->jit_keyring = old->jit_keyring;
812 new->thread_keyring = key_get(old->thread_keyring);
813 new->tgcred->tgid = old->tgcred->tgid;
814 new->tgcred->process_keyring = key_get(old->tgcred->process_keyring);
815
816 security_transfer_creds(new, old);
817
818 commit_creds(new);
819}
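
key_replace_session_keyring() is the second half of the handoff started by keyctl_session_to_parent(): the child queues a prepared cred on parent->replacement_session_keyring and raises TIF_NOTIFY_RESUME, and when the parent next heads back to userspace it copies its own UIDs, GIDs and capabilities into that cred and commits it, so only the session keyring actually changes. The publish-then-apply pattern looks roughly like this userspace model (illustrative only; the atomics stand in for tasklist_lock and the thread flag):

    #include <stdio.h>
    #include <stdatomic.h>

    static _Atomic(int *) pending_value;    /* parent->replacement_session_keyring */
    static int current_value = 1;           /* the parent's current session keyring */

    /* done by the requesting side ("child"): publish the replacement */
    static void queue_replacement(int *v)
    {
        atomic_store(&pending_value, v);
    }

    /* run by the owning side ("parent") at its next safe point */
    static void notify_resume(void)
    {
        int *v = atomic_exchange(&pending_value, NULL);

        if (v)
            current_value = *v;
    }

    int main(void)
    {
        int new_keyring = 2;

        queue_replacement(&new_keyring);
        notify_resume();
        printf("session keyring is now %d\n", current_value);
        return 0;
    }
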
diff --git a/security/keys/sysctl.c b/security/keys/sysctl.c
index b611d493c2d8..5e05dc09e2db 100644
--- a/security/keys/sysctl.c
+++ b/security/keys/sysctl.c
@@ -13,6 +13,8 @@
13#include <linux/sysctl.h> 13#include <linux/sysctl.h>
14#include "internal.h" 14#include "internal.h"
15 15
16static const int zero, one = 1, max = INT_MAX;
17
16ctl_table key_sysctls[] = { 18ctl_table key_sysctls[] = {
17 { 19 {
18 .ctl_name = CTL_UNNUMBERED, 20 .ctl_name = CTL_UNNUMBERED,
@@ -20,7 +22,9 @@ ctl_table key_sysctls[] = {
20 .data = &key_quota_maxkeys, 22 .data = &key_quota_maxkeys,
21 .maxlen = sizeof(unsigned), 23 .maxlen = sizeof(unsigned),
22 .mode = 0644, 24 .mode = 0644,
23 .proc_handler = &proc_dointvec, 25 .proc_handler = &proc_dointvec_minmax,
26 .extra1 = (void *) &one,
27 .extra2 = (void *) &max,
24 }, 28 },
25 { 29 {
26 .ctl_name = CTL_UNNUMBERED, 30 .ctl_name = CTL_UNNUMBERED,
@@ -28,7 +32,9 @@ ctl_table key_sysctls[] = {
28 .data = &key_quota_maxbytes, 32 .data = &key_quota_maxbytes,
29 .maxlen = sizeof(unsigned), 33 .maxlen = sizeof(unsigned),
30 .mode = 0644, 34 .mode = 0644,
31 .proc_handler = &proc_dointvec, 35 .proc_handler = &proc_dointvec_minmax,
36 .extra1 = (void *) &one,
37 .extra2 = (void *) &max,
32 }, 38 },
33 { 39 {
34 .ctl_name = CTL_UNNUMBERED, 40 .ctl_name = CTL_UNNUMBERED,
@@ -36,7 +42,9 @@ ctl_table key_sysctls[] = {
36 .data = &key_quota_root_maxkeys, 42 .data = &key_quota_root_maxkeys,
37 .maxlen = sizeof(unsigned), 43 .maxlen = sizeof(unsigned),
38 .mode = 0644, 44 .mode = 0644,
39 .proc_handler = &proc_dointvec, 45 .proc_handler = &proc_dointvec_minmax,
46 .extra1 = (void *) &one,
47 .extra2 = (void *) &max,
40 }, 48 },
41 { 49 {
42 .ctl_name = CTL_UNNUMBERED, 50 .ctl_name = CTL_UNNUMBERED,
@@ -44,7 +52,19 @@ ctl_table key_sysctls[] = {
44 .data = &key_quota_root_maxbytes, 52 .data = &key_quota_root_maxbytes,
45 .maxlen = sizeof(unsigned), 53 .maxlen = sizeof(unsigned),
46 .mode = 0644, 54 .mode = 0644,
47 .proc_handler = &proc_dointvec, 55 .proc_handler = &proc_dointvec_minmax,
56 .extra1 = (void *) &one,
57 .extra2 = (void *) &max,
58 },
59 {
60 .ctl_name = CTL_UNNUMBERED,
61 .procname = "gc_delay",
62 .data = &key_gc_delay,
63 .maxlen = sizeof(unsigned),
64 .mode = 0644,
65 .proc_handler = &proc_dointvec_minmax,
66 .extra1 = (void *) &zero,
67 .extra2 = (void *) &max,
48 }, 68 },
49 { .ctl_name = 0 } 69 { .ctl_name = 0 }
50}; 70};
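
Switching the four quota knobs to proc_dointvec_minmax means out-of-range writes are now rejected instead of being stored, and the new gc_delay knob controls how long dead keys may linger before keyring_gc() sweeps them. Assuming the table is registered under /proc/sys/kernel/keys/ as in mainline, tuning it from a program is just a write to the proc file (illustrative sketch only):

    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/proc/sys/kernel/keys/gc_delay", "w");

        if (!f) {
            perror("gc_delay");
            return 1;
        }
        /* collect dead keys after five minutes; values outside [0, INT_MAX]
         * are now refused by proc_dointvec_minmax */
        fprintf(f, "%d\n", 300);
        fclose(f);
        return 0;
    }
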
diff --git a/security/lsm_audit.c b/security/lsm_audit.c
index 94b868494b31..500aad0ebd6a 100644
--- a/security/lsm_audit.c
+++ b/security/lsm_audit.c
@@ -220,6 +220,8 @@ static void dump_common_audit_data(struct audit_buffer *ab,
220 } 220 }
221 221
222 switch (a->type) { 222 switch (a->type) {
223 case LSM_AUDIT_NO_AUDIT:
224 return;
223 case LSM_AUDIT_DATA_IPC: 225 case LSM_AUDIT_DATA_IPC:
224 audit_log_format(ab, " key=%d ", a->u.ipc_id); 226 audit_log_format(ab, " key=%d ", a->u.ipc_id);
225 break; 227 break;
diff --git a/security/security.c b/security/security.c
index dc7674fbfc7a..c4c673240c1c 100644
--- a/security/security.c
+++ b/security/security.c
@@ -124,9 +124,9 @@ int register_security(struct security_operations *ops)
124 124
125/* Security operations */ 125/* Security operations */
126 126
127int security_ptrace_may_access(struct task_struct *child, unsigned int mode) 127int security_ptrace_access_check(struct task_struct *child, unsigned int mode)
128{ 128{
129 return security_ops->ptrace_may_access(child, mode); 129 return security_ops->ptrace_access_check(child, mode);
130} 130}
131 131
132int security_ptrace_traceme(struct task_struct *parent) 132int security_ptrace_traceme(struct task_struct *parent)
@@ -684,6 +684,11 @@ int security_task_create(unsigned long clone_flags)
684 return security_ops->task_create(clone_flags); 684 return security_ops->task_create(clone_flags);
685} 685}
686 686
687int security_cred_alloc_blank(struct cred *cred, gfp_t gfp)
688{
689 return security_ops->cred_alloc_blank(cred, gfp);
690}
691
687void security_cred_free(struct cred *cred) 692void security_cred_free(struct cred *cred)
688{ 693{
689 security_ops->cred_free(cred); 694 security_ops->cred_free(cred);
@@ -699,6 +704,11 @@ void security_commit_creds(struct cred *new, const struct cred *old)
699 security_ops->cred_commit(new, old); 704 security_ops->cred_commit(new, old);
700} 705}
701 706
707void security_transfer_creds(struct cred *new, const struct cred *old)
708{
709 security_ops->cred_transfer(new, old);
710}
711
702int security_kernel_act_as(struct cred *new, u32 secid) 712int security_kernel_act_as(struct cred *new, u32 secid)
703{ 713{
704 return security_ops->kernel_act_as(new, secid); 714 return security_ops->kernel_act_as(new, secid);
@@ -709,6 +719,11 @@ int security_kernel_create_files_as(struct cred *new, struct inode *inode)
709 return security_ops->kernel_create_files_as(new, inode); 719 return security_ops->kernel_create_files_as(new, inode);
710} 720}
711 721
722int security_kernel_module_request(void)
723{
724 return security_ops->kernel_module_request();
725}
726
712int security_task_setuid(uid_t id0, uid_t id1, uid_t id2, int flags) 727int security_task_setuid(uid_t id0, uid_t id1, uid_t id2, int flags)
713{ 728{
714 return security_ops->task_setuid(id0, id1, id2, flags); 729 return security_ops->task_setuid(id0, id1, id2, flags);
@@ -959,6 +974,24 @@ void security_release_secctx(char *secdata, u32 seclen)
959} 974}
960EXPORT_SYMBOL(security_release_secctx); 975EXPORT_SYMBOL(security_release_secctx);
961 976
977int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen)
978{
979 return security_ops->inode_notifysecctx(inode, ctx, ctxlen);
980}
981EXPORT_SYMBOL(security_inode_notifysecctx);
982
983int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen)
984{
985 return security_ops->inode_setsecctx(dentry, ctx, ctxlen);
986}
987EXPORT_SYMBOL(security_inode_setsecctx);
988
989int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
990{
991 return security_ops->inode_getsecctx(inode, ctx, ctxlen);
992}
993EXPORT_SYMBOL(security_inode_getsecctx);
994
962#ifdef CONFIG_SECURITY_NETWORK 995#ifdef CONFIG_SECURITY_NETWORK
963 996
964int security_unix_stream_connect(struct socket *sock, struct socket *other, 997int security_unix_stream_connect(struct socket *sock, struct socket *other,
@@ -1112,6 +1145,24 @@ void security_inet_conn_established(struct sock *sk,
1112 security_ops->inet_conn_established(sk, skb); 1145 security_ops->inet_conn_established(sk, skb);
1113} 1146}
1114 1147
1148int security_tun_dev_create(void)
1149{
1150 return security_ops->tun_dev_create();
1151}
1152EXPORT_SYMBOL(security_tun_dev_create);
1153
1154void security_tun_dev_post_create(struct sock *sk)
1155{
1156 return security_ops->tun_dev_post_create(sk);
1157}
1158EXPORT_SYMBOL(security_tun_dev_post_create);
1159
1160int security_tun_dev_attach(struct sock *sk)
1161{
1162 return security_ops->tun_dev_attach(sk);
1163}
1164EXPORT_SYMBOL(security_tun_dev_attach);
1165
1115#endif /* CONFIG_SECURITY_NETWORK */ 1166#endif /* CONFIG_SECURITY_NETWORK */
1116 1167
1117#ifdef CONFIG_SECURITY_NETWORK_XFRM 1168#ifdef CONFIG_SECURITY_NETWORK_XFRM
@@ -1218,6 +1269,13 @@ int security_key_getsecurity(struct key *key, char **_buffer)
1218 return security_ops->key_getsecurity(key, _buffer); 1269 return security_ops->key_getsecurity(key, _buffer);
1219} 1270}
1220 1271
1272int security_key_session_to_parent(const struct cred *cred,
1273 const struct cred *parent_cred,
1274 struct key *key)
1275{
1276 return security_ops->key_session_to_parent(cred, parent_cred, key);
1277}
1278
1221#endif /* CONFIG_KEYS */ 1279#endif /* CONFIG_KEYS */
1222 1280
1223#ifdef CONFIG_AUDIT 1281#ifdef CONFIG_AUDIT
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index b2ab60859832..e3d19014259b 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -137,7 +137,7 @@ static inline int avc_hash(u32 ssid, u32 tsid, u16 tclass)
137 * @tclass: target security class 137 * @tclass: target security class
138 * @av: access vector 138 * @av: access vector
139 */ 139 */
140void avc_dump_av(struct audit_buffer *ab, u16 tclass, u32 av) 140static void avc_dump_av(struct audit_buffer *ab, u16 tclass, u32 av)
141{ 141{
142 const char **common_pts = NULL; 142 const char **common_pts = NULL;
143 u32 common_base = 0; 143 u32 common_base = 0;
@@ -492,23 +492,35 @@ out:
492 return node; 492 return node;
493} 493}
494 494
495static inline void avc_print_ipv6_addr(struct audit_buffer *ab, 495/**
496 struct in6_addr *addr, __be16 port, 496 * avc_audit_pre_callback - SELinux specific information
497 char *name1, char *name2) 497 * will be called by generic audit code
498 * @ab: the audit buffer
499 * @a: audit_data
500 */
501static void avc_audit_pre_callback(struct audit_buffer *ab, void *a)
498{ 502{
499 if (!ipv6_addr_any(addr)) 503 struct common_audit_data *ad = a;
500 audit_log_format(ab, " %s=%pI6", name1, addr); 504 audit_log_format(ab, "avc: %s ",
501 if (port) 505 ad->selinux_audit_data.denied ? "denied" : "granted");
502 audit_log_format(ab, " %s=%d", name2, ntohs(port)); 506 avc_dump_av(ab, ad->selinux_audit_data.tclass,
507 ad->selinux_audit_data.audited);
508 audit_log_format(ab, " for ");
503} 509}
504 510
505static inline void avc_print_ipv4_addr(struct audit_buffer *ab, __be32 addr, 511/**
506 __be16 port, char *name1, char *name2) 512 * avc_audit_post_callback - SELinux specific information
513 * will be called by generic audit code
514 * @ab: the audit buffer
515 * @a: audit_data
516 */
517static void avc_audit_post_callback(struct audit_buffer *ab, void *a)
507{ 518{
508 if (addr) 519 struct common_audit_data *ad = a;
509 audit_log_format(ab, " %s=%pI4", name1, &addr); 520 audit_log_format(ab, " ");
510 if (port) 521 avc_dump_query(ab, ad->selinux_audit_data.ssid,
511 audit_log_format(ab, " %s=%d", name2, ntohs(port)); 522 ad->selinux_audit_data.tsid,
523 ad->selinux_audit_data.tclass);
512} 524}
513 525
514/** 526/**
@@ -532,13 +544,10 @@ static inline void avc_print_ipv4_addr(struct audit_buffer *ab, __be32 addr,
532 */ 544 */
533void avc_audit(u32 ssid, u32 tsid, 545void avc_audit(u32 ssid, u32 tsid,
534 u16 tclass, u32 requested, 546 u16 tclass, u32 requested,
535 struct av_decision *avd, int result, struct avc_audit_data *a) 547 struct av_decision *avd, int result, struct common_audit_data *a)
536{ 548{
537 struct task_struct *tsk = current; 549 struct common_audit_data stack_data;
538 struct inode *inode = NULL;
539 u32 denied, audited; 550 u32 denied, audited;
540 struct audit_buffer *ab;
541
542 denied = requested & ~avd->allowed; 551 denied = requested & ~avd->allowed;
543 if (denied) { 552 if (denied) {
544 audited = denied; 553 audited = denied;
@@ -551,144 +560,20 @@ void avc_audit(u32 ssid, u32 tsid,
551 if (!(audited & avd->auditallow)) 560 if (!(audited & avd->auditallow))
552 return; 561 return;
553 } 562 }
554 563 if (!a) {
555 ab = audit_log_start(current->audit_context, GFP_ATOMIC, AUDIT_AVC); 564 a = &stack_data;
556 if (!ab) 565 memset(a, 0, sizeof(*a));
557 return; /* audit_panic has been called */ 566 a->type = LSM_AUDIT_NO_AUDIT;
558 audit_log_format(ab, "avc: %s ", denied ? "denied" : "granted");
559 avc_dump_av(ab, tclass, audited);
560 audit_log_format(ab, " for ");
561 if (a && a->tsk)
562 tsk = a->tsk;
563 if (tsk && tsk->pid) {
564 audit_log_format(ab, " pid=%d comm=", tsk->pid);
565 audit_log_untrustedstring(ab, tsk->comm);
566 } 567 }
567 if (a) { 568 a->selinux_audit_data.tclass = tclass;
568 switch (a->type) { 569 a->selinux_audit_data.requested = requested;
569 case AVC_AUDIT_DATA_IPC: 570 a->selinux_audit_data.ssid = ssid;
570 audit_log_format(ab, " key=%d", a->u.ipc_id); 571 a->selinux_audit_data.tsid = tsid;
571 break; 572 a->selinux_audit_data.audited = audited;
572 case AVC_AUDIT_DATA_CAP: 573 a->selinux_audit_data.denied = denied;
573 audit_log_format(ab, " capability=%d", a->u.cap); 574 a->lsm_pre_audit = avc_audit_pre_callback;
574 break; 575 a->lsm_post_audit = avc_audit_post_callback;
575 case AVC_AUDIT_DATA_FS: 576 common_lsm_audit(a);
576 if (a->u.fs.path.dentry) {
577 struct dentry *dentry = a->u.fs.path.dentry;
578 if (a->u.fs.path.mnt) {
579 audit_log_d_path(ab, "path=",
580 &a->u.fs.path);
581 } else {
582 audit_log_format(ab, " name=");
583 audit_log_untrustedstring(ab, dentry->d_name.name);
584 }
585 inode = dentry->d_inode;
586 } else if (a->u.fs.inode) {
587 struct dentry *dentry;
588 inode = a->u.fs.inode;
589 dentry = d_find_alias(inode);
590 if (dentry) {
591 audit_log_format(ab, " name=");
592 audit_log_untrustedstring(ab, dentry->d_name.name);
593 dput(dentry);
594 }
595 }
596 if (inode)
597 audit_log_format(ab, " dev=%s ino=%lu",
598 inode->i_sb->s_id,
599 inode->i_ino);
600 break;
601 case AVC_AUDIT_DATA_NET:
602 if (a->u.net.sk) {
603 struct sock *sk = a->u.net.sk;
604 struct unix_sock *u;
605 int len = 0;
606 char *p = NULL;
607
608 switch (sk->sk_family) {
609 case AF_INET: {
610 struct inet_sock *inet = inet_sk(sk);
611
612 avc_print_ipv4_addr(ab, inet->rcv_saddr,
613 inet->sport,
614 "laddr", "lport");
615 avc_print_ipv4_addr(ab, inet->daddr,
616 inet->dport,
617 "faddr", "fport");
618 break;
619 }
620 case AF_INET6: {
621 struct inet_sock *inet = inet_sk(sk);
622 struct ipv6_pinfo *inet6 = inet6_sk(sk);
623
624 avc_print_ipv6_addr(ab, &inet6->rcv_saddr,
625 inet->sport,
626 "laddr", "lport");
627 avc_print_ipv6_addr(ab, &inet6->daddr,
628 inet->dport,
629 "faddr", "fport");
630 break;
631 }
632 case AF_UNIX:
633 u = unix_sk(sk);
634 if (u->dentry) {
635 struct path path = {
636 .dentry = u->dentry,
637 .mnt = u->mnt
638 };
639 audit_log_d_path(ab, "path=",
640 &path);
641 break;
642 }
643 if (!u->addr)
644 break;
645 len = u->addr->len-sizeof(short);
646 p = &u->addr->name->sun_path[0];
647 audit_log_format(ab, " path=");
648 if (*p)
649 audit_log_untrustedstring(ab, p);
650 else
651 audit_log_n_hex(ab, p, len);
652 break;
653 }
654 }
655
656 switch (a->u.net.family) {
657 case AF_INET:
658 avc_print_ipv4_addr(ab, a->u.net.v4info.saddr,
659 a->u.net.sport,
660 "saddr", "src");
661 avc_print_ipv4_addr(ab, a->u.net.v4info.daddr,
662 a->u.net.dport,
663 "daddr", "dest");
664 break;
665 case AF_INET6:
666 avc_print_ipv6_addr(ab, &a->u.net.v6info.saddr,
667 a->u.net.sport,
668 "saddr", "src");
669 avc_print_ipv6_addr(ab, &a->u.net.v6info.daddr,
670 a->u.net.dport,
671 "daddr", "dest");
672 break;
673 }
674 if (a->u.net.netif > 0) {
675 struct net_device *dev;
676
677 /* NOTE: we always use init's namespace */
678 dev = dev_get_by_index(&init_net,
679 a->u.net.netif);
680 if (dev) {
681 audit_log_format(ab, " netif=%s",
682 dev->name);
683 dev_put(dev);
684 }
685 }
686 break;
687 }
688 }
689 audit_log_format(ab, " ");
690 avc_dump_query(ab, ssid, tsid, tclass);
691 audit_log_end(ab);
692} 577}
693 578
694/** 579/**
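
avc_audit() no longer builds the whole audit record itself: it fills in a common_audit_data, points lsm_pre_audit/lsm_post_audit at two SELinux callbacks, and lets common_lsm_audit() emit the shared fields (pid, comm, paths, addresses) in between. The shape of that split, reduced to a userspace model (illustrative only; the names are stand-ins, not the kernel API):

    #include <stdio.h>

    struct audit_data {
        const char *op;
        void (*pre)(const struct audit_data *);
        void (*post)(const struct audit_data *);
    };

    /* the generic layer owns the record and sandwiches the common fields
     * between the LSM-specific prefix and suffix */
    static void common_audit(const struct audit_data *a)
    {
        a->pre(a);
        printf(" pid=%d comm=%s", 1234, "example");
        a->post(a);
        printf("\n");
    }

    static void avc_pre(const struct audit_data *a)
    {
        printf("avc: denied { %s } for", a->op);
    }

    static void avc_post(const struct audit_data *a)
    {
        (void)a;
        printf(" scontext=... tcontext=... tclass=file");
    }

    int main(void)
    {
        struct audit_data a = { "read", avc_pre, avc_post };

        common_audit(&a);
        return 0;
    }
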
@@ -956,7 +841,7 @@ out:
956 * another -errno upon other errors. 841 * another -errno upon other errors.
957 */ 842 */
958int avc_has_perm(u32 ssid, u32 tsid, u16 tclass, 843int avc_has_perm(u32 ssid, u32 tsid, u16 tclass,
959 u32 requested, struct avc_audit_data *auditdata) 844 u32 requested, struct common_audit_data *auditdata)
960{ 845{
961 struct av_decision avd; 846 struct av_decision avd;
962 int rc; 847 int rc;
@@ -970,3 +855,9 @@ u32 avc_policy_seqno(void)
970{ 855{
971 return avc_cache.latest_notif; 856 return avc_cache.latest_notif;
972} 857}
858
859void avc_disable(void)
860{
861 if (avc_node_cachep)
862 kmem_cache_destroy(avc_node_cachep);
863}
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 8d8b69c5664e..417f7c994522 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -13,8 +13,8 @@
13 * Eric Paris <eparis@redhat.com> 13 * Eric Paris <eparis@redhat.com>
14 * Copyright (C) 2004-2005 Trusted Computer Solutions, Inc. 14 * Copyright (C) 2004-2005 Trusted Computer Solutions, Inc.
15 * <dgoeddel@trustedcs.com> 15 * <dgoeddel@trustedcs.com>
16 * Copyright (C) 2006, 2007 Hewlett-Packard Development Company, L.P. 16 * Copyright (C) 2006, 2007, 2009 Hewlett-Packard Development Company, L.P.
17 * Paul Moore <paul.moore@hp.com> 17 * Paul Moore <paul.moore@hp.com>
18 * Copyright (C) 2007 Hitachi Software Engineering Co., Ltd. 18 * Copyright (C) 2007 Hitachi Software Engineering Co., Ltd.
19 * Yuichi Nakamura <ynakam@hitachisoft.jp> 19 * Yuichi Nakamura <ynakam@hitachisoft.jp>
20 * 20 *
@@ -448,6 +448,10 @@ static int sb_finish_set_opts(struct super_block *sb)
448 sbsec->behavior > ARRAY_SIZE(labeling_behaviors)) 448 sbsec->behavior > ARRAY_SIZE(labeling_behaviors))
449 sbsec->flags &= ~SE_SBLABELSUPP; 449 sbsec->flags &= ~SE_SBLABELSUPP;
450 450
 451        /* Special handling for sysfs. Is genfs but also has setxattr handler */
452 if (strncmp(sb->s_type->name, "sysfs", sizeof("sysfs")) == 0)
453 sbsec->flags |= SE_SBLABELSUPP;
454
451 /* Initialize the root inode. */ 455 /* Initialize the root inode. */
452 rc = inode_doinit_with_dentry(root_inode, root); 456 rc = inode_doinit_with_dentry(root_inode, root);
453 457
@@ -1479,14 +1483,14 @@ static int task_has_capability(struct task_struct *tsk,
1479 const struct cred *cred, 1483 const struct cred *cred,
1480 int cap, int audit) 1484 int cap, int audit)
1481{ 1485{
1482 struct avc_audit_data ad; 1486 struct common_audit_data ad;
1483 struct av_decision avd; 1487 struct av_decision avd;
1484 u16 sclass; 1488 u16 sclass;
1485 u32 sid = cred_sid(cred); 1489 u32 sid = cred_sid(cred);
1486 u32 av = CAP_TO_MASK(cap); 1490 u32 av = CAP_TO_MASK(cap);
1487 int rc; 1491 int rc;
1488 1492
1489 AVC_AUDIT_DATA_INIT(&ad, CAP); 1493 COMMON_AUDIT_DATA_INIT(&ad, CAP);
1490 ad.tsk = tsk; 1494 ad.tsk = tsk;
1491 ad.u.cap = cap; 1495 ad.u.cap = cap;
1492 1496
@@ -1525,12 +1529,14 @@ static int task_has_system(struct task_struct *tsk,
1525static int inode_has_perm(const struct cred *cred, 1529static int inode_has_perm(const struct cred *cred,
1526 struct inode *inode, 1530 struct inode *inode,
1527 u32 perms, 1531 u32 perms,
1528 struct avc_audit_data *adp) 1532 struct common_audit_data *adp)
1529{ 1533{
1530 struct inode_security_struct *isec; 1534 struct inode_security_struct *isec;
1531 struct avc_audit_data ad; 1535 struct common_audit_data ad;
1532 u32 sid; 1536 u32 sid;
1533 1537
1538 validate_creds(cred);
1539
1534 if (unlikely(IS_PRIVATE(inode))) 1540 if (unlikely(IS_PRIVATE(inode)))
1535 return 0; 1541 return 0;
1536 1542
@@ -1539,7 +1545,7 @@ static int inode_has_perm(const struct cred *cred,
1539 1545
1540 if (!adp) { 1546 if (!adp) {
1541 adp = &ad; 1547 adp = &ad;
1542 AVC_AUDIT_DATA_INIT(&ad, FS); 1548 COMMON_AUDIT_DATA_INIT(&ad, FS);
1543 ad.u.fs.inode = inode; 1549 ad.u.fs.inode = inode;
1544 } 1550 }
1545 1551
@@ -1555,9 +1561,9 @@ static inline int dentry_has_perm(const struct cred *cred,
1555 u32 av) 1561 u32 av)
1556{ 1562{
1557 struct inode *inode = dentry->d_inode; 1563 struct inode *inode = dentry->d_inode;
1558 struct avc_audit_data ad; 1564 struct common_audit_data ad;
1559 1565
1560 AVC_AUDIT_DATA_INIT(&ad, FS); 1566 COMMON_AUDIT_DATA_INIT(&ad, FS);
1561 ad.u.fs.path.mnt = mnt; 1567 ad.u.fs.path.mnt = mnt;
1562 ad.u.fs.path.dentry = dentry; 1568 ad.u.fs.path.dentry = dentry;
1563 return inode_has_perm(cred, inode, av, &ad); 1569 return inode_has_perm(cred, inode, av, &ad);
@@ -1577,11 +1583,11 @@ static int file_has_perm(const struct cred *cred,
1577{ 1583{
1578 struct file_security_struct *fsec = file->f_security; 1584 struct file_security_struct *fsec = file->f_security;
1579 struct inode *inode = file->f_path.dentry->d_inode; 1585 struct inode *inode = file->f_path.dentry->d_inode;
1580 struct avc_audit_data ad; 1586 struct common_audit_data ad;
1581 u32 sid = cred_sid(cred); 1587 u32 sid = cred_sid(cred);
1582 int rc; 1588 int rc;
1583 1589
1584 AVC_AUDIT_DATA_INIT(&ad, FS); 1590 COMMON_AUDIT_DATA_INIT(&ad, FS);
1585 ad.u.fs.path = file->f_path; 1591 ad.u.fs.path = file->f_path;
1586 1592
1587 if (sid != fsec->sid) { 1593 if (sid != fsec->sid) {
@@ -1612,7 +1618,7 @@ static int may_create(struct inode *dir,
1612 struct inode_security_struct *dsec; 1618 struct inode_security_struct *dsec;
1613 struct superblock_security_struct *sbsec; 1619 struct superblock_security_struct *sbsec;
1614 u32 sid, newsid; 1620 u32 sid, newsid;
1615 struct avc_audit_data ad; 1621 struct common_audit_data ad;
1616 int rc; 1622 int rc;
1617 1623
1618 dsec = dir->i_security; 1624 dsec = dir->i_security;
@@ -1621,7 +1627,7 @@ static int may_create(struct inode *dir,
1621 sid = tsec->sid; 1627 sid = tsec->sid;
1622 newsid = tsec->create_sid; 1628 newsid = tsec->create_sid;
1623 1629
1624 AVC_AUDIT_DATA_INIT(&ad, FS); 1630 COMMON_AUDIT_DATA_INIT(&ad, FS);
1625 ad.u.fs.path.dentry = dentry; 1631 ad.u.fs.path.dentry = dentry;
1626 1632
1627 rc = avc_has_perm(sid, dsec->sid, SECCLASS_DIR, 1633 rc = avc_has_perm(sid, dsec->sid, SECCLASS_DIR,
@@ -1665,7 +1671,7 @@ static int may_link(struct inode *dir,
1665 1671
1666{ 1672{
1667 struct inode_security_struct *dsec, *isec; 1673 struct inode_security_struct *dsec, *isec;
1668 struct avc_audit_data ad; 1674 struct common_audit_data ad;
1669 u32 sid = current_sid(); 1675 u32 sid = current_sid();
1670 u32 av; 1676 u32 av;
1671 int rc; 1677 int rc;
@@ -1673,7 +1679,7 @@ static int may_link(struct inode *dir,
1673 dsec = dir->i_security; 1679 dsec = dir->i_security;
1674 isec = dentry->d_inode->i_security; 1680 isec = dentry->d_inode->i_security;
1675 1681
1676 AVC_AUDIT_DATA_INIT(&ad, FS); 1682 COMMON_AUDIT_DATA_INIT(&ad, FS);
1677 ad.u.fs.path.dentry = dentry; 1683 ad.u.fs.path.dentry = dentry;
1678 1684
1679 av = DIR__SEARCH; 1685 av = DIR__SEARCH;
@@ -1708,7 +1714,7 @@ static inline int may_rename(struct inode *old_dir,
1708 struct dentry *new_dentry) 1714 struct dentry *new_dentry)
1709{ 1715{
1710 struct inode_security_struct *old_dsec, *new_dsec, *old_isec, *new_isec; 1716 struct inode_security_struct *old_dsec, *new_dsec, *old_isec, *new_isec;
1711 struct avc_audit_data ad; 1717 struct common_audit_data ad;
1712 u32 sid = current_sid(); 1718 u32 sid = current_sid();
1713 u32 av; 1719 u32 av;
1714 int old_is_dir, new_is_dir; 1720 int old_is_dir, new_is_dir;
@@ -1719,7 +1725,7 @@ static inline int may_rename(struct inode *old_dir,
1719 old_is_dir = S_ISDIR(old_dentry->d_inode->i_mode); 1725 old_is_dir = S_ISDIR(old_dentry->d_inode->i_mode);
1720 new_dsec = new_dir->i_security; 1726 new_dsec = new_dir->i_security;
1721 1727
1722 AVC_AUDIT_DATA_INIT(&ad, FS); 1728 COMMON_AUDIT_DATA_INIT(&ad, FS);
1723 1729
1724 ad.u.fs.path.dentry = old_dentry; 1730 ad.u.fs.path.dentry = old_dentry;
1725 rc = avc_has_perm(sid, old_dsec->sid, SECCLASS_DIR, 1731 rc = avc_has_perm(sid, old_dsec->sid, SECCLASS_DIR,
@@ -1761,7 +1767,7 @@ static inline int may_rename(struct inode *old_dir,
1761static int superblock_has_perm(const struct cred *cred, 1767static int superblock_has_perm(const struct cred *cred,
1762 struct super_block *sb, 1768 struct super_block *sb,
1763 u32 perms, 1769 u32 perms,
1764 struct avc_audit_data *ad) 1770 struct common_audit_data *ad)
1765{ 1771{
1766 struct superblock_security_struct *sbsec; 1772 struct superblock_security_struct *sbsec;
1767 u32 sid = cred_sid(cred); 1773 u32 sid = cred_sid(cred);
@@ -1855,12 +1861,12 @@ static inline u32 open_file_to_av(struct file *file)
1855 1861
1856/* Hook functions begin here. */ 1862/* Hook functions begin here. */
1857 1863
1858static int selinux_ptrace_may_access(struct task_struct *child, 1864static int selinux_ptrace_access_check(struct task_struct *child,
1859 unsigned int mode) 1865 unsigned int mode)
1860{ 1866{
1861 int rc; 1867 int rc;
1862 1868
1863 rc = cap_ptrace_may_access(child, mode); 1869 rc = cap_ptrace_access_check(child, mode);
1864 if (rc) 1870 if (rc)
1865 return rc; 1871 return rc;
1866 1872
@@ -2101,7 +2107,7 @@ static int selinux_bprm_set_creds(struct linux_binprm *bprm)
2101 const struct task_security_struct *old_tsec; 2107 const struct task_security_struct *old_tsec;
2102 struct task_security_struct *new_tsec; 2108 struct task_security_struct *new_tsec;
2103 struct inode_security_struct *isec; 2109 struct inode_security_struct *isec;
2104 struct avc_audit_data ad; 2110 struct common_audit_data ad;
2105 struct inode *inode = bprm->file->f_path.dentry->d_inode; 2111 struct inode *inode = bprm->file->f_path.dentry->d_inode;
2106 int rc; 2112 int rc;
2107 2113
@@ -2139,7 +2145,7 @@ static int selinux_bprm_set_creds(struct linux_binprm *bprm)
2139 return rc; 2145 return rc;
2140 } 2146 }
2141 2147
2142 AVC_AUDIT_DATA_INIT(&ad, FS); 2148 COMMON_AUDIT_DATA_INIT(&ad, FS);
2143 ad.u.fs.path = bprm->file->f_path; 2149 ad.u.fs.path = bprm->file->f_path;
2144 2150
2145 if (bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID) 2151 if (bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)
@@ -2232,7 +2238,7 @@ extern struct dentry *selinux_null;
2232static inline void flush_unauthorized_files(const struct cred *cred, 2238static inline void flush_unauthorized_files(const struct cred *cred,
2233 struct files_struct *files) 2239 struct files_struct *files)
2234{ 2240{
2235 struct avc_audit_data ad; 2241 struct common_audit_data ad;
2236 struct file *file, *devnull = NULL; 2242 struct file *file, *devnull = NULL;
2237 struct tty_struct *tty; 2243 struct tty_struct *tty;
2238 struct fdtable *fdt; 2244 struct fdtable *fdt;
@@ -2266,7 +2272,7 @@ static inline void flush_unauthorized_files(const struct cred *cred,
2266 2272
2267 /* Revalidate access to inherited open files. */ 2273 /* Revalidate access to inherited open files. */
2268 2274
2269 AVC_AUDIT_DATA_INIT(&ad, FS); 2275 COMMON_AUDIT_DATA_INIT(&ad, FS);
2270 2276
2271 spin_lock(&files->file_lock); 2277 spin_lock(&files->file_lock);
2272 for (;;) { 2278 for (;;) {
@@ -2515,7 +2521,7 @@ out:
2515static int selinux_sb_kern_mount(struct super_block *sb, int flags, void *data) 2521static int selinux_sb_kern_mount(struct super_block *sb, int flags, void *data)
2516{ 2522{
2517 const struct cred *cred = current_cred(); 2523 const struct cred *cred = current_cred();
2518 struct avc_audit_data ad; 2524 struct common_audit_data ad;
2519 int rc; 2525 int rc;
2520 2526
2521 rc = superblock_doinit(sb, data); 2527 rc = superblock_doinit(sb, data);
@@ -2526,7 +2532,7 @@ static int selinux_sb_kern_mount(struct super_block *sb, int flags, void *data)
2526 if (flags & MS_KERNMOUNT) 2532 if (flags & MS_KERNMOUNT)
2527 return 0; 2533 return 0;
2528 2534
2529 AVC_AUDIT_DATA_INIT(&ad, FS); 2535 COMMON_AUDIT_DATA_INIT(&ad, FS);
2530 ad.u.fs.path.dentry = sb->s_root; 2536 ad.u.fs.path.dentry = sb->s_root;
2531 return superblock_has_perm(cred, sb, FILESYSTEM__MOUNT, &ad); 2537 return superblock_has_perm(cred, sb, FILESYSTEM__MOUNT, &ad);
2532} 2538}
@@ -2534,9 +2540,9 @@ static int selinux_sb_kern_mount(struct super_block *sb, int flags, void *data)
2534static int selinux_sb_statfs(struct dentry *dentry) 2540static int selinux_sb_statfs(struct dentry *dentry)
2535{ 2541{
2536 const struct cred *cred = current_cred(); 2542 const struct cred *cred = current_cred();
2537 struct avc_audit_data ad; 2543 struct common_audit_data ad;
2538 2544
2539 AVC_AUDIT_DATA_INIT(&ad, FS); 2545 COMMON_AUDIT_DATA_INIT(&ad, FS);
2540 ad.u.fs.path.dentry = dentry->d_sb->s_root; 2546 ad.u.fs.path.dentry = dentry->d_sb->s_root;
2541 return superblock_has_perm(cred, dentry->d_sb, FILESYSTEM__GETATTR, &ad); 2547 return superblock_has_perm(cred, dentry->d_sb, FILESYSTEM__GETATTR, &ad);
2542} 2548}
@@ -2711,12 +2717,18 @@ static int selinux_inode_permission(struct inode *inode, int mask)
2711static int selinux_inode_setattr(struct dentry *dentry, struct iattr *iattr) 2717static int selinux_inode_setattr(struct dentry *dentry, struct iattr *iattr)
2712{ 2718{
2713 const struct cred *cred = current_cred(); 2719 const struct cred *cred = current_cred();
2720 unsigned int ia_valid = iattr->ia_valid;
2721
2722 /* ATTR_FORCE is just used for ATTR_KILL_S[UG]ID. */
2723 if (ia_valid & ATTR_FORCE) {
2724 ia_valid &= ~(ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_MODE |
2725 ATTR_FORCE);
2726 if (!ia_valid)
2727 return 0;
2728 }
2714 2729
2715 if (iattr->ia_valid & ATTR_FORCE) 2730 if (ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID |
2716 return 0; 2731 ATTR_ATIME_SET | ATTR_MTIME_SET | ATTR_TIMES_SET))
2717
2718 if (iattr->ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID |
2719 ATTR_ATIME_SET | ATTR_MTIME_SET))
2720 return dentry_has_perm(cred, NULL, dentry, FILE__SETATTR); 2732 return dentry_has_perm(cred, NULL, dentry, FILE__SETATTR);
2721 2733
2722 return dentry_has_perm(cred, NULL, dentry, FILE__WRITE); 2734 return dentry_has_perm(cred, NULL, dentry, FILE__WRITE);
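
Previously any setattr carrying ATTR_FORCE skipped the SELinux check entirely; now only the kernel-initiated suid/sgid-kill bits are exempt, and any other attribute changed in the same call is still checked. The masking logic as a standalone sketch (illustrative only; the flag values here are arbitrary, not the real ATTR_* constants):

    #include <stdio.h>

    #define ATTR_MODE       0x001
    #define ATTR_UID        0x002
    #define ATTR_KILL_SUID  0x100
    #define ATTR_KILL_SGID  0x200
    #define ATTR_FORCE      0x400

    static int setattr_needs_check(unsigned int ia_valid)
    {
        if (ia_valid & ATTR_FORCE) {
            ia_valid &= ~(ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_MODE |
                          ATTR_FORCE);
            if (!ia_valid)
                return 0;       /* pure suid/sgid kill: no check needed */
        }
        return 1;               /* something else is changing: check it */
    }

    int main(void)
    {
        printf("%d\n", setattr_needs_check(ATTR_FORCE | ATTR_KILL_SUID | ATTR_MODE));
        printf("%d\n", setattr_needs_check(ATTR_FORCE | ATTR_UID));
        return 0;
    }
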
@@ -2756,7 +2768,7 @@ static int selinux_inode_setxattr(struct dentry *dentry, const char *name,
2756 struct inode *inode = dentry->d_inode; 2768 struct inode *inode = dentry->d_inode;
2757 struct inode_security_struct *isec = inode->i_security; 2769 struct inode_security_struct *isec = inode->i_security;
2758 struct superblock_security_struct *sbsec; 2770 struct superblock_security_struct *sbsec;
2759 struct avc_audit_data ad; 2771 struct common_audit_data ad;
2760 u32 newsid, sid = current_sid(); 2772 u32 newsid, sid = current_sid();
2761 int rc = 0; 2773 int rc = 0;
2762 2774
@@ -2770,7 +2782,7 @@ static int selinux_inode_setxattr(struct dentry *dentry, const char *name,
2770 if (!is_owner_or_cap(inode)) 2782 if (!is_owner_or_cap(inode))
2771 return -EPERM; 2783 return -EPERM;
2772 2784
2773 AVC_AUDIT_DATA_INIT(&ad, FS); 2785 COMMON_AUDIT_DATA_INIT(&ad, FS);
2774 ad.u.fs.path.dentry = dentry; 2786 ad.u.fs.path.dentry = dentry;
2775 2787
2776 rc = avc_has_perm(sid, isec->sid, isec->sclass, 2788 rc = avc_has_perm(sid, isec->sid, isec->sclass,
@@ -2915,6 +2927,7 @@ static int selinux_inode_setsecurity(struct inode *inode, const char *name,
2915 return rc; 2927 return rc;
2916 2928
2917 isec->sid = newsid; 2929 isec->sid = newsid;
2930 isec->initialized = 1;
2918 return 0; 2931 return 0;
2919} 2932}
2920 2933
@@ -2939,11 +2952,6 @@ static int selinux_revalidate_file_permission(struct file *file, int mask)
2939 const struct cred *cred = current_cred(); 2952 const struct cred *cred = current_cred();
2940 struct inode *inode = file->f_path.dentry->d_inode; 2953 struct inode *inode = file->f_path.dentry->d_inode;
2941 2954
2942 if (!mask) {
2943 /* No permission to check. Existence test. */
2944 return 0;
2945 }
2946
2947 /* file_mask_to_av won't add FILE__WRITE if MAY_APPEND is set */ 2955 /* file_mask_to_av won't add FILE__WRITE if MAY_APPEND is set */
2948 if ((file->f_flags & O_APPEND) && (mask & MAY_WRITE)) 2956 if ((file->f_flags & O_APPEND) && (mask & MAY_WRITE))
2949 mask |= MAY_APPEND; 2957 mask |= MAY_APPEND;
@@ -2954,10 +2962,20 @@ static int selinux_revalidate_file_permission(struct file *file, int mask)
2954 2962
2955static int selinux_file_permission(struct file *file, int mask) 2963static int selinux_file_permission(struct file *file, int mask)
2956{ 2964{
2965 struct inode *inode = file->f_path.dentry->d_inode;
2966 struct file_security_struct *fsec = file->f_security;
2967 struct inode_security_struct *isec = inode->i_security;
2968 u32 sid = current_sid();
2969
2957 if (!mask) 2970 if (!mask)
2958 /* No permission to check. Existence test. */ 2971 /* No permission to check. Existence test. */
2959 return 0; 2972 return 0;
2960 2973
2974 if (sid == fsec->sid && fsec->isid == isec->sid &&
2975 fsec->pseqno == avc_policy_seqno())
2976 /* No change since dentry_open check. */
2977 return 0;
2978
2961 return selinux_revalidate_file_permission(file, mask); 2979 return selinux_revalidate_file_permission(file, mask);
2962} 2980}
2963 2981
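
selinux_file_permission() gains a fast path: if the caller's SID, the inode SID recorded at open time and the AVC policy sequence number all match what was seen at dentry_open, the open-time check still stands and revalidation is skipped. A userspace model of the cached triple (illustrative only):

    #include <stdio.h>
    #include <stdbool.h>

    struct file_cache {
        int sid;        /* SID of the opener */
        int isid;       /* inode SID seen at open */
        int pseqno;     /* policy generation at open */
    };

    static bool needs_recheck(const struct file_cache *fc,
                              int cur_sid, int cur_isid, int cur_seqno)
    {
        return !(cur_sid == fc->sid &&
                 cur_isid == fc->isid &&
                 cur_seqno == fc->pseqno);
    }

    int main(void)
    {
        struct file_cache fc = { .sid = 1, .isid = 9, .pseqno = 3 };

        printf("recheck: %d\n", needs_recheck(&fc, 1, 9, 3));  /* 0: skip */
        printf("recheck: %d\n", needs_recheck(&fc, 1, 9, 4));  /* 1: policy reload */
        return 0;
    }
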
@@ -3220,12 +3238,29 @@ static int selinux_task_create(unsigned long clone_flags)
3220} 3238}
3221 3239
3222/* 3240/*
3241 * allocate the SELinux part of blank credentials
3242 */
3243static int selinux_cred_alloc_blank(struct cred *cred, gfp_t gfp)
3244{
3245 struct task_security_struct *tsec;
3246
3247 tsec = kzalloc(sizeof(struct task_security_struct), gfp);
3248 if (!tsec)
3249 return -ENOMEM;
3250
3251 cred->security = tsec;
3252 return 0;
3253}
3254
3255/*
3223 * detach and free the LSM part of a set of credentials 3256 * detach and free the LSM part of a set of credentials
3224 */ 3257 */
3225static void selinux_cred_free(struct cred *cred) 3258static void selinux_cred_free(struct cred *cred)
3226{ 3259{
3227 struct task_security_struct *tsec = cred->security; 3260 struct task_security_struct *tsec = cred->security;
3228 cred->security = NULL; 3261
3262 BUG_ON((unsigned long) cred->security < PAGE_SIZE);
3263 cred->security = (void *) 0x7UL;
3229 kfree(tsec); 3264 kfree(tsec);
3230} 3265}
3231 3266
@@ -3249,6 +3284,17 @@ static int selinux_cred_prepare(struct cred *new, const struct cred *old,
3249} 3284}
3250 3285
3251/* 3286/*
3287 * transfer the SELinux data to a blank set of creds
3288 */
3289static void selinux_cred_transfer(struct cred *new, const struct cred *old)
3290{
3291 const struct task_security_struct *old_tsec = old->security;
3292 struct task_security_struct *tsec = new->security;
3293
3294 *tsec = *old_tsec;
3295}
3296
3297/*
3252 * set the security data for a kernel service 3298 * set the security data for a kernel service
3253 * - all the creation contexts are set to unlabelled 3299 * - all the creation contexts are set to unlabelled
3254 */ 3300 */
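
selinux_cred_alloc_blank() and selinux_cred_transfer() form a pair: the blank call attaches an empty security blob while allocation is still convenient, and the transfer call later fills it by structure copy from a live cred, which is what the session-keyring handoff above relies on via security_transfer_creds(). A userspace model of the two phases (illustrative only; sec_blob and the helpers are stand-ins):

    #include <stdio.h>
    #include <stdlib.h>

    struct sec_blob { int sid; int exec_sid; };
    struct cred { struct sec_blob *security; };

    /* phase 1: attach an empty blob while we can still allocate */
    static int cred_alloc_blank(struct cred *c)
    {
        c->security = calloc(1, sizeof(*c->security));
        return c->security ? 0 : -1;
    }

    /* phase 2: fill it from a live cred, no allocation needed here */
    static void cred_transfer(struct cred *new, const struct cred *old)
    {
        *new->security = *old->security;
    }

    int main(void)
    {
        struct sec_blob live = { .sid = 42, .exec_sid = 7 };
        struct cred old = { &live }, new;

        if (cred_alloc_blank(&new))
            return 1;
        cred_transfer(&new, &old);
        printf("transferred sid=%d\n", new.security->sid);
        free(new.security);
        return 0;
    }
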
@@ -3292,6 +3338,11 @@ static int selinux_kernel_create_files_as(struct cred *new, struct inode *inode)
3292 return 0; 3338 return 0;
3293} 3339}
3294 3340
3341static int selinux_kernel_module_request(void)
3342{
3343 return task_has_system(current, SYSTEM__MODULE_REQUEST);
3344}
3345
3295static int selinux_task_setpgid(struct task_struct *p, pid_t pgid) 3346static int selinux_task_setpgid(struct task_struct *p, pid_t pgid)
3296{ 3347{
3297 return current_has_perm(p, PROCESS__SETPGID); 3348 return current_has_perm(p, PROCESS__SETPGID);
@@ -3409,7 +3460,7 @@ static void selinux_task_to_inode(struct task_struct *p,
3409 3460
3410/* Returns error only if unable to parse addresses */ 3461/* Returns error only if unable to parse addresses */
3411static int selinux_parse_skb_ipv4(struct sk_buff *skb, 3462static int selinux_parse_skb_ipv4(struct sk_buff *skb,
3412 struct avc_audit_data *ad, u8 *proto) 3463 struct common_audit_data *ad, u8 *proto)
3413{ 3464{
3414 int offset, ihlen, ret = -EINVAL; 3465 int offset, ihlen, ret = -EINVAL;
3415 struct iphdr _iph, *ih; 3466 struct iphdr _iph, *ih;
@@ -3490,7 +3541,7 @@ out:
3490 3541
3491/* Returns error only if unable to parse addresses */ 3542/* Returns error only if unable to parse addresses */
3492static int selinux_parse_skb_ipv6(struct sk_buff *skb, 3543static int selinux_parse_skb_ipv6(struct sk_buff *skb,
3493 struct avc_audit_data *ad, u8 *proto) 3544 struct common_audit_data *ad, u8 *proto)
3494{ 3545{
3495 u8 nexthdr; 3546 u8 nexthdr;
3496 int ret = -EINVAL, offset; 3547 int ret = -EINVAL, offset;
@@ -3561,7 +3612,7 @@ out:
3561 3612
3562#endif /* IPV6 */ 3613#endif /* IPV6 */
3563 3614
3564static int selinux_parse_skb(struct sk_buff *skb, struct avc_audit_data *ad, 3615static int selinux_parse_skb(struct sk_buff *skb, struct common_audit_data *ad,
3565 char **_addrp, int src, u8 *proto) 3616 char **_addrp, int src, u8 *proto)
3566{ 3617{
3567 char *addrp; 3618 char *addrp;
@@ -3643,7 +3694,7 @@ static int socket_has_perm(struct task_struct *task, struct socket *sock,
3643 u32 perms) 3694 u32 perms)
3644{ 3695{
3645 struct inode_security_struct *isec; 3696 struct inode_security_struct *isec;
3646 struct avc_audit_data ad; 3697 struct common_audit_data ad;
3647 u32 sid; 3698 u32 sid;
3648 int err = 0; 3699 int err = 0;
3649 3700
@@ -3653,7 +3704,7 @@ static int socket_has_perm(struct task_struct *task, struct socket *sock,
3653 goto out; 3704 goto out;
3654 sid = task_sid(task); 3705 sid = task_sid(task);
3655 3706
3656 AVC_AUDIT_DATA_INIT(&ad, NET); 3707 COMMON_AUDIT_DATA_INIT(&ad, NET);
3657 ad.u.net.sk = sock->sk; 3708 ad.u.net.sk = sock->sk;
3658 err = avc_has_perm(sid, isec->sid, isec->sclass, perms, &ad); 3709 err = avc_has_perm(sid, isec->sid, isec->sclass, perms, &ad);
3659 3710
@@ -3740,7 +3791,7 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in
3740 if (family == PF_INET || family == PF_INET6) { 3791 if (family == PF_INET || family == PF_INET6) {
3741 char *addrp; 3792 char *addrp;
3742 struct inode_security_struct *isec; 3793 struct inode_security_struct *isec;
3743 struct avc_audit_data ad; 3794 struct common_audit_data ad;
3744 struct sockaddr_in *addr4 = NULL; 3795 struct sockaddr_in *addr4 = NULL;
3745 struct sockaddr_in6 *addr6 = NULL; 3796 struct sockaddr_in6 *addr6 = NULL;
3746 unsigned short snum; 3797 unsigned short snum;
@@ -3769,7 +3820,7 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in
3769 snum, &sid); 3820 snum, &sid);
3770 if (err) 3821 if (err)
3771 goto out; 3822 goto out;
3772 AVC_AUDIT_DATA_INIT(&ad, NET); 3823 COMMON_AUDIT_DATA_INIT(&ad, NET);
3773 ad.u.net.sport = htons(snum); 3824 ad.u.net.sport = htons(snum);
3774 ad.u.net.family = family; 3825 ad.u.net.family = family;
3775 err = avc_has_perm(isec->sid, sid, 3826 err = avc_has_perm(isec->sid, sid,
@@ -3802,7 +3853,7 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in
3802 if (err) 3853 if (err)
3803 goto out; 3854 goto out;
3804 3855
3805 AVC_AUDIT_DATA_INIT(&ad, NET); 3856 COMMON_AUDIT_DATA_INIT(&ad, NET);
3806 ad.u.net.sport = htons(snum); 3857 ad.u.net.sport = htons(snum);
3807 ad.u.net.family = family; 3858 ad.u.net.family = family;
3808 3859
@@ -3836,7 +3887,7 @@ static int selinux_socket_connect(struct socket *sock, struct sockaddr *address,
3836 isec = SOCK_INODE(sock)->i_security; 3887 isec = SOCK_INODE(sock)->i_security;
3837 if (isec->sclass == SECCLASS_TCP_SOCKET || 3888 if (isec->sclass == SECCLASS_TCP_SOCKET ||
3838 isec->sclass == SECCLASS_DCCP_SOCKET) { 3889 isec->sclass == SECCLASS_DCCP_SOCKET) {
3839 struct avc_audit_data ad; 3890 struct common_audit_data ad;
3840 struct sockaddr_in *addr4 = NULL; 3891 struct sockaddr_in *addr4 = NULL;
3841 struct sockaddr_in6 *addr6 = NULL; 3892 struct sockaddr_in6 *addr6 = NULL;
3842 unsigned short snum; 3893 unsigned short snum;
@@ -3861,7 +3912,7 @@ static int selinux_socket_connect(struct socket *sock, struct sockaddr *address,
3861 perm = (isec->sclass == SECCLASS_TCP_SOCKET) ? 3912 perm = (isec->sclass == SECCLASS_TCP_SOCKET) ?
3862 TCP_SOCKET__NAME_CONNECT : DCCP_SOCKET__NAME_CONNECT; 3913 TCP_SOCKET__NAME_CONNECT : DCCP_SOCKET__NAME_CONNECT;
3863 3914
3864 AVC_AUDIT_DATA_INIT(&ad, NET); 3915 COMMON_AUDIT_DATA_INIT(&ad, NET);
3865 ad.u.net.dport = htons(snum); 3916 ad.u.net.dport = htons(snum);
3866 ad.u.net.family = sk->sk_family; 3917 ad.u.net.family = sk->sk_family;
3867 err = avc_has_perm(isec->sid, sid, isec->sclass, perm, &ad); 3918 err = avc_has_perm(isec->sid, sid, isec->sclass, perm, &ad);
@@ -3951,13 +4002,13 @@ static int selinux_socket_unix_stream_connect(struct socket *sock,
3951 struct sk_security_struct *ssec; 4002 struct sk_security_struct *ssec;
3952 struct inode_security_struct *isec; 4003 struct inode_security_struct *isec;
3953 struct inode_security_struct *other_isec; 4004 struct inode_security_struct *other_isec;
3954 struct avc_audit_data ad; 4005 struct common_audit_data ad;
3955 int err; 4006 int err;
3956 4007
3957 isec = SOCK_INODE(sock)->i_security; 4008 isec = SOCK_INODE(sock)->i_security;
3958 other_isec = SOCK_INODE(other)->i_security; 4009 other_isec = SOCK_INODE(other)->i_security;
3959 4010
3960 AVC_AUDIT_DATA_INIT(&ad, NET); 4011 COMMON_AUDIT_DATA_INIT(&ad, NET);
3961 ad.u.net.sk = other->sk; 4012 ad.u.net.sk = other->sk;
3962 4013
3963 err = avc_has_perm(isec->sid, other_isec->sid, 4014 err = avc_has_perm(isec->sid, other_isec->sid,
@@ -3983,13 +4034,13 @@ static int selinux_socket_unix_may_send(struct socket *sock,
3983{ 4034{
3984 struct inode_security_struct *isec; 4035 struct inode_security_struct *isec;
3985 struct inode_security_struct *other_isec; 4036 struct inode_security_struct *other_isec;
3986 struct avc_audit_data ad; 4037 struct common_audit_data ad;
3987 int err; 4038 int err;
3988 4039
3989 isec = SOCK_INODE(sock)->i_security; 4040 isec = SOCK_INODE(sock)->i_security;
3990 other_isec = SOCK_INODE(other)->i_security; 4041 other_isec = SOCK_INODE(other)->i_security;
3991 4042
3992 AVC_AUDIT_DATA_INIT(&ad, NET); 4043 COMMON_AUDIT_DATA_INIT(&ad, NET);
3993 ad.u.net.sk = other->sk; 4044 ad.u.net.sk = other->sk;
3994 4045
3995 err = avc_has_perm(isec->sid, other_isec->sid, 4046 err = avc_has_perm(isec->sid, other_isec->sid,
@@ -4002,7 +4053,7 @@ static int selinux_socket_unix_may_send(struct socket *sock,
4002 4053
4003static int selinux_inet_sys_rcv_skb(int ifindex, char *addrp, u16 family, 4054static int selinux_inet_sys_rcv_skb(int ifindex, char *addrp, u16 family,
4004 u32 peer_sid, 4055 u32 peer_sid,
4005 struct avc_audit_data *ad) 4056 struct common_audit_data *ad)
4006{ 4057{
4007 int err; 4058 int err;
4008 u32 if_sid; 4059 u32 if_sid;
@@ -4030,10 +4081,10 @@ static int selinux_sock_rcv_skb_compat(struct sock *sk, struct sk_buff *skb,
4030 struct sk_security_struct *sksec = sk->sk_security; 4081 struct sk_security_struct *sksec = sk->sk_security;
4031 u32 peer_sid; 4082 u32 peer_sid;
4032 u32 sk_sid = sksec->sid; 4083 u32 sk_sid = sksec->sid;
4033 struct avc_audit_data ad; 4084 struct common_audit_data ad;
4034 char *addrp; 4085 char *addrp;
4035 4086
4036 AVC_AUDIT_DATA_INIT(&ad, NET); 4087 COMMON_AUDIT_DATA_INIT(&ad, NET);
4037 ad.u.net.netif = skb->iif; 4088 ad.u.net.netif = skb->iif;
4038 ad.u.net.family = family; 4089 ad.u.net.family = family;
4039 err = selinux_parse_skb(skb, &ad, &addrp, 1, NULL); 4090 err = selinux_parse_skb(skb, &ad, &addrp, 1, NULL);
@@ -4071,7 +4122,7 @@ static int selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
4071 struct sk_security_struct *sksec = sk->sk_security; 4122 struct sk_security_struct *sksec = sk->sk_security;
4072 u16 family = sk->sk_family; 4123 u16 family = sk->sk_family;
4073 u32 sk_sid = sksec->sid; 4124 u32 sk_sid = sksec->sid;
4074 struct avc_audit_data ad; 4125 struct common_audit_data ad;
4075 char *addrp; 4126 char *addrp;
4076 u8 secmark_active; 4127 u8 secmark_active;
4077 u8 peerlbl_active; 4128 u8 peerlbl_active;
@@ -4095,7 +4146,7 @@ static int selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
4095 if (!secmark_active && !peerlbl_active) 4146 if (!secmark_active && !peerlbl_active)
4096 return 0; 4147 return 0;
4097 4148
4098 AVC_AUDIT_DATA_INIT(&ad, NET); 4149 COMMON_AUDIT_DATA_INIT(&ad, NET);
4099 ad.u.net.netif = skb->iif; 4150 ad.u.net.netif = skb->iif;
4100 ad.u.net.family = family; 4151 ad.u.net.family = family;
4101 err = selinux_parse_skb(skb, &ad, &addrp, 1, NULL); 4152 err = selinux_parse_skb(skb, &ad, &addrp, 1, NULL);
@@ -4309,6 +4360,59 @@ static void selinux_req_classify_flow(const struct request_sock *req,
4309 fl->secid = req->secid; 4360 fl->secid = req->secid;
4310} 4361}
4311 4362
4363static int selinux_tun_dev_create(void)
4364{
4365 u32 sid = current_sid();
4366
4367 /* we aren't taking into account the "sockcreate" SID since the socket
4368 * that is being created here is not a socket in the traditional sense,
4369 * instead it is a private sock, accessible only to the kernel, and
4370 * representing a wide range of network traffic spanning multiple
4371 * connections unlike traditional sockets - check the TUN driver to
4372 * get a better understanding of why this socket is special */
4373
4374 return avc_has_perm(sid, sid, SECCLASS_TUN_SOCKET, TUN_SOCKET__CREATE,
4375 NULL);
4376}
4377
4378static void selinux_tun_dev_post_create(struct sock *sk)
4379{
4380 struct sk_security_struct *sksec = sk->sk_security;
4381
4382 /* we don't currently perform any NetLabel based labeling here and it
4383 * isn't clear that we would want to do so anyway; while we could apply
4384 * labeling without the support of the TUN user the resulting labeled
4385 * traffic from the other end of the connection would almost certainly
4386 * cause confusion to the TUN user that had no idea network labeling
4387 * protocols were being used */
4388
4389 /* see the comments in selinux_tun_dev_create() about why we don't use
4390 * the sockcreate SID here */
4391
4392 sksec->sid = current_sid();
4393 sksec->sclass = SECCLASS_TUN_SOCKET;
4394}
4395
4396static int selinux_tun_dev_attach(struct sock *sk)
4397{
4398 struct sk_security_struct *sksec = sk->sk_security;
4399 u32 sid = current_sid();
4400 int err;
4401
4402 err = avc_has_perm(sid, sksec->sid, SECCLASS_TUN_SOCKET,
4403 TUN_SOCKET__RELABELFROM, NULL);
4404 if (err)
4405 return err;
4406 err = avc_has_perm(sid, sid, SECCLASS_TUN_SOCKET,
4407 TUN_SOCKET__RELABELTO, NULL);
4408 if (err)
4409 return err;
4410
4411 sksec->sid = sid;
4412
4413 return 0;
4414}
4415
4312static int selinux_nlmsg_perm(struct sock *sk, struct sk_buff *skb) 4416static int selinux_nlmsg_perm(struct sock *sk, struct sk_buff *skb)
4313{ 4417{
4314 int err = 0; 4418 int err = 0;
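
The three TUN hooks label the driver's private sock with the creating task's SID (deliberately ignoring the sockcreate SID, as the comments explain) and turn attaching to a persistent device into a relabel operation: the attaching task needs relabelfrom against the sock's current label and relabelto against its own before the label is switched. Reduced to a userspace model (illustrative only; allowed() stands in for avc_has_perm()):

    #include <stdio.h>
    #include <stdbool.h>

    static bool allowed(int ssid, int tsid, const char *perm)
    {
        /* stand-in for avc_has_perm(); always grants in this model */
        printf("check %s: %d -> %d\n", perm, ssid, tsid);
        return true;
    }

    static int tun_dev_attach(int *sock_sid, int caller_sid)
    {
        if (!allowed(caller_sid, *sock_sid, "relabelfrom"))
            return -1;
        if (!allowed(caller_sid, caller_sid, "relabelto"))
            return -1;
        *sock_sid = caller_sid;     /* label changes only after both checks pass */
        return 0;
    }

    int main(void)
    {
        int sock_sid = 42;

        if (!tun_dev_attach(&sock_sid, 7))
            printf("sock relabelled to %d\n", sock_sid);
        return 0;
    }
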
@@ -4353,7 +4457,7 @@ static unsigned int selinux_ip_forward(struct sk_buff *skb, int ifindex,
4353 int err; 4457 int err;
4354 char *addrp; 4458 char *addrp;
4355 u32 peer_sid; 4459 u32 peer_sid;
4356 struct avc_audit_data ad; 4460 struct common_audit_data ad;
4357 u8 secmark_active; 4461 u8 secmark_active;
4358 u8 netlbl_active; 4462 u8 netlbl_active;
4359 u8 peerlbl_active; 4463 u8 peerlbl_active;
@@ -4370,7 +4474,7 @@ static unsigned int selinux_ip_forward(struct sk_buff *skb, int ifindex,
4370 if (selinux_skb_peerlbl_sid(skb, family, &peer_sid) != 0) 4474 if (selinux_skb_peerlbl_sid(skb, family, &peer_sid) != 0)
4371 return NF_DROP; 4475 return NF_DROP;
4372 4476
4373 AVC_AUDIT_DATA_INIT(&ad, NET); 4477 COMMON_AUDIT_DATA_INIT(&ad, NET);
4374 ad.u.net.netif = ifindex; 4478 ad.u.net.netif = ifindex;
4375 ad.u.net.family = family; 4479 ad.u.net.family = family;
4376 if (selinux_parse_skb(skb, &ad, &addrp, 1, NULL) != 0) 4480 if (selinux_parse_skb(skb, &ad, &addrp, 1, NULL) != 0)
@@ -4458,7 +4562,7 @@ static unsigned int selinux_ip_postroute_compat(struct sk_buff *skb,
4458{ 4562{
4459 struct sock *sk = skb->sk; 4563 struct sock *sk = skb->sk;
4460 struct sk_security_struct *sksec; 4564 struct sk_security_struct *sksec;
4461 struct avc_audit_data ad; 4565 struct common_audit_data ad;
4462 char *addrp; 4566 char *addrp;
4463 u8 proto; 4567 u8 proto;
4464 4568
@@ -4466,7 +4570,7 @@ static unsigned int selinux_ip_postroute_compat(struct sk_buff *skb,
4466 return NF_ACCEPT; 4570 return NF_ACCEPT;
4467 sksec = sk->sk_security; 4571 sksec = sk->sk_security;
4468 4572
4469 AVC_AUDIT_DATA_INIT(&ad, NET); 4573 COMMON_AUDIT_DATA_INIT(&ad, NET);
4470 ad.u.net.netif = ifindex; 4574 ad.u.net.netif = ifindex;
4471 ad.u.net.family = family; 4575 ad.u.net.family = family;
4472 if (selinux_parse_skb(skb, &ad, &addrp, 0, &proto)) 4576 if (selinux_parse_skb(skb, &ad, &addrp, 0, &proto))
@@ -4490,7 +4594,7 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
4490 u32 secmark_perm; 4594 u32 secmark_perm;
4491 u32 peer_sid; 4595 u32 peer_sid;
4492 struct sock *sk; 4596 struct sock *sk;
4493 struct avc_audit_data ad; 4597 struct common_audit_data ad;
4494 char *addrp; 4598 char *addrp;
4495 u8 secmark_active; 4599 u8 secmark_active;
4496 u8 peerlbl_active; 4600 u8 peerlbl_active;
@@ -4549,7 +4653,7 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
4549 secmark_perm = PACKET__SEND; 4653 secmark_perm = PACKET__SEND;
4550 } 4654 }
4551 4655
4552 AVC_AUDIT_DATA_INIT(&ad, NET); 4656 COMMON_AUDIT_DATA_INIT(&ad, NET);
4553 ad.u.net.netif = ifindex; 4657 ad.u.net.netif = ifindex;
4554 ad.u.net.family = family; 4658 ad.u.net.family = family;
4555 if (selinux_parse_skb(skb, &ad, &addrp, 0, NULL)) 4659 if (selinux_parse_skb(skb, &ad, &addrp, 0, NULL))
@@ -4619,13 +4723,13 @@ static int selinux_netlink_send(struct sock *sk, struct sk_buff *skb)
4619static int selinux_netlink_recv(struct sk_buff *skb, int capability) 4723static int selinux_netlink_recv(struct sk_buff *skb, int capability)
4620{ 4724{
4621 int err; 4725 int err;
4622 struct avc_audit_data ad; 4726 struct common_audit_data ad;
4623 4727
4624 err = cap_netlink_recv(skb, capability); 4728 err = cap_netlink_recv(skb, capability);
4625 if (err) 4729 if (err)
4626 return err; 4730 return err;
4627 4731
4628 AVC_AUDIT_DATA_INIT(&ad, CAP); 4732 COMMON_AUDIT_DATA_INIT(&ad, CAP);
4629 ad.u.cap = capability; 4733 ad.u.cap = capability;
4630 4734
4631 return avc_has_perm(NETLINK_CB(skb).sid, NETLINK_CB(skb).sid, 4735 return avc_has_perm(NETLINK_CB(skb).sid, NETLINK_CB(skb).sid,
@@ -4684,12 +4788,12 @@ static int ipc_has_perm(struct kern_ipc_perm *ipc_perms,
4684 u32 perms) 4788 u32 perms)
4685{ 4789{
4686 struct ipc_security_struct *isec; 4790 struct ipc_security_struct *isec;
4687 struct avc_audit_data ad; 4791 struct common_audit_data ad;
4688 u32 sid = current_sid(); 4792 u32 sid = current_sid();
4689 4793
4690 isec = ipc_perms->security; 4794 isec = ipc_perms->security;
4691 4795
4692 AVC_AUDIT_DATA_INIT(&ad, IPC); 4796 COMMON_AUDIT_DATA_INIT(&ad, IPC);
4693 ad.u.ipc_id = ipc_perms->key; 4797 ad.u.ipc_id = ipc_perms->key;
4694 4798
4695 return avc_has_perm(sid, isec->sid, isec->sclass, perms, &ad); 4799 return avc_has_perm(sid, isec->sid, isec->sclass, perms, &ad);
@@ -4709,7 +4813,7 @@ static void selinux_msg_msg_free_security(struct msg_msg *msg)
4709static int selinux_msg_queue_alloc_security(struct msg_queue *msq) 4813static int selinux_msg_queue_alloc_security(struct msg_queue *msq)
4710{ 4814{
4711 struct ipc_security_struct *isec; 4815 struct ipc_security_struct *isec;
4712 struct avc_audit_data ad; 4816 struct common_audit_data ad;
4713 u32 sid = current_sid(); 4817 u32 sid = current_sid();
4714 int rc; 4818 int rc;
4715 4819
@@ -4719,7 +4823,7 @@ static int selinux_msg_queue_alloc_security(struct msg_queue *msq)
4719 4823
4720 isec = msq->q_perm.security; 4824 isec = msq->q_perm.security;
4721 4825
4722 AVC_AUDIT_DATA_INIT(&ad, IPC); 4826 COMMON_AUDIT_DATA_INIT(&ad, IPC);
4723 ad.u.ipc_id = msq->q_perm.key; 4827 ad.u.ipc_id = msq->q_perm.key;
4724 4828
4725 rc = avc_has_perm(sid, isec->sid, SECCLASS_MSGQ, 4829 rc = avc_has_perm(sid, isec->sid, SECCLASS_MSGQ,
@@ -4739,12 +4843,12 @@ static void selinux_msg_queue_free_security(struct msg_queue *msq)
4739static int selinux_msg_queue_associate(struct msg_queue *msq, int msqflg) 4843static int selinux_msg_queue_associate(struct msg_queue *msq, int msqflg)
4740{ 4844{
4741 struct ipc_security_struct *isec; 4845 struct ipc_security_struct *isec;
4742 struct avc_audit_data ad; 4846 struct common_audit_data ad;
4743 u32 sid = current_sid(); 4847 u32 sid = current_sid();
4744 4848
4745 isec = msq->q_perm.security; 4849 isec = msq->q_perm.security;
4746 4850
4747 AVC_AUDIT_DATA_INIT(&ad, IPC); 4851 COMMON_AUDIT_DATA_INIT(&ad, IPC);
4748 ad.u.ipc_id = msq->q_perm.key; 4852 ad.u.ipc_id = msq->q_perm.key;
4749 4853
4750 return avc_has_perm(sid, isec->sid, SECCLASS_MSGQ, 4854 return avc_has_perm(sid, isec->sid, SECCLASS_MSGQ,
@@ -4783,7 +4887,7 @@ static int selinux_msg_queue_msgsnd(struct msg_queue *msq, struct msg_msg *msg,
4783{ 4887{
4784 struct ipc_security_struct *isec; 4888 struct ipc_security_struct *isec;
4785 struct msg_security_struct *msec; 4889 struct msg_security_struct *msec;
4786 struct avc_audit_data ad; 4890 struct common_audit_data ad;
4787 u32 sid = current_sid(); 4891 u32 sid = current_sid();
4788 int rc; 4892 int rc;
4789 4893
@@ -4804,7 +4908,7 @@ static int selinux_msg_queue_msgsnd(struct msg_queue *msq, struct msg_msg *msg,
4804 return rc; 4908 return rc;
4805 } 4909 }
4806 4910
4807 AVC_AUDIT_DATA_INIT(&ad, IPC); 4911 COMMON_AUDIT_DATA_INIT(&ad, IPC);
4808 ad.u.ipc_id = msq->q_perm.key; 4912 ad.u.ipc_id = msq->q_perm.key;
4809 4913
4810 /* Can this process write to the queue? */ 4914 /* Can this process write to the queue? */
@@ -4828,14 +4932,14 @@ static int selinux_msg_queue_msgrcv(struct msg_queue *msq, struct msg_msg *msg,
4828{ 4932{
4829 struct ipc_security_struct *isec; 4933 struct ipc_security_struct *isec;
4830 struct msg_security_struct *msec; 4934 struct msg_security_struct *msec;
4831 struct avc_audit_data ad; 4935 struct common_audit_data ad;
4832 u32 sid = task_sid(target); 4936 u32 sid = task_sid(target);
4833 int rc; 4937 int rc;
4834 4938
4835 isec = msq->q_perm.security; 4939 isec = msq->q_perm.security;
4836 msec = msg->security; 4940 msec = msg->security;
4837 4941
4838 AVC_AUDIT_DATA_INIT(&ad, IPC); 4942 COMMON_AUDIT_DATA_INIT(&ad, IPC);
4839 ad.u.ipc_id = msq->q_perm.key; 4943 ad.u.ipc_id = msq->q_perm.key;
4840 4944
4841 rc = avc_has_perm(sid, isec->sid, 4945 rc = avc_has_perm(sid, isec->sid,
@@ -4850,7 +4954,7 @@ static int selinux_msg_queue_msgrcv(struct msg_queue *msq, struct msg_msg *msg,
4850static int selinux_shm_alloc_security(struct shmid_kernel *shp) 4954static int selinux_shm_alloc_security(struct shmid_kernel *shp)
4851{ 4955{
4852 struct ipc_security_struct *isec; 4956 struct ipc_security_struct *isec;
4853 struct avc_audit_data ad; 4957 struct common_audit_data ad;
4854 u32 sid = current_sid(); 4958 u32 sid = current_sid();
4855 int rc; 4959 int rc;
4856 4960
@@ -4860,7 +4964,7 @@ static int selinux_shm_alloc_security(struct shmid_kernel *shp)
4860 4964
4861 isec = shp->shm_perm.security; 4965 isec = shp->shm_perm.security;
4862 4966
4863 AVC_AUDIT_DATA_INIT(&ad, IPC); 4967 COMMON_AUDIT_DATA_INIT(&ad, IPC);
4864 ad.u.ipc_id = shp->shm_perm.key; 4968 ad.u.ipc_id = shp->shm_perm.key;
4865 4969
4866 rc = avc_has_perm(sid, isec->sid, SECCLASS_SHM, 4970 rc = avc_has_perm(sid, isec->sid, SECCLASS_SHM,
@@ -4880,12 +4984,12 @@ static void selinux_shm_free_security(struct shmid_kernel *shp)
4880static int selinux_shm_associate(struct shmid_kernel *shp, int shmflg) 4984static int selinux_shm_associate(struct shmid_kernel *shp, int shmflg)
4881{ 4985{
4882 struct ipc_security_struct *isec; 4986 struct ipc_security_struct *isec;
4883 struct avc_audit_data ad; 4987 struct common_audit_data ad;
4884 u32 sid = current_sid(); 4988 u32 sid = current_sid();
4885 4989
4886 isec = shp->shm_perm.security; 4990 isec = shp->shm_perm.security;
4887 4991
4888 AVC_AUDIT_DATA_INIT(&ad, IPC); 4992 COMMON_AUDIT_DATA_INIT(&ad, IPC);
4889 ad.u.ipc_id = shp->shm_perm.key; 4993 ad.u.ipc_id = shp->shm_perm.key;
4890 4994
4891 return avc_has_perm(sid, isec->sid, SECCLASS_SHM, 4995 return avc_has_perm(sid, isec->sid, SECCLASS_SHM,
@@ -4942,7 +5046,7 @@ static int selinux_shm_shmat(struct shmid_kernel *shp,
4942static int selinux_sem_alloc_security(struct sem_array *sma) 5046static int selinux_sem_alloc_security(struct sem_array *sma)
4943{ 5047{
4944 struct ipc_security_struct *isec; 5048 struct ipc_security_struct *isec;
4945 struct avc_audit_data ad; 5049 struct common_audit_data ad;
4946 u32 sid = current_sid(); 5050 u32 sid = current_sid();
4947 int rc; 5051 int rc;
4948 5052
@@ -4952,7 +5056,7 @@ static int selinux_sem_alloc_security(struct sem_array *sma)
4952 5056
4953 isec = sma->sem_perm.security; 5057 isec = sma->sem_perm.security;
4954 5058
4955 AVC_AUDIT_DATA_INIT(&ad, IPC); 5059 COMMON_AUDIT_DATA_INIT(&ad, IPC);
4956 ad.u.ipc_id = sma->sem_perm.key; 5060 ad.u.ipc_id = sma->sem_perm.key;
4957 5061
4958 rc = avc_has_perm(sid, isec->sid, SECCLASS_SEM, 5062 rc = avc_has_perm(sid, isec->sid, SECCLASS_SEM,
@@ -4972,12 +5076,12 @@ static void selinux_sem_free_security(struct sem_array *sma)
4972static int selinux_sem_associate(struct sem_array *sma, int semflg) 5076static int selinux_sem_associate(struct sem_array *sma, int semflg)
4973{ 5077{
4974 struct ipc_security_struct *isec; 5078 struct ipc_security_struct *isec;
4975 struct avc_audit_data ad; 5079 struct common_audit_data ad;
4976 u32 sid = current_sid(); 5080 u32 sid = current_sid();
4977 5081
4978 isec = sma->sem_perm.security; 5082 isec = sma->sem_perm.security;
4979 5083
4980 AVC_AUDIT_DATA_INIT(&ad, IPC); 5084 COMMON_AUDIT_DATA_INIT(&ad, IPC);
4981 ad.u.ipc_id = sma->sem_perm.key; 5085 ad.u.ipc_id = sma->sem_perm.key;
4982 5086
4983 return avc_has_perm(sid, isec->sid, SECCLASS_SEM, 5087 return avc_has_perm(sid, isec->sid, SECCLASS_SEM,
@@ -5195,7 +5299,7 @@ static int selinux_setprocattr(struct task_struct *p,
5195 5299
5196 /* Only allow single threaded processes to change context */ 5300 /* Only allow single threaded processes to change context */
5197 error = -EPERM; 5301 error = -EPERM;
5198 if (!is_single_threaded(p)) { 5302 if (!current_is_single_threaded()) {
5199 error = security_bounded_transition(tsec->sid, sid); 5303 error = security_bounded_transition(tsec->sid, sid);
5200 if (error) 5304 if (error)
5201 goto abort_change; 5305 goto abort_change;
@@ -5252,6 +5356,32 @@ static void selinux_release_secctx(char *secdata, u32 seclen)
5252 kfree(secdata); 5356 kfree(secdata);
5253} 5357}
5254 5358
5359/*
5360 * called with inode->i_mutex locked
5361 */
5362static int selinux_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen)
5363{
5364 return selinux_inode_setsecurity(inode, XATTR_SELINUX_SUFFIX, ctx, ctxlen, 0);
5365}
5366
5367/*
5368 * called with inode->i_mutex locked
5369 */
5370static int selinux_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen)
5371{
5372 return __vfs_setxattr_noperm(dentry, XATTR_NAME_SELINUX, ctx, ctxlen, 0);
5373}
5374
5375static int selinux_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
5376{
5377 int len = 0;
5378 len = selinux_inode_getsecurity(inode, XATTR_SELINUX_SUFFIX,
5379 ctx, true);
5380 if (len < 0)
5381 return len;
5382 *ctxlen = len;
5383 return 0;
5384}
5255#ifdef CONFIG_KEYS 5385#ifdef CONFIG_KEYS
5256 5386
5257static int selinux_key_alloc(struct key *k, const struct cred *cred, 5387static int selinux_key_alloc(struct key *k, const struct cred *cred,
@@ -5323,7 +5453,7 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
5323static struct security_operations selinux_ops = { 5453static struct security_operations selinux_ops = {
5324 .name = "selinux", 5454 .name = "selinux",
5325 5455
5326 .ptrace_may_access = selinux_ptrace_may_access, 5456 .ptrace_access_check = selinux_ptrace_access_check,
5327 .ptrace_traceme = selinux_ptrace_traceme, 5457 .ptrace_traceme = selinux_ptrace_traceme,
5328 .capget = selinux_capget, 5458 .capget = selinux_capget,
5329 .capset = selinux_capset, 5459 .capset = selinux_capset,
@@ -5396,10 +5526,13 @@ static struct security_operations selinux_ops = {
5396 .dentry_open = selinux_dentry_open, 5526 .dentry_open = selinux_dentry_open,
5397 5527
5398 .task_create = selinux_task_create, 5528 .task_create = selinux_task_create,
5529 .cred_alloc_blank = selinux_cred_alloc_blank,
5399 .cred_free = selinux_cred_free, 5530 .cred_free = selinux_cred_free,
5400 .cred_prepare = selinux_cred_prepare, 5531 .cred_prepare = selinux_cred_prepare,
5532 .cred_transfer = selinux_cred_transfer,
5401 .kernel_act_as = selinux_kernel_act_as, 5533 .kernel_act_as = selinux_kernel_act_as,
5402 .kernel_create_files_as = selinux_kernel_create_files_as, 5534 .kernel_create_files_as = selinux_kernel_create_files_as,
5535 .kernel_module_request = selinux_kernel_module_request,
5403 .task_setpgid = selinux_task_setpgid, 5536 .task_setpgid = selinux_task_setpgid,
5404 .task_getpgid = selinux_task_getpgid, 5537 .task_getpgid = selinux_task_getpgid,
5405 .task_getsid = selinux_task_getsid, 5538 .task_getsid = selinux_task_getsid,
@@ -5448,6 +5581,9 @@ static struct security_operations selinux_ops = {
5448 .secid_to_secctx = selinux_secid_to_secctx, 5581 .secid_to_secctx = selinux_secid_to_secctx,
5449 .secctx_to_secid = selinux_secctx_to_secid, 5582 .secctx_to_secid = selinux_secctx_to_secid,
5450 .release_secctx = selinux_release_secctx, 5583 .release_secctx = selinux_release_secctx,
5584 .inode_notifysecctx = selinux_inode_notifysecctx,
5585 .inode_setsecctx = selinux_inode_setsecctx,
5586 .inode_getsecctx = selinux_inode_getsecctx,
5451 5587
5452 .unix_stream_connect = selinux_socket_unix_stream_connect, 5588 .unix_stream_connect = selinux_socket_unix_stream_connect,
5453 .unix_may_send = selinux_socket_unix_may_send, 5589 .unix_may_send = selinux_socket_unix_may_send,
@@ -5477,6 +5613,9 @@ static struct security_operations selinux_ops = {
5477 .inet_csk_clone = selinux_inet_csk_clone, 5613 .inet_csk_clone = selinux_inet_csk_clone,
5478 .inet_conn_established = selinux_inet_conn_established, 5614 .inet_conn_established = selinux_inet_conn_established,
5479 .req_classify_flow = selinux_req_classify_flow, 5615 .req_classify_flow = selinux_req_classify_flow,
5616 .tun_dev_create = selinux_tun_dev_create,
5617 .tun_dev_post_create = selinux_tun_dev_post_create,
5618 .tun_dev_attach = selinux_tun_dev_attach,
5480 5619
5481#ifdef CONFIG_SECURITY_NETWORK_XFRM 5620#ifdef CONFIG_SECURITY_NETWORK_XFRM
5482 .xfrm_policy_alloc_security = selinux_xfrm_policy_alloc, 5621 .xfrm_policy_alloc_security = selinux_xfrm_policy_alloc,
@@ -5691,6 +5830,9 @@ int selinux_disable(void)
5691 selinux_disabled = 1; 5830 selinux_disabled = 1;
5692 selinux_enabled = 0; 5831 selinux_enabled = 0;
5693 5832
5833 /* Try to destroy the avc node cache */
5834 avc_disable();
5835
5694 /* Reset security_ops to the secondary module, dummy or capability. */ 5836 /* Reset security_ops to the secondary module, dummy or capability. */
5695 security_ops = secondary_ops; 5837 security_ops = secondary_ops;
5696 5838
diff --git a/security/selinux/include/av_inherit.h b/security/selinux/include/av_inherit.h
index 8377a4ba3b95..abedcd704dae 100644
--- a/security/selinux/include/av_inherit.h
+++ b/security/selinux/include/av_inherit.h
@@ -15,6 +15,7 @@
15 S_(SECCLASS_KEY_SOCKET, socket, 0x00400000UL) 15 S_(SECCLASS_KEY_SOCKET, socket, 0x00400000UL)
16 S_(SECCLASS_UNIX_STREAM_SOCKET, socket, 0x00400000UL) 16 S_(SECCLASS_UNIX_STREAM_SOCKET, socket, 0x00400000UL)
17 S_(SECCLASS_UNIX_DGRAM_SOCKET, socket, 0x00400000UL) 17 S_(SECCLASS_UNIX_DGRAM_SOCKET, socket, 0x00400000UL)
18 S_(SECCLASS_TUN_SOCKET, socket, 0x00400000UL)
18 S_(SECCLASS_IPC, ipc, 0x00000200UL) 19 S_(SECCLASS_IPC, ipc, 0x00000200UL)
19 S_(SECCLASS_SEM, ipc, 0x00000200UL) 20 S_(SECCLASS_SEM, ipc, 0x00000200UL)
20 S_(SECCLASS_MSGQ, ipc, 0x00000200UL) 21 S_(SECCLASS_MSGQ, ipc, 0x00000200UL)
diff --git a/security/selinux/include/av_perm_to_string.h b/security/selinux/include/av_perm_to_string.h
index 31df1d7c1aee..2b683ad83d21 100644
--- a/security/selinux/include/av_perm_to_string.h
+++ b/security/selinux/include/av_perm_to_string.h
@@ -107,6 +107,7 @@
107 S_(SECCLASS_SYSTEM, SYSTEM__SYSLOG_READ, "syslog_read") 107 S_(SECCLASS_SYSTEM, SYSTEM__SYSLOG_READ, "syslog_read")
108 S_(SECCLASS_SYSTEM, SYSTEM__SYSLOG_MOD, "syslog_mod") 108 S_(SECCLASS_SYSTEM, SYSTEM__SYSLOG_MOD, "syslog_mod")
109 S_(SECCLASS_SYSTEM, SYSTEM__SYSLOG_CONSOLE, "syslog_console") 109 S_(SECCLASS_SYSTEM, SYSTEM__SYSLOG_CONSOLE, "syslog_console")
110 S_(SECCLASS_SYSTEM, SYSTEM__MODULE_REQUEST, "module_request")
110 S_(SECCLASS_CAPABILITY, CAPABILITY__CHOWN, "chown") 111 S_(SECCLASS_CAPABILITY, CAPABILITY__CHOWN, "chown")
111 S_(SECCLASS_CAPABILITY, CAPABILITY__DAC_OVERRIDE, "dac_override") 112 S_(SECCLASS_CAPABILITY, CAPABILITY__DAC_OVERRIDE, "dac_override")
112 S_(SECCLASS_CAPABILITY, CAPABILITY__DAC_READ_SEARCH, "dac_read_search") 113 S_(SECCLASS_CAPABILITY, CAPABILITY__DAC_READ_SEARCH, "dac_read_search")
diff --git a/security/selinux/include/av_permissions.h b/security/selinux/include/av_permissions.h
index d645192ee950..0546d616ccac 100644
--- a/security/selinux/include/av_permissions.h
+++ b/security/selinux/include/av_permissions.h
@@ -423,6 +423,28 @@
423#define UNIX_DGRAM_SOCKET__RECV_MSG 0x00080000UL 423#define UNIX_DGRAM_SOCKET__RECV_MSG 0x00080000UL
424#define UNIX_DGRAM_SOCKET__SEND_MSG 0x00100000UL 424#define UNIX_DGRAM_SOCKET__SEND_MSG 0x00100000UL
425#define UNIX_DGRAM_SOCKET__NAME_BIND 0x00200000UL 425#define UNIX_DGRAM_SOCKET__NAME_BIND 0x00200000UL
426#define TUN_SOCKET__IOCTL 0x00000001UL
427#define TUN_SOCKET__READ 0x00000002UL
428#define TUN_SOCKET__WRITE 0x00000004UL
429#define TUN_SOCKET__CREATE 0x00000008UL
430#define TUN_SOCKET__GETATTR 0x00000010UL
431#define TUN_SOCKET__SETATTR 0x00000020UL
432#define TUN_SOCKET__LOCK 0x00000040UL
433#define TUN_SOCKET__RELABELFROM 0x00000080UL
434#define TUN_SOCKET__RELABELTO 0x00000100UL
435#define TUN_SOCKET__APPEND 0x00000200UL
436#define TUN_SOCKET__BIND 0x00000400UL
437#define TUN_SOCKET__CONNECT 0x00000800UL
438#define TUN_SOCKET__LISTEN 0x00001000UL
439#define TUN_SOCKET__ACCEPT 0x00002000UL
440#define TUN_SOCKET__GETOPT 0x00004000UL
441#define TUN_SOCKET__SETOPT 0x00008000UL
442#define TUN_SOCKET__SHUTDOWN 0x00010000UL
443#define TUN_SOCKET__RECVFROM 0x00020000UL
444#define TUN_SOCKET__SENDTO 0x00040000UL
445#define TUN_SOCKET__RECV_MSG 0x00080000UL
446#define TUN_SOCKET__SEND_MSG 0x00100000UL
447#define TUN_SOCKET__NAME_BIND 0x00200000UL
426#define PROCESS__FORK 0x00000001UL 448#define PROCESS__FORK 0x00000001UL
427#define PROCESS__TRANSITION 0x00000002UL 449#define PROCESS__TRANSITION 0x00000002UL
428#define PROCESS__SIGCHLD 0x00000004UL 450#define PROCESS__SIGCHLD 0x00000004UL
@@ -508,6 +530,7 @@
508#define SYSTEM__SYSLOG_READ 0x00000002UL 530#define SYSTEM__SYSLOG_READ 0x00000002UL
509#define SYSTEM__SYSLOG_MOD 0x00000004UL 531#define SYSTEM__SYSLOG_MOD 0x00000004UL
510#define SYSTEM__SYSLOG_CONSOLE 0x00000008UL 532#define SYSTEM__SYSLOG_CONSOLE 0x00000008UL
533#define SYSTEM__MODULE_REQUEST 0x00000010UL
511#define CAPABILITY__CHOWN 0x00000001UL 534#define CAPABILITY__CHOWN 0x00000001UL
512#define CAPABILITY__DAC_OVERRIDE 0x00000002UL 535#define CAPABILITY__DAC_OVERRIDE 0x00000002UL
513#define CAPABILITY__DAC_READ_SEARCH 0x00000004UL 536#define CAPABILITY__DAC_READ_SEARCH 0x00000004UL
diff --git a/security/selinux/include/avc.h b/security/selinux/include/avc.h
index d12ff1a9c0aa..e94e82f73818 100644
--- a/security/selinux/include/avc.h
+++ b/security/selinux/include/avc.h
@@ -13,6 +13,7 @@
13#include <linux/spinlock.h> 13#include <linux/spinlock.h>
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/audit.h> 15#include <linux/audit.h>
16#include <linux/lsm_audit.h>
16#include <linux/in6.h> 17#include <linux/in6.h>
17#include <linux/path.h> 18#include <linux/path.h>
18#include <asm/system.h> 19#include <asm/system.h>
@@ -36,48 +37,6 @@ struct inode;
36struct sock; 37struct sock;
37struct sk_buff; 38struct sk_buff;
38 39
39/* Auxiliary data to use in generating the audit record. */
40struct avc_audit_data {
41 char type;
42#define AVC_AUDIT_DATA_FS 1
43#define AVC_AUDIT_DATA_NET 2
44#define AVC_AUDIT_DATA_CAP 3
45#define AVC_AUDIT_DATA_IPC 4
46 struct task_struct *tsk;
47 union {
48 struct {
49 struct path path;
50 struct inode *inode;
51 } fs;
52 struct {
53 int netif;
54 struct sock *sk;
55 u16 family;
56 __be16 dport;
57 __be16 sport;
58 union {
59 struct {
60 __be32 daddr;
61 __be32 saddr;
62 } v4;
63 struct {
64 struct in6_addr daddr;
65 struct in6_addr saddr;
66 } v6;
67 } fam;
68 } net;
69 int cap;
70 int ipc_id;
71 } u;
72};
73
74#define v4info fam.v4
75#define v6info fam.v6
76
77/* Initialize an AVC audit data structure. */
78#define AVC_AUDIT_DATA_INIT(_d,_t) \
79 { memset((_d), 0, sizeof(struct avc_audit_data)); (_d)->type = AVC_AUDIT_DATA_##_t; }
80
81/* 40/*
82 * AVC statistics 41 * AVC statistics
83 */ 42 */
@@ -98,7 +57,9 @@ void __init avc_init(void);
98 57
99void avc_audit(u32 ssid, u32 tsid, 58void avc_audit(u32 ssid, u32 tsid,
100 u16 tclass, u32 requested, 59 u16 tclass, u32 requested,
101 struct av_decision *avd, int result, struct avc_audit_data *auditdata); 60 struct av_decision *avd,
61 int result,
62 struct common_audit_data *a);
102 63
103#define AVC_STRICT 1 /* Ignore permissive mode. */ 64#define AVC_STRICT 1 /* Ignore permissive mode. */
104int avc_has_perm_noaudit(u32 ssid, u32 tsid, 65int avc_has_perm_noaudit(u32 ssid, u32 tsid,
@@ -108,7 +69,7 @@ int avc_has_perm_noaudit(u32 ssid, u32 tsid,
108 69
109int avc_has_perm(u32 ssid, u32 tsid, 70int avc_has_perm(u32 ssid, u32 tsid,
110 u16 tclass, u32 requested, 71 u16 tclass, u32 requested,
111 struct avc_audit_data *auditdata); 72 struct common_audit_data *auditdata);
112 73
113u32 avc_policy_seqno(void); 74u32 avc_policy_seqno(void);
114 75
@@ -127,13 +88,13 @@ int avc_add_callback(int (*callback)(u32 event, u32 ssid, u32 tsid,
127 u32 events, u32 ssid, u32 tsid, 88 u32 events, u32 ssid, u32 tsid,
128 u16 tclass, u32 perms); 89 u16 tclass, u32 perms);
129 90
130/* Shows permission in human readable form */
131void avc_dump_av(struct audit_buffer *ab, u16 tclass, u32 av);
132
133/* Exported to selinuxfs */ 91/* Exported to selinuxfs */
134int avc_get_hash_stats(char *page); 92int avc_get_hash_stats(char *page);
135extern unsigned int avc_cache_threshold; 93extern unsigned int avc_cache_threshold;
136 94
95/* Attempt to free avc node cache */
96void avc_disable(void);
97
137#ifdef CONFIG_SECURITY_SELINUX_AVC_STATS 98#ifdef CONFIG_SECURITY_SELINUX_AVC_STATS
138DECLARE_PER_CPU(struct avc_cache_stats, avc_cache_stats); 99DECLARE_PER_CPU(struct avc_cache_stats, avc_cache_stats);
139#endif 100#endif
diff --git a/security/selinux/include/class_to_string.h b/security/selinux/include/class_to_string.h
index 21ec786611d4..7ab9299bfb6b 100644
--- a/security/selinux/include/class_to_string.h
+++ b/security/selinux/include/class_to_string.h
@@ -77,3 +77,4 @@
77 S_(NULL) 77 S_(NULL)
78 S_(NULL) 78 S_(NULL)
79 S_("kernel_service") 79 S_("kernel_service")
80 S_("tun_socket")
diff --git a/security/selinux/include/flask.h b/security/selinux/include/flask.h
index 882f27d66fac..f248500a1e3c 100644
--- a/security/selinux/include/flask.h
+++ b/security/selinux/include/flask.h
@@ -53,6 +53,7 @@
53#define SECCLASS_PEER 68 53#define SECCLASS_PEER 68
54#define SECCLASS_CAPABILITY2 69 54#define SECCLASS_CAPABILITY2 69
55#define SECCLASS_KERNEL_SERVICE 74 55#define SECCLASS_KERNEL_SERVICE 74
56#define SECCLASS_TUN_SOCKET 75
56 57
57/* 58/*
58 * Security identifier indices for initial entities 59 * Security identifier indices for initial entities
diff --git a/security/selinux/include/netlabel.h b/security/selinux/include/netlabel.h
index b4b5b9b2f0be..8d7384280a7a 100644
--- a/security/selinux/include/netlabel.h
+++ b/security/selinux/include/netlabel.h
@@ -59,7 +59,7 @@ int selinux_netlbl_socket_post_create(struct sock *sk, u16 family);
59int selinux_netlbl_sock_rcv_skb(struct sk_security_struct *sksec, 59int selinux_netlbl_sock_rcv_skb(struct sk_security_struct *sksec,
60 struct sk_buff *skb, 60 struct sk_buff *skb,
61 u16 family, 61 u16 family,
62 struct avc_audit_data *ad); 62 struct common_audit_data *ad);
63int selinux_netlbl_socket_setsockopt(struct socket *sock, 63int selinux_netlbl_socket_setsockopt(struct socket *sock,
64 int level, 64 int level,
65 int optname); 65 int optname);
@@ -129,7 +129,7 @@ static inline int selinux_netlbl_socket_post_create(struct sock *sk,
129static inline int selinux_netlbl_sock_rcv_skb(struct sk_security_struct *sksec, 129static inline int selinux_netlbl_sock_rcv_skb(struct sk_security_struct *sksec,
130 struct sk_buff *skb, 130 struct sk_buff *skb,
131 u16 family, 131 u16 family,
132 struct avc_audit_data *ad) 132 struct common_audit_data *ad)
133{ 133{
134 return 0; 134 return 0;
135} 135}
diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
index 289e24b39e3e..13128f9a3e5a 100644
--- a/security/selinux/include/xfrm.h
+++ b/security/selinux/include/xfrm.h
@@ -41,9 +41,9 @@ static inline int selinux_xfrm_enabled(void)
41} 41}
42 42
43int selinux_xfrm_sock_rcv_skb(u32 sid, struct sk_buff *skb, 43int selinux_xfrm_sock_rcv_skb(u32 sid, struct sk_buff *skb,
44 struct avc_audit_data *ad); 44 struct common_audit_data *ad);
45int selinux_xfrm_postroute_last(u32 isec_sid, struct sk_buff *skb, 45int selinux_xfrm_postroute_last(u32 isec_sid, struct sk_buff *skb,
46 struct avc_audit_data *ad, u8 proto); 46 struct common_audit_data *ad, u8 proto);
47int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall); 47int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
48 48
49static inline void selinux_xfrm_notify_policyload(void) 49static inline void selinux_xfrm_notify_policyload(void)
@@ -57,13 +57,13 @@ static inline int selinux_xfrm_enabled(void)
57} 57}
58 58
59static inline int selinux_xfrm_sock_rcv_skb(u32 isec_sid, struct sk_buff *skb, 59static inline int selinux_xfrm_sock_rcv_skb(u32 isec_sid, struct sk_buff *skb,
60 struct avc_audit_data *ad) 60 struct common_audit_data *ad)
61{ 61{
62 return 0; 62 return 0;
63} 63}
64 64
65static inline int selinux_xfrm_postroute_last(u32 isec_sid, struct sk_buff *skb, 65static inline int selinux_xfrm_postroute_last(u32 isec_sid, struct sk_buff *skb,
66 struct avc_audit_data *ad, u8 proto) 66 struct common_audit_data *ad, u8 proto)
67{ 67{
68 return 0; 68 return 0;
69} 69}
diff --git a/security/selinux/netlabel.c b/security/selinux/netlabel.c
index 2e984413c7b2..e68823741ad5 100644
--- a/security/selinux/netlabel.c
+++ b/security/selinux/netlabel.c
@@ -342,7 +342,7 @@ int selinux_netlbl_socket_post_create(struct sock *sk, u16 family)
342int selinux_netlbl_sock_rcv_skb(struct sk_security_struct *sksec, 342int selinux_netlbl_sock_rcv_skb(struct sk_security_struct *sksec,
343 struct sk_buff *skb, 343 struct sk_buff *skb,
344 u16 family, 344 u16 family,
345 struct avc_audit_data *ad) 345 struct common_audit_data *ad)
346{ 346{
347 int rc; 347 int rc;
348 u32 nlbl_sid; 348 u32 nlbl_sid;
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index 500e6f78e115..ff17820d35ec 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -22,6 +22,11 @@
22 * 22 *
23 * Added validation of kernel classes and permissions 23 * Added validation of kernel classes and permissions
24 * 24 *
25 * Updated: KaiGai Kohei <kaigai@ak.jp.nec.com>
26 *
 27 * Added support for bounds domain and audit messages on masked permissions
28 *
29 * Copyright (C) 2008, 2009 NEC Corporation
25 * Copyright (C) 2006, 2007 Hewlett-Packard Development Company, L.P. 30 * Copyright (C) 2006, 2007 Hewlett-Packard Development Company, L.P.
26 * Copyright (C) 2004-2006 Trusted Computer Solutions, Inc. 31 * Copyright (C) 2004-2006 Trusted Computer Solutions, Inc.
27 * Copyright (C) 2003 - 2004, 2006 Tresys Technology, LLC 32 * Copyright (C) 2003 - 2004, 2006 Tresys Technology, LLC
@@ -279,6 +284,95 @@ mls_ops:
279} 284}
280 285
281/* 286/*
287 * security_dump_masked_av - dumps masked permissions during
288 * security_compute_av due to RBAC, MLS/Constraint and Type bounds.
289 */
290static int dump_masked_av_helper(void *k, void *d, void *args)
291{
292 struct perm_datum *pdatum = d;
293 char **permission_names = args;
294
295 BUG_ON(pdatum->value < 1 || pdatum->value > 32);
296
297 permission_names[pdatum->value - 1] = (char *)k;
298
299 return 0;
300}
301
302static void security_dump_masked_av(struct context *scontext,
303 struct context *tcontext,
304 u16 tclass,
305 u32 permissions,
306 const char *reason)
307{
308 struct common_datum *common_dat;
309 struct class_datum *tclass_dat;
310 struct audit_buffer *ab;
311 char *tclass_name;
312 char *scontext_name = NULL;
313 char *tcontext_name = NULL;
314 char *permission_names[32];
315 int index, length;
316 bool need_comma = false;
317
318 if (!permissions)
319 return;
320
321 tclass_name = policydb.p_class_val_to_name[tclass - 1];
322 tclass_dat = policydb.class_val_to_struct[tclass - 1];
323 common_dat = tclass_dat->comdatum;
324
325 /* init permission_names */
326 if (common_dat &&
327 hashtab_map(common_dat->permissions.table,
328 dump_masked_av_helper, permission_names) < 0)
329 goto out;
330
331 if (hashtab_map(tclass_dat->permissions.table,
332 dump_masked_av_helper, permission_names) < 0)
333 goto out;
334
335 /* get scontext/tcontext in text form */
336 if (context_struct_to_string(scontext,
337 &scontext_name, &length) < 0)
338 goto out;
339
340 if (context_struct_to_string(tcontext,
341 &tcontext_name, &length) < 0)
342 goto out;
343
344 /* audit a message */
345 ab = audit_log_start(current->audit_context,
346 GFP_ATOMIC, AUDIT_SELINUX_ERR);
347 if (!ab)
348 goto out;
349
350 audit_log_format(ab, "op=security_compute_av reason=%s "
351 "scontext=%s tcontext=%s tclass=%s perms=",
352 reason, scontext_name, tcontext_name, tclass_name);
353
354 for (index = 0; index < 32; index++) {
355 u32 mask = (1 << index);
356
357 if ((mask & permissions) == 0)
358 continue;
359
360 audit_log_format(ab, "%s%s",
361 need_comma ? "," : "",
362 permission_names[index]
363 ? permission_names[index] : "????");
364 need_comma = true;
365 }
366 audit_log_end(ab);
367out:
368 /* release scontext/tcontext */
369 kfree(tcontext_name);
370 kfree(scontext_name);
371
372 return;
373}
374
375/*
282 * security_boundary_permission - drops violated permissions 376 * security_boundary_permission - drops violated permissions
283 * on boundary constraint. 377 * on boundary constraint.
284 */ 378 */
@@ -347,28 +441,12 @@ static void type_attribute_bounds_av(struct context *scontext,
347 } 441 }
348 442
349 if (masked) { 443 if (masked) {
350 struct audit_buffer *ab;
351 char *stype_name
352 = policydb.p_type_val_to_name[source->value - 1];
353 char *ttype_name
354 = policydb.p_type_val_to_name[target->value - 1];
355 char *tclass_name
356 = policydb.p_class_val_to_name[tclass - 1];
357
358 /* mask violated permissions */ 444 /* mask violated permissions */
359 avd->allowed &= ~masked; 445 avd->allowed &= ~masked;
360 446
361 /* notice to userspace via audit message */ 447 /* audit masked permissions */
362 ab = audit_log_start(current->audit_context, 448 security_dump_masked_av(scontext, tcontext,
363 GFP_ATOMIC, AUDIT_SELINUX_ERR); 449 tclass, masked, "bounds");
364 if (!ab)
365 return;
366
367 audit_log_format(ab, "av boundary violation: "
368 "source=%s target=%s tclass=%s",
369 stype_name, ttype_name, tclass_name);
370 avc_dump_av(ab, tclass, masked);
371 audit_log_end(ab);
372 } 450 }
373} 451}
374 452
@@ -480,7 +558,7 @@ static int context_struct_compute_av(struct context *scontext,
480 if ((constraint->permissions & (avd->allowed)) && 558 if ((constraint->permissions & (avd->allowed)) &&
481 !constraint_expr_eval(scontext, tcontext, NULL, 559 !constraint_expr_eval(scontext, tcontext, NULL,
482 constraint->expr)) { 560 constraint->expr)) {
483 avd->allowed = (avd->allowed) & ~(constraint->permissions); 561 avd->allowed &= ~(constraint->permissions);
484 } 562 }
485 constraint = constraint->next; 563 constraint = constraint->next;
486 } 564 }
@@ -499,8 +577,8 @@ static int context_struct_compute_av(struct context *scontext,
499 break; 577 break;
500 } 578 }
501 if (!ra) 579 if (!ra)
502 avd->allowed = (avd->allowed) & ~(PROCESS__TRANSITION | 580 avd->allowed &= ~(PROCESS__TRANSITION |
503 PROCESS__DYNTRANSITION); 581 PROCESS__DYNTRANSITION);
504 } 582 }
505 583
506 /* 584 /*
@@ -687,6 +765,26 @@ int security_bounded_transition(u32 old_sid, u32 new_sid)
687 } 765 }
688 index = type->bounds; 766 index = type->bounds;
689 } 767 }
768
769 if (rc) {
770 char *old_name = NULL;
771 char *new_name = NULL;
772 int length;
773
774 if (!context_struct_to_string(old_context,
775 &old_name, &length) &&
776 !context_struct_to_string(new_context,
777 &new_name, &length)) {
778 audit_log(current->audit_context,
779 GFP_ATOMIC, AUDIT_SELINUX_ERR,
780 "op=security_bounded_transition "
781 "result=denied "
782 "oldcontext=%s newcontext=%s",
783 old_name, new_name);
784 }
785 kfree(new_name);
786 kfree(old_name);
787 }
690out: 788out:
691 read_unlock(&policy_rwlock); 789 read_unlock(&policy_rwlock);
692 790
diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c
index 72b18452e1a1..f3cb9ed731a9 100644
--- a/security/selinux/xfrm.c
+++ b/security/selinux/xfrm.c
@@ -401,7 +401,7 @@ int selinux_xfrm_state_delete(struct xfrm_state *x)
401 * gone thru the IPSec process. 401 * gone thru the IPSec process.
402 */ 402 */
403int selinux_xfrm_sock_rcv_skb(u32 isec_sid, struct sk_buff *skb, 403int selinux_xfrm_sock_rcv_skb(u32 isec_sid, struct sk_buff *skb,
404 struct avc_audit_data *ad) 404 struct common_audit_data *ad)
405{ 405{
406 int i, rc = 0; 406 int i, rc = 0;
407 struct sec_path *sp; 407 struct sec_path *sp;
@@ -442,7 +442,7 @@ int selinux_xfrm_sock_rcv_skb(u32 isec_sid, struct sk_buff *skb,
442 * checked in the selinux_xfrm_state_pol_flow_match hook above. 442 * checked in the selinux_xfrm_state_pol_flow_match hook above.
443 */ 443 */
444int selinux_xfrm_postroute_last(u32 isec_sid, struct sk_buff *skb, 444int selinux_xfrm_postroute_last(u32 isec_sid, struct sk_buff *skb,
445 struct avc_audit_data *ad, u8 proto) 445 struct common_audit_data *ad, u8 proto)
446{ 446{
447 struct dst_entry *dst; 447 struct dst_entry *dst;
448 int rc = 0; 448 int rc = 0;
diff --git a/security/smack/smack.h b/security/smack/smack.h
index 243bec175be0..c6e9acae72e4 100644
--- a/security/smack/smack.h
+++ b/security/smack/smack.h
@@ -275,7 +275,7 @@ static inline void smk_ad_init(struct smk_audit_info *a, const char *func,
275{ 275{
276 memset(a, 0, sizeof(*a)); 276 memset(a, 0, sizeof(*a));
277 a->a.type = type; 277 a->a.type = type;
278 a->a.function = func; 278 a->a.smack_audit_data.function = func;
279} 279}
280 280
281static inline void smk_ad_setfield_u_tsk(struct smk_audit_info *a, 281static inline void smk_ad_setfield_u_tsk(struct smk_audit_info *a,
diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c
index 513dc1aa16dd..0f9ac8146900 100644
--- a/security/smack/smack_access.c
+++ b/security/smack/smack_access.c
@@ -240,8 +240,9 @@ static inline void smack_str_from_perm(char *string, int access)
240static void smack_log_callback(struct audit_buffer *ab, void *a) 240static void smack_log_callback(struct audit_buffer *ab, void *a)
241{ 241{
242 struct common_audit_data *ad = a; 242 struct common_audit_data *ad = a;
243 struct smack_audit_data *sad = &ad->lsm_priv.smack_audit_data; 243 struct smack_audit_data *sad = &ad->smack_audit_data;
244 audit_log_format(ab, "lsm=SMACK fn=%s action=%s", ad->function, 244 audit_log_format(ab, "lsm=SMACK fn=%s action=%s",
245 ad->smack_audit_data.function,
245 sad->result ? "denied" : "granted"); 246 sad->result ? "denied" : "granted");
246 audit_log_format(ab, " subject="); 247 audit_log_format(ab, " subject=");
247 audit_log_untrustedstring(ab, sad->subject); 248 audit_log_untrustedstring(ab, sad->subject);
@@ -274,11 +275,11 @@ void smack_log(char *subject_label, char *object_label, int request,
274 if (result == 0 && (log_policy & SMACK_AUDIT_ACCEPT) == 0) 275 if (result == 0 && (log_policy & SMACK_AUDIT_ACCEPT) == 0)
275 return; 276 return;
276 277
277 if (a->function == NULL) 278 if (a->smack_audit_data.function == NULL)
278 a->function = "unknown"; 279 a->smack_audit_data.function = "unknown";
279 280
280 /* end preparing the audit data */ 281 /* end preparing the audit data */
281 sad = &a->lsm_priv.smack_audit_data; 282 sad = &a->smack_audit_data;
282 smack_str_from_perm(request_buffer, request); 283 smack_str_from_perm(request_buffer, request);
283 sad->subject = subject_label; 284 sad->subject = subject_label;
284 sad->object = object_label; 285 sad->object = object_label;
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 0023182078c7..acae7ef4092d 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -91,7 +91,7 @@ struct inode_smack *new_inode_smack(char *smack)
91 */ 91 */
92 92
93/** 93/**
94 * smack_ptrace_may_access - Smack approval on PTRACE_ATTACH 94 * smack_ptrace_access_check - Smack approval on PTRACE_ATTACH
95 * @ctp: child task pointer 95 * @ctp: child task pointer
96 * @mode: ptrace attachment mode 96 * @mode: ptrace attachment mode
97 * 97 *
@@ -99,13 +99,13 @@ struct inode_smack *new_inode_smack(char *smack)
99 * 99 *
100 * Do the capability checks, and require read and write. 100 * Do the capability checks, and require read and write.
101 */ 101 */
102static int smack_ptrace_may_access(struct task_struct *ctp, unsigned int mode) 102static int smack_ptrace_access_check(struct task_struct *ctp, unsigned int mode)
103{ 103{
104 int rc; 104 int rc;
105 struct smk_audit_info ad; 105 struct smk_audit_info ad;
106 char *sp, *tsp; 106 char *sp, *tsp;
107 107
108 rc = cap_ptrace_may_access(ctp, mode); 108 rc = cap_ptrace_access_check(ctp, mode);
109 if (rc != 0) 109 if (rc != 0)
110 return rc; 110 return rc;
111 111
@@ -1080,6 +1080,22 @@ static int smack_file_receive(struct file *file)
1080 */ 1080 */
1081 1081
1082/** 1082/**
1083 * smack_cred_alloc_blank - "allocate" blank task-level security credentials
1084 * @new: the new credentials
1085 * @gfp: the atomicity of any memory allocations
1086 *
1087 * Prepare a blank set of credentials for modification. This must allocate all
1088 * the memory the LSM module might require such that cred_transfer() can
1089 * complete without error.
1090 */
1091static int smack_cred_alloc_blank(struct cred *cred, gfp_t gfp)
1092{
1093 cred->security = NULL;
1094 return 0;
1095}
1096
1097
1098/**
1083 * smack_cred_free - "free" task-level security credentials 1099 * smack_cred_free - "free" task-level security credentials
1084 * @cred: the credentials in question 1100 * @cred: the credentials in question
1085 * 1101 *
@@ -1117,6 +1133,18 @@ static void smack_cred_commit(struct cred *new, const struct cred *old)
1117} 1133}
1118 1134
1119/** 1135/**
1136 * smack_cred_transfer - Transfer the old credentials to the new credentials
1137 * @new: the new credentials
1138 * @old: the original credentials
1139 *
1140 * Fill in a set of blank credentials from another set of credentials.
1141 */
1142static void smack_cred_transfer(struct cred *new, const struct cred *old)
1143{
1144 new->security = old->security;
1145}
1146
1147/**
1120 * smack_kernel_act_as - Set the subjective context in a set of credentials 1148 * smack_kernel_act_as - Set the subjective context in a set of credentials
1121 * @new: points to the set of credentials to be modified. 1149 * @new: points to the set of credentials to be modified.
1122 * @secid: specifies the security ID to be set 1150 * @secid: specifies the security ID to be set
@@ -1638,6 +1666,7 @@ static int smack_inode_setsecurity(struct inode *inode, const char *name,
1638 1666
1639 if (strcmp(name, XATTR_SMACK_SUFFIX) == 0) { 1667 if (strcmp(name, XATTR_SMACK_SUFFIX) == 0) {
1640 nsp->smk_inode = sp; 1668 nsp->smk_inode = sp;
1669 nsp->smk_flags |= SMK_INODE_INSTANT;
1641 return 0; 1670 return 0;
1642 } 1671 }
1643 /* 1672 /*
@@ -2464,7 +2493,7 @@ static int smack_socket_sendmsg(struct socket *sock, struct msghdr *msg,
2464 /* 2493 /*
2465 * Perfectly reasonable for this to be NULL 2494 * Perfectly reasonable for this to be NULL
2466 */ 2495 */
2467 if (sip == NULL || sip->sin_family != PF_INET) 2496 if (sip == NULL || sip->sin_family != AF_INET)
2468 return 0; 2497 return 0;
2469 2498
2470 return smack_netlabel_send(sock->sk, sip); 2499 return smack_netlabel_send(sock->sk, sip);
@@ -3029,10 +3058,31 @@ static void smack_release_secctx(char *secdata, u32 seclen)
3029{ 3058{
3030} 3059}
3031 3060
3061static int smack_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen)
3062{
3063 return smack_inode_setsecurity(inode, XATTR_SMACK_SUFFIX, ctx, ctxlen, 0);
3064}
3065
3066static int smack_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen)
3067{
3068 return __vfs_setxattr_noperm(dentry, XATTR_NAME_SMACK, ctx, ctxlen, 0);
3069}
3070
3071static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
3072{
3073 int len = 0;
3074 len = smack_inode_getsecurity(inode, XATTR_SMACK_SUFFIX, ctx, true);
3075
3076 if (len < 0)
3077 return len;
3078 *ctxlen = len;
3079 return 0;
3080}
3081
3032struct security_operations smack_ops = { 3082struct security_operations smack_ops = {
3033 .name = "smack", 3083 .name = "smack",
3034 3084
3035 .ptrace_may_access = smack_ptrace_may_access, 3085 .ptrace_access_check = smack_ptrace_access_check,
3036 .ptrace_traceme = smack_ptrace_traceme, 3086 .ptrace_traceme = smack_ptrace_traceme,
3037 .syslog = smack_syslog, 3087 .syslog = smack_syslog,
3038 3088
@@ -3073,9 +3123,11 @@ struct security_operations smack_ops = {
3073 .file_send_sigiotask = smack_file_send_sigiotask, 3123 .file_send_sigiotask = smack_file_send_sigiotask,
3074 .file_receive = smack_file_receive, 3124 .file_receive = smack_file_receive,
3075 3125
3126 .cred_alloc_blank = smack_cred_alloc_blank,
3076 .cred_free = smack_cred_free, 3127 .cred_free = smack_cred_free,
3077 .cred_prepare = smack_cred_prepare, 3128 .cred_prepare = smack_cred_prepare,
3078 .cred_commit = smack_cred_commit, 3129 .cred_commit = smack_cred_commit,
3130 .cred_transfer = smack_cred_transfer,
3079 .kernel_act_as = smack_kernel_act_as, 3131 .kernel_act_as = smack_kernel_act_as,
3080 .kernel_create_files_as = smack_kernel_create_files_as, 3132 .kernel_create_files_as = smack_kernel_create_files_as,
3081 .task_setpgid = smack_task_setpgid, 3133 .task_setpgid = smack_task_setpgid,
@@ -3155,6 +3207,9 @@ struct security_operations smack_ops = {
3155 .secid_to_secctx = smack_secid_to_secctx, 3207 .secid_to_secctx = smack_secid_to_secctx,
3156 .secctx_to_secid = smack_secctx_to_secid, 3208 .secctx_to_secid = smack_secctx_to_secid,
3157 .release_secctx = smack_release_secctx, 3209 .release_secctx = smack_release_secctx,
3210 .inode_notifysecctx = smack_inode_notifysecctx,
3211 .inode_setsecctx = smack_inode_setsecctx,
3212 .inode_getsecctx = smack_inode_getsecctx,
3158}; 3213};
3159 3214
3160 3215
diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c
index fdd1f4b8c448..3c8bd8ee0b95 100644
--- a/security/tomoyo/common.c
+++ b/security/tomoyo/common.c
@@ -1285,6 +1285,36 @@ static bool tomoyo_is_select_one(struct tomoyo_io_buffer *head,
1285} 1285}
1286 1286
1287/** 1287/**
1288 * tomoyo_delete_domain - Delete a domain.
1289 *
1290 * @domainname: The name of domain.
1291 *
1292 * Returns 0.
1293 */
1294static int tomoyo_delete_domain(char *domainname)
1295{
1296 struct tomoyo_domain_info *domain;
1297 struct tomoyo_path_info name;
1298
1299 name.name = domainname;
1300 tomoyo_fill_path_info(&name);
1301 down_write(&tomoyo_domain_list_lock);
1302 /* Is there an active domain? */
1303 list_for_each_entry(domain, &tomoyo_domain_list, list) {
1304 /* Never delete tomoyo_kernel_domain */
1305 if (domain == &tomoyo_kernel_domain)
1306 continue;
1307 if (domain->is_deleted ||
1308 tomoyo_pathcmp(domain->domainname, &name))
1309 continue;
1310 domain->is_deleted = true;
1311 break;
1312 }
1313 up_write(&tomoyo_domain_list_lock);
1314 return 0;
1315}
1316
1317/**
1288 * tomoyo_write_domain_policy - Write domain policy. 1318 * tomoyo_write_domain_policy - Write domain policy.
1289 * 1319 *
1290 * @head: Pointer to "struct tomoyo_io_buffer". 1320 * @head: Pointer to "struct tomoyo_io_buffer".
diff --git a/security/tomoyo/common.h b/security/tomoyo/common.h
index 6d6ba09af457..31df541911f7 100644
--- a/security/tomoyo/common.h
+++ b/security/tomoyo/common.h
@@ -339,8 +339,6 @@ const char *tomoyo_get_last_name(const struct tomoyo_domain_info *domain);
339const char *tomoyo_get_msg(const bool is_enforce); 339const char *tomoyo_get_msg(const bool is_enforce);
340/* Convert single path operation to operation name. */ 340/* Convert single path operation to operation name. */
341const char *tomoyo_sp2keyword(const u8 operation); 341const char *tomoyo_sp2keyword(const u8 operation);
342/* Delete a domain. */
343int tomoyo_delete_domain(char *data);
344/* Create "alias" entry in exception policy. */ 342/* Create "alias" entry in exception policy. */
345int tomoyo_write_alias_policy(char *data, const bool is_delete); 343int tomoyo_write_alias_policy(char *data, const bool is_delete);
346/* 344/*
diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c
index 1d8b16960576..fcf52accce2b 100644
--- a/security/tomoyo/domain.c
+++ b/security/tomoyo/domain.c
@@ -717,38 +717,6 @@ int tomoyo_write_alias_policy(char *data, const bool is_delete)
717 return tomoyo_update_alias_entry(data, cp, is_delete); 717 return tomoyo_update_alias_entry(data, cp, is_delete);
718} 718}
719 719
720/* Domain create/delete handler. */
721
722/**
723 * tomoyo_delete_domain - Delete a domain.
724 *
725 * @domainname: The name of domain.
726 *
727 * Returns 0.
728 */
729int tomoyo_delete_domain(char *domainname)
730{
731 struct tomoyo_domain_info *domain;
732 struct tomoyo_path_info name;
733
734 name.name = domainname;
735 tomoyo_fill_path_info(&name);
736 down_write(&tomoyo_domain_list_lock);
737 /* Is there an active domain? */
738 list_for_each_entry(domain, &tomoyo_domain_list, list) {
739 /* Never delete tomoyo_kernel_domain */
740 if (domain == &tomoyo_kernel_domain)
741 continue;
742 if (domain->is_deleted ||
743 tomoyo_pathcmp(domain->domainname, &name))
744 continue;
745 domain->is_deleted = true;
746 break;
747 }
748 up_write(&tomoyo_domain_list_lock);
749 return 0;
750}
751
752/** 720/**
753 * tomoyo_find_or_assign_new_domain - Create a domain. 721 * tomoyo_find_or_assign_new_domain - Create a domain.
754 * 722 *
@@ -818,13 +786,11 @@ struct tomoyo_domain_info *tomoyo_find_or_assign_new_domain(const char *
818/** 786/**
819 * tomoyo_find_next_domain - Find a domain. 787 * tomoyo_find_next_domain - Find a domain.
820 * 788 *
821 * @bprm: Pointer to "struct linux_binprm". 789 * @bprm: Pointer to "struct linux_binprm".
822 * @next_domain: Pointer to pointer to "struct tomoyo_domain_info".
823 * 790 *
824 * Returns 0 on success, negative value otherwise. 791 * Returns 0 on success, negative value otherwise.
825 */ 792 */
826int tomoyo_find_next_domain(struct linux_binprm *bprm, 793int tomoyo_find_next_domain(struct linux_binprm *bprm)
827 struct tomoyo_domain_info **next_domain)
828{ 794{
829 /* 795 /*
830 * This function assumes that the size of buffer returned by 796 * This function assumes that the size of buffer returned by
@@ -946,9 +912,11 @@ int tomoyo_find_next_domain(struct linux_binprm *bprm,
946 tomoyo_set_domain_flag(old_domain, false, 912 tomoyo_set_domain_flag(old_domain, false,
947 TOMOYO_DOMAIN_FLAGS_TRANSITION_FAILED); 913 TOMOYO_DOMAIN_FLAGS_TRANSITION_FAILED);
948 out: 914 out:
915 if (!domain)
916 domain = old_domain;
917 bprm->cred->security = domain;
949 tomoyo_free(real_program_name); 918 tomoyo_free(real_program_name);
950 tomoyo_free(symlink_program_name); 919 tomoyo_free(symlink_program_name);
951 *next_domain = domain ? domain : old_domain;
952 tomoyo_free(tmp); 920 tomoyo_free(tmp);
953 return retval; 921 return retval;
954} 922}
diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
index 3194d09fe0f4..9548a0984cc4 100644
--- a/security/tomoyo/tomoyo.c
+++ b/security/tomoyo/tomoyo.c
@@ -14,6 +14,12 @@
14#include "tomoyo.h" 14#include "tomoyo.h"
15#include "realpath.h" 15#include "realpath.h"
16 16
17static int tomoyo_cred_alloc_blank(struct cred *new, gfp_t gfp)
18{
19 new->security = NULL;
20 return 0;
21}
22
17static int tomoyo_cred_prepare(struct cred *new, const struct cred *old, 23static int tomoyo_cred_prepare(struct cred *new, const struct cred *old,
18 gfp_t gfp) 24 gfp_t gfp)
19{ 25{
@@ -25,6 +31,15 @@ static int tomoyo_cred_prepare(struct cred *new, const struct cred *old,
25 return 0; 31 return 0;
26} 32}
27 33
34static void tomoyo_cred_transfer(struct cred *new, const struct cred *old)
35{
36 /*
37 * Since "struct tomoyo_domain_info *" is a sharable pointer,
38 * we don't need to duplicate.
39 */
40 new->security = old->security;
41}
42
28static int tomoyo_bprm_set_creds(struct linux_binprm *bprm) 43static int tomoyo_bprm_set_creds(struct linux_binprm *bprm)
29{ 44{
30 int rc; 45 int rc;
@@ -61,14 +76,8 @@ static int tomoyo_bprm_check_security(struct linux_binprm *bprm)
61 * Execute permission is checked against pathname passed to do_execve() 76 * Execute permission is checked against pathname passed to do_execve()
62 * using current domain. 77 * using current domain.
63 */ 78 */
64 if (!domain) { 79 if (!domain)
65 struct tomoyo_domain_info *next_domain = NULL; 80 return tomoyo_find_next_domain(bprm);
66 int retval = tomoyo_find_next_domain(bprm, &next_domain);
67
68 if (!retval)
69 bprm->cred->security = next_domain;
70 return retval;
71 }
72 /* 81 /*
73 * Read permission is checked against interpreters using next domain. 82 * Read permission is checked against interpreters using next domain.
74 * '1' is the result of open_to_namei_flags(O_RDONLY). 83 * '1' is the result of open_to_namei_flags(O_RDONLY).
@@ -268,7 +277,9 @@ static int tomoyo_dentry_open(struct file *f, const struct cred *cred)
268 */ 277 */
269static struct security_operations tomoyo_security_ops = { 278static struct security_operations tomoyo_security_ops = {
270 .name = "tomoyo", 279 .name = "tomoyo",
280 .cred_alloc_blank = tomoyo_cred_alloc_blank,
271 .cred_prepare = tomoyo_cred_prepare, 281 .cred_prepare = tomoyo_cred_prepare,
282 .cred_transfer = tomoyo_cred_transfer,
272 .bprm_set_creds = tomoyo_bprm_set_creds, 283 .bprm_set_creds = tomoyo_bprm_set_creds,
273 .bprm_check_security = tomoyo_bprm_check_security, 284 .bprm_check_security = tomoyo_bprm_check_security,
274#ifdef CONFIG_SYSCTL 285#ifdef CONFIG_SYSCTL
diff --git a/security/tomoyo/tomoyo.h b/security/tomoyo/tomoyo.h
index 0fd588a629cf..cd6ba0bf7069 100644
--- a/security/tomoyo/tomoyo.h
+++ b/security/tomoyo/tomoyo.h
@@ -31,8 +31,7 @@ int tomoyo_check_2path_perm(struct tomoyo_domain_info *domain,
31 struct path *path2); 31 struct path *path2);
32int tomoyo_check_rewrite_permission(struct tomoyo_domain_info *domain, 32int tomoyo_check_rewrite_permission(struct tomoyo_domain_info *domain,
33 struct file *filp); 33 struct file *filp);
34int tomoyo_find_next_domain(struct linux_binprm *bprm, 34int tomoyo_find_next_domain(struct linux_binprm *bprm);
35 struct tomoyo_domain_info **next_domain);
36 35
37/* Index numbers for Access Controls. */ 36/* Index numbers for Access Controls. */
38 37
diff --git a/sound/Kconfig b/sound/Kconfig
index 1eceb85287c5..439e15c8faa3 100644
--- a/sound/Kconfig
+++ b/sound/Kconfig
@@ -32,6 +32,34 @@ config SOUND_OSS_CORE
32 bool 32 bool
33 default n 33 default n
34 34
35config SOUND_OSS_CORE_PRECLAIM
36 bool "Preclaim OSS device numbers"
37 depends on SOUND_OSS_CORE
38 default y
39 help
40 With this option enabled, the kernel will claim all OSS device
41 numbers if any OSS support (native or emulation) is enabled
42 whether the respective module is loaded or not and try to load the
43 appropriate module using sound-slot/service-* and char-major-*
44 module aliases when one of the device numbers is opened. With
 45	  this option disabled, the kernel will only claim actually in-use
46 device numbers and opening a missing device will generate only the
47 standard char-major-* aliases.
48
49 The only visible difference is use of additional module aliases
50 and whether OSS sound devices appear multiple times in
51 /proc/devices. sound-slot/service-* module aliases are scheduled
52 to be removed (ie. PRECLAIM won't be available) and this option is
53 to make the transition easier. This option can be overridden
54 during boot using the kernel parameter soundcore.preclaim_oss.
55
56 Disabling this allows alternative OSS implementations.
57
58 Please read Documentation/feature-removal-schedule.txt for
59 details.
60
 61	  If unsure, say Y.
62
35source "sound/oss/dmasound/Kconfig" 63source "sound/oss/dmasound/Kconfig"
36 64
37if !M68K 65if !M68K
diff --git a/sound/arm/pxa2xx-ac97.c b/sound/arm/pxa2xx-ac97.c
index c570ebd9d177..4e34d19ddbc0 100644
--- a/sound/arm/pxa2xx-ac97.c
+++ b/sound/arm/pxa2xx-ac97.c
@@ -170,6 +170,13 @@ static int __devinit pxa2xx_ac97_probe(struct platform_device *dev)
170 struct snd_ac97_bus *ac97_bus; 170 struct snd_ac97_bus *ac97_bus;
171 struct snd_ac97_template ac97_template; 171 struct snd_ac97_template ac97_template;
172 int ret; 172 int ret;
173 pxa2xx_audio_ops_t *pdata = dev->dev.platform_data;
174
175 if (dev->id >= 0) {
176 dev_err(&dev->dev, "PXA2xx has only one AC97 port.\n");
177 ret = -ENXIO;
178 goto err_dev;
179 }
173 180
174 ret = snd_card_create(SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1, 181 ret = snd_card_create(SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
175 THIS_MODULE, 0, &card); 182 THIS_MODULE, 0, &card);
@@ -200,6 +207,8 @@ static int __devinit pxa2xx_ac97_probe(struct platform_device *dev)
200 snprintf(card->longname, sizeof(card->longname), 207 snprintf(card->longname, sizeof(card->longname),
201 "%s (%s)", dev->dev.driver->name, card->mixername); 208 "%s (%s)", dev->dev.driver->name, card->mixername);
202 209
210 if (pdata && pdata->codec_pdata[0])
211 snd_ac97_dev_add_pdata(ac97_bus->codec[0], pdata->codec_pdata[0]);
203 snd_card_set_dev(card, &dev->dev); 212 snd_card_set_dev(card, &dev->dev);
204 ret = snd_card_register(card); 213 ret = snd_card_register(card);
205 if (ret == 0) { 214 if (ret == 0) {
@@ -212,6 +221,7 @@ err_remove:
212err: 221err:
213 if (card) 222 if (card)
214 snd_card_free(card); 223 snd_card_free(card);
224err_dev:
215 return ret; 225 return ret;
216} 226}
217 227
diff --git a/sound/arm/pxa2xx-pcm-lib.c b/sound/arm/pxa2xx-pcm-lib.c
index 6205f37d547c..743ac6a29065 100644
--- a/sound/arm/pxa2xx-pcm-lib.c
+++ b/sound/arm/pxa2xx-pcm-lib.c
@@ -136,6 +136,9 @@ int __pxa2xx_pcm_prepare(struct snd_pcm_substream *substream)
136{ 136{
137 struct pxa2xx_runtime_data *prtd = substream->runtime->private_data; 137 struct pxa2xx_runtime_data *prtd = substream->runtime->private_data;
138 138
139 if (!prtd || !prtd->params)
140 return 0;
141
139 DCSR(prtd->dma_ch) &= ~DCSR_RUN; 142 DCSR(prtd->dma_ch) &= ~DCSR_RUN;
140 DCSR(prtd->dma_ch) = 0; 143 DCSR(prtd->dma_ch) = 0;
141 DCMD(prtd->dma_ch) = 0; 144 DCMD(prtd->dma_ch) = 0;
diff --git a/sound/core/Kconfig b/sound/core/Kconfig
index 6061fb5f4e1c..c15682a2f9db 100644
--- a/sound/core/Kconfig
+++ b/sound/core/Kconfig
@@ -206,4 +206,8 @@ config SND_PCM_XRUN_DEBUG
206config SND_VMASTER 206config SND_VMASTER
207 bool 207 bool
208 208
209config SND_DMA_SGBUF
210 def_bool y
211 depends on X86
212
209source "sound/core/seq/Kconfig" 213source "sound/core/seq/Kconfig"
diff --git a/sound/core/Makefile b/sound/core/Makefile
index 4229052e7b91..350a08d277f4 100644
--- a/sound/core/Makefile
+++ b/sound/core/Makefile
@@ -13,7 +13,7 @@ snd-pcm-objs := pcm.o pcm_native.o pcm_lib.o pcm_timer.o pcm_misc.o \
13 pcm_memory.o 13 pcm_memory.o
14 14
15snd-page-alloc-y := memalloc.o 15snd-page-alloc-y := memalloc.o
16snd-page-alloc-$(CONFIG_HAS_DMA) += sgbuf.o 16snd-page-alloc-$(CONFIG_SND_DMA_SGBUF) += sgbuf.o
17 17
18snd-rawmidi-objs := rawmidi.o 18snd-rawmidi-objs := rawmidi.o
19snd-timer-objs := timer.o 19snd-timer-objs := timer.o
diff --git a/sound/core/control.c b/sound/core/control.c
index 17b8d47a5cd0..a8b7fabe645e 100644
--- a/sound/core/control.c
+++ b/sound/core/control.c
@@ -414,7 +414,7 @@ int snd_ctl_remove_id(struct snd_card *card, struct snd_ctl_elem_id *id)
414EXPORT_SYMBOL(snd_ctl_remove_id); 414EXPORT_SYMBOL(snd_ctl_remove_id);
415 415
416/** 416/**
417 * snd_ctl_remove_unlocked_id - remove the unlocked control of the given id and release it 417 * snd_ctl_remove_user_ctl - remove and release the unlocked user control
418 * @file: active control handle 418 * @file: active control handle
419 * @id: the control id to remove 419 * @id: the control id to remove
420 * 420 *
@@ -423,8 +423,8 @@ EXPORT_SYMBOL(snd_ctl_remove_id);
423 * 423 *
424 * Returns 0 if successful, or a negative error code on failure. 424 * Returns 0 if successful, or a negative error code on failure.
425 */ 425 */
426static int snd_ctl_remove_unlocked_id(struct snd_ctl_file * file, 426static int snd_ctl_remove_user_ctl(struct snd_ctl_file * file,
427 struct snd_ctl_elem_id *id) 427 struct snd_ctl_elem_id *id)
428{ 428{
429 struct snd_card *card = file->card; 429 struct snd_card *card = file->card;
430 struct snd_kcontrol *kctl; 430 struct snd_kcontrol *kctl;
@@ -433,15 +433,23 @@ static int snd_ctl_remove_unlocked_id(struct snd_ctl_file * file,
433 down_write(&card->controls_rwsem); 433 down_write(&card->controls_rwsem);
434 kctl = snd_ctl_find_id(card, id); 434 kctl = snd_ctl_find_id(card, id);
435 if (kctl == NULL) { 435 if (kctl == NULL) {
436 up_write(&card->controls_rwsem); 436 ret = -ENOENT;
437 return -ENOENT; 437 goto error;
438 }
439 if (!(kctl->vd[0].access & SNDRV_CTL_ELEM_ACCESS_USER)) {
440 ret = -EINVAL;
441 goto error;
438 } 442 }
439 for (idx = 0; idx < kctl->count; idx++) 443 for (idx = 0; idx < kctl->count; idx++)
440 if (kctl->vd[idx].owner != NULL && kctl->vd[idx].owner != file) { 444 if (kctl->vd[idx].owner != NULL && kctl->vd[idx].owner != file) {
441 up_write(&card->controls_rwsem); 445 ret = -EBUSY;
442 return -EBUSY; 446 goto error;
443 } 447 }
444 ret = snd_ctl_remove(card, kctl); 448 ret = snd_ctl_remove(card, kctl);
449 if (ret < 0)
450 goto error;
451 card->user_ctl_count--;
452error:
445 up_write(&card->controls_rwsem); 453 up_write(&card->controls_rwsem);
446 return ret; 454 return ret;
447} 455}
@@ -951,7 +959,7 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file,
951 959
952 if (card->user_ctl_count >= MAX_USER_CONTROLS) 960 if (card->user_ctl_count >= MAX_USER_CONTROLS)
953 return -ENOMEM; 961 return -ENOMEM;
954 if (info->count > 1024) 962 if (info->count < 1)
955 return -EINVAL; 963 return -EINVAL;
956 access = info->access == 0 ? SNDRV_CTL_ELEM_ACCESS_READWRITE : 964 access = info->access == 0 ? SNDRV_CTL_ELEM_ACCESS_READWRITE :
957 (info->access & (SNDRV_CTL_ELEM_ACCESS_READWRITE| 965 (info->access & (SNDRV_CTL_ELEM_ACCESS_READWRITE|
@@ -1052,18 +1060,10 @@ static int snd_ctl_elem_remove(struct snd_ctl_file *file,
1052 struct snd_ctl_elem_id __user *_id) 1060 struct snd_ctl_elem_id __user *_id)
1053{ 1061{
1054 struct snd_ctl_elem_id id; 1062 struct snd_ctl_elem_id id;
1055 int err;
1056 1063
1057 if (copy_from_user(&id, _id, sizeof(id))) 1064 if (copy_from_user(&id, _id, sizeof(id)))
1058 return -EFAULT; 1065 return -EFAULT;
1059 err = snd_ctl_remove_unlocked_id(file, &id); 1066 return snd_ctl_remove_user_ctl(file, &id);
1060 if (! err) {
1061 struct snd_card *card = file->card;
1062 down_write(&card->controls_rwsem);
1063 card->user_ctl_count--;
1064 up_write(&card->controls_rwsem);
1065 }
1066 return err;
1067} 1067}
1068 1068
1069static int snd_ctl_subscribe_events(struct snd_ctl_file *file, int __user *ptr) 1069static int snd_ctl_subscribe_events(struct snd_ctl_file *file, int __user *ptr)
diff --git a/sound/core/info.c b/sound/core/info.c
index 35df614f6c55..d749a0d394a7 100644
--- a/sound/core/info.c
+++ b/sound/core/info.c
@@ -88,12 +88,10 @@ static int resize_info_buffer(struct snd_info_buffer *buffer,
88 char *nbuf; 88 char *nbuf;
89 89
90 nsize = PAGE_ALIGN(nsize); 90 nsize = PAGE_ALIGN(nsize);
91 nbuf = kmalloc(nsize, GFP_KERNEL); 91 nbuf = krealloc(buffer->buffer, nsize, GFP_KERNEL);
92 if (! nbuf) 92 if (! nbuf)
93 return -ENOMEM; 93 return -ENOMEM;
94 94
95 memcpy(nbuf, buffer->buffer, buffer->len);
96 kfree(buffer->buffer);
97 buffer->buffer = nbuf; 95 buffer->buffer = nbuf;
98 buffer->len = nsize; 96 buffer->len = nsize;
99 return 0; 97 return 0;
@@ -108,7 +106,7 @@ static int resize_info_buffer(struct snd_info_buffer *buffer,
108 * 106 *
109 * Returns the size of output string. 107 * Returns the size of output string.
110 */ 108 */
111int snd_iprintf(struct snd_info_buffer *buffer, char *fmt,...) 109int snd_iprintf(struct snd_info_buffer *buffer, const char *fmt, ...)
112{ 110{
113 va_list args; 111 va_list args;
114 int len, res; 112 int len, res;
@@ -727,7 +725,7 @@ EXPORT_SYMBOL(snd_info_get_line);
727 * Returns the updated pointer of the original string so that 725 * Returns the updated pointer of the original string so that
728 * it can be used for the next call. 726 * it can be used for the next call.
729 */ 727 */
730char *snd_info_get_str(char *dest, char *src, int len) 728const char *snd_info_get_str(char *dest, const char *src, int len)
731{ 729{
732 int c; 730 int c;
733 731
diff --git a/sound/core/init.c b/sound/core/init.c
index d5d40d78c409..ec4a50ce5656 100644
--- a/sound/core/init.c
+++ b/sound/core/init.c
@@ -31,6 +31,14 @@
31#include <sound/control.h> 31#include <sound/control.h>
32#include <sound/info.h> 32#include <sound/info.h>
33 33
34/* monitor files for graceful shutdown (hotplug) */
35struct snd_monitor_file {
36 struct file *file;
37 const struct file_operations *disconnected_f_op;
38 struct list_head shutdown_list; /* still need to shutdown */
39 struct list_head list; /* link of monitor files */
40};
41
34static DEFINE_SPINLOCK(shutdown_lock); 42static DEFINE_SPINLOCK(shutdown_lock);
35static LIST_HEAD(shutdown_files); 43static LIST_HEAD(shutdown_files);
36 44
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index 1b3534d67686..9e92441f9b78 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -199,6 +199,8 @@ int snd_dma_alloc_pages(int type, struct device *device, size_t size,
199 case SNDRV_DMA_TYPE_DEV: 199 case SNDRV_DMA_TYPE_DEV:
200 dmab->area = snd_malloc_dev_pages(device, size, &dmab->addr); 200 dmab->area = snd_malloc_dev_pages(device, size, &dmab->addr);
201 break; 201 break;
202#endif
203#ifdef CONFIG_SND_DMA_SGBUF
202 case SNDRV_DMA_TYPE_DEV_SG: 204 case SNDRV_DMA_TYPE_DEV_SG:
203 snd_malloc_sgbuf_pages(device, size, dmab, NULL); 205 snd_malloc_sgbuf_pages(device, size, dmab, NULL);
204 break; 206 break;
@@ -269,6 +271,8 @@ void snd_dma_free_pages(struct snd_dma_buffer *dmab)
269 case SNDRV_DMA_TYPE_DEV: 271 case SNDRV_DMA_TYPE_DEV:
270 snd_free_dev_pages(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr); 272 snd_free_dev_pages(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
271 break; 273 break;
274#endif
275#ifdef CONFIG_SND_DMA_SGBUF
272 case SNDRV_DMA_TYPE_DEV_SG: 276 case SNDRV_DMA_TYPE_DEV_SG:
273 snd_free_sgbuf_pages(dmab); 277 snd_free_sgbuf_pages(dmab);
274 break; 278 break;
diff --git a/sound/core/misc.c b/sound/core/misc.c
index a9710e0c97af..23a032c6d487 100644
--- a/sound/core/misc.c
+++ b/sound/core/misc.c
@@ -24,6 +24,20 @@
24#include <linux/ioport.h> 24#include <linux/ioport.h>
25#include <sound/core.h> 25#include <sound/core.h>
26 26
27#ifdef CONFIG_SND_DEBUG
28
29#ifdef CONFIG_SND_DEBUG_VERBOSE
30#define DEFAULT_DEBUG_LEVEL 2
31#else
32#define DEFAULT_DEBUG_LEVEL 1
33#endif
34
35static int debug = DEFAULT_DEBUG_LEVEL;
36module_param(debug, int, 0644);
37MODULE_PARM_DESC(debug, "Debug level (0 = disable)");
38
39#endif /* CONFIG_SND_DEBUG */
40
27void release_and_free_resource(struct resource *res) 41void release_and_free_resource(struct resource *res)
28{ 42{
29 if (res) { 43 if (res) {
@@ -35,46 +49,53 @@ void release_and_free_resource(struct resource *res)
35EXPORT_SYMBOL(release_and_free_resource); 49EXPORT_SYMBOL(release_and_free_resource);
36 50
37#ifdef CONFIG_SND_VERBOSE_PRINTK 51#ifdef CONFIG_SND_VERBOSE_PRINTK
38void snd_verbose_printk(const char *file, int line, const char *format, ...) 52/* strip the leading path if the given path is absolute */
53static const char *sanity_file_name(const char *path)
39{ 54{
40 va_list args; 55 if (*path == '/')
41 56 return strrchr(path, '/') + 1;
42 if (format[0] == '<' && format[1] >= '0' && format[1] <= '7' && format[2] == '>') { 57 else
43 char tmp[] = "<0>"; 58 return path;
59}
60
61/* print file and line with a certain printk prefix */
62static int print_snd_pfx(unsigned int level, const char *path, int line,
63 const char *format)
64{
65 const char *file = sanity_file_name(path);
66 char tmp[] = "<0>";
67 const char *pfx = level ? KERN_DEBUG : KERN_DEFAULT;
68 int ret = 0;
69
70 if (format[0] == '<' && format[2] == '>') {
44 tmp[1] = format[1]; 71 tmp[1] = format[1];
45 printk("%sALSA %s:%d: ", tmp, file, line); 72 pfx = tmp;
46 format += 3; 73 ret = 1;
47 } else {
48 printk("ALSA %s:%d: ", file, line);
49 } 74 }
50 va_start(args, format); 75 printk("%sALSA %s:%d: ", pfx, file, line);
51 vprintk(format, args); 76 return ret;
52 va_end(args);
53} 77}
54 78#else
55EXPORT_SYMBOL(snd_verbose_printk); 79#define print_snd_pfx(level, path, line, format) 0
56#endif 80#endif
57 81
58#if defined(CONFIG_SND_DEBUG) && defined(CONFIG_SND_VERBOSE_PRINTK) 82#if defined(CONFIG_SND_DEBUG) || defined(CONFIG_SND_VERBOSE_PRINTK)
59void snd_verbose_printd(const char *file, int line, const char *format, ...) 83void __snd_printk(unsigned int level, const char *path, int line,
84 const char *format, ...)
60{ 85{
61 va_list args; 86 va_list args;
62 87
63 if (format[0] == '<' && format[1] >= '0' && format[1] <= '7' && format[2] == '>') { 88#ifdef CONFIG_SND_DEBUG
64 char tmp[] = "<0>"; 89 if (debug < level)
65 tmp[1] = format[1]; 90 return;
66 printk("%sALSA %s:%d: ", tmp, file, line); 91#endif
67 format += 3;
68 } else {
69 printk(KERN_DEBUG "ALSA %s:%d: ", file, line);
70 }
71 va_start(args, format); 92 va_start(args, format);
93 if (print_snd_pfx(level, path, line, format))
94 format += 3; /* skip the printk level-prefix */
72 vprintk(format, args); 95 vprintk(format, args);
73 va_end(args); 96 va_end(args);
74
75} 97}
76 98EXPORT_SYMBOL_GPL(__snd_printk);
77EXPORT_SYMBOL(snd_verbose_printd);
78#endif 99#endif
79 100
80#ifdef CONFIG_PCI 101#ifdef CONFIG_PCI
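A usage sketch for the "debug" parameter added above (assuming the usual sound/core build where misc.o is linked into snd.ko and CONFIG_SND_DEBUG is enabled): because the parameter is created with mode 0644, the verbosity can presumably be changed at runtime through sysfs, e.g.

    echo 2 > /sys/module/snd/parameters/debug    (show debug-level prints)
    echo 0 > /sys/module/snd/parameters/debug    (suppress them again)

or set once at load time with "modprobe snd debug=1"; ordinary error messages printed at level 0 are unaffected.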
diff --git a/sound/core/oss/mixer_oss.c b/sound/core/oss/mixer_oss.c
index 5dcd8a526970..772423889eb3 100644
--- a/sound/core/oss/mixer_oss.c
+++ b/sound/core/oss/mixer_oss.c
@@ -1154,7 +1154,8 @@ static void snd_mixer_oss_proc_write(struct snd_info_entry *entry,
1154 struct snd_info_buffer *buffer) 1154 struct snd_info_buffer *buffer)
1155{ 1155{
1156 struct snd_mixer_oss *mixer = entry->private_data; 1156 struct snd_mixer_oss *mixer = entry->private_data;
1157 char line[128], str[32], idxstr[16], *cptr; 1157 char line[128], str[32], idxstr[16];
1158 const char *cptr;
1158 int ch, idx; 1159 int ch, idx;
1159 struct snd_mixer_oss_assign_table *tbl; 1160 struct snd_mixer_oss_assign_table *tbl;
1160 struct slot *slot; 1161 struct slot *slot;
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index dbe406b82591..d9c96353121a 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -1043,10 +1043,15 @@ static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream)
1043 runtime->oss.channels = params_channels(params); 1043 runtime->oss.channels = params_channels(params);
1044 runtime->oss.rate = params_rate(params); 1044 runtime->oss.rate = params_rate(params);
1045 1045
1046 runtime->oss.params = 0;
1047 runtime->oss.prepare = 1;
1048 vfree(runtime->oss.buffer); 1046 vfree(runtime->oss.buffer);
1049 runtime->oss.buffer = vmalloc(runtime->oss.period_bytes); 1047 runtime->oss.buffer = vmalloc(runtime->oss.period_bytes);
1048 if (!runtime->oss.buffer) {
1049 err = -ENOMEM;
1050 goto failure;
1051 }
1052
1053 runtime->oss.params = 0;
1054 runtime->oss.prepare = 1;
1050 runtime->oss.buffer_used = 0; 1055 runtime->oss.buffer_used = 0;
1051 if (runtime->dma_area) 1056 if (runtime->dma_area)
1052 snd_pcm_format_set_silence(runtime->format, runtime->dma_area, bytes_to_samples(runtime, runtime->dma_bytes)); 1057 snd_pcm_format_set_silence(runtime->format, runtime->dma_area, bytes_to_samples(runtime, runtime->dma_bytes));
@@ -2836,7 +2841,8 @@ static void snd_pcm_oss_proc_write(struct snd_info_entry *entry,
2836 struct snd_info_buffer *buffer) 2841 struct snd_info_buffer *buffer)
2837{ 2842{
2838 struct snd_pcm_str *pstr = entry->private_data; 2843 struct snd_pcm_str *pstr = entry->private_data;
2839 char line[128], str[32], task_name[32], *ptr; 2844 char line[128], str[32], task_name[32];
2845 const char *ptr;
2840 int idx1; 2846 int idx1;
2841 struct snd_pcm_oss_setup *setup, *setup1, template; 2847 struct snd_pcm_oss_setup *setup, *setup1, template;
2842 2848
diff --git a/sound/core/pcm.c b/sound/core/pcm.c
index 145931a9ff30..0c1440121c22 100644
--- a/sound/core/pcm.c
+++ b/sound/core/pcm.c
@@ -162,18 +162,7 @@ static int snd_pcm_control_ioctl(struct snd_card *card,
162 return -ENOIOCTLCMD; 162 return -ENOIOCTLCMD;
163} 163}
164 164
165#ifdef CONFIG_SND_VERBOSE_PROCFS
166
167#define STATE(v) [SNDRV_PCM_STATE_##v] = #v
168#define STREAM(v) [SNDRV_PCM_STREAM_##v] = #v
169#define READY(v) [SNDRV_PCM_READY_##v] = #v
170#define XRUN(v) [SNDRV_PCM_XRUN_##v] = #v
171#define SILENCE(v) [SNDRV_PCM_SILENCE_##v] = #v
172#define TSTAMP(v) [SNDRV_PCM_TSTAMP_##v] = #v
173#define ACCESS(v) [SNDRV_PCM_ACCESS_##v] = #v
174#define START(v) [SNDRV_PCM_START_##v] = #v
175#define FORMAT(v) [SNDRV_PCM_FORMAT_##v] = #v 165#define FORMAT(v) [SNDRV_PCM_FORMAT_##v] = #v
176#define SUBFORMAT(v) [SNDRV_PCM_SUBFORMAT_##v] = #v
177 166
178static char *snd_pcm_format_names[] = { 167static char *snd_pcm_format_names[] = {
179 FORMAT(S8), 168 FORMAT(S8),
@@ -216,10 +205,23 @@ static char *snd_pcm_format_names[] = {
216 FORMAT(U18_3BE), 205 FORMAT(U18_3BE),
217}; 206};
218 207
219static const char *snd_pcm_format_name(snd_pcm_format_t format) 208const char *snd_pcm_format_name(snd_pcm_format_t format)
220{ 209{
221 return snd_pcm_format_names[format]; 210 return snd_pcm_format_names[format];
222} 211}
212EXPORT_SYMBOL_GPL(snd_pcm_format_name);
213
214#ifdef CONFIG_SND_VERBOSE_PROCFS
215
216#define STATE(v) [SNDRV_PCM_STATE_##v] = #v
217#define STREAM(v) [SNDRV_PCM_STREAM_##v] = #v
218#define READY(v) [SNDRV_PCM_READY_##v] = #v
219#define XRUN(v) [SNDRV_PCM_XRUN_##v] = #v
220#define SILENCE(v) [SNDRV_PCM_SILENCE_##v] = #v
221#define TSTAMP(v) [SNDRV_PCM_TSTAMP_##v] = #v
222#define ACCESS(v) [SNDRV_PCM_ACCESS_##v] = #v
223#define START(v) [SNDRV_PCM_START_##v] = #v
224#define SUBFORMAT(v) [SNDRV_PCM_SUBFORMAT_##v] = #v
223 225
224static char *snd_pcm_stream_names[] = { 226static char *snd_pcm_stream_names[] = {
225 STREAM(PLAYBACK), 227 STREAM(PLAYBACK),
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index 9db60d831bb2..30f410832a25 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -197,12 +197,16 @@ static int snd_pcm_update_hw_ptr_post(struct snd_pcm_substream *substream,
197 avail = snd_pcm_capture_avail(runtime); 197 avail = snd_pcm_capture_avail(runtime);
198 if (avail > runtime->avail_max) 198 if (avail > runtime->avail_max)
199 runtime->avail_max = avail; 199 runtime->avail_max = avail;
200 if (avail >= runtime->stop_threshold) { 200 if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
201 if (substream->runtime->status->state == SNDRV_PCM_STATE_DRAINING) 201 if (avail >= runtime->buffer_size) {
202 snd_pcm_drain_done(substream); 202 snd_pcm_drain_done(substream);
203 else 203 return -EPIPE;
204 }
205 } else {
206 if (avail >= runtime->stop_threshold) {
204 xrun(substream); 207 xrun(substream);
205 return -EPIPE; 208 return -EPIPE;
209 }
206 } 210 }
207 if (avail >= runtime->control->avail_min) 211 if (avail >= runtime->control->avail_min)
208 wake_up(&runtime->sleep); 212 wake_up(&runtime->sleep);
diff --git a/sound/core/pcm_memory.c b/sound/core/pcm_memory.c
index a6d42808828c..caa7796bc2f5 100644
--- a/sound/core/pcm_memory.c
+++ b/sound/core/pcm_memory.c
@@ -304,6 +304,7 @@ int snd_pcm_lib_preallocate_pages_for_all(struct snd_pcm *pcm,
304 304
305EXPORT_SYMBOL(snd_pcm_lib_preallocate_pages_for_all); 305EXPORT_SYMBOL(snd_pcm_lib_preallocate_pages_for_all);
306 306
307#ifdef CONFIG_SND_DMA_SGBUF
307/** 308/**
308 * snd_pcm_sgbuf_ops_page - get the page struct at the given offset 309 * snd_pcm_sgbuf_ops_page - get the page struct at the given offset
309 * @substream: the pcm substream instance 310 * @substream: the pcm substream instance
@@ -349,6 +350,7 @@ unsigned int snd_pcm_sgbuf_get_chunk_size(struct snd_pcm_substream *substream,
349 return size; 350 return size;
350} 351}
351EXPORT_SYMBOL(snd_pcm_sgbuf_get_chunk_size); 352EXPORT_SYMBOL(snd_pcm_sgbuf_get_chunk_size);
353#endif /* CONFIG_SND_DMA_SGBUF */
352 354
353/** 355/**
354 * snd_pcm_lib_malloc_pages - allocate the DMA buffer 356 * snd_pcm_lib_malloc_pages - allocate the DMA buffer
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index ac2150e0670d..59e5fbe6af51 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -1343,8 +1343,6 @@ static int snd_pcm_prepare(struct snd_pcm_substream *substream,
1343 1343
1344static int snd_pcm_pre_drain_init(struct snd_pcm_substream *substream, int state) 1344static int snd_pcm_pre_drain_init(struct snd_pcm_substream *substream, int state)
1345{ 1345{
1346 if (substream->f_flags & O_NONBLOCK)
1347 return -EAGAIN;
1348 substream->runtime->trigger_master = substream; 1346 substream->runtime->trigger_master = substream;
1349 return 0; 1347 return 0;
1350} 1348}
@@ -1392,7 +1390,6 @@ static struct action_ops snd_pcm_action_drain_init = {
1392struct drain_rec { 1390struct drain_rec {
1393 struct snd_pcm_substream *substream; 1391 struct snd_pcm_substream *substream;
1394 wait_queue_t wait; 1392 wait_queue_t wait;
1395 snd_pcm_uframes_t stop_threshold;
1396}; 1393};
1397 1394
1398static int snd_pcm_drop(struct snd_pcm_substream *substream); 1395static int snd_pcm_drop(struct snd_pcm_substream *substream);
@@ -1404,13 +1401,15 @@ static int snd_pcm_drop(struct snd_pcm_substream *substream);
1404 * After this call, all streams are supposed to be either SETUP or DRAINING 1401 * After this call, all streams are supposed to be either SETUP or DRAINING
1405 * (capture only) state. 1402 * (capture only) state.
1406 */ 1403 */
1407static int snd_pcm_drain(struct snd_pcm_substream *substream) 1404static int snd_pcm_drain(struct snd_pcm_substream *substream,
1405 struct file *file)
1408{ 1406{
1409 struct snd_card *card; 1407 struct snd_card *card;
1410 struct snd_pcm_runtime *runtime; 1408 struct snd_pcm_runtime *runtime;
1411 struct snd_pcm_substream *s; 1409 struct snd_pcm_substream *s;
1412 int result = 0; 1410 int result = 0;
1413 int i, num_drecs; 1411 int i, num_drecs;
1412 int nonblock = 0;
1414 struct drain_rec *drec, drec_tmp, *d; 1413 struct drain_rec *drec, drec_tmp, *d;
1415 1414
1416 card = substream->pcm->card; 1415 card = substream->pcm->card;
@@ -1428,6 +1427,15 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream)
1428 } 1427 }
1429 } 1428 }
1430 1429
1430 if (file) {
1431 if (file->f_flags & O_NONBLOCK)
1432 nonblock = 1;
1433 } else if (substream->f_flags & O_NONBLOCK)
1434 nonblock = 1;
1435
1436 if (nonblock)
1437 goto lock; /* no need to allocate waitqueues */
1438
1431 /* allocate temporary record for drain sync */ 1439 /* allocate temporary record for drain sync */
1432 down_read(&snd_pcm_link_rwsem); 1440 down_read(&snd_pcm_link_rwsem);
1433 if (snd_pcm_stream_linked(substream)) { 1441 if (snd_pcm_stream_linked(substream)) {
@@ -1449,16 +1457,11 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream)
1449 d->substream = s; 1457 d->substream = s;
1450 init_waitqueue_entry(&d->wait, current); 1458 init_waitqueue_entry(&d->wait, current);
1451 add_wait_queue(&runtime->sleep, &d->wait); 1459 add_wait_queue(&runtime->sleep, &d->wait);
1452 /* stop_threshold fixup to avoid endless loop when
1453 * stop_threshold > buffer_size
1454 */
1455 d->stop_threshold = runtime->stop_threshold;
1456 if (runtime->stop_threshold > runtime->buffer_size)
1457 runtime->stop_threshold = runtime->buffer_size;
1458 } 1460 }
1459 } 1461 }
1460 up_read(&snd_pcm_link_rwsem); 1462 up_read(&snd_pcm_link_rwsem);
1461 1463
1464 lock:
1462 snd_pcm_stream_lock_irq(substream); 1465 snd_pcm_stream_lock_irq(substream);
1463 /* resume pause */ 1466 /* resume pause */
1464 if (substream->runtime->status->state == SNDRV_PCM_STATE_PAUSED) 1467 if (substream->runtime->status->state == SNDRV_PCM_STATE_PAUSED)
@@ -1466,9 +1469,12 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream)
1466 1469
1467 /* pre-start/stop - all running streams are changed to DRAINING state */ 1470 /* pre-start/stop - all running streams are changed to DRAINING state */
1468 result = snd_pcm_action(&snd_pcm_action_drain_init, substream, 0); 1471 result = snd_pcm_action(&snd_pcm_action_drain_init, substream, 0);
1469 if (result < 0) { 1472 if (result < 0)
1470 snd_pcm_stream_unlock_irq(substream); 1473 goto unlock;
1471 goto _error; 1474 /* in non-blocking, we don't wait in ioctl but let caller poll */
1475 if (nonblock) {
1476 result = -EAGAIN;
1477 goto unlock;
1472 } 1478 }
1473 1479
1474 for (;;) { 1480 for (;;) {
@@ -1504,18 +1510,18 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream)
1504 } 1510 }
1505 } 1511 }
1506 1512
1513 unlock:
1507 snd_pcm_stream_unlock_irq(substream); 1514 snd_pcm_stream_unlock_irq(substream);
1508 1515
1509 _error: 1516 if (!nonblock) {
1510 for (i = 0; i < num_drecs; i++) { 1517 for (i = 0; i < num_drecs; i++) {
1511 d = &drec[i]; 1518 d = &drec[i];
1512 runtime = d->substream->runtime; 1519 runtime = d->substream->runtime;
1513 remove_wait_queue(&runtime->sleep, &d->wait); 1520 remove_wait_queue(&runtime->sleep, &d->wait);
1514 runtime->stop_threshold = d->stop_threshold; 1521 }
1522 if (drec != &drec_tmp)
1523 kfree(drec);
1515 } 1524 }
1516
1517 if (drec != &drec_tmp)
1518 kfree(drec);
1519 snd_power_unlock(card); 1525 snd_power_unlock(card);
1520 1526
1521 return result; 1527 return result;
@@ -2208,6 +2214,9 @@ static snd_pcm_sframes_t snd_pcm_playback_rewind(struct snd_pcm_substream *subst
2208 case SNDRV_PCM_STATE_XRUN: 2214 case SNDRV_PCM_STATE_XRUN:
2209 ret = -EPIPE; 2215 ret = -EPIPE;
2210 goto __end; 2216 goto __end;
2217 case SNDRV_PCM_STATE_SUSPENDED:
2218 ret = -ESTRPIPE;
2219 goto __end;
2211 default: 2220 default:
2212 ret = -EBADFD; 2221 ret = -EBADFD;
2213 goto __end; 2222 goto __end;
@@ -2253,6 +2262,9 @@ static snd_pcm_sframes_t snd_pcm_capture_rewind(struct snd_pcm_substream *substr
2253 case SNDRV_PCM_STATE_XRUN: 2262 case SNDRV_PCM_STATE_XRUN:
2254 ret = -EPIPE; 2263 ret = -EPIPE;
2255 goto __end; 2264 goto __end;
2265 case SNDRV_PCM_STATE_SUSPENDED:
2266 ret = -ESTRPIPE;
2267 goto __end;
2256 default: 2268 default:
2257 ret = -EBADFD; 2269 ret = -EBADFD;
2258 goto __end; 2270 goto __end;
@@ -2299,6 +2311,9 @@ static snd_pcm_sframes_t snd_pcm_playback_forward(struct snd_pcm_substream *subs
2299 case SNDRV_PCM_STATE_XRUN: 2311 case SNDRV_PCM_STATE_XRUN:
2300 ret = -EPIPE; 2312 ret = -EPIPE;
2301 goto __end; 2313 goto __end;
2314 case SNDRV_PCM_STATE_SUSPENDED:
2315 ret = -ESTRPIPE;
2316 goto __end;
2302 default: 2317 default:
2303 ret = -EBADFD; 2318 ret = -EBADFD;
2304 goto __end; 2319 goto __end;
@@ -2345,6 +2360,9 @@ static snd_pcm_sframes_t snd_pcm_capture_forward(struct snd_pcm_substream *subst
2345 case SNDRV_PCM_STATE_XRUN: 2360 case SNDRV_PCM_STATE_XRUN:
2346 ret = -EPIPE; 2361 ret = -EPIPE;
2347 goto __end; 2362 goto __end;
2363 case SNDRV_PCM_STATE_SUSPENDED:
2364 ret = -ESTRPIPE;
2365 goto __end;
2348 default: 2366 default:
2349 ret = -EBADFD; 2367 ret = -EBADFD;
2350 goto __end; 2368 goto __end;
@@ -2544,7 +2562,7 @@ static int snd_pcm_common_ioctl1(struct file *file,
2544 return snd_pcm_hw_params_old_user(substream, arg); 2562 return snd_pcm_hw_params_old_user(substream, arg);
2545#endif 2563#endif
2546 case SNDRV_PCM_IOCTL_DRAIN: 2564 case SNDRV_PCM_IOCTL_DRAIN:
2547 return snd_pcm_drain(substream); 2565 return snd_pcm_drain(substream, file);
2548 case SNDRV_PCM_IOCTL_DROP: 2566 case SNDRV_PCM_IOCTL_DROP:
2549 return snd_pcm_drop(substream); 2567 return snd_pcm_drop(substream);
2550 case SNDRV_PCM_IOCTL_PAUSE: 2568 case SNDRV_PCM_IOCTL_PAUSE:
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index 473247c8e6d3..c0adc14c91f0 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -274,7 +274,7 @@ static int open_substream(struct snd_rawmidi *rmidi,
274 return err; 274 return err;
275 substream->opened = 1; 275 substream->opened = 1;
276 if (substream->use_count++ == 0) 276 if (substream->use_count++ == 0)
277 substream->active_sensing = 1; 277 substream->active_sensing = 0;
278 if (mode & SNDRV_RAWMIDI_LFLG_APPEND) 278 if (mode & SNDRV_RAWMIDI_LFLG_APPEND)
279 substream->append = 1; 279 substream->append = 1;
280 rmidi->streams[substream->stream].substream_opened++; 280 rmidi->streams[substream->stream].substream_opened++;
diff --git a/sound/core/seq/oss/seq_oss_midi.c b/sound/core/seq/oss/seq_oss_midi.c
index 0a711d2d04f0..9dfb2f77be60 100644
--- a/sound/core/seq/oss/seq_oss_midi.c
+++ b/sound/core/seq/oss/seq_oss_midi.c
@@ -20,6 +20,7 @@
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */ 21 */
22 22
23#include <sound/asoundef.h>
23#include "seq_oss_midi.h" 24#include "seq_oss_midi.h"
24#include "seq_oss_readq.h" 25#include "seq_oss_readq.h"
25#include "seq_oss_timer.h" 26#include "seq_oss_timer.h"
@@ -476,19 +477,20 @@ snd_seq_oss_midi_reset(struct seq_oss_devinfo *dp, int dev)
476 ev.source.port = dp->port; 477 ev.source.port = dp->port;
477 if (dp->seq_mode == SNDRV_SEQ_OSS_MODE_SYNTH) { 478 if (dp->seq_mode == SNDRV_SEQ_OSS_MODE_SYNTH) {
478 ev.type = SNDRV_SEQ_EVENT_SENSING; 479 ev.type = SNDRV_SEQ_EVENT_SENSING;
479 snd_seq_oss_dispatch(dp, &ev, 0, 0); /* active sensing */ 480 snd_seq_oss_dispatch(dp, &ev, 0, 0);
480 } 481 }
481 for (c = 0; c < 16; c++) { 482 for (c = 0; c < 16; c++) {
482 ev.type = SNDRV_SEQ_EVENT_CONTROLLER; 483 ev.type = SNDRV_SEQ_EVENT_CONTROLLER;
483 ev.data.control.channel = c; 484 ev.data.control.channel = c;
484 ev.data.control.param = 123; 485 ev.data.control.param = MIDI_CTL_ALL_NOTES_OFF;
485 snd_seq_oss_dispatch(dp, &ev, 0, 0); /* all notes off */ 486 snd_seq_oss_dispatch(dp, &ev, 0, 0);
486 if (dp->seq_mode == SNDRV_SEQ_OSS_MODE_MUSIC) { 487 if (dp->seq_mode == SNDRV_SEQ_OSS_MODE_MUSIC) {
487 ev.data.control.param = 121; 488 ev.data.control.param =
488 snd_seq_oss_dispatch(dp, &ev, 0, 0); /* reset all controllers */ 489 MIDI_CTL_RESET_CONTROLLERS;
490 snd_seq_oss_dispatch(dp, &ev, 0, 0);
489 ev.type = SNDRV_SEQ_EVENT_PITCHBEND; 491 ev.type = SNDRV_SEQ_EVENT_PITCHBEND;
490 ev.data.control.value = 0; 492 ev.data.control.value = 0;
491 snd_seq_oss_dispatch(dp, &ev, 0, 0); /* bender off */ 493 snd_seq_oss_dispatch(dp, &ev, 0, 0);
492 } 494 }
493 } 495 }
494 } 496 }
diff --git a/sound/core/seq/seq_midi.c b/sound/core/seq/seq_midi.c
index 4d26146a62cc..ebaf1b541dcd 100644
--- a/sound/core/seq/seq_midi.c
+++ b/sound/core/seq/seq_midi.c
@@ -120,7 +120,8 @@ static int dump_midi(struct snd_rawmidi_substream *substream, const char *buf, i
120 return -EINVAL; 120 return -EINVAL;
121 runtime = substream->runtime; 121 runtime = substream->runtime;
122 if ((tmp = runtime->avail) < count) { 122 if ((tmp = runtime->avail) < count) {
123 snd_printd("warning, output event was lost (count = %i, available = %i)\n", count, tmp); 123 if (printk_ratelimit())
124 snd_printk(KERN_ERR "MIDI output buffer overrun\n");
124 return -ENOMEM; 125 return -ENOMEM;
125 } 126 }
126 if (snd_rawmidi_kernel_write(substream, buf, count) < count) 127 if (snd_rawmidi_kernel_write(substream, buf, count) < count)
@@ -236,6 +237,7 @@ static int midisynth_use(void *private_data, struct snd_seq_port_subscribe *info
236 memset(&params, 0, sizeof(params)); 237 memset(&params, 0, sizeof(params));
237 params.avail_min = 1; 238 params.avail_min = 1;
238 params.buffer_size = output_buffer_size; 239 params.buffer_size = output_buffer_size;
240 params.no_active_sensing = 1;
239 if ((err = snd_rawmidi_output_params(msynth->output_rfile.output, &params)) < 0) { 241 if ((err = snd_rawmidi_output_params(msynth->output_rfile.output, &params)) < 0) {
240 snd_rawmidi_kernel_release(&msynth->output_rfile); 242 snd_rawmidi_kernel_release(&msynth->output_rfile);
241 return err; 243 return err;
@@ -248,12 +250,9 @@ static int midisynth_use(void *private_data, struct snd_seq_port_subscribe *info
248static int midisynth_unuse(void *private_data, struct snd_seq_port_subscribe *info) 250static int midisynth_unuse(void *private_data, struct snd_seq_port_subscribe *info)
249{ 251{
250 struct seq_midisynth *msynth = private_data; 252 struct seq_midisynth *msynth = private_data;
251 unsigned char buf = 0xff; /* MIDI reset */
252 253
253 if (snd_BUG_ON(!msynth->output_rfile.output)) 254 if (snd_BUG_ON(!msynth->output_rfile.output))
254 return -EINVAL; 255 return -EINVAL;
255 /* sending single MIDI reset message to shut the device up */
256 snd_rawmidi_kernel_write(msynth->output_rfile.output, &buf, 1);
257 snd_rawmidi_drain_output(msynth->output_rfile.output); 256 snd_rawmidi_drain_output(msynth->output_rfile.output);
258 return snd_rawmidi_kernel_release(&msynth->output_rfile); 257 return snd_rawmidi_kernel_release(&msynth->output_rfile);
259} 258}
diff --git a/sound/core/vmaster.c b/sound/core/vmaster.c
index 257624bd1997..3b9b550109cb 100644
--- a/sound/core/vmaster.c
+++ b/sound/core/vmaster.c
@@ -353,7 +353,8 @@ static void master_free(struct snd_kcontrol *kcontrol)
353 * 353 *
354 * The optional argument @tlv can be used to specify the TLV information 354 * The optional argument @tlv can be used to specify the TLV information
355 * for dB scale of the master control. It should be a single element 355 * for dB scale of the master control. It should be a single element
 356 * with #SNDRV_CTL_TLVT_DB_SCALE type, and should be the max 0dB. 356 * with #SNDRV_CTL_TLVT_DB_SCALE, #SNDRV_CTL_TLVT_DB_MINMAX or
357 * #SNDRV_CTL_TLVT_DB_MINMAX_MUTE type, and should be the max 0dB.
357 */ 358 */
358struct snd_kcontrol *snd_ctl_make_virtual_master(char *name, 359struct snd_kcontrol *snd_ctl_make_virtual_master(char *name,
359 const unsigned int *tlv) 360 const unsigned int *tlv)
@@ -384,7 +385,10 @@ struct snd_kcontrol *snd_ctl_make_virtual_master(char *name,
384 kctl->private_free = master_free; 385 kctl->private_free = master_free;
385 386
386 /* additional (constant) TLV read */ 387 /* additional (constant) TLV read */
387 if (tlv && tlv[0] == SNDRV_CTL_TLVT_DB_SCALE) { 388 if (tlv &&
389 (tlv[0] == SNDRV_CTL_TLVT_DB_SCALE ||
390 tlv[0] == SNDRV_CTL_TLVT_DB_MINMAX ||
391 tlv[0] == SNDRV_CTL_TLVT_DB_MINMAX_MUTE)) {
388 kctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_TLV_READ; 392 kctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_TLV_READ;
389 memcpy(master->tlv, tlv, sizeof(master->tlv)); 393 memcpy(master->tlv, tlv, sizeof(master->tlv));
390 kctl->tlv.p = master->tlv; 394 kctl->tlv.p = master->tlv;
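To illustrate the widened TLV acceptance above, a minimal, hypothetical driver fragment (the function and control names are made up; the TLV macro and control helpers are the standard ALSA ones) that hands snd_ctl_make_virtual_master() the kind of single-element dB scale the comment asks for, with 0dB as the maximum:

    #include <sound/control.h>
    #include <sound/tlv.h>

    /* -60.00 dB .. 0 dB in 1 dB steps, no mute bit */
    static const DECLARE_TLV_DB_SCALE(example_master_tlv, -6000, 100, 0);

    static int example_add_vmaster(struct snd_card *card)
    {
            struct snd_kcontrol *vmaster;

            vmaster = snd_ctl_make_virtual_master("Master Playback Volume",
                                                  example_master_tlv);
            if (!vmaster)
                    return -ENOMEM;
            /* slave controls would be attached here with snd_ctl_add_slave()
             * before the card is registered */
            return snd_ctl_add(card, vmaster);
    }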
diff --git a/sound/drivers/dummy.c b/sound/drivers/dummy.c
index 54239d2e0997..6ba066c41d2e 100644
--- a/sound/drivers/dummy.c
+++ b/sound/drivers/dummy.c
@@ -25,12 +25,15 @@
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/time.h> 26#include <linux/time.h>
27#include <linux/wait.h> 27#include <linux/wait.h>
28#include <linux/hrtimer.h>
29#include <linux/math64.h>
28#include <linux/moduleparam.h> 30#include <linux/moduleparam.h>
29#include <sound/core.h> 31#include <sound/core.h>
30#include <sound/control.h> 32#include <sound/control.h>
31#include <sound/tlv.h> 33#include <sound/tlv.h>
32#include <sound/pcm.h> 34#include <sound/pcm.h>
33#include <sound/rawmidi.h> 35#include <sound/rawmidi.h>
36#include <sound/info.h>
34#include <sound/initval.h> 37#include <sound/initval.h>
35 38
36MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); 39MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
@@ -39,7 +42,7 @@ MODULE_LICENSE("GPL");
39MODULE_SUPPORTED_DEVICE("{{ALSA,Dummy soundcard}}"); 42MODULE_SUPPORTED_DEVICE("{{ALSA,Dummy soundcard}}");
40 43
41#define MAX_PCM_DEVICES 4 44#define MAX_PCM_DEVICES 4
42#define MAX_PCM_SUBSTREAMS 16 45#define MAX_PCM_SUBSTREAMS 128
43#define MAX_MIDI_DEVICES 2 46#define MAX_MIDI_DEVICES 2
44 47
45#if 0 /* emu10k1 emulation */ 48#if 0 /* emu10k1 emulation */
@@ -148,6 +151,10 @@ static int enable[SNDRV_CARDS] = {1, [1 ... (SNDRV_CARDS - 1)] = 0};
148static int pcm_devs[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 1}; 151static int pcm_devs[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 1};
149static int pcm_substreams[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 8}; 152static int pcm_substreams[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 8};
150//static int midi_devs[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 2}; 153//static int midi_devs[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 2};
154#ifdef CONFIG_HIGH_RES_TIMERS
155static int hrtimer = 1;
156#endif
157static int fake_buffer = 1;
151 158
152module_param_array(index, int, NULL, 0444); 159module_param_array(index, int, NULL, 0444);
153MODULE_PARM_DESC(index, "Index value for dummy soundcard."); 160MODULE_PARM_DESC(index, "Index value for dummy soundcard.");
@@ -161,6 +168,12 @@ module_param_array(pcm_substreams, int, NULL, 0444);
161MODULE_PARM_DESC(pcm_substreams, "PCM substreams # (1-16) for dummy driver."); 168MODULE_PARM_DESC(pcm_substreams, "PCM substreams # (1-16) for dummy driver.");
162//module_param_array(midi_devs, int, NULL, 0444); 169//module_param_array(midi_devs, int, NULL, 0444);
163//MODULE_PARM_DESC(midi_devs, "MIDI devices # (0-2) for dummy driver."); 170//MODULE_PARM_DESC(midi_devs, "MIDI devices # (0-2) for dummy driver.");
171module_param(fake_buffer, bool, 0444);
172MODULE_PARM_DESC(fake_buffer, "Fake buffer allocations.");
173#ifdef CONFIG_HIGH_RES_TIMERS
174module_param(hrtimer, bool, 0644);
175MODULE_PARM_DESC(hrtimer, "Use hrtimer as the timer source.");
176#endif
164 177
165static struct platform_device *devices[SNDRV_CARDS]; 178static struct platform_device *devices[SNDRV_CARDS];
166 179
@@ -171,137 +184,324 @@ static struct platform_device *devices[SNDRV_CARDS];
171#define MIXER_ADDR_CD 4 184#define MIXER_ADDR_CD 4
172#define MIXER_ADDR_LAST 4 185#define MIXER_ADDR_LAST 4
173 186
187struct dummy_timer_ops {
188 int (*create)(struct snd_pcm_substream *);
189 void (*free)(struct snd_pcm_substream *);
190 int (*prepare)(struct snd_pcm_substream *);
191 int (*start)(struct snd_pcm_substream *);
192 int (*stop)(struct snd_pcm_substream *);
193 snd_pcm_uframes_t (*pointer)(struct snd_pcm_substream *);
194};
195
174struct snd_dummy { 196struct snd_dummy {
175 struct snd_card *card; 197 struct snd_card *card;
176 struct snd_pcm *pcm; 198 struct snd_pcm *pcm;
177 spinlock_t mixer_lock; 199 spinlock_t mixer_lock;
178 int mixer_volume[MIXER_ADDR_LAST+1][2]; 200 int mixer_volume[MIXER_ADDR_LAST+1][2];
179 int capture_source[MIXER_ADDR_LAST+1][2]; 201 int capture_source[MIXER_ADDR_LAST+1][2];
202 const struct dummy_timer_ops *timer_ops;
180}; 203};
181 204
182struct snd_dummy_pcm { 205/*
183 struct snd_dummy *dummy; 206 * system timer interface
207 */
208
209struct dummy_systimer_pcm {
184 spinlock_t lock; 210 spinlock_t lock;
185 struct timer_list timer; 211 struct timer_list timer;
186 unsigned int pcm_buffer_size; 212 unsigned long base_time;
187 unsigned int pcm_period_size; 213 unsigned int frac_pos; /* fractional sample position (based HZ) */
188 unsigned int pcm_bps; /* bytes per second */ 214 unsigned int frac_period_rest;
189 unsigned int pcm_hz; /* HZ */ 215 unsigned int frac_buffer_size; /* buffer_size * HZ */
190 unsigned int pcm_irq_pos; /* IRQ position */ 216 unsigned int frac_period_size; /* period_size * HZ */
191 unsigned int pcm_buf_pos; /* position in buffer */ 217 unsigned int rate;
218 int elapsed;
192 struct snd_pcm_substream *substream; 219 struct snd_pcm_substream *substream;
193}; 220};
194 221
195 222static void dummy_systimer_rearm(struct dummy_systimer_pcm *dpcm)
196static inline void snd_card_dummy_pcm_timer_start(struct snd_dummy_pcm *dpcm)
197{ 223{
198 dpcm->timer.expires = 1 + jiffies; 224 dpcm->timer.expires = jiffies +
225 (dpcm->frac_period_rest + dpcm->rate - 1) / dpcm->rate;
199 add_timer(&dpcm->timer); 226 add_timer(&dpcm->timer);
200} 227}
201 228
202static inline void snd_card_dummy_pcm_timer_stop(struct snd_dummy_pcm *dpcm) 229static void dummy_systimer_update(struct dummy_systimer_pcm *dpcm)
203{ 230{
204 del_timer(&dpcm->timer); 231 unsigned long delta;
232
233 delta = jiffies - dpcm->base_time;
234 if (!delta)
235 return;
236 dpcm->base_time += delta;
237 delta *= dpcm->rate;
238 dpcm->frac_pos += delta;
239 while (dpcm->frac_pos >= dpcm->frac_buffer_size)
240 dpcm->frac_pos -= dpcm->frac_buffer_size;
241 while (dpcm->frac_period_rest <= delta) {
242 dpcm->elapsed++;
243 dpcm->frac_period_rest += dpcm->frac_period_size;
244 }
245 dpcm->frac_period_rest -= delta;
205} 246}
206 247
207static int snd_card_dummy_pcm_trigger(struct snd_pcm_substream *substream, int cmd) 248static int dummy_systimer_start(struct snd_pcm_substream *substream)
208{ 249{
209 struct snd_pcm_runtime *runtime = substream->runtime; 250 struct dummy_systimer_pcm *dpcm = substream->runtime->private_data;
210 struct snd_dummy_pcm *dpcm = runtime->private_data; 251 spin_lock(&dpcm->lock);
211 int err = 0; 252 dpcm->base_time = jiffies;
253 dummy_systimer_rearm(dpcm);
254 spin_unlock(&dpcm->lock);
255 return 0;
256}
212 257
258static int dummy_systimer_stop(struct snd_pcm_substream *substream)
259{
260 struct dummy_systimer_pcm *dpcm = substream->runtime->private_data;
213 spin_lock(&dpcm->lock); 261 spin_lock(&dpcm->lock);
214 switch (cmd) { 262 del_timer(&dpcm->timer);
215 case SNDRV_PCM_TRIGGER_START:
216 case SNDRV_PCM_TRIGGER_RESUME:
217 snd_card_dummy_pcm_timer_start(dpcm);
218 break;
219 case SNDRV_PCM_TRIGGER_STOP:
220 case SNDRV_PCM_TRIGGER_SUSPEND:
221 snd_card_dummy_pcm_timer_stop(dpcm);
222 break;
223 default:
224 err = -EINVAL;
225 break;
226 }
227 spin_unlock(&dpcm->lock); 263 spin_unlock(&dpcm->lock);
228 return 0; 264 return 0;
229} 265}
230 266
231static int snd_card_dummy_pcm_prepare(struct snd_pcm_substream *substream) 267static int dummy_systimer_prepare(struct snd_pcm_substream *substream)
232{ 268{
233 struct snd_pcm_runtime *runtime = substream->runtime; 269 struct snd_pcm_runtime *runtime = substream->runtime;
234 struct snd_dummy_pcm *dpcm = runtime->private_data; 270 struct dummy_systimer_pcm *dpcm = runtime->private_data;
235 int bps;
236
237 bps = snd_pcm_format_width(runtime->format) * runtime->rate *
238 runtime->channels / 8;
239
240 if (bps <= 0)
241 return -EINVAL;
242
243 dpcm->pcm_bps = bps;
244 dpcm->pcm_hz = HZ;
245 dpcm->pcm_buffer_size = snd_pcm_lib_buffer_bytes(substream);
246 dpcm->pcm_period_size = snd_pcm_lib_period_bytes(substream);
247 dpcm->pcm_irq_pos = 0;
248 dpcm->pcm_buf_pos = 0;
249 271
250 snd_pcm_format_set_silence(runtime->format, runtime->dma_area, 272 dpcm->frac_pos = 0;
251 bytes_to_samples(runtime, runtime->dma_bytes)); 273 dpcm->rate = runtime->rate;
274 dpcm->frac_buffer_size = runtime->buffer_size * HZ;
275 dpcm->frac_period_size = runtime->period_size * HZ;
276 dpcm->frac_period_rest = dpcm->frac_period_size;
277 dpcm->elapsed = 0;
252 278
253 return 0; 279 return 0;
254} 280}
255 281
256static void snd_card_dummy_pcm_timer_function(unsigned long data) 282static void dummy_systimer_callback(unsigned long data)
257{ 283{
258 struct snd_dummy_pcm *dpcm = (struct snd_dummy_pcm *)data; 284 struct dummy_systimer_pcm *dpcm = (struct dummy_systimer_pcm *)data;
259 unsigned long flags; 285 unsigned long flags;
286 int elapsed = 0;
260 287
261 spin_lock_irqsave(&dpcm->lock, flags); 288 spin_lock_irqsave(&dpcm->lock, flags);
262 dpcm->timer.expires = 1 + jiffies; 289 dummy_systimer_update(dpcm);
263 add_timer(&dpcm->timer); 290 dummy_systimer_rearm(dpcm);
264 dpcm->pcm_irq_pos += dpcm->pcm_bps; 291 elapsed = dpcm->elapsed;
265 dpcm->pcm_buf_pos += dpcm->pcm_bps; 292 dpcm->elapsed = 0;
266 dpcm->pcm_buf_pos %= dpcm->pcm_buffer_size * dpcm->pcm_hz; 293 spin_unlock_irqrestore(&dpcm->lock, flags);
267 if (dpcm->pcm_irq_pos >= dpcm->pcm_period_size * dpcm->pcm_hz) { 294 if (elapsed)
268 dpcm->pcm_irq_pos %= dpcm->pcm_period_size * dpcm->pcm_hz; 295 snd_pcm_period_elapsed(dpcm->substream);
269 spin_unlock_irqrestore(&dpcm->lock, flags); 296}
297
298static snd_pcm_uframes_t
299dummy_systimer_pointer(struct snd_pcm_substream *substream)
300{
301 struct dummy_systimer_pcm *dpcm = substream->runtime->private_data;
302 snd_pcm_uframes_t pos;
303
304 spin_lock(&dpcm->lock);
305 dummy_systimer_update(dpcm);
306 pos = dpcm->frac_pos / HZ;
307 spin_unlock(&dpcm->lock);
308 return pos;
309}
310
311static int dummy_systimer_create(struct snd_pcm_substream *substream)
312{
313 struct dummy_systimer_pcm *dpcm;
314
315 dpcm = kzalloc(sizeof(*dpcm), GFP_KERNEL);
316 if (!dpcm)
317 return -ENOMEM;
318 substream->runtime->private_data = dpcm;
319 init_timer(&dpcm->timer);
320 dpcm->timer.data = (unsigned long) dpcm;
321 dpcm->timer.function = dummy_systimer_callback;
322 spin_lock_init(&dpcm->lock);
323 dpcm->substream = substream;
324 return 0;
325}
326
327static void dummy_systimer_free(struct snd_pcm_substream *substream)
328{
329 kfree(substream->runtime->private_data);
330}
331
332static struct dummy_timer_ops dummy_systimer_ops = {
333 .create = dummy_systimer_create,
334 .free = dummy_systimer_free,
335 .prepare = dummy_systimer_prepare,
336 .start = dummy_systimer_start,
337 .stop = dummy_systimer_stop,
338 .pointer = dummy_systimer_pointer,
339};
340
341#ifdef CONFIG_HIGH_RES_TIMERS
342/*
343 * hrtimer interface
344 */
345
346struct dummy_hrtimer_pcm {
347 ktime_t base_time;
348 ktime_t period_time;
349 atomic_t running;
350 struct hrtimer timer;
351 struct tasklet_struct tasklet;
352 struct snd_pcm_substream *substream;
353};
354
355static void dummy_hrtimer_pcm_elapsed(unsigned long priv)
356{
357 struct dummy_hrtimer_pcm *dpcm = (struct dummy_hrtimer_pcm *)priv;
358 if (atomic_read(&dpcm->running))
270 snd_pcm_period_elapsed(dpcm->substream); 359 snd_pcm_period_elapsed(dpcm->substream);
271 } else
272 spin_unlock_irqrestore(&dpcm->lock, flags);
273} 360}
274 361
275static snd_pcm_uframes_t snd_card_dummy_pcm_pointer(struct snd_pcm_substream *substream) 362static enum hrtimer_restart dummy_hrtimer_callback(struct hrtimer *timer)
363{
364 struct dummy_hrtimer_pcm *dpcm;
365
366 dpcm = container_of(timer, struct dummy_hrtimer_pcm, timer);
367 if (!atomic_read(&dpcm->running))
368 return HRTIMER_NORESTART;
369 tasklet_schedule(&dpcm->tasklet);
370 hrtimer_forward_now(timer, dpcm->period_time);
371 return HRTIMER_RESTART;
372}
373
374static int dummy_hrtimer_start(struct snd_pcm_substream *substream)
375{
376 struct dummy_hrtimer_pcm *dpcm = substream->runtime->private_data;
377
378 dpcm->base_time = hrtimer_cb_get_time(&dpcm->timer);
379 hrtimer_start(&dpcm->timer, dpcm->period_time, HRTIMER_MODE_REL);
380 atomic_set(&dpcm->running, 1);
381 return 0;
382}
383
384static int dummy_hrtimer_stop(struct snd_pcm_substream *substream)
385{
386 struct dummy_hrtimer_pcm *dpcm = substream->runtime->private_data;
387
388 atomic_set(&dpcm->running, 0);
389 hrtimer_cancel(&dpcm->timer);
390 return 0;
391}
392
393static inline void dummy_hrtimer_sync(struct dummy_hrtimer_pcm *dpcm)
394{
395 tasklet_kill(&dpcm->tasklet);
396}
397
398static snd_pcm_uframes_t
399dummy_hrtimer_pointer(struct snd_pcm_substream *substream)
276{ 400{
277 struct snd_pcm_runtime *runtime = substream->runtime; 401 struct snd_pcm_runtime *runtime = substream->runtime;
278 struct snd_dummy_pcm *dpcm = runtime->private_data; 402 struct dummy_hrtimer_pcm *dpcm = runtime->private_data;
403 u64 delta;
404 u32 pos;
405
406 delta = ktime_us_delta(hrtimer_cb_get_time(&dpcm->timer),
407 dpcm->base_time);
408 delta = div_u64(delta * runtime->rate + 999999, 1000000);
409 div_u64_rem(delta, runtime->buffer_size, &pos);
410 return pos;
411}
279 412
280 return bytes_to_frames(runtime, dpcm->pcm_buf_pos / dpcm->pcm_hz); 413static int dummy_hrtimer_prepare(struct snd_pcm_substream *substream)
414{
415 struct snd_pcm_runtime *runtime = substream->runtime;
416 struct dummy_hrtimer_pcm *dpcm = runtime->private_data;
417 unsigned int period, rate;
418 long sec;
419 unsigned long nsecs;
420
421 dummy_hrtimer_sync(dpcm);
422 period = runtime->period_size;
423 rate = runtime->rate;
424 sec = period / rate;
425 period %= rate;
426 nsecs = div_u64((u64)period * 1000000000UL + rate - 1, rate);
427 dpcm->period_time = ktime_set(sec, nsecs);
428
429 return 0;
281} 430}
282 431
283static struct snd_pcm_hardware snd_card_dummy_playback = 432static int dummy_hrtimer_create(struct snd_pcm_substream *substream)
284{ 433{
285 .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | 434 struct dummy_hrtimer_pcm *dpcm;
286 SNDRV_PCM_INFO_RESUME | SNDRV_PCM_INFO_MMAP_VALID), 435
287 .formats = USE_FORMATS, 436 dpcm = kzalloc(sizeof(*dpcm), GFP_KERNEL);
288 .rates = USE_RATE, 437 if (!dpcm)
289 .rate_min = USE_RATE_MIN, 438 return -ENOMEM;
290 .rate_max = USE_RATE_MAX, 439 substream->runtime->private_data = dpcm;
291 .channels_min = USE_CHANNELS_MIN, 440 hrtimer_init(&dpcm->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
292 .channels_max = USE_CHANNELS_MAX, 441 dpcm->timer.function = dummy_hrtimer_callback;
293 .buffer_bytes_max = MAX_BUFFER_SIZE, 442 dpcm->substream = substream;
294 .period_bytes_min = 64, 443 atomic_set(&dpcm->running, 0);
295 .period_bytes_max = MAX_PERIOD_SIZE, 444 tasklet_init(&dpcm->tasklet, dummy_hrtimer_pcm_elapsed,
296 .periods_min = USE_PERIODS_MIN, 445 (unsigned long)dpcm);
297 .periods_max = USE_PERIODS_MAX, 446 return 0;
298 .fifo_size = 0, 447}
448
449static void dummy_hrtimer_free(struct snd_pcm_substream *substream)
450{
451 struct dummy_hrtimer_pcm *dpcm = substream->runtime->private_data;
452 dummy_hrtimer_sync(dpcm);
453 kfree(dpcm);
454}
455
456static struct dummy_timer_ops dummy_hrtimer_ops = {
457 .create = dummy_hrtimer_create,
458 .free = dummy_hrtimer_free,
459 .prepare = dummy_hrtimer_prepare,
460 .start = dummy_hrtimer_start,
461 .stop = dummy_hrtimer_stop,
462 .pointer = dummy_hrtimer_pointer,
299}; 463};
300 464
301static struct snd_pcm_hardware snd_card_dummy_capture = 465#endif /* CONFIG_HIGH_RES_TIMERS */
466
467/*
468 * PCM interface
469 */
470
471static int dummy_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
302{ 472{
303 .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | 473 struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
304 SNDRV_PCM_INFO_RESUME | SNDRV_PCM_INFO_MMAP_VALID), 474
475 switch (cmd) {
476 case SNDRV_PCM_TRIGGER_START:
477 case SNDRV_PCM_TRIGGER_RESUME:
478 return dummy->timer_ops->start(substream);
479 case SNDRV_PCM_TRIGGER_STOP:
480 case SNDRV_PCM_TRIGGER_SUSPEND:
481 return dummy->timer_ops->stop(substream);
482 }
483 return -EINVAL;
484}
485
486static int dummy_pcm_prepare(struct snd_pcm_substream *substream)
487{
488 struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
489
490 return dummy->timer_ops->prepare(substream);
491}
492
493static snd_pcm_uframes_t dummy_pcm_pointer(struct snd_pcm_substream *substream)
494{
495 struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
496
497 return dummy->timer_ops->pointer(substream);
498}
499
500static struct snd_pcm_hardware dummy_pcm_hardware = {
501 .info = (SNDRV_PCM_INFO_MMAP |
502 SNDRV_PCM_INFO_INTERLEAVED |
503 SNDRV_PCM_INFO_RESUME |
504 SNDRV_PCM_INFO_MMAP_VALID),
305 .formats = USE_FORMATS, 505 .formats = USE_FORMATS,
306 .rates = USE_RATE, 506 .rates = USE_RATE,
307 .rate_min = USE_RATE_MIN, 507 .rate_min = USE_RATE_MIN,
@@ -316,123 +516,152 @@ static struct snd_pcm_hardware snd_card_dummy_capture =
316 .fifo_size = 0, 516 .fifo_size = 0,
317}; 517};
318 518
319static void snd_card_dummy_runtime_free(struct snd_pcm_runtime *runtime) 519static int dummy_pcm_hw_params(struct snd_pcm_substream *substream,
320{ 520 struct snd_pcm_hw_params *hw_params)
321 kfree(runtime->private_data);
322}
323
324static int snd_card_dummy_hw_params(struct snd_pcm_substream *substream,
325 struct snd_pcm_hw_params *hw_params)
326{ 521{
327 return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params)); 522 if (fake_buffer) {
523 /* runtime->dma_bytes has to be set manually to allow mmap */
524 substream->runtime->dma_bytes = params_buffer_bytes(hw_params);
525 return 0;
526 }
527 return snd_pcm_lib_malloc_pages(substream,
528 params_buffer_bytes(hw_params));
328} 529}
329 530
330static int snd_card_dummy_hw_free(struct snd_pcm_substream *substream) 531static int dummy_pcm_hw_free(struct snd_pcm_substream *substream)
331{ 532{
533 if (fake_buffer)
534 return 0;
332 return snd_pcm_lib_free_pages(substream); 535 return snd_pcm_lib_free_pages(substream);
333} 536}
334 537
335static struct snd_dummy_pcm *new_pcm_stream(struct snd_pcm_substream *substream) 538static int dummy_pcm_open(struct snd_pcm_substream *substream)
336{
337 struct snd_dummy_pcm *dpcm;
338
339 dpcm = kzalloc(sizeof(*dpcm), GFP_KERNEL);
340 if (! dpcm)
341 return dpcm;
342 init_timer(&dpcm->timer);
343 dpcm->timer.data = (unsigned long) dpcm;
344 dpcm->timer.function = snd_card_dummy_pcm_timer_function;
345 spin_lock_init(&dpcm->lock);
346 dpcm->substream = substream;
347 return dpcm;
348}
349
350static int snd_card_dummy_playback_open(struct snd_pcm_substream *substream)
351{ 539{
540 struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
352 struct snd_pcm_runtime *runtime = substream->runtime; 541 struct snd_pcm_runtime *runtime = substream->runtime;
353 struct snd_dummy_pcm *dpcm;
354 int err; 542 int err;
355 543
356 if ((dpcm = new_pcm_stream(substream)) == NULL) 544 dummy->timer_ops = &dummy_systimer_ops;
357 return -ENOMEM; 545#ifdef CONFIG_HIGH_RES_TIMERS
358 runtime->private_data = dpcm; 546 if (hrtimer)
359 /* makes the infrastructure responsible for freeing dpcm */ 547 dummy->timer_ops = &dummy_hrtimer_ops;
360 runtime->private_free = snd_card_dummy_runtime_free; 548#endif
361 runtime->hw = snd_card_dummy_playback; 549
550 err = dummy->timer_ops->create(substream);
551 if (err < 0)
552 return err;
553
554 runtime->hw = dummy_pcm_hardware;
362 if (substream->pcm->device & 1) { 555 if (substream->pcm->device & 1) {
363 runtime->hw.info &= ~SNDRV_PCM_INFO_INTERLEAVED; 556 runtime->hw.info &= ~SNDRV_PCM_INFO_INTERLEAVED;
364 runtime->hw.info |= SNDRV_PCM_INFO_NONINTERLEAVED; 557 runtime->hw.info |= SNDRV_PCM_INFO_NONINTERLEAVED;
365 } 558 }
366 if (substream->pcm->device & 2) 559 if (substream->pcm->device & 2)
367 runtime->hw.info &= ~(SNDRV_PCM_INFO_MMAP|SNDRV_PCM_INFO_MMAP_VALID); 560 runtime->hw.info &= ~(SNDRV_PCM_INFO_MMAP |
368 err = add_playback_constraints(runtime); 561 SNDRV_PCM_INFO_MMAP_VALID);
369 if (err < 0) 562
563 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
564 err = add_playback_constraints(substream->runtime);
565 else
566 err = add_capture_constraints(substream->runtime);
567 if (err < 0) {
568 dummy->timer_ops->free(substream);
370 return err; 569 return err;
371 570 }
372 return 0; 571 return 0;
373} 572}
374 573
375static int snd_card_dummy_capture_open(struct snd_pcm_substream *substream) 574static int dummy_pcm_close(struct snd_pcm_substream *substream)
376{ 575{
377 struct snd_pcm_runtime *runtime = substream->runtime; 576 struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
378 struct snd_dummy_pcm *dpcm; 577 dummy->timer_ops->free(substream);
379 int err; 578 return 0;
579}
380 580
381 if ((dpcm = new_pcm_stream(substream)) == NULL) 581/*
382 return -ENOMEM; 582 * dummy buffer handling
383 runtime->private_data = dpcm; 583 */
384 /* makes the infrastructure responsible for freeing dpcm */ 584
385 runtime->private_free = snd_card_dummy_runtime_free; 585static void *dummy_page[2];
386 runtime->hw = snd_card_dummy_capture; 586
387 if (substream->pcm->device == 1) { 587static void free_fake_buffer(void)
388 runtime->hw.info &= ~SNDRV_PCM_INFO_INTERLEAVED; 588{
389 runtime->hw.info |= SNDRV_PCM_INFO_NONINTERLEAVED; 589 if (fake_buffer) {
590 int i;
591 for (i = 0; i < 2; i++)
592 if (dummy_page[i]) {
593 free_page((unsigned long)dummy_page[i]);
594 dummy_page[i] = NULL;
595 }
390 } 596 }
391 if (substream->pcm->device & 2) 597}
392 runtime->hw.info &= ~(SNDRV_PCM_INFO_MMAP|SNDRV_PCM_INFO_MMAP_VALID);
393 err = add_capture_constraints(runtime);
394 if (err < 0)
395 return err;
396 598
599static int alloc_fake_buffer(void)
600{
601 int i;
602
603 if (!fake_buffer)
604 return 0;
605 for (i = 0; i < 2; i++) {
606 dummy_page[i] = (void *)get_zeroed_page(GFP_KERNEL);
607 if (!dummy_page[i]) {
608 free_fake_buffer();
609 return -ENOMEM;
610 }
611 }
397 return 0; 612 return 0;
398} 613}
399 614
400static int snd_card_dummy_playback_close(struct snd_pcm_substream *substream) 615static int dummy_pcm_copy(struct snd_pcm_substream *substream,
616 int channel, snd_pcm_uframes_t pos,
617 void __user *dst, snd_pcm_uframes_t count)
401{ 618{
402 return 0; 619 return 0; /* do nothing */
403} 620}
404 621
405static int snd_card_dummy_capture_close(struct snd_pcm_substream *substream) 622static int dummy_pcm_silence(struct snd_pcm_substream *substream,
623 int channel, snd_pcm_uframes_t pos,
624 snd_pcm_uframes_t count)
406{ 625{
407 return 0; 626 return 0; /* do nothing */
627}
628
629static struct page *dummy_pcm_page(struct snd_pcm_substream *substream,
630 unsigned long offset)
631{
632 return virt_to_page(dummy_page[substream->stream]); /* the same page */
408} 633}
409 634
410static struct snd_pcm_ops snd_card_dummy_playback_ops = { 635static struct snd_pcm_ops dummy_pcm_ops = {
411 .open = snd_card_dummy_playback_open, 636 .open = dummy_pcm_open,
412 .close = snd_card_dummy_playback_close, 637 .close = dummy_pcm_close,
413 .ioctl = snd_pcm_lib_ioctl, 638 .ioctl = snd_pcm_lib_ioctl,
414 .hw_params = snd_card_dummy_hw_params, 639 .hw_params = dummy_pcm_hw_params,
415 .hw_free = snd_card_dummy_hw_free, 640 .hw_free = dummy_pcm_hw_free,
416 .prepare = snd_card_dummy_pcm_prepare, 641 .prepare = dummy_pcm_prepare,
417 .trigger = snd_card_dummy_pcm_trigger, 642 .trigger = dummy_pcm_trigger,
418 .pointer = snd_card_dummy_pcm_pointer, 643 .pointer = dummy_pcm_pointer,
419}; 644};
420 645
421static struct snd_pcm_ops snd_card_dummy_capture_ops = { 646static struct snd_pcm_ops dummy_pcm_ops_no_buf = {
422 .open = snd_card_dummy_capture_open, 647 .open = dummy_pcm_open,
423 .close = snd_card_dummy_capture_close, 648 .close = dummy_pcm_close,
424 .ioctl = snd_pcm_lib_ioctl, 649 .ioctl = snd_pcm_lib_ioctl,
425 .hw_params = snd_card_dummy_hw_params, 650 .hw_params = dummy_pcm_hw_params,
426 .hw_free = snd_card_dummy_hw_free, 651 .hw_free = dummy_pcm_hw_free,
427 .prepare = snd_card_dummy_pcm_prepare, 652 .prepare = dummy_pcm_prepare,
428 .trigger = snd_card_dummy_pcm_trigger, 653 .trigger = dummy_pcm_trigger,
429 .pointer = snd_card_dummy_pcm_pointer, 654 .pointer = dummy_pcm_pointer,
655 .copy = dummy_pcm_copy,
656 .silence = dummy_pcm_silence,
657 .page = dummy_pcm_page,
430}; 658};
431 659
432static int __devinit snd_card_dummy_pcm(struct snd_dummy *dummy, int device, 660static int __devinit snd_card_dummy_pcm(struct snd_dummy *dummy, int device,
433 int substreams) 661 int substreams)
434{ 662{
435 struct snd_pcm *pcm; 663 struct snd_pcm *pcm;
664 struct snd_pcm_ops *ops;
436 int err; 665 int err;
437 666
438 err = snd_pcm_new(dummy->card, "Dummy PCM", device, 667 err = snd_pcm_new(dummy->card, "Dummy PCM", device,
@@ -440,17 +669,28 @@ static int __devinit snd_card_dummy_pcm(struct snd_dummy *dummy, int device,
440 if (err < 0) 669 if (err < 0)
441 return err; 670 return err;
442 dummy->pcm = pcm; 671 dummy->pcm = pcm;
443 snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_card_dummy_playback_ops); 672 if (fake_buffer)
444 snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_card_dummy_capture_ops); 673 ops = &dummy_pcm_ops_no_buf;
674 else
675 ops = &dummy_pcm_ops;
676 snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, ops);
677 snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, ops);
445 pcm->private_data = dummy; 678 pcm->private_data = dummy;
446 pcm->info_flags = 0; 679 pcm->info_flags = 0;
447 strcpy(pcm->name, "Dummy PCM"); 680 strcpy(pcm->name, "Dummy PCM");
448 snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS, 681 if (!fake_buffer) {
449 snd_dma_continuous_data(GFP_KERNEL), 682 snd_pcm_lib_preallocate_pages_for_all(pcm,
450 0, 64*1024); 683 SNDRV_DMA_TYPE_CONTINUOUS,
684 snd_dma_continuous_data(GFP_KERNEL),
685 0, 64*1024);
686 }
451 return 0; 687 return 0;
452} 688}
453 689
690/*
691 * mixer interface
692 */
693
454#define DUMMY_VOLUME(xname, xindex, addr) \ 694#define DUMMY_VOLUME(xname, xindex, addr) \
455{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \ 695{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
456 .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ, \ 696 .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ, \
@@ -581,6 +821,131 @@ static int __devinit snd_card_dummy_new_mixer(struct snd_dummy *dummy)
581 return 0; 821 return 0;
582} 822}
583 823
824#if defined(CONFIG_SND_DEBUG) && defined(CONFIG_PROC_FS)
825/*
826 * proc interface
827 */
828static void print_formats(struct snd_info_buffer *buffer)
829{
830 int i;
831
832 for (i = 0; i < SNDRV_PCM_FORMAT_LAST; i++) {
833 if (dummy_pcm_hardware.formats & (1ULL << i))
834 snd_iprintf(buffer, " %s", snd_pcm_format_name(i));
835 }
836}
837
838static void print_rates(struct snd_info_buffer *buffer)
839{
840 static int rates[] = {
841 5512, 8000, 11025, 16000, 22050, 32000, 44100, 48000,
842 64000, 88200, 96000, 176400, 192000,
843 };
844 int i;
845
846 if (dummy_pcm_hardware.rates & SNDRV_PCM_RATE_CONTINUOUS)
847 snd_iprintf(buffer, " continuous");
848 if (dummy_pcm_hardware.rates & SNDRV_PCM_RATE_KNOT)
849 snd_iprintf(buffer, " knot");
850 for (i = 0; i < ARRAY_SIZE(rates); i++)
851 if (dummy_pcm_hardware.rates & (1 << i))
852 snd_iprintf(buffer, " %d", rates[i]);
853}
854
855#define get_dummy_int_ptr(ofs) \
856 (unsigned int *)((char *)&dummy_pcm_hardware + (ofs))
857#define get_dummy_ll_ptr(ofs) \
858 (unsigned long long *)((char *)&dummy_pcm_hardware + (ofs))
859
860struct dummy_hw_field {
861 const char *name;
862 const char *format;
863 unsigned int offset;
864 unsigned int size;
865};
866#define FIELD_ENTRY(item, fmt) { \
867 .name = #item, \
868 .format = fmt, \
869 .offset = offsetof(struct snd_pcm_hardware, item), \
870 .size = sizeof(dummy_pcm_hardware.item) }
871
872static struct dummy_hw_field fields[] = {
873 FIELD_ENTRY(formats, "%#llx"),
874 FIELD_ENTRY(rates, "%#x"),
875 FIELD_ENTRY(rate_min, "%d"),
876 FIELD_ENTRY(rate_max, "%d"),
877 FIELD_ENTRY(channels_min, "%d"),
878 FIELD_ENTRY(channels_max, "%d"),
879 FIELD_ENTRY(buffer_bytes_max, "%ld"),
880 FIELD_ENTRY(period_bytes_min, "%ld"),
881 FIELD_ENTRY(period_bytes_max, "%ld"),
882 FIELD_ENTRY(periods_min, "%d"),
883 FIELD_ENTRY(periods_max, "%d"),
884};
885
886static void dummy_proc_read(struct snd_info_entry *entry,
887 struct snd_info_buffer *buffer)
888{
889 int i;
890
891 for (i = 0; i < ARRAY_SIZE(fields); i++) {
892 snd_iprintf(buffer, "%s ", fields[i].name);
893 if (fields[i].size == sizeof(int))
894 snd_iprintf(buffer, fields[i].format,
895 *get_dummy_int_ptr(fields[i].offset));
896 else
897 snd_iprintf(buffer, fields[i].format,
898 *get_dummy_ll_ptr(fields[i].offset));
899 if (!strcmp(fields[i].name, "formats"))
900 print_formats(buffer);
901 else if (!strcmp(fields[i].name, "rates"))
902 print_rates(buffer);
903 snd_iprintf(buffer, "\n");
904 }
905}
906
907static void dummy_proc_write(struct snd_info_entry *entry,
908 struct snd_info_buffer *buffer)
909{
910 char line[64];
911
912 while (!snd_info_get_line(buffer, line, sizeof(line))) {
913 char item[20];
914 const char *ptr;
915 unsigned long long val;
916 int i;
917
918 ptr = snd_info_get_str(item, line, sizeof(item));
919 for (i = 0; i < ARRAY_SIZE(fields); i++) {
920 if (!strcmp(item, fields[i].name))
921 break;
922 }
923 if (i >= ARRAY_SIZE(fields))
924 continue;
925 snd_info_get_str(item, ptr, sizeof(item));
926 if (strict_strtoull(item, 0, &val))
927 continue;
928 if (fields[i].size == sizeof(int))
929 *get_dummy_int_ptr(fields[i].offset) = val;
930 else
931 *get_dummy_ll_ptr(fields[i].offset) = val;
932 }
933}
934
935static void __devinit dummy_proc_init(struct snd_dummy *chip)
936{
937 struct snd_info_entry *entry;
938
939 if (!snd_card_proc_new(chip->card, "dummy_pcm", &entry)) {
940 snd_info_set_text_ops(entry, chip, dummy_proc_read);
941 entry->c.text.write = dummy_proc_write;
942 entry->mode |= S_IWUSR;
943 }
944}
945#else
946#define dummy_proc_init(x)
947#endif /* CONFIG_SND_DEBUG && CONFIG_PROC_FS */
948
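The proc handlers added above expose the dummy_pcm_hardware struct through an offsetof-based fields[] table: dummy_proc_read dumps each field with its printf format, and dummy_proc_write accepts lines of the form "<field> <value>", looks the name up in the same table and stores the parsed value back through the recorded offset. As a rough illustration only (the proc path and card index below are assumptions, not part of this patch, and the node exists only with CONFIG_SND_DEBUG and CONFIG_PROC_FS enabled), a user-space test could adjust the advertised hardware limits like this:

	/* Hedged sketch: patch dummy_pcm_hardware fields via procfs. */
	#include <stdio.h>

	int main(void)
	{
		/* assumed path for card 0; adjust to the real card index */
		FILE *f = fopen("/proc/asound/card0/dummy_pcm", "r+");

		if (!f)
			return 1;
		/* each line is "<field> <value>", matching the fields[] names */
		fprintf(f, "rate_max 96000\n");
		fprintf(f, "channels_max 2\n");
		fclose(f);
		return 0;
	}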
584static int __devinit snd_dummy_probe(struct platform_device *devptr) 949static int __devinit snd_dummy_probe(struct platform_device *devptr)
585{ 950{
586 struct snd_card *card; 951 struct snd_card *card;
@@ -610,6 +975,8 @@ static int __devinit snd_dummy_probe(struct platform_device *devptr)
610 strcpy(card->shortname, "Dummy"); 975 strcpy(card->shortname, "Dummy");
611 sprintf(card->longname, "Dummy %i", dev + 1); 976 sprintf(card->longname, "Dummy %i", dev + 1);
612 977
978 dummy_proc_init(dummy);
979
613 snd_card_set_dev(card, &devptr->dev); 980 snd_card_set_dev(card, &devptr->dev);
614 981
615 err = snd_card_register(card); 982 err = snd_card_register(card);
@@ -670,6 +1037,7 @@ static void snd_dummy_unregister_all(void)
670 for (i = 0; i < ARRAY_SIZE(devices); ++i) 1037 for (i = 0; i < ARRAY_SIZE(devices); ++i)
671 platform_device_unregister(devices[i]); 1038 platform_device_unregister(devices[i]);
672 platform_driver_unregister(&snd_dummy_driver); 1039 platform_driver_unregister(&snd_dummy_driver);
1040 free_fake_buffer();
673} 1041}
674 1042
675static int __init alsa_card_dummy_init(void) 1043static int __init alsa_card_dummy_init(void)
@@ -680,6 +1048,12 @@ static int __init alsa_card_dummy_init(void)
680 if (err < 0) 1048 if (err < 0)
681 return err; 1049 return err;
682 1050
1051 err = alloc_fake_buffer();
1052 if (err < 0) {
1053 platform_driver_unregister(&snd_dummy_driver);
1054 return err;
1055 }
1056
683 cards = 0; 1057 cards = 0;
684 for (i = 0; i < SNDRV_CARDS; i++) { 1058 for (i = 0; i < SNDRV_CARDS; i++) {
685 struct platform_device *device; 1059 struct platform_device *device;
diff --git a/sound/isa/cmi8330.c b/sound/isa/cmi8330.c
index 3ee0269e5bd0..02f79d252718 100644
--- a/sound/isa/cmi8330.c
+++ b/sound/isa/cmi8330.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Driver for C-Media's CMI8330 soundcards. 2 * Driver for C-Media's CMI8330 and CMI8329 soundcards.
3 * Copyright (c) by George Talusan <gstalusan@uwaterloo.ca> 3 * Copyright (c) by George Talusan <gstalusan@uwaterloo.ca>
4 * http://www.undergrad.math.uwaterloo.ca/~gstalusa 4 * http://www.undergrad.math.uwaterloo.ca/~gstalusa
5 * 5 *
@@ -35,7 +35,7 @@
35 * 35 *
36 * This card has two mixers and two PCM devices. I've cheesed it such 36 * This card has two mixers and two PCM devices. I've cheesed it such
37 * that recording and playback can be done through the same device. 37 * that recording and playback can be done through the same device.
38 * The driver "magically" routes the capturing to the CMI8330 codec, 38 * The driver "magically" routes the capturing to the AD1848 codec,
39 * and playback to the SB16 codec. This allows for full-duplex mode 39 * and playback to the SB16 codec. This allows for full-duplex mode
40 * to some extent. 40 * to some extent.
41 * The utilities in alsa-utils are aware of both devices, so passing 41 * The utilities in alsa-utils are aware of both devices, so passing
@@ -64,7 +64,7 @@
64/* 64/*
65 */ 65 */
66MODULE_AUTHOR("George Talusan <gstalusan@uwaterloo.ca>"); 66MODULE_AUTHOR("George Talusan <gstalusan@uwaterloo.ca>");
67MODULE_DESCRIPTION("C-Media CMI8330"); 67MODULE_DESCRIPTION("C-Media CMI8330/CMI8329");
68MODULE_LICENSE("GPL"); 68MODULE_LICENSE("GPL");
69MODULE_SUPPORTED_DEVICE("{{C-Media,CMI8330,isapnp:{CMI0001,@@@0001,@X@0001}}}"); 69MODULE_SUPPORTED_DEVICE("{{C-Media,CMI8330,isapnp:{CMI0001,@@@0001,@X@0001}}}");
70 70
@@ -86,38 +86,38 @@ static long mpuport[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;
86static int mpuirq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; 86static int mpuirq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ;
87 87
88module_param_array(index, int, NULL, 0444); 88module_param_array(index, int, NULL, 0444);
89MODULE_PARM_DESC(index, "Index value for CMI8330 soundcard."); 89MODULE_PARM_DESC(index, "Index value for CMI8330/CMI8329 soundcard.");
90module_param_array(id, charp, NULL, 0444); 90module_param_array(id, charp, NULL, 0444);
91MODULE_PARM_DESC(id, "ID string for CMI8330 soundcard."); 91MODULE_PARM_DESC(id, "ID string for CMI8330/CMI8329 soundcard.");
92module_param_array(enable, bool, NULL, 0444); 92module_param_array(enable, bool, NULL, 0444);
93MODULE_PARM_DESC(enable, "Enable CMI8330 soundcard."); 93MODULE_PARM_DESC(enable, "Enable CMI8330/CMI8329 soundcard.");
94#ifdef CONFIG_PNP 94#ifdef CONFIG_PNP
95module_param_array(isapnp, bool, NULL, 0444); 95module_param_array(isapnp, bool, NULL, 0444);
96MODULE_PARM_DESC(isapnp, "PnP detection for specified soundcard."); 96MODULE_PARM_DESC(isapnp, "PnP detection for specified soundcard.");
97#endif 97#endif
98 98
99module_param_array(sbport, long, NULL, 0444); 99module_param_array(sbport, long, NULL, 0444);
100MODULE_PARM_DESC(sbport, "Port # for CMI8330 SB driver."); 100MODULE_PARM_DESC(sbport, "Port # for CMI8330/CMI8329 SB driver.");
101module_param_array(sbirq, int, NULL, 0444); 101module_param_array(sbirq, int, NULL, 0444);
102MODULE_PARM_DESC(sbirq, "IRQ # for CMI8330 SB driver."); 102MODULE_PARM_DESC(sbirq, "IRQ # for CMI8330/CMI8329 SB driver.");
103module_param_array(sbdma8, int, NULL, 0444); 103module_param_array(sbdma8, int, NULL, 0444);
104MODULE_PARM_DESC(sbdma8, "DMA8 for CMI8330 SB driver."); 104MODULE_PARM_DESC(sbdma8, "DMA8 for CMI8330/CMI8329 SB driver.");
105module_param_array(sbdma16, int, NULL, 0444); 105module_param_array(sbdma16, int, NULL, 0444);
106MODULE_PARM_DESC(sbdma16, "DMA16 for CMI8330 SB driver."); 106MODULE_PARM_DESC(sbdma16, "DMA16 for CMI8330/CMI8329 SB driver.");
107 107
108module_param_array(wssport, long, NULL, 0444); 108module_param_array(wssport, long, NULL, 0444);
109MODULE_PARM_DESC(wssport, "Port # for CMI8330 WSS driver."); 109MODULE_PARM_DESC(wssport, "Port # for CMI8330/CMI8329 WSS driver.");
110module_param_array(wssirq, int, NULL, 0444); 110module_param_array(wssirq, int, NULL, 0444);
111MODULE_PARM_DESC(wssirq, "IRQ # for CMI8330 WSS driver."); 111MODULE_PARM_DESC(wssirq, "IRQ # for CMI8330/CMI8329 WSS driver.");
112module_param_array(wssdma, int, NULL, 0444); 112module_param_array(wssdma, int, NULL, 0444);
113MODULE_PARM_DESC(wssdma, "DMA for CMI8330 WSS driver."); 113MODULE_PARM_DESC(wssdma, "DMA for CMI8330/CMI8329 WSS driver.");
114 114
115module_param_array(fmport, long, NULL, 0444); 115module_param_array(fmport, long, NULL, 0444);
116MODULE_PARM_DESC(fmport, "FM port # for CMI8330 driver."); 116MODULE_PARM_DESC(fmport, "FM port # for CMI8330/CMI8329 driver.");
117module_param_array(mpuport, long, NULL, 0444); 117module_param_array(mpuport, long, NULL, 0444);
118MODULE_PARM_DESC(mpuport, "MPU-401 port # for CMI8330 driver."); 118MODULE_PARM_DESC(mpuport, "MPU-401 port # for CMI8330/CMI8329 driver.");
119module_param_array(mpuirq, int, NULL, 0444); 119module_param_array(mpuirq, int, NULL, 0444);
120MODULE_PARM_DESC(mpuirq, "IRQ # for CMI8330 MPU-401 port."); 120MODULE_PARM_DESC(mpuirq, "IRQ # for CMI8330/CMI8329 MPU-401 port.");
121#ifdef CONFIG_PNP 121#ifdef CONFIG_PNP
122static int isa_registered; 122static int isa_registered;
123static int pnp_registered; 123static int pnp_registered;
@@ -156,6 +156,11 @@ static unsigned char snd_cmi8330_image[((CMI8330_CDINGAIN)-16) + 1] =
156 156
157typedef int (*snd_pcm_open_callback_t)(struct snd_pcm_substream *); 157typedef int (*snd_pcm_open_callback_t)(struct snd_pcm_substream *);
158 158
159enum card_type {
160 CMI8330,
161 CMI8329
162};
163
159struct snd_cmi8330 { 164struct snd_cmi8330 {
160#ifdef CONFIG_PNP 165#ifdef CONFIG_PNP
161 struct pnp_dev *cap; 166 struct pnp_dev *cap;
@@ -172,11 +177,14 @@ struct snd_cmi8330 {
172 snd_pcm_open_callback_t open; 177 snd_pcm_open_callback_t open;
173 void *private_data; /* sb or wss */ 178 void *private_data; /* sb or wss */
174 } streams[2]; 179 } streams[2];
180
181 enum card_type type;
175}; 182};
176 183
177#ifdef CONFIG_PNP 184#ifdef CONFIG_PNP
178 185
179static struct pnp_card_device_id snd_cmi8330_pnpids[] = { 186static struct pnp_card_device_id snd_cmi8330_pnpids[] = {
187 { .id = "CMI0001", .devs = { { "@X@0001" }, { "@@@0001" }, { "@H@0001" }, { "A@@0001" } } },
180 { .id = "CMI0001", .devs = { { "@@@0001" }, { "@X@0001" }, { "@H@0001" } } }, 188 { .id = "CMI0001", .devs = { { "@@@0001" }, { "@X@0001" }, { "@H@0001" } } },
181 { .id = "" } 189 { .id = "" }
182}; 190};
@@ -304,7 +312,7 @@ static int __devinit snd_cmi8330_mixer(struct snd_card *card, struct snd_cmi8330
304 unsigned int idx; 312 unsigned int idx;
305 int err; 313 int err;
306 314
307 strcpy(card->mixername, "CMI8330/C3D"); 315 strcpy(card->mixername, (acard->type == CMI8329) ? "CMI8329" : "CMI8330/C3D");
308 316
309 for (idx = 0; idx < ARRAY_SIZE(snd_cmi8330_controls); idx++) { 317 for (idx = 0; idx < ARRAY_SIZE(snd_cmi8330_controls); idx++) {
310 err = snd_ctl_add(card, 318 err = snd_ctl_add(card,
@@ -329,6 +337,9 @@ static int __devinit snd_cmi8330_pnp(int dev, struct snd_cmi8330 *acard,
329 struct pnp_dev *pdev; 337 struct pnp_dev *pdev;
330 int err; 338 int err;
331 339
340 /* CMI8329 has a device with ID A@@0001, CMI8330 does not */
341 acard->type = (id->devs[3].id[0]) ? CMI8329 : CMI8330;
342
332 acard->cap = pnp_request_card_device(card, id->devs[0].id, NULL); 343 acard->cap = pnp_request_card_device(card, id->devs[0].id, NULL);
333 if (acard->cap == NULL) 344 if (acard->cap == NULL)
334 return -EBUSY; 345 return -EBUSY;
@@ -345,38 +356,45 @@ static int __devinit snd_cmi8330_pnp(int dev, struct snd_cmi8330 *acard,
345 356
346 err = pnp_activate_dev(pdev); 357 err = pnp_activate_dev(pdev);
347 if (err < 0) { 358 if (err < 0) {
348 snd_printk(KERN_ERR "CMI8330/C3D PnP configure failure\n"); 359 snd_printk(KERN_ERR "AD1848 PnP configure failure\n");
349 return -EBUSY; 360 return -EBUSY;
350 } 361 }
351 wssport[dev] = pnp_port_start(pdev, 0); 362 wssport[dev] = pnp_port_start(pdev, 0);
352 wssdma[dev] = pnp_dma(pdev, 0); 363 wssdma[dev] = pnp_dma(pdev, 0);
353 wssirq[dev] = pnp_irq(pdev, 0); 364 wssirq[dev] = pnp_irq(pdev, 0);
354 fmport[dev] = pnp_port_start(pdev, 1); 365 if (pnp_port_start(pdev, 1))
366 fmport[dev] = pnp_port_start(pdev, 1);
355 367
356 /* allocate SB16 resources */ 368 /* allocate SB16 resources */
357 pdev = acard->play; 369 pdev = acard->play;
358 370
359 err = pnp_activate_dev(pdev); 371 err = pnp_activate_dev(pdev);
360 if (err < 0) { 372 if (err < 0) {
361 snd_printk(KERN_ERR "CMI8330/C3D (SB16) PnP configure failure\n"); 373 snd_printk(KERN_ERR "SB16 PnP configure failure\n");
362 return -EBUSY; 374 return -EBUSY;
363 } 375 }
364 sbport[dev] = pnp_port_start(pdev, 0); 376 sbport[dev] = pnp_port_start(pdev, 0);
365 sbdma8[dev] = pnp_dma(pdev, 0); 377 sbdma8[dev] = pnp_dma(pdev, 0);
366 sbdma16[dev] = pnp_dma(pdev, 1); 378 sbdma16[dev] = pnp_dma(pdev, 1);
367 sbirq[dev] = pnp_irq(pdev, 0); 379 sbirq[dev] = pnp_irq(pdev, 0);
 380 /* On CMI8329, the OPL3 port might be present in SB16 PnP resources */
381 if (fmport[dev] == SNDRV_AUTO_PORT) {
382 if (pnp_port_start(pdev, 1))
383 fmport[dev] = pnp_port_start(pdev, 1);
384 else
385 fmport[dev] = 0x388; /* Or hardwired */
386 }
368 387
369 /* allocate MPU-401 resources */ 388 /* allocate MPU-401 resources */
370 pdev = acard->mpu; 389 pdev = acard->mpu;
371 390
372 err = pnp_activate_dev(pdev); 391 err = pnp_activate_dev(pdev);
373 if (err < 0) { 392 if (err < 0)
374 snd_printk(KERN_ERR 393 snd_printk(KERN_ERR "MPU-401 PnP configure failure: will be disabled\n");
375 "CMI8330/C3D (MPU-401) PnP configure failure\n"); 394 else {
376 return -EBUSY; 395 mpuport[dev] = pnp_port_start(pdev, 0);
396 mpuirq[dev] = pnp_irq(pdev, 0);
377 } 397 }
378 mpuport[dev] = pnp_port_start(pdev, 0);
379 mpuirq[dev] = pnp_irq(pdev, 0);
380 return 0; 398 return 0;
381} 399}
382#endif 400#endif
@@ -430,9 +448,9 @@ static int __devinit snd_cmi8330_pcm(struct snd_card *card, struct snd_cmi8330 *
430 snd_cmi8330_capture_open 448 snd_cmi8330_capture_open
431 }; 449 };
432 450
433 if ((err = snd_pcm_new(card, "CMI8330", 0, 1, 1, &pcm)) < 0) 451 if ((err = snd_pcm_new(card, (chip->type == CMI8329) ? "CMI8329" : "CMI8330", 0, 1, 1, &pcm)) < 0)
434 return err; 452 return err;
435 strcpy(pcm->name, "CMI8330"); 453 strcpy(pcm->name, (chip->type == CMI8329) ? "CMI8329" : "CMI8330");
436 pcm->private_data = chip; 454 pcm->private_data = chip;
437 455
438 /* SB16 */ 456 /* SB16 */
@@ -527,11 +545,11 @@ static int __devinit snd_cmi8330_probe(struct snd_card *card, int dev)
527 wssdma[dev], -1, 545 wssdma[dev], -1,
528 WSS_HW_DETECT, 0, &acard->wss); 546 WSS_HW_DETECT, 0, &acard->wss);
529 if (err < 0) { 547 if (err < 0) {
530 snd_printk(KERN_ERR PFX "(CMI8330) device busy??\n"); 548 snd_printk(KERN_ERR PFX "AD1848 device busy??\n");
531 return err; 549 return err;
532 } 550 }
533 if (acard->wss->hardware != WSS_HW_CMI8330) { 551 if (acard->wss->hardware != WSS_HW_CMI8330) {
534 snd_printk(KERN_ERR PFX "(CMI8330) not found during probe\n"); 552 snd_printk(KERN_ERR PFX "AD1848 not found during probe\n");
535 return -ENODEV; 553 return -ENODEV;
536 } 554 }
537 555
@@ -541,11 +559,11 @@ static int __devinit snd_cmi8330_probe(struct snd_card *card, int dev)
541 sbdma8[dev], 559 sbdma8[dev],
542 sbdma16[dev], 560 sbdma16[dev],
543 SB_HW_AUTO, &acard->sb)) < 0) { 561 SB_HW_AUTO, &acard->sb)) < 0) {
544 snd_printk(KERN_ERR PFX "(SB16) device busy??\n"); 562 snd_printk(KERN_ERR PFX "SB16 device busy??\n");
545 return err; 563 return err;
546 } 564 }
547 if (acard->sb->hardware != SB_HW_16) { 565 if (acard->sb->hardware != SB_HW_16) {
548 snd_printk(KERN_ERR PFX "(SB16) not found during probe\n"); 566 snd_printk(KERN_ERR PFX "SB16 not found during probe\n");
549 return err; 567 return err;
550 } 568 }
551 569
@@ -585,8 +603,8 @@ static int __devinit snd_cmi8330_probe(struct snd_card *card, int dev)
585 mpuport[dev]); 603 mpuport[dev]);
586 } 604 }
587 605
588 strcpy(card->driver, "CMI8330/C3D"); 606 strcpy(card->driver, (acard->type == CMI8329) ? "CMI8329" : "CMI8330/C3D");
589 strcpy(card->shortname, "C-Media CMI8330/C3D"); 607 strcpy(card->shortname, (acard->type == CMI8329) ? "C-Media CMI8329" : "C-Media CMI8330/C3D");
590 sprintf(card->longname, "%s at 0x%lx, irq %d, dma %d", 608 sprintf(card->longname, "%s at 0x%lx, irq %d, dma %d",
591 card->shortname, 609 card->shortname,
592 acard->wss->port, 610 acard->wss->port,
diff --git a/sound/oss/midibuf.c b/sound/oss/midibuf.c
index a40be0cf1d97..782b3b84dac6 100644
--- a/sound/oss/midibuf.c
+++ b/sound/oss/midibuf.c
@@ -127,15 +127,16 @@ static void midi_poll(unsigned long dummy)
127 for (dev = 0; dev < num_midis; dev++) 127 for (dev = 0; dev < num_midis; dev++)
128 if (midi_devs[dev] != NULL && midi_out_buf[dev] != NULL) 128 if (midi_devs[dev] != NULL && midi_out_buf[dev] != NULL)
129 { 129 {
130 int ok = 1; 130 while (DATA_AVAIL(midi_out_buf[dev]))
131
132 while (DATA_AVAIL(midi_out_buf[dev]) && ok)
133 { 131 {
132 int ok;
134 int c = midi_out_buf[dev]->queue[midi_out_buf[dev]->head]; 133 int c = midi_out_buf[dev]->queue[midi_out_buf[dev]->head];
135 134
136 spin_unlock_irqrestore(&lock,flags);/* Give some time to others */ 135 spin_unlock_irqrestore(&lock,flags);/* Give some time to others */
137 ok = midi_devs[dev]->outputc(dev, c); 136 ok = midi_devs[dev]->outputc(dev, c);
138 spin_lock_irqsave(&lock, flags); 137 spin_lock_irqsave(&lock, flags);
138 if (!ok)
139 break;
139 midi_out_buf[dev]->head = (midi_out_buf[dev]->head + 1) % MAX_QUEUE_SIZE; 140 midi_out_buf[dev]->head = (midi_out_buf[dev]->head + 1) % MAX_QUEUE_SIZE;
140 midi_out_buf[dev]->len--; 141 midi_out_buf[dev]->len--;
141 } 142 }
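The midibuf.c hunk changes midi_poll() so that a byte is taken off the output ring only after outputc() has accepted it; previously the head pointer advanced even when the device refused the byte, silently dropping it. A minimal sketch of the same consume-on-success pattern (types and names hypothetical, not taken from the OSS code):

	#define MAX_QUEUE_SIZE 4096

	struct ring {
		unsigned char queue[MAX_QUEUE_SIZE];
		int head;
		int len;
	};

	/* Drain the ring, but only consume a byte once put() has accepted it;
	 * on failure the byte stays queued and is retried on the next poll. */
	static void drain(struct ring *r, int (*put)(unsigned char c))
	{
		while (r->len > 0) {
			unsigned char c = r->queue[r->head];

			if (!put(c))
				break;	/* device busy: keep the byte for later */
			r->head = (r->head + 1) % MAX_QUEUE_SIZE;
			r->len--;
		}
	}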
diff --git a/sound/oss/vwsnd.c b/sound/oss/vwsnd.c
index 187f72750e8f..6713110bdc75 100644
--- a/sound/oss/vwsnd.c
+++ b/sound/oss/vwsnd.c
@@ -628,7 +628,7 @@ static void li_setup_dma(dma_chan_t *chan,
628 ASSERT(!(buffer_paddr & 0xFF)); 628 ASSERT(!(buffer_paddr & 0xFF));
629 chan->baseval = (buffer_paddr >> 8) | 1 << (37 - 8); 629 chan->baseval = (buffer_paddr >> 8) | 1 << (37 - 8);
630 630
631 chan->cfgval = (!LI_CCFG_LOCK | 631 chan->cfgval = ((chan->cfgval & ~LI_CCFG_LOCK) |
632 SHIFT_FIELD(desc->ad1843_slot, LI_CCFG_SLOT) | 632 SHIFT_FIELD(desc->ad1843_slot, LI_CCFG_SLOT) |
633 desc->direction | 633 desc->direction |
634 mode | 634 mode |
@@ -638,9 +638,9 @@ static void li_setup_dma(dma_chan_t *chan,
638 tmask = 13 - fragshift; /* See Lithium DMA Notes above. */ 638 tmask = 13 - fragshift; /* See Lithium DMA Notes above. */
639 ASSERT(size >= 2 && size <= 7); 639 ASSERT(size >= 2 && size <= 7);
640 ASSERT(tmask >= 1 && tmask <= 7); 640 ASSERT(tmask >= 1 && tmask <= 7);
641 chan->ctlval = (!LI_CCTL_RESET | 641 chan->ctlval = ((chan->ctlval & ~LI_CCTL_RESET) |
642 SHIFT_FIELD(size, LI_CCTL_SIZE) | 642 SHIFT_FIELD(size, LI_CCTL_SIZE) |
643 !LI_CCTL_DMA_ENABLE | 643 (chan->ctlval & ~LI_CCTL_DMA_ENABLE) |
644 SHIFT_FIELD(tmask, LI_CCTL_TMASK) | 644 SHIFT_FIELD(tmask, LI_CCTL_TMASK) |
645 SHIFT_FIELD(0, LI_CCTL_TPTR)); 645 SHIFT_FIELD(0, LI_CCTL_TPTR));
646 646
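The vwsnd.c hunk replaces expressions of the form !LI_CCFG_LOCK and !LI_CCTL_RESET with explicit mask operations. Logical negation of a non-zero flag constant is always 0, so the old code contributed nothing to the OR and discarded whatever was previously programmed; the fix keeps the prior register value and clears only the intended bit with value & ~FLAG. A small standalone illustration (the flag value is hypothetical):

	#include <stdio.h>

	#define LOCK_BIT 0x80000000u	/* hypothetical flag, for illustration only */

	int main(void)
	{
		unsigned int cfg = 0x80001234u;	/* previously programmed value */

		/* old style: !LOCK_BIT is simply 0, nothing preserved or cleared */
		unsigned int wrong = !LOCK_BIT | 0x0fu;

		/* fixed style: keep the prior bits, clear only the lock bit */
		unsigned int fixed = (cfg & ~LOCK_BIT) | 0x0fu;

		printf("wrong=%#x fixed=%#x\n", wrong, fixed);	/* wrong=0xf fixed=0x123f */
		return 0;
	}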
diff --git a/sound/pci/Kconfig b/sound/pci/Kconfig
index 748f6b7d90b7..fb5ee3cc3968 100644
--- a/sound/pci/Kconfig
+++ b/sound/pci/Kconfig
@@ -135,11 +135,11 @@ config SND_AW2
135 135
136 136
137config SND_AZT3328 137config SND_AZT3328
138 tristate "Aztech AZF3328 / PCI168 (EXPERIMENTAL)" 138 tristate "Aztech AZF3328 / PCI168"
139 depends on EXPERIMENTAL
140 select SND_OPL3_LIB 139 select SND_OPL3_LIB
141 select SND_MPU401_UART 140 select SND_MPU401_UART
142 select SND_PCM 141 select SND_PCM
142 select SND_RAWMIDI
143 help 143 help
144 Say Y here to include support for Aztech AZF3328 (PCI168) 144 Say Y here to include support for Aztech AZF3328 (PCI168)
145 soundcards. 145 soundcards.
diff --git a/sound/pci/ali5451/ali5451.c b/sound/pci/ali5451/ali5451.c
index 76d76c08339b..b458d208720b 100644
--- a/sound/pci/ali5451/ali5451.c
+++ b/sound/pci/ali5451/ali5451.c
@@ -478,45 +478,6 @@ static int snd_ali_reset_5451(struct snd_ali *codec)
478 return 0; 478 return 0;
479} 479}
480 480
481#ifdef CODEC_RESET
482
483static int snd_ali_reset_codec(struct snd_ali *codec)
484{
485 struct pci_dev *pci_dev;
486 unsigned char bVal;
487 unsigned int dwVal;
488 unsigned short wCount, wReg;
489
490 pci_dev = codec->pci_m1533;
491
492 pci_read_config_dword(pci_dev, 0x7c, &dwVal);
493 pci_write_config_dword(pci_dev, 0x7c, dwVal | 0x08000000);
494 udelay(5000);
495 pci_read_config_dword(pci_dev, 0x7c, &dwVal);
496 pci_write_config_dword(pci_dev, 0x7c, dwVal & 0xf7ffffff);
497 udelay(5000);
498
499 bVal = inb(ALI_REG(codec,ALI_SCTRL));
500 bVal |= 0x02;
501 outb(ALI_REG(codec,ALI_SCTRL),bVal);
502 udelay(5000);
503 bVal = inb(ALI_REG(codec,ALI_SCTRL));
504 bVal &= 0xfd;
505 outb(ALI_REG(codec,ALI_SCTRL),bVal);
506 udelay(15000);
507
508 wCount = 200;
509 while (wCount--) {
510 wReg = snd_ali_codec_read(codec->ac97, AC97_POWERDOWN);
511 if ((wReg & 0x000f) == 0x000f)
512 return 0;
513 udelay(5000);
514 }
515 return -1;
516}
517
518#endif
519
520/* 481/*
521 * ALI 5451 Controller 482 * ALI 5451 Controller
522 */ 483 */
@@ -561,22 +522,6 @@ static void snd_ali_disable_address_interrupt(struct snd_ali *codec)
561 outl(gc, ALI_REG(codec, ALI_GC_CIR)); 522 outl(gc, ALI_REG(codec, ALI_GC_CIR));
562} 523}
563 524
564#if 0 /* not used */
565static void snd_ali_enable_voice_irq(struct snd_ali *codec,
566 unsigned int channel)
567{
568 unsigned int mask;
569 struct snd_ali_channel_control *pchregs = &(codec->chregs);
570
571 snd_ali_printk("enable_voice_irq channel=%d\n",channel);
572
573 mask = 1 << (channel & 0x1f);
574 pchregs->data.ainten = inl(ALI_REG(codec, pchregs->regs.ainten));
575 pchregs->data.ainten |= mask;
576 outl(pchregs->data.ainten, ALI_REG(codec, pchregs->regs.ainten));
577}
578#endif
579
580static void snd_ali_disable_voice_irq(struct snd_ali *codec, 525static void snd_ali_disable_voice_irq(struct snd_ali *codec,
581 unsigned int channel) 526 unsigned int channel)
582{ 527{
@@ -677,16 +622,6 @@ static void snd_ali_free_channel_pcm(struct snd_ali *codec, int channel)
677 } 622 }
678} 623}
679 624
680#if 0 /* not used */
681static void snd_ali_start_voice(struct snd_ali *codec, unsigned int channel)
682{
683 unsigned int mask = 1 << (channel & 0x1f);
684
685 snd_ali_printk("start_voice: channel=%d\n",channel);
686 outl(mask, ALI_REG(codec,codec->chregs.regs.start));
687}
688#endif
689
690static void snd_ali_stop_voice(struct snd_ali *codec, unsigned int channel) 625static void snd_ali_stop_voice(struct snd_ali *codec, unsigned int channel)
691{ 626{
692 unsigned int mask = 1 << (channel & 0x1f); 627 unsigned int mask = 1 << (channel & 0x1f);
diff --git a/sound/pci/azt3328.c b/sound/pci/azt3328.c
index f290bc56178f..8451a0169f32 100644
--- a/sound/pci/azt3328.c
+++ b/sound/pci/azt3328.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * azt3328.c - driver for Aztech AZF3328 based soundcards (e.g. PCI168). 2 * azt3328.c - driver for Aztech AZF3328 based soundcards (e.g. PCI168).
3 * Copyright (C) 2002, 2005 - 2008 by Andreas Mohr <andi AT lisas.de> 3 * Copyright (C) 2002, 2005 - 2009 by Andreas Mohr <andi AT lisas.de>
4 * 4 *
5 * Framework borrowed from Bart Hartgers's als4000.c. 5 * Framework borrowed from Bart Hartgers's als4000.c.
6 * Driver developed on PCI168 AP(W) version (PCI rev. 10, subsystem ID 1801), 6 * Driver developed on PCI168 AP(W) version (PCI rev. 10, subsystem ID 1801),
@@ -10,6 +10,13 @@
10 * PCI168 A/AP, sub ID 8000 10 * PCI168 A/AP, sub ID 8000
11 * Please give me feedback in case you try my driver with one of these!! 11 * Please give me feedback in case you try my driver with one of these!!
12 * 12 *
13 * Keywords: Windows XP Vista 168nt4-125.zip 168win95-125.zip PCI 168 download
14 * (XP/Vista do not support this card at all but every Linux distribution
15 * has very good support out of the box;
16 * just to make sure that the right people hit this and get to know that,
17 * despite the high level of Internet ignorance - as usual :-P -
18 * about very good support for this card - on Linux!)
19 *
13 * GPL LICENSE 20 * GPL LICENSE
14 * This program is free software; you can redistribute it and/or modify 21 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by 22 * it under the terms of the GNU General Public License as published by
@@ -71,10 +78,11 @@
71 * - built-in General DirectX timer having a 20 bits counter 78 * - built-in General DirectX timer having a 20 bits counter
72 * with 1us resolution (see below!) 79 * with 1us resolution (see below!)
73 * - I2S serial output port for external DAC 80 * - I2S serial output port for external DAC
81 * [FIXME: 3.3V or 5V level? maximum rate is 66.2kHz right?]
74 * - supports 33MHz PCI spec 2.1, PCI power management 1.0, compliant with ACPI 82 * - supports 33MHz PCI spec 2.1, PCI power management 1.0, compliant with ACPI
75 * - supports hardware volume control 83 * - supports hardware volume control
76 * - single chip low cost solution (128 pin QFP) 84 * - single chip low cost solution (128 pin QFP)
77 * - supports programmable Sub-vendor and Sub-system ID 85 * - supports programmable Sub-vendor and Sub-system ID [24C02 SEEPROM chip]
78 * required for Microsoft's logo compliance (FIXME: where?) 86 * required for Microsoft's logo compliance (FIXME: where?)
79 * At least the Trident 4D Wave DX has one bit somewhere 87 * At least the Trident 4D Wave DX has one bit somewhere
80 * to enable writes to PCI subsystem VID registers, that should be it. 88 * to enable writes to PCI subsystem VID registers, that should be it.
@@ -82,6 +90,7 @@
82 * some custom data starting at 0x80. What kind of config settings 90 * some custom data starting at 0x80. What kind of config settings
83 * are located in our extended PCI space anyway?? 91 * are located in our extended PCI space anyway??
84 * - PCI168 AP(W) card: power amplifier with 4 Watts/channel at 4 Ohms 92 * - PCI168 AP(W) card: power amplifier with 4 Watts/channel at 4 Ohms
93 * [TDA1517P chip]
85 * 94 *
86 * Note that this driver now is actually *better* than the Windows driver, 95 * Note that this driver now is actually *better* than the Windows driver,
87 * since it additionally supports the card's 1MHz DirectX timer - just try 96 * since it additionally supports the card's 1MHz DirectX timer - just try
@@ -146,10 +155,15 @@
146 * to read the Digital Enhanced Game Port. Not sure whether it is fixable. 155 * to read the Digital Enhanced Game Port. Not sure whether it is fixable.
147 * 156 *
148 * TODO 157 * TODO
158 * - use PCI_VDEVICE
159 * - verify driver status on x86_64
160 * - test multi-card driver operation
161 * - (ab)use 1MHz DirectX timer as kernel clocksource
149 * - test MPU401 MIDI playback etc. 162 * - test MPU401 MIDI playback etc.
150 * - add more power micro-management (disable various units of the card 163 * - add more power micro-management (disable various units of the card
151 * as long as they're unused). However this requires more I/O ports which I 164 * as long as they're unused, to improve audio quality and save power).
152 * haven't figured out yet and which thus might not even exist... 165 * However this requires more I/O ports which I haven't figured out yet
166 * and which thus might not even exist...
153 * The standard suspend/resume functionality could probably make use of 167 * The standard suspend/resume functionality could probably make use of
154 * some improvement, too... 168 * some improvement, too...
155 * - figure out what all unknown port bits are responsible for 169 * - figure out what all unknown port bits are responsible for
@@ -185,25 +199,46 @@ MODULE_SUPPORTED_DEVICE("{{Aztech,AZF3328}}");
185#define SUPPORT_GAMEPORT 1 199#define SUPPORT_GAMEPORT 1
186#endif 200#endif
187 201
202/* === Debug settings ===
203 Further diagnostic functionality than the settings below
204 does not need to be provided, since one can easily write a bash script
205 to dump the card's I/O ports (those listed in lspci -v -v):
206 function dump()
207 {
208 local descr=$1; local addr=$2; local count=$3
209
210 echo "${descr}: ${count} @ ${addr}:"
211 dd if=/dev/port skip=$[${addr}] count=${count} bs=1 2>/dev/null| hexdump -C
212 }
213 and then use something like
214 "dump joy200 0x200 8", "dump mpu388 0x388 4", "dump joy 0xb400 8",
215 "dump codec00 0xa800 32", "dump mixer 0xb800 64", "dump synth 0xbc00 8",
216 possibly within a "while true; do ... sleep 1; done" loop.
217 Tweaking ports could be done using
218 VALSTRING="`printf "%02x" $value`"
219 printf "\x""$VALSTRING"|dd of=/dev/port seek=$[${addr}] bs=1 2>/dev/null
220*/
221
188#define DEBUG_MISC 0 222#define DEBUG_MISC 0
189#define DEBUG_CALLS 0 223#define DEBUG_CALLS 0
190#define DEBUG_MIXER 0 224#define DEBUG_MIXER 0
191#define DEBUG_PLAY_REC 0 225#define DEBUG_CODEC 0
192#define DEBUG_IO 0 226#define DEBUG_IO 0
193#define DEBUG_TIMER 0 227#define DEBUG_TIMER 0
194#define DEBUG_GAME 0 228#define DEBUG_GAME 0
229#define DEBUG_PM 0
195#define MIXER_TESTING 0 230#define MIXER_TESTING 0
196 231
197#if DEBUG_MISC 232#if DEBUG_MISC
198#define snd_azf3328_dbgmisc(format, args...) printk(KERN_ERR format, ##args) 233#define snd_azf3328_dbgmisc(format, args...) printk(KERN_DEBUG format, ##args)
199#else 234#else
200#define snd_azf3328_dbgmisc(format, args...) 235#define snd_azf3328_dbgmisc(format, args...)
201#endif 236#endif
202 237
203#if DEBUG_CALLS 238#if DEBUG_CALLS
204#define snd_azf3328_dbgcalls(format, args...) printk(format, ##args) 239#define snd_azf3328_dbgcalls(format, args...) printk(format, ##args)
205#define snd_azf3328_dbgcallenter() printk(KERN_ERR "--> %s\n", __func__) 240#define snd_azf3328_dbgcallenter() printk(KERN_DEBUG "--> %s\n", __func__)
206#define snd_azf3328_dbgcallleave() printk(KERN_ERR "<-- %s\n", __func__) 241#define snd_azf3328_dbgcallleave() printk(KERN_DEBUG "<-- %s\n", __func__)
207#else 242#else
208#define snd_azf3328_dbgcalls(format, args...) 243#define snd_azf3328_dbgcalls(format, args...)
209#define snd_azf3328_dbgcallenter() 244#define snd_azf3328_dbgcallenter()
@@ -216,10 +251,10 @@ MODULE_SUPPORTED_DEVICE("{{Aztech,AZF3328}}");
216#define snd_azf3328_dbgmixer(format, args...) 251#define snd_azf3328_dbgmixer(format, args...)
217#endif 252#endif
218 253
219#if DEBUG_PLAY_REC 254#if DEBUG_CODEC
220#define snd_azf3328_dbgplay(format, args...) printk(KERN_DEBUG format, ##args) 255#define snd_azf3328_dbgcodec(format, args...) printk(KERN_DEBUG format, ##args)
221#else 256#else
222#define snd_azf3328_dbgplay(format, args...) 257#define snd_azf3328_dbgcodec(format, args...)
223#endif 258#endif
224 259
225#if DEBUG_MISC 260#if DEBUG_MISC
@@ -234,6 +269,12 @@ MODULE_SUPPORTED_DEVICE("{{Aztech,AZF3328}}");
234#define snd_azf3328_dbggame(format, args...) 269#define snd_azf3328_dbggame(format, args...)
235#endif 270#endif
236 271
272#if DEBUG_PM
273#define snd_azf3328_dbgpm(format, args...) printk(KERN_DEBUG format, ##args)
274#else
275#define snd_azf3328_dbgpm(format, args...)
276#endif
277
237static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ 278static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */
238module_param_array(index, int, NULL, 0444); 279module_param_array(index, int, NULL, 0444);
239MODULE_PARM_DESC(index, "Index value for AZF3328 soundcard."); 280MODULE_PARM_DESC(index, "Index value for AZF3328 soundcard.");
@@ -250,22 +291,23 @@ static int seqtimer_scaling = 128;
250module_param(seqtimer_scaling, int, 0444); 291module_param(seqtimer_scaling, int, 0444);
251MODULE_PARM_DESC(seqtimer_scaling, "Set 1024000Hz sequencer timer scale factor (lockup danger!). Default 128."); 292MODULE_PARM_DESC(seqtimer_scaling, "Set 1024000Hz sequencer timer scale factor (lockup danger!). Default 128.");
252 293
253struct snd_azf3328_audio_stream { 294struct snd_azf3328_codec_data {
295 unsigned long io_base;
254 struct snd_pcm_substream *substream; 296 struct snd_pcm_substream *substream;
255 int enabled; 297 bool running;
256 int running; 298 const char *name;
257 unsigned long portbase;
258}; 299};
259 300
260enum snd_azf3328_stream_index { 301enum snd_azf3328_codec_type {
261 AZF_PLAYBACK = 0, 302 AZF_CODEC_PLAYBACK = 0,
262 AZF_CAPTURE = 1, 303 AZF_CODEC_CAPTURE = 1,
304 AZF_CODEC_I2S_OUT = 2,
263}; 305};
264 306
265struct snd_azf3328 { 307struct snd_azf3328 {
266 /* often-used fields towards beginning, then grouped */ 308 /* often-used fields towards beginning, then grouped */
267 309
268 unsigned long codec_io; /* usually 0xb000, size 128 */ 310 unsigned long ctrl_io; /* usually 0xb000, size 128 */
269 unsigned long game_io; /* usually 0xb400, size 8 */ 311 unsigned long game_io; /* usually 0xb400, size 8 */
270 unsigned long mpu_io; /* usually 0xb800, size 4 */ 312 unsigned long mpu_io; /* usually 0xb800, size 4 */
271 unsigned long opl3_io; /* usually 0xbc00, size 8 */ 313 unsigned long opl3_io; /* usually 0xbc00, size 8 */
@@ -275,15 +317,17 @@ struct snd_azf3328 {
275 317
276 struct snd_timer *timer; 318 struct snd_timer *timer;
277 319
278 struct snd_pcm *pcm; 320 struct snd_pcm *pcm[3];
279 struct snd_azf3328_audio_stream audio_stream[2]; 321
322 /* playback, recording and I2S out codecs */
323 struct snd_azf3328_codec_data codecs[3];
280 324
281 struct snd_card *card; 325 struct snd_card *card;
282 struct snd_rawmidi *rmidi; 326 struct snd_rawmidi *rmidi;
283 327
284#ifdef SUPPORT_GAMEPORT 328#ifdef SUPPORT_GAMEPORT
285 struct gameport *gameport; 329 struct gameport *gameport;
286 int axes[4]; 330 u16 axes[4];
287#endif 331#endif
288 332
289 struct pci_dev *pci; 333 struct pci_dev *pci;
@@ -293,16 +337,16 @@ struct snd_azf3328 {
293 * If we need to add more registers here, then we might try to fold this 337 * If we need to add more registers here, then we might try to fold this
294 * into some transparent combined shadow register handling with 338 * into some transparent combined shadow register handling with
295 * CONFIG_PM register storage below, but that's slightly difficult. */ 339 * CONFIG_PM register storage below, but that's slightly difficult. */
296 u16 shadow_reg_codec_6AH; 340 u16 shadow_reg_ctrl_6AH;
297 341
298#ifdef CONFIG_PM 342#ifdef CONFIG_PM
299 /* register value containers for power management 343 /* register value containers for power management
300 * Note: not always full I/O range preserved (just like Win driver!) */ 344 * Note: not always full I/O range preserved (similar to Win driver!) */
301 u16 saved_regs_codec[AZF_IO_SIZE_CODEC_PM / 2]; 345 u32 saved_regs_ctrl[AZF_ALIGN(AZF_IO_SIZE_CTRL_PM) / 4];
302 u16 saved_regs_game [AZF_IO_SIZE_GAME_PM / 2]; 346 u32 saved_regs_game[AZF_ALIGN(AZF_IO_SIZE_GAME_PM) / 4];
303 u16 saved_regs_mpu [AZF_IO_SIZE_MPU_PM / 2]; 347 u32 saved_regs_mpu[AZF_ALIGN(AZF_IO_SIZE_MPU_PM) / 4];
304 u16 saved_regs_opl3 [AZF_IO_SIZE_OPL3_PM / 2]; 348 u32 saved_regs_opl3[AZF_ALIGN(AZF_IO_SIZE_OPL3_PM) / 4];
305 u16 saved_regs_mixer[AZF_IO_SIZE_MIXER_PM / 2]; 349 u32 saved_regs_mixer[AZF_ALIGN(AZF_IO_SIZE_MIXER_PM) / 4];
306#endif 350#endif
307}; 351};
308 352
@@ -316,7 +360,7 @@ MODULE_DEVICE_TABLE(pci, snd_azf3328_ids);
316 360
317 361
318static int 362static int
319snd_azf3328_io_reg_setb(unsigned reg, u8 mask, int do_set) 363snd_azf3328_io_reg_setb(unsigned reg, u8 mask, bool do_set)
320{ 364{
321 u8 prev = inb(reg), new; 365 u8 prev = inb(reg), new;
322 366
@@ -331,39 +375,72 @@ snd_azf3328_io_reg_setb(unsigned reg, u8 mask, int do_set)
331} 375}
332 376
333static inline void 377static inline void
334snd_azf3328_codec_outb(const struct snd_azf3328 *chip, unsigned reg, u8 value) 378snd_azf3328_codec_outb(const struct snd_azf3328_codec_data *codec,
379 unsigned reg,
380 u8 value
381)
335{ 382{
336 outb(value, chip->codec_io + reg); 383 outb(value, codec->io_base + reg);
337} 384}
338 385
339static inline u8 386static inline u8
340snd_azf3328_codec_inb(const struct snd_azf3328 *chip, unsigned reg) 387snd_azf3328_codec_inb(const struct snd_azf3328_codec_data *codec, unsigned reg)
341{ 388{
342 return inb(chip->codec_io + reg); 389 return inb(codec->io_base + reg);
343} 390}
344 391
345static inline void 392static inline void
346snd_azf3328_codec_outw(const struct snd_azf3328 *chip, unsigned reg, u16 value) 393snd_azf3328_codec_outw(const struct snd_azf3328_codec_data *codec,
394 unsigned reg,
395 u16 value
396)
347{ 397{
348 outw(value, chip->codec_io + reg); 398 outw(value, codec->io_base + reg);
349} 399}
350 400
351static inline u16 401static inline u16
352snd_azf3328_codec_inw(const struct snd_azf3328 *chip, unsigned reg) 402snd_azf3328_codec_inw(const struct snd_azf3328_codec_data *codec, unsigned reg)
353{ 403{
354 return inw(chip->codec_io + reg); 404 return inw(codec->io_base + reg);
355} 405}
356 406
357static inline void 407static inline void
358snd_azf3328_codec_outl(const struct snd_azf3328 *chip, unsigned reg, u32 value) 408snd_azf3328_codec_outl(const struct snd_azf3328_codec_data *codec,
409 unsigned reg,
410 u32 value
411)
359{ 412{
360 outl(value, chip->codec_io + reg); 413 outl(value, codec->io_base + reg);
361} 414}
362 415
363static inline u32 416static inline u32
364snd_azf3328_codec_inl(const struct snd_azf3328 *chip, unsigned reg) 417snd_azf3328_codec_inl(const struct snd_azf3328_codec_data *codec, unsigned reg)
418{
419 return inl(codec->io_base + reg);
420}
421
422static inline void
423snd_azf3328_ctrl_outb(const struct snd_azf3328 *chip, unsigned reg, u8 value)
424{
425 outb(value, chip->ctrl_io + reg);
426}
427
428static inline u8
429snd_azf3328_ctrl_inb(const struct snd_azf3328 *chip, unsigned reg)
430{
431 return inb(chip->ctrl_io + reg);
432}
433
434static inline void
435snd_azf3328_ctrl_outw(const struct snd_azf3328 *chip, unsigned reg, u16 value)
436{
437 outw(value, chip->ctrl_io + reg);
438}
439
440static inline void
441snd_azf3328_ctrl_outl(const struct snd_azf3328 *chip, unsigned reg, u32 value)
365{ 442{
366 return inl(chip->codec_io + reg); 443 outl(value, chip->ctrl_io + reg);
367} 444}
368 445
369static inline void 446static inline void
@@ -404,13 +481,13 @@ snd_azf3328_mixer_inw(const struct snd_azf3328 *chip, unsigned reg)
404 481
405#define AZF_MUTE_BIT 0x80 482#define AZF_MUTE_BIT 0x80
406 483
407static int 484static bool
408snd_azf3328_mixer_set_mute(const struct snd_azf3328 *chip, 485snd_azf3328_mixer_set_mute(const struct snd_azf3328 *chip,
409 unsigned reg, int do_mute 486 unsigned reg, bool do_mute
410) 487)
411{ 488{
412 unsigned long portbase = chip->mixer_io + reg + 1; 489 unsigned long portbase = chip->mixer_io + reg + 1;
413 int updated; 490 bool updated;
414 491
415 /* the mute bit is on the *second* (i.e. right) register of a 492 /* the mute bit is on the *second* (i.e. right) register of a
416 * left/right channel setting */ 493 * left/right channel setting */
@@ -569,7 +646,7 @@ snd_azf3328_get_mixer(struct snd_kcontrol *kcontrol,
569{ 646{
570 struct snd_azf3328 *chip = snd_kcontrol_chip(kcontrol); 647 struct snd_azf3328 *chip = snd_kcontrol_chip(kcontrol);
571 struct azf3328_mixer_reg reg; 648 struct azf3328_mixer_reg reg;
572 unsigned int oreg, val; 649 u16 oreg, val;
573 650
574 snd_azf3328_dbgcallenter(); 651 snd_azf3328_dbgcallenter();
575 snd_azf3328_mixer_reg_decode(&reg, kcontrol->private_value); 652 snd_azf3328_mixer_reg_decode(&reg, kcontrol->private_value);
@@ -600,7 +677,7 @@ snd_azf3328_put_mixer(struct snd_kcontrol *kcontrol,
600{ 677{
601 struct snd_azf3328 *chip = snd_kcontrol_chip(kcontrol); 678 struct snd_azf3328 *chip = snd_kcontrol_chip(kcontrol);
602 struct azf3328_mixer_reg reg; 679 struct azf3328_mixer_reg reg;
603 unsigned int oreg, nreg, val; 680 u16 oreg, nreg, val;
604 681
605 snd_azf3328_dbgcallenter(); 682 snd_azf3328_dbgcallenter();
606 snd_azf3328_mixer_reg_decode(&reg, kcontrol->private_value); 683 snd_azf3328_mixer_reg_decode(&reg, kcontrol->private_value);
@@ -709,7 +786,7 @@ snd_azf3328_put_mixer_enum(struct snd_kcontrol *kcontrol,
709{ 786{
710 struct snd_azf3328 *chip = snd_kcontrol_chip(kcontrol); 787 struct snd_azf3328 *chip = snd_kcontrol_chip(kcontrol);
711 struct azf3328_mixer_reg reg; 788 struct azf3328_mixer_reg reg;
712 unsigned int oreg, nreg, val; 789 u16 oreg, nreg, val;
713 790
714 snd_azf3328_mixer_reg_decode(&reg, kcontrol->private_value); 791 snd_azf3328_mixer_reg_decode(&reg, kcontrol->private_value);
715 oreg = snd_azf3328_mixer_inw(chip, reg.reg); 792 oreg = snd_azf3328_mixer_inw(chip, reg.reg);
@@ -867,14 +944,15 @@ snd_azf3328_hw_free(struct snd_pcm_substream *substream)
867 944
868static void 945static void
869snd_azf3328_codec_setfmt(struct snd_azf3328 *chip, 946snd_azf3328_codec_setfmt(struct snd_azf3328 *chip,
870 unsigned reg, 947 enum snd_azf3328_codec_type codec_type,
871 enum azf_freq_t bitrate, 948 enum azf_freq_t bitrate,
872 unsigned int format_width, 949 unsigned int format_width,
873 unsigned int channels 950 unsigned int channels
874) 951)
875{ 952{
876 u16 val = 0xff00;
877 unsigned long flags; 953 unsigned long flags;
954 const struct snd_azf3328_codec_data *codec = &chip->codecs[codec_type];
955 u16 val = 0xff00;
878 956
879 snd_azf3328_dbgcallenter(); 957 snd_azf3328_dbgcallenter();
880 switch (bitrate) { 958 switch (bitrate) {
@@ -917,7 +995,7 @@ snd_azf3328_codec_setfmt(struct snd_azf3328 *chip,
917 spin_lock_irqsave(&chip->reg_lock, flags); 995 spin_lock_irqsave(&chip->reg_lock, flags);
918 996
919 /* set bitrate/format */ 997 /* set bitrate/format */
920 snd_azf3328_codec_outw(chip, reg, val); 998 snd_azf3328_codec_outw(codec, IDX_IO_CODEC_SOUNDFORMAT, val);
921 999
922 /* changing the bitrate/format settings switches off the 1000 /* changing the bitrate/format settings switches off the
923 * audio output with an annoying click in case of 8/16bit format change 1001 * audio output with an annoying click in case of 8/16bit format change
@@ -926,11 +1004,11 @@ snd_azf3328_codec_setfmt(struct snd_azf3328 *chip,
926 * (FIXME: yes, it works, but what exactly am I doing here?? :) 1004 * (FIXME: yes, it works, but what exactly am I doing here?? :)
927 * FIXME: does this have some side effects for full-duplex 1005 * FIXME: does this have some side effects for full-duplex
928 * or other dramatic side effects? */ 1006 * or other dramatic side effects? */
929 if (reg == IDX_IO_PLAY_SOUNDFORMAT) /* only do it for playback */ 1007 if (codec_type == AZF_CODEC_PLAYBACK) /* only do it for playback */
930 snd_azf3328_codec_outw(chip, IDX_IO_PLAY_FLAGS, 1008 snd_azf3328_codec_outw(codec, IDX_IO_CODEC_DMA_FLAGS,
931 snd_azf3328_codec_inw(chip, IDX_IO_PLAY_FLAGS) | 1009 snd_azf3328_codec_inw(codec, IDX_IO_CODEC_DMA_FLAGS) |
932 DMA_PLAY_SOMETHING1 | 1010 DMA_RUN_SOMETHING1 |
933 DMA_PLAY_SOMETHING2 | 1011 DMA_RUN_SOMETHING2 |
934 SOMETHING_ALMOST_ALWAYS_SET | 1012 SOMETHING_ALMOST_ALWAYS_SET |
935 DMA_EPILOGUE_SOMETHING | 1013 DMA_EPILOGUE_SOMETHING |
936 DMA_SOMETHING_ELSE 1014 DMA_SOMETHING_ELSE
@@ -942,112 +1020,134 @@ snd_azf3328_codec_setfmt(struct snd_azf3328 *chip,
942 1020
943static inline void 1021static inline void
944snd_azf3328_codec_setfmt_lowpower(struct snd_azf3328 *chip, 1022snd_azf3328_codec_setfmt_lowpower(struct snd_azf3328 *chip,
945 unsigned reg 1023 enum snd_azf3328_codec_type codec_type
946) 1024)
947{ 1025{
948 /* choose lowest frequency for low power consumption. 1026 /* choose lowest frequency for low power consumption.
949 * While this will cause louder noise due to rather coarse frequency, 1027 * While this will cause louder noise due to rather coarse frequency,
950 * it should never matter since output should always 1028 * it should never matter since output should always
951 * get disabled properly when idle anyway. */ 1029 * get disabled properly when idle anyway. */
952 snd_azf3328_codec_setfmt(chip, reg, AZF_FREQ_4000, 8, 1); 1030 snd_azf3328_codec_setfmt(chip, codec_type, AZF_FREQ_4000, 8, 1);
953} 1031}
954 1032
955static void 1033static void
956snd_azf3328_codec_reg_6AH_update(struct snd_azf3328 *chip, 1034snd_azf3328_ctrl_reg_6AH_update(struct snd_azf3328 *chip,
957 unsigned bitmask, 1035 unsigned bitmask,
958 int enable 1036 bool enable
959) 1037)
960{ 1038{
961 if (enable) 1039 bool do_mask = !enable;
962 chip->shadow_reg_codec_6AH &= ~bitmask; 1040 if (do_mask)
1041 chip->shadow_reg_ctrl_6AH |= bitmask;
963 else 1042 else
964 chip->shadow_reg_codec_6AH |= bitmask; 1043 chip->shadow_reg_ctrl_6AH &= ~bitmask;
965 snd_azf3328_dbgplay("6AH_update mask 0x%04x enable %d: val 0x%04x\n", 1044 snd_azf3328_dbgcodec("6AH_update mask 0x%04x do_mask %d: val 0x%04x\n",
966 bitmask, enable, chip->shadow_reg_codec_6AH); 1045 bitmask, do_mask, chip->shadow_reg_ctrl_6AH);
967 snd_azf3328_codec_outw(chip, IDX_IO_6AH, chip->shadow_reg_codec_6AH); 1046 snd_azf3328_ctrl_outw(chip, IDX_IO_6AH, chip->shadow_reg_ctrl_6AH);
968} 1047}
969 1048
970static inline void 1049static inline void
971snd_azf3328_codec_enable(struct snd_azf3328 *chip, int enable) 1050snd_azf3328_ctrl_enable_codecs(struct snd_azf3328 *chip, bool enable)
972{ 1051{
973 snd_azf3328_dbgplay("codec_enable %d\n", enable); 1052 snd_azf3328_dbgcodec("codec_enable %d\n", enable);
974 /* no idea what exactly is being done here, but I strongly assume it's 1053 /* no idea what exactly is being done here, but I strongly assume it's
975 * PM related */ 1054 * PM related */
976 snd_azf3328_codec_reg_6AH_update( 1055 snd_azf3328_ctrl_reg_6AH_update(
977 chip, IO_6A_PAUSE_PLAYBACK_BIT8, enable 1056 chip, IO_6A_PAUSE_PLAYBACK_BIT8, enable
978 ); 1057 );
979} 1058}
980 1059
981static void 1060static void
982snd_azf3328_codec_activity(struct snd_azf3328 *chip, 1061snd_azf3328_ctrl_codec_activity(struct snd_azf3328 *chip,
983 enum snd_azf3328_stream_index stream_type, 1062 enum snd_azf3328_codec_type codec_type,
984 int enable 1063 bool enable
985) 1064)
986{ 1065{
987 int need_change = (chip->audio_stream[stream_type].running != enable); 1066 struct snd_azf3328_codec_data *codec = &chip->codecs[codec_type];
1067 bool need_change = (codec->running != enable);
988 1068
989 snd_azf3328_dbgplay( 1069 snd_azf3328_dbgcodec(
990 "codec_activity: type %d, enable %d, need_change %d\n", 1070 "codec_activity: %s codec, enable %d, need_change %d\n",
991 stream_type, enable, need_change 1071 codec->name, enable, need_change
992 ); 1072 );
993 if (need_change) { 1073 if (need_change) {
994 enum snd_azf3328_stream_index other = 1074 static const struct {
995 (stream_type == AZF_PLAYBACK) ? 1075 enum snd_azf3328_codec_type other1;
996 AZF_CAPTURE : AZF_PLAYBACK; 1076 enum snd_azf3328_codec_type other2;
997 /* small check to prevent shutting down the other party 1077 } peer_codecs[3] =
998 * in case it's active */ 1078 { { AZF_CODEC_CAPTURE, AZF_CODEC_I2S_OUT },
999 if ((enable) || !(chip->audio_stream[other].running)) 1079 { AZF_CODEC_PLAYBACK, AZF_CODEC_I2S_OUT },
1000 snd_azf3328_codec_enable(chip, enable); 1080 { AZF_CODEC_PLAYBACK, AZF_CODEC_CAPTURE } };
1081 bool call_function;
1082
1083 if (enable)
1084 /* if enable codec, call enable_codecs func
1085 to enable codec supply... */
1086 call_function = 1;
1087 else {
1088 /* ...otherwise call enable_codecs func
1089 (which globally shuts down operation of codecs)
1090 only in case the other codecs are currently
1091 not active either! */
1092 call_function =
1093 ((!chip->codecs[peer_codecs[codec_type].other1]
1094 .running)
1095 && (!chip->codecs[peer_codecs[codec_type].other2]
1096 .running));
1097 }
1098 if (call_function)
1099 snd_azf3328_ctrl_enable_codecs(chip, enable);
1001 1100
1002 /* ...and adjust clock, too 1101 /* ...and adjust clock, too
1003 * (reduce noise and power consumption) */ 1102 * (reduce noise and power consumption) */
1004 if (!enable) 1103 if (!enable)
1005 snd_azf3328_codec_setfmt_lowpower( 1104 snd_azf3328_codec_setfmt_lowpower(
1006 chip, 1105 chip,
1007 chip->audio_stream[stream_type].portbase 1106 codec_type
1008 + IDX_IO_PLAY_SOUNDFORMAT
1009 ); 1107 );
1108 codec->running = enable;
1010 } 1109 }
1011 chip->audio_stream[stream_type].running = enable;
1012} 1110}
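The rewritten snd_azf3328_ctrl_codec_activity() tracks a running flag per codec and touches the shared enable bit in register 6AH only when a codec starts, or when the last active codec stops, so the common supply is never yanked away from a peer that is still streaming. The same idea as a generic last-user-off sketch (names and the hardware hook are hypothetical, not from the driver):

	#include <stdbool.h>

	#define NUM_USERS 3

	static bool user_running[NUM_USERS];

	static void set_shared_power(bool on);	/* hypothetical hardware hook */

	static void set_user_activity(int user, bool enable)
	{
		int i;
		bool any = false;

		if (user_running[user] == enable)
			return;			/* no state change, nothing to do */
		user_running[user] = enable;

		if (enable) {
			set_shared_power(true);	/* make sure the supply is on */
			return;
		}
		for (i = 0; i < NUM_USERS; i++)
			any |= user_running[i];
		if (!any)
			set_shared_power(false);	/* last user gone: power down */
	}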
1013 1111
1014static void 1112static void
1015snd_azf3328_setdmaa(struct snd_azf3328 *chip, 1113snd_azf3328_codec_setdmaa(struct snd_azf3328 *chip,
1016 long unsigned int addr, 1114 enum snd_azf3328_codec_type codec_type,
1017 unsigned int count, 1115 unsigned long addr,
1018 unsigned int size, 1116 unsigned int count,
1019 enum snd_azf3328_stream_index stream_type 1117 unsigned int size
1020) 1118)
1021{ 1119{
1120 const struct snd_azf3328_codec_data *codec = &chip->codecs[codec_type];
1022 snd_azf3328_dbgcallenter(); 1121 snd_azf3328_dbgcallenter();
1023 if (!chip->audio_stream[stream_type].running) { 1122 if (!codec->running) {
1024 /* AZF3328 uses a two buffer pointer DMA playback approach */ 1123 /* AZF3328 uses a two buffer pointer DMA transfer approach */
1025 1124
1026 unsigned long flags, portbase, addr_area2; 1125 unsigned long flags, addr_area2;
1027 1126
1028 /* width 32bit (prevent overflow): */ 1127 /* width 32bit (prevent overflow): */
1029 unsigned long count_areas, count_tmp; 1128 u32 count_areas, lengths;
1030 1129
1031 portbase = chip->audio_stream[stream_type].portbase;
1032 count_areas = size/2; 1130 count_areas = size/2;
1033 addr_area2 = addr+count_areas; 1131 addr_area2 = addr+count_areas;
1034 count_areas--; /* max. index */ 1132 count_areas--; /* max. index */
1035 snd_azf3328_dbgplay("set DMA: buf1 %08lx[%lu], buf2 %08lx[%lu]\n", addr, count_areas, addr_area2, count_areas); 1133 snd_azf3328_dbgcodec("setdma: buffers %08lx[%u] / %08lx[%u]\n",
1134 addr, count_areas, addr_area2, count_areas);
1036 1135
1037 /* build combined I/O buffer length word */ 1136 /* build combined I/O buffer length word */
1038 count_tmp = count_areas; 1137 lengths = (count_areas << 16) | (count_areas);
1039 count_areas |= (count_tmp << 16);
1040 spin_lock_irqsave(&chip->reg_lock, flags); 1138 spin_lock_irqsave(&chip->reg_lock, flags);
1041 outl(addr, portbase + IDX_IO_PLAY_DMA_START_1); 1139 snd_azf3328_codec_outl(codec, IDX_IO_CODEC_DMA_START_1, addr);
1042 outl(addr_area2, portbase + IDX_IO_PLAY_DMA_START_2); 1140 snd_azf3328_codec_outl(codec, IDX_IO_CODEC_DMA_START_2,
1043 outl(count_areas, portbase + IDX_IO_PLAY_DMA_LEN_1); 1141 addr_area2);
1142 snd_azf3328_codec_outl(codec, IDX_IO_CODEC_DMA_LENGTHS,
1143 lengths);
1044 spin_unlock_irqrestore(&chip->reg_lock, flags); 1144 spin_unlock_irqrestore(&chip->reg_lock, flags);
1045 } 1145 }
1046 snd_azf3328_dbgcallleave(); 1146 snd_azf3328_dbgcallleave();
1047} 1147}
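snd_azf3328_codec_setdmaa() splits the runtime buffer into two equally sized DMA areas, programs both start addresses, and writes one combined 32-bit register that carries the (identical) maximum index of each half in its upper and lower 16 bits. A worked example of that arithmetic, with an assumed 64 KiB buffer and address (neither value comes from the patch):

	#include <stdio.h>

	int main(void)
	{
		unsigned long addr = 0x100000;			/* hypothetical DMA address */
		unsigned int size = 64 * 1024;			/* hypothetical buffer size */

		unsigned int count_areas = size / 2;		/* 0x8000 bytes per area */
		unsigned long addr_area2 = addr + count_areas;	/* second area at 0x108000 */
		unsigned int lengths;

		count_areas--;			/* registers take the maximum index: 0x7fff */
		lengths = (count_areas << 16) | count_areas;	/* combined word: 0x7fff7fff */

		printf("area1 %#lx area2 %#lx lengths %#x\n", addr, addr_area2, lengths);
		return 0;
	}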
1048 1148
1049static int 1149static int
1050snd_azf3328_playback_prepare(struct snd_pcm_substream *substream) 1150snd_azf3328_codec_prepare(struct snd_pcm_substream *substream)
1051{ 1151{
1052#if 0 1152#if 0
1053 struct snd_azf3328 *chip = snd_pcm_substream_chip(substream); 1153 struct snd_azf3328 *chip = snd_pcm_substream_chip(substream);
@@ -1058,157 +1158,161 @@ snd_azf3328_playback_prepare(struct snd_pcm_substream *substream)
1058 1158
1059 snd_azf3328_dbgcallenter(); 1159 snd_azf3328_dbgcallenter();
1060#if 0 1160#if 0
1061 snd_azf3328_codec_setfmt(chip, IDX_IO_PLAY_SOUNDFORMAT, 1161 snd_azf3328_codec_setfmt(chip, AZF_CODEC_...,
1062 runtime->rate, 1162 runtime->rate,
1063 snd_pcm_format_width(runtime->format), 1163 snd_pcm_format_width(runtime->format),
1064 runtime->channels); 1164 runtime->channels);
1065 snd_azf3328_setdmaa(chip, runtime->dma_addr, count, size, AZF_PLAYBACK); 1165 snd_azf3328_codec_setdmaa(chip, AZF_CODEC_...,
1166 runtime->dma_addr, count, size);
1066#endif 1167#endif
1067 snd_azf3328_dbgcallleave(); 1168 snd_azf3328_dbgcallleave();
1068 return 0; 1169 return 0;
1069} 1170}
1070 1171
1071static int 1172static int
1072snd_azf3328_capture_prepare(struct snd_pcm_substream *substream) 1173snd_azf3328_codec_trigger(enum snd_azf3328_codec_type codec_type,
1073{ 1174 struct snd_pcm_substream *substream, int cmd)
1074#if 0
1075 struct snd_azf3328 *chip = snd_pcm_substream_chip(substream);
1076 struct snd_pcm_runtime *runtime = substream->runtime;
1077 unsigned int size = snd_pcm_lib_buffer_bytes(substream);
1078 unsigned int count = snd_pcm_lib_period_bytes(substream);
1079#endif
1080
1081 snd_azf3328_dbgcallenter();
1082#if 0
1083 snd_azf3328_codec_setfmt(chip, IDX_IO_REC_SOUNDFORMAT,
1084 runtime->rate,
1085 snd_pcm_format_width(runtime->format),
1086 runtime->channels);
1087 snd_azf3328_setdmaa(chip, runtime->dma_addr, count, size, AZF_CAPTURE);
1088#endif
1089 snd_azf3328_dbgcallleave();
1090 return 0;
1091}
1092
1093static int
1094snd_azf3328_playback_trigger(struct snd_pcm_substream *substream, int cmd)
1095{ 1175{
1096 struct snd_azf3328 *chip = snd_pcm_substream_chip(substream); 1176 struct snd_azf3328 *chip = snd_pcm_substream_chip(substream);
1177 const struct snd_azf3328_codec_data *codec = &chip->codecs[codec_type];
1097 struct snd_pcm_runtime *runtime = substream->runtime; 1178 struct snd_pcm_runtime *runtime = substream->runtime;
1098 int result = 0; 1179 int result = 0;
1099 unsigned int status1; 1180 u16 flags1;
1100 int previously_muted; 1181 bool previously_muted = 0;
1182 bool is_playback_codec = (AZF_CODEC_PLAYBACK == codec_type);
1101 1183
1102 snd_azf3328_dbgcalls("snd_azf3328_playback_trigger cmd %d\n", cmd); 1184 snd_azf3328_dbgcalls("snd_azf3328_codec_trigger cmd %d\n", cmd);
1103 1185
1104 switch (cmd) { 1186 switch (cmd) {
1105 case SNDRV_PCM_TRIGGER_START: 1187 case SNDRV_PCM_TRIGGER_START:
1106 snd_azf3328_dbgplay("START PLAYBACK\n"); 1188 snd_azf3328_dbgcodec("START %s\n", codec->name);
1107 1189
1108 /* mute WaveOut (avoid clicking during setup) */ 1190 if (is_playback_codec) {
1109 previously_muted = 1191 /* mute WaveOut (avoid clicking during setup) */
1110 snd_azf3328_mixer_set_mute(chip, IDX_MIXER_WAVEOUT, 1); 1192 previously_muted =
1193 snd_azf3328_mixer_set_mute(
1194 chip, IDX_MIXER_WAVEOUT, 1
1195 );
1196 }
1111 1197
1112 snd_azf3328_codec_setfmt(chip, IDX_IO_PLAY_SOUNDFORMAT, 1198 snd_azf3328_codec_setfmt(chip, codec_type,
1113 runtime->rate, 1199 runtime->rate,
1114 snd_pcm_format_width(runtime->format), 1200 snd_pcm_format_width(runtime->format),
1115 runtime->channels); 1201 runtime->channels);
1116 1202
1117 spin_lock(&chip->reg_lock); 1203 spin_lock(&chip->reg_lock);
1118 /* first, remember current value: */ 1204 /* first, remember current value: */
1119 status1 = snd_azf3328_codec_inw(chip, IDX_IO_PLAY_FLAGS); 1205 flags1 = snd_azf3328_codec_inw(codec, IDX_IO_CODEC_DMA_FLAGS);
1120 1206
1121 /* stop playback */ 1207 /* stop transfer */
1122 status1 &= ~DMA_RESUME; 1208 flags1 &= ~DMA_RESUME;
1123 snd_azf3328_codec_outw(chip, IDX_IO_PLAY_FLAGS, status1); 1209 snd_azf3328_codec_outw(codec, IDX_IO_CODEC_DMA_FLAGS, flags1);
1124 1210
1125 /* FIXME: clear interrupts or what??? */ 1211 /* FIXME: clear interrupts or what??? */
1126 snd_azf3328_codec_outw(chip, IDX_IO_PLAY_IRQTYPE, 0xffff); 1212 snd_azf3328_codec_outw(codec, IDX_IO_CODEC_IRQTYPE, 0xffff);
1127 spin_unlock(&chip->reg_lock); 1213 spin_unlock(&chip->reg_lock);
1128 1214
1129 snd_azf3328_setdmaa(chip, runtime->dma_addr, 1215 snd_azf3328_codec_setdmaa(chip, codec_type, runtime->dma_addr,
1130 snd_pcm_lib_period_bytes(substream), 1216 snd_pcm_lib_period_bytes(substream),
1131 snd_pcm_lib_buffer_bytes(substream), 1217 snd_pcm_lib_buffer_bytes(substream)
1132 AZF_PLAYBACK); 1218 );
1133 1219
1134 spin_lock(&chip->reg_lock); 1220 spin_lock(&chip->reg_lock);
1135#ifdef WIN9X 1221#ifdef WIN9X
1136 /* FIXME: enable playback/recording??? */ 1222 /* FIXME: enable playback/recording??? */
1137 status1 |= DMA_PLAY_SOMETHING1 | DMA_PLAY_SOMETHING2; 1223 flags1 |= DMA_RUN_SOMETHING1 | DMA_RUN_SOMETHING2;
1138 snd_azf3328_codec_outw(chip, IDX_IO_PLAY_FLAGS, status1); 1224 snd_azf3328_codec_outw(codec, IDX_IO_CODEC_DMA_FLAGS, flags1);
1139 1225
1140 /* start playback again */ 1226 /* start transfer again */
1141 /* FIXME: what is this value (0x0010)??? */ 1227 /* FIXME: what is this value (0x0010)??? */
1142 status1 |= DMA_RESUME | DMA_EPILOGUE_SOMETHING; 1228 flags1 |= DMA_RESUME | DMA_EPILOGUE_SOMETHING;
1143 snd_azf3328_codec_outw(chip, IDX_IO_PLAY_FLAGS, status1); 1229 snd_azf3328_codec_outw(codec, IDX_IO_CODEC_DMA_FLAGS, flags1);
1144#else /* NT4 */ 1230#else /* NT4 */
1145 snd_azf3328_codec_outw(chip, IDX_IO_PLAY_FLAGS, 1231 snd_azf3328_codec_outw(codec, IDX_IO_CODEC_DMA_FLAGS,
1146 0x0000); 1232 0x0000);
1147 snd_azf3328_codec_outw(chip, IDX_IO_PLAY_FLAGS, 1233 snd_azf3328_codec_outw(codec, IDX_IO_CODEC_DMA_FLAGS,
1148 DMA_PLAY_SOMETHING1); 1234 DMA_RUN_SOMETHING1);
1149 snd_azf3328_codec_outw(chip, IDX_IO_PLAY_FLAGS, 1235 snd_azf3328_codec_outw(codec, IDX_IO_CODEC_DMA_FLAGS,
1150 DMA_PLAY_SOMETHING1 | 1236 DMA_RUN_SOMETHING1 |
1151 DMA_PLAY_SOMETHING2); 1237 DMA_RUN_SOMETHING2);
1152 snd_azf3328_codec_outw(chip, IDX_IO_PLAY_FLAGS, 1238 snd_azf3328_codec_outw(codec, IDX_IO_CODEC_DMA_FLAGS,
1153 DMA_RESUME | 1239 DMA_RESUME |
1154 SOMETHING_ALMOST_ALWAYS_SET | 1240 SOMETHING_ALMOST_ALWAYS_SET |
1155 DMA_EPILOGUE_SOMETHING | 1241 DMA_EPILOGUE_SOMETHING |
1156 DMA_SOMETHING_ELSE); 1242 DMA_SOMETHING_ELSE);
1157#endif 1243#endif
1158 spin_unlock(&chip->reg_lock); 1244 spin_unlock(&chip->reg_lock);
1159 snd_azf3328_codec_activity(chip, AZF_PLAYBACK, 1); 1245 snd_azf3328_ctrl_codec_activity(chip, codec_type, 1);
1160 1246
1161 /* now unmute WaveOut */ 1247 if (is_playback_codec) {
1162 if (!previously_muted) 1248 /* now unmute WaveOut */
1163 snd_azf3328_mixer_set_mute(chip, IDX_MIXER_WAVEOUT, 0); 1249 if (!previously_muted)
1250 snd_azf3328_mixer_set_mute(
1251 chip, IDX_MIXER_WAVEOUT, 0
1252 );
1253 }
1164 1254
1165 snd_azf3328_dbgplay("STARTED PLAYBACK\n"); 1255 snd_azf3328_dbgcodec("STARTED %s\n", codec->name);
1166 break; 1256 break;
1167 case SNDRV_PCM_TRIGGER_RESUME: 1257 case SNDRV_PCM_TRIGGER_RESUME:
1168 snd_azf3328_dbgplay("RESUME PLAYBACK\n"); 1258 snd_azf3328_dbgcodec("RESUME %s\n", codec->name);
1169 /* resume playback if we were active */ 1259 /* resume codec if we were active */
1170 spin_lock(&chip->reg_lock); 1260 spin_lock(&chip->reg_lock);
1171 if (chip->audio_stream[AZF_PLAYBACK].running) 1261 if (codec->running)
1172 snd_azf3328_codec_outw(chip, IDX_IO_PLAY_FLAGS, 1262 snd_azf3328_codec_outw(codec, IDX_IO_CODEC_DMA_FLAGS,
1173 snd_azf3328_codec_inw(chip, IDX_IO_PLAY_FLAGS) | DMA_RESUME); 1263 snd_azf3328_codec_inw(
1264 codec, IDX_IO_CODEC_DMA_FLAGS
1265 ) | DMA_RESUME
1266 );
1174 spin_unlock(&chip->reg_lock); 1267 spin_unlock(&chip->reg_lock);
1175 break; 1268 break;
1176 case SNDRV_PCM_TRIGGER_STOP: 1269 case SNDRV_PCM_TRIGGER_STOP:
1177 snd_azf3328_dbgplay("STOP PLAYBACK\n"); 1270 snd_azf3328_dbgcodec("STOP %s\n", codec->name);
1178 1271
1179 /* mute WaveOut (avoid clicking during setup) */ 1272 if (is_playback_codec) {
1180 previously_muted = 1273 /* mute WaveOut (avoid clicking during setup) */
1181 snd_azf3328_mixer_set_mute(chip, IDX_MIXER_WAVEOUT, 1); 1274 previously_muted =
1275 snd_azf3328_mixer_set_mute(
1276 chip, IDX_MIXER_WAVEOUT, 1
1277 );
1278 }
1182 1279
1183 spin_lock(&chip->reg_lock); 1280 spin_lock(&chip->reg_lock);
1184 /* first, remember current value: */ 1281 /* first, remember current value: */
1185 status1 = snd_azf3328_codec_inw(chip, IDX_IO_PLAY_FLAGS); 1282 flags1 = snd_azf3328_codec_inw(codec, IDX_IO_CODEC_DMA_FLAGS);
1186 1283
1187 /* stop playback */ 1284 /* stop transfer */
1188 status1 &= ~DMA_RESUME; 1285 flags1 &= ~DMA_RESUME;
1189 snd_azf3328_codec_outw(chip, IDX_IO_PLAY_FLAGS, status1); 1286 snd_azf3328_codec_outw(codec, IDX_IO_CODEC_DMA_FLAGS, flags1);
1190 1287
1191 /* hmm, is this really required? we're resetting the same bit 1288 /* hmm, is this really required? we're resetting the same bit
1192 * immediately thereafter... */ 1289 * immediately thereafter... */
1193 status1 |= DMA_PLAY_SOMETHING1; 1290 flags1 |= DMA_RUN_SOMETHING1;
1194 snd_azf3328_codec_outw(chip, IDX_IO_PLAY_FLAGS, status1); 1291 snd_azf3328_codec_outw(codec, IDX_IO_CODEC_DMA_FLAGS, flags1);
1195 1292
1196 status1 &= ~DMA_PLAY_SOMETHING1; 1293 flags1 &= ~DMA_RUN_SOMETHING1;
1197 snd_azf3328_codec_outw(chip, IDX_IO_PLAY_FLAGS, status1); 1294 snd_azf3328_codec_outw(codec, IDX_IO_CODEC_DMA_FLAGS, flags1);
1198 spin_unlock(&chip->reg_lock); 1295 spin_unlock(&chip->reg_lock);
1199 snd_azf3328_codec_activity(chip, AZF_PLAYBACK, 0); 1296 snd_azf3328_ctrl_codec_activity(chip, codec_type, 0);
1200 1297
1201 /* now unmute WaveOut */ 1298 if (is_playback_codec) {
1202 if (!previously_muted) 1299 /* now unmute WaveOut */
1203 snd_azf3328_mixer_set_mute(chip, IDX_MIXER_WAVEOUT, 0); 1300 if (!previously_muted)
1301 snd_azf3328_mixer_set_mute(
1302 chip, IDX_MIXER_WAVEOUT, 0
1303 );
1304 }
1204 1305
1205 snd_azf3328_dbgplay("STOPPED PLAYBACK\n"); 1306 snd_azf3328_dbgcodec("STOPPED %s\n", codec->name);
1206 break; 1307 break;
1207 case SNDRV_PCM_TRIGGER_SUSPEND: 1308 case SNDRV_PCM_TRIGGER_SUSPEND:
1208 snd_azf3328_dbgplay("SUSPEND PLAYBACK\n"); 1309 snd_azf3328_dbgcodec("SUSPEND %s\n", codec->name);
1209 /* make sure playback is stopped */ 1310 /* make sure codec is stopped */
1210 snd_azf3328_codec_outw(chip, IDX_IO_PLAY_FLAGS, 1311 snd_azf3328_codec_outw(codec, IDX_IO_CODEC_DMA_FLAGS,
1211 snd_azf3328_codec_inw(chip, IDX_IO_PLAY_FLAGS) & ~DMA_RESUME); 1312 snd_azf3328_codec_inw(
1313 codec, IDX_IO_CODEC_DMA_FLAGS
1314 ) & ~DMA_RESUME
1315 );
1212 break; 1316 break;
1213 case SNDRV_PCM_TRIGGER_PAUSE_PUSH: 1317 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
1214 snd_printk(KERN_ERR "FIXME: SNDRV_PCM_TRIGGER_PAUSE_PUSH NIY!\n"); 1318 snd_printk(KERN_ERR "FIXME: SNDRV_PCM_TRIGGER_PAUSE_PUSH NIY!\n");
@@ -1217,7 +1321,7 @@ snd_azf3328_playback_trigger(struct snd_pcm_substream *substream, int cmd)
1217 snd_printk(KERN_ERR "FIXME: SNDRV_PCM_TRIGGER_PAUSE_RELEASE NIY!\n"); 1321 snd_printk(KERN_ERR "FIXME: SNDRV_PCM_TRIGGER_PAUSE_RELEASE NIY!\n");
1218 break; 1322 break;
1219 default: 1323 default:
1220 printk(KERN_ERR "FIXME: unknown trigger mode!\n"); 1324 snd_printk(KERN_ERR "FIXME: unknown trigger mode!\n");
1221 return -EINVAL; 1325 return -EINVAL;
1222 } 1326 }
1223 1327
@@ -1225,172 +1329,74 @@ snd_azf3328_playback_trigger(struct snd_pcm_substream *substream, int cmd)
1225 return result; 1329 return result;
1226} 1330}
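The trigger path above drives the per-codec DMA and IRQ registers through snd_azf3328_codec_inw()/snd_azf3328_codec_outw(), whose definitions fall outside this hunk. A minimal sketch of what such accessors presumably reduce to, given that each snd_azf3328_codec_data carries its own io_base (the _sketch suffix marks these as illustrative, not the driver's actual helpers):

static inline u16
snd_azf3328_codec_inw_sketch(const struct snd_azf3328_codec_data *codec,
			     unsigned reg)
{
	/* 16-bit port read, relative to this codec's register window */
	return inw(codec->io_base + reg);
}

static inline void
snd_azf3328_codec_outw_sketch(const struct snd_azf3328_codec_data *codec,
			      unsigned reg, u16 value)
{
	/* 16-bit port write, relative to this codec's register window */
	outw(value, codec->io_base + reg);
}

With that reading, the old chip-wide IDX_IO_PLAY_*/IDX_IO_REC_* offsets and the new per-codec IDX_IO_CODEC_* offsets address the same hardware, only rebased to the codec's io_base.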
1227 1331
1228/* this is just analogous to playback; I'm not quite sure whether recording
1229 * should actually be triggered like that */
1230static int 1332static int
1231snd_azf3328_capture_trigger(struct snd_pcm_substream *substream, int cmd) 1333snd_azf3328_codec_playback_trigger(struct snd_pcm_substream *substream, int cmd)
1232{ 1334{
1233 struct snd_azf3328 *chip = snd_pcm_substream_chip(substream); 1335 return snd_azf3328_codec_trigger(AZF_CODEC_PLAYBACK, substream, cmd);
1234 struct snd_pcm_runtime *runtime = substream->runtime; 1336}
1235 int result = 0;
1236 unsigned int status1;
1237
1238 snd_azf3328_dbgcalls("snd_azf3328_capture_trigger cmd %d\n", cmd);
1239
1240 switch (cmd) {
1241 case SNDRV_PCM_TRIGGER_START:
1242
1243 snd_azf3328_dbgplay("START CAPTURE\n");
1244
1245 snd_azf3328_codec_setfmt(chip, IDX_IO_REC_SOUNDFORMAT,
1246 runtime->rate,
1247 snd_pcm_format_width(runtime->format),
1248 runtime->channels);
1249
1250 spin_lock(&chip->reg_lock);
1251 /* first, remember current value: */
1252 status1 = snd_azf3328_codec_inw(chip, IDX_IO_REC_FLAGS);
1253
1254 /* stop recording */
1255 status1 &= ~DMA_RESUME;
1256 snd_azf3328_codec_outw(chip, IDX_IO_REC_FLAGS, status1);
1257
1258 /* FIXME: clear interrupts or what??? */
1259 snd_azf3328_codec_outw(chip, IDX_IO_REC_IRQTYPE, 0xffff);
1260 spin_unlock(&chip->reg_lock);
1261
1262 snd_azf3328_setdmaa(chip, runtime->dma_addr,
1263 snd_pcm_lib_period_bytes(substream),
1264 snd_pcm_lib_buffer_bytes(substream),
1265 AZF_CAPTURE);
1266
1267 spin_lock(&chip->reg_lock);
1268#ifdef WIN9X
1269 /* FIXME: enable playback/recording??? */
1270 status1 |= DMA_PLAY_SOMETHING1 | DMA_PLAY_SOMETHING2;
1271 snd_azf3328_codec_outw(chip, IDX_IO_REC_FLAGS, status1);
1272
1273 /* start capture again */
1274 /* FIXME: what is this value (0x0010)??? */
1275 status1 |= DMA_RESUME | DMA_EPILOGUE_SOMETHING;
1276 snd_azf3328_codec_outw(chip, IDX_IO_REC_FLAGS, status1);
1277#else
1278 snd_azf3328_codec_outw(chip, IDX_IO_REC_FLAGS,
1279 0x0000);
1280 snd_azf3328_codec_outw(chip, IDX_IO_REC_FLAGS,
1281 DMA_PLAY_SOMETHING1);
1282 snd_azf3328_codec_outw(chip, IDX_IO_REC_FLAGS,
1283 DMA_PLAY_SOMETHING1 |
1284 DMA_PLAY_SOMETHING2);
1285 snd_azf3328_codec_outw(chip, IDX_IO_REC_FLAGS,
1286 DMA_RESUME |
1287 SOMETHING_ALMOST_ALWAYS_SET |
1288 DMA_EPILOGUE_SOMETHING |
1289 DMA_SOMETHING_ELSE);
1290#endif
1291 spin_unlock(&chip->reg_lock);
1292 snd_azf3328_codec_activity(chip, AZF_CAPTURE, 1);
1293
1294 snd_azf3328_dbgplay("STARTED CAPTURE\n");
1295 break;
1296 case SNDRV_PCM_TRIGGER_RESUME:
1297 snd_azf3328_dbgplay("RESUME CAPTURE\n");
1298 /* resume recording if we were active */
1299 spin_lock(&chip->reg_lock);
1300 if (chip->audio_stream[AZF_CAPTURE].running)
1301 snd_azf3328_codec_outw(chip, IDX_IO_REC_FLAGS,
1302 snd_azf3328_codec_inw(chip, IDX_IO_REC_FLAGS) | DMA_RESUME);
1303 spin_unlock(&chip->reg_lock);
1304 break;
1305 case SNDRV_PCM_TRIGGER_STOP:
1306 snd_azf3328_dbgplay("STOP CAPTURE\n");
1307
1308 spin_lock(&chip->reg_lock);
1309 /* first, remember current value: */
1310 status1 = snd_azf3328_codec_inw(chip, IDX_IO_REC_FLAGS);
1311
1312 /* stop recording */
1313 status1 &= ~DMA_RESUME;
1314 snd_azf3328_codec_outw(chip, IDX_IO_REC_FLAGS, status1);
1315
1316 status1 |= DMA_PLAY_SOMETHING1;
1317 snd_azf3328_codec_outw(chip, IDX_IO_REC_FLAGS, status1);
1318
1319 status1 &= ~DMA_PLAY_SOMETHING1;
1320 snd_azf3328_codec_outw(chip, IDX_IO_REC_FLAGS, status1);
1321 spin_unlock(&chip->reg_lock);
1322 snd_azf3328_codec_activity(chip, AZF_CAPTURE, 0);
1323 1337
1324 snd_azf3328_dbgplay("STOPPED CAPTURE\n"); 1338static int
1325 break; 1339snd_azf3328_codec_capture_trigger(struct snd_pcm_substream *substream, int cmd)
1326 case SNDRV_PCM_TRIGGER_SUSPEND: 1340{
1327 snd_azf3328_dbgplay("SUSPEND CAPTURE\n"); 1341 return snd_azf3328_codec_trigger(AZF_CODEC_CAPTURE, substream, cmd);
1328 /* make sure recording is stopped */ 1342}
1329 snd_azf3328_codec_outw(chip, IDX_IO_REC_FLAGS,
1330 snd_azf3328_codec_inw(chip, IDX_IO_REC_FLAGS) & ~DMA_RESUME);
1331 break;
1332 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
1333 snd_printk(KERN_ERR "FIXME: SNDRV_PCM_TRIGGER_PAUSE_PUSH NIY!\n");
1334 break;
1335 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
1336 snd_printk(KERN_ERR "FIXME: SNDRV_PCM_TRIGGER_PAUSE_RELEASE NIY!\n");
1337 break;
1338 default:
1339 printk(KERN_ERR "FIXME: unknown trigger mode!\n");
1340 return -EINVAL;
1341 }
1342 1343
1343 snd_azf3328_dbgcallleave(); 1344static int
1344 return result; 1345snd_azf3328_codec_i2s_out_trigger(struct snd_pcm_substream *substream, int cmd)
1346{
1347 return snd_azf3328_codec_trigger(AZF_CODEC_I2S_OUT, substream, cmd);
1345} 1348}
1346 1349
1347static snd_pcm_uframes_t 1350static snd_pcm_uframes_t
1348snd_azf3328_playback_pointer(struct snd_pcm_substream *substream) 1351snd_azf3328_codec_pointer(struct snd_pcm_substream *substream,
1352 enum snd_azf3328_codec_type codec_type
1353)
1349{ 1354{
1350 struct snd_azf3328 *chip = snd_pcm_substream_chip(substream); 1355 const struct snd_azf3328 *chip = snd_pcm_substream_chip(substream);
1356 const struct snd_azf3328_codec_data *codec = &chip->codecs[codec_type];
1351 unsigned long bufptr, result; 1357 unsigned long bufptr, result;
1352 snd_pcm_uframes_t frmres; 1358 snd_pcm_uframes_t frmres;
1353 1359
1354#ifdef QUERY_HARDWARE 1360#ifdef QUERY_HARDWARE
1355 bufptr = snd_azf3328_codec_inl(chip, IDX_IO_PLAY_DMA_START_1); 1361 bufptr = snd_azf3328_codec_inl(codec, IDX_IO_CODEC_DMA_START_1);
1356#else 1362#else
1357 bufptr = substream->runtime->dma_addr; 1363 bufptr = substream->runtime->dma_addr;
1358#endif 1364#endif
1359 result = snd_azf3328_codec_inl(chip, IDX_IO_PLAY_DMA_CURRPOS); 1365 result = snd_azf3328_codec_inl(codec, IDX_IO_CODEC_DMA_CURRPOS);
1360 1366
1361 /* calculate offset */ 1367 /* calculate offset */
1362 result -= bufptr; 1368 result -= bufptr;
1363 frmres = bytes_to_frames( substream->runtime, result); 1369 frmres = bytes_to_frames( substream->runtime, result);
1364 snd_azf3328_dbgplay("PLAY @ 0x%8lx, frames %8ld\n", result, frmres); 1370 snd_azf3328_dbgcodec("%s @ 0x%8lx, frames %8ld\n",
1371 codec->name, result, frmres);
1365 return frmres; 1372 return frmres;
1366} 1373}
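To make the pointer calculation above concrete, a worked example with hypothetical numbers (actual values depend on where the DMA buffer was allocated and on the configured sample format):

	/* hypothetical snapshot:                                     */
	bufptr = 0x01000000;	/* runtime->dma_addr                  */
	result = 0x01000800;	/* IDX_IO_CODEC_DMA_CURRPOS readback  */
	result -= bufptr;	/* 0x800 = 2048 bytes into the buffer */
	/* for S16_LE stereo (4 bytes per frame) bytes_to_frames()
	 * then yields 2048 / 4 = 512 frames */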
1367 1374
1368static snd_pcm_uframes_t 1375static snd_pcm_uframes_t
1369snd_azf3328_capture_pointer(struct snd_pcm_substream *substream) 1376snd_azf3328_codec_playback_pointer(struct snd_pcm_substream *substream)
1370{ 1377{
1371 struct snd_azf3328 *chip = snd_pcm_substream_chip(substream); 1378 return snd_azf3328_codec_pointer(substream, AZF_CODEC_PLAYBACK);
1372 unsigned long bufptr, result; 1379}
1373 snd_pcm_uframes_t frmres;
1374 1380
1375#ifdef QUERY_HARDWARE 1381static snd_pcm_uframes_t
1376 bufptr = snd_azf3328_codec_inl(chip, IDX_IO_REC_DMA_START_1); 1382snd_azf3328_codec_capture_pointer(struct snd_pcm_substream *substream)
1377#else 1383{
1378 bufptr = substream->runtime->dma_addr; 1384 return snd_azf3328_codec_pointer(substream, AZF_CODEC_CAPTURE);
1379#endif 1385}
1380 result = snd_azf3328_codec_inl(chip, IDX_IO_REC_DMA_CURRPOS);
1381 1386
1382 /* calculate offset */ 1387static snd_pcm_uframes_t
1383 result -= bufptr; 1388snd_azf3328_codec_i2s_out_pointer(struct snd_pcm_substream *substream)
1384 frmres = bytes_to_frames( substream->runtime, result); 1389{
1385 snd_azf3328_dbgplay("REC @ 0x%8lx, frames %8ld\n", result, frmres); 1390 return snd_azf3328_codec_pointer(substream, AZF_CODEC_I2S_OUT);
1386 return frmres;
1387} 1391}
1388 1392
1389/******************************************************************/ 1393/******************************************************************/
1390 1394
1391#ifdef SUPPORT_GAMEPORT 1395#ifdef SUPPORT_GAMEPORT
1392static inline void 1396static inline void
1393snd_azf3328_gameport_irq_enable(struct snd_azf3328 *chip, int enable) 1397snd_azf3328_gameport_irq_enable(struct snd_azf3328 *chip,
1398 bool enable
1399)
1394{ 1400{
1395 snd_azf3328_io_reg_setb( 1401 snd_azf3328_io_reg_setb(
1396 chip->game_io+IDX_GAME_HWCONFIG, 1402 chip->game_io+IDX_GAME_HWCONFIG,
@@ -1400,7 +1406,9 @@ snd_azf3328_gameport_irq_enable(struct snd_azf3328 *chip, int enable)
1400} 1406}
1401 1407
1402static inline void 1408static inline void
1403snd_azf3328_gameport_legacy_address_enable(struct snd_azf3328 *chip, int enable) 1409snd_azf3328_gameport_legacy_address_enable(struct snd_azf3328 *chip,
1410 bool enable
1411)
1404{ 1412{
1405 snd_azf3328_io_reg_setb( 1413 snd_azf3328_io_reg_setb(
1406 chip->game_io+IDX_GAME_HWCONFIG, 1414 chip->game_io+IDX_GAME_HWCONFIG,
@@ -1409,10 +1417,27 @@ snd_azf3328_gameport_legacy_address_enable(struct snd_azf3328 *chip, int enable)
1409 ); 1417 );
1410} 1418}
1411 1419
1420static void
1421snd_azf3328_gameport_set_counter_frequency(struct snd_azf3328 *chip,
1422 unsigned int freq_cfg
1423)
1424{
1425 snd_azf3328_io_reg_setb(
1426 chip->game_io+IDX_GAME_HWCONFIG,
1427 0x02,
1428 (freq_cfg & 1) != 0
1429 );
1430 snd_azf3328_io_reg_setb(
1431 chip->game_io+IDX_GAME_HWCONFIG,
1432 0x04,
1433 (freq_cfg & 2) != 0
1434 );
1435}
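The helper above splits a two-bit selector across HWCONFIG bits 0x02 and 0x04. The GAME_HWCFG_ADC_COUNTER_FREQ_* constants used by the callers are not visible in this hunk; presumably they are plain 0..3 selector values, which would map as follows:

	/* assumed encoding of freq_cfg (constants not shown in this hunk):
	 *   0 -> neither 0x02 nor 0x04 set
	 *   1 -> 0x02 set            (low bit of freq_cfg)
	 *   2 -> 0x04 set            (high bit of freq_cfg)
	 *   3 -> both bits set
	 */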
1436
1412static inline void 1437static inline void
1413snd_azf3328_gameport_axis_circuit_enable(struct snd_azf3328 *chip, int enable) 1438snd_azf3328_gameport_axis_circuit_enable(struct snd_azf3328 *chip, bool enable)
1414{ 1439{
1415 snd_azf3328_codec_reg_6AH_update( 1440 snd_azf3328_ctrl_reg_6AH_update(
1416 chip, IO_6A_SOMETHING2_GAMEPORT, enable 1441 chip, IO_6A_SOMETHING2_GAMEPORT, enable
1417 ); 1442 );
1418} 1443}
@@ -1447,6 +1472,8 @@ snd_azf3328_gameport_open(struct gameport *gameport, int mode)
1447 break; 1472 break;
1448 } 1473 }
1449 1474
1475 snd_azf3328_gameport_set_counter_frequency(chip,
1476 GAME_HWCFG_ADC_COUNTER_FREQ_STD);
1450 snd_azf3328_gameport_axis_circuit_enable(chip, (res == 0)); 1477 snd_azf3328_gameport_axis_circuit_enable(chip, (res == 0));
1451 1478
1452 return res; 1479 return res;
@@ -1458,6 +1485,8 @@ snd_azf3328_gameport_close(struct gameport *gameport)
1458 struct snd_azf3328 *chip = gameport_get_port_data(gameport); 1485 struct snd_azf3328 *chip = gameport_get_port_data(gameport);
1459 1486
1460 snd_azf3328_dbggame("gameport_close\n"); 1487 snd_azf3328_dbggame("gameport_close\n");
1488 snd_azf3328_gameport_set_counter_frequency(chip,
1489 GAME_HWCFG_ADC_COUNTER_FREQ_1_200);
1461 snd_azf3328_gameport_axis_circuit_enable(chip, 0); 1490 snd_azf3328_gameport_axis_circuit_enable(chip, 0);
1462} 1491}
1463 1492
@@ -1491,7 +1520,7 @@ snd_azf3328_gameport_cooked_read(struct gameport *gameport,
1491 1520
1492 val = snd_azf3328_game_inb(chip, IDX_GAME_AXES_CONFIG); 1521 val = snd_azf3328_game_inb(chip, IDX_GAME_AXES_CONFIG);
1493 if (val & GAME_AXES_SAMPLING_READY) { 1522 if (val & GAME_AXES_SAMPLING_READY) {
1494 for (i = 0; i < 4; ++i) { 1523 for (i = 0; i < ARRAY_SIZE(chip->axes); ++i) {
1495 /* configure the axis to read */ 1524 /* configure the axis to read */
1496 val = (i << 4) | 0x0f; 1525 val = (i << 4) | 0x0f;
1497 snd_azf3328_game_outb(chip, IDX_GAME_AXES_CONFIG, val); 1526 snd_azf3328_game_outb(chip, IDX_GAME_AXES_CONFIG, val);
@@ -1514,7 +1543,7 @@ snd_azf3328_gameport_cooked_read(struct gameport *gameport,
1514 snd_azf3328_game_outw(chip, IDX_GAME_AXIS_VALUE, 0xffff); 1543 snd_azf3328_game_outw(chip, IDX_GAME_AXIS_VALUE, 0xffff);
1515 spin_unlock_irqrestore(&chip->reg_lock, flags); 1544 spin_unlock_irqrestore(&chip->reg_lock, flags);
1516 1545
1517 for (i = 0; i < 4; i++) { 1546 for (i = 0; i < ARRAY_SIZE(chip->axes); i++) {
1518 axes[i] = chip->axes[i]; 1547 axes[i] = chip->axes[i];
1519 if (axes[i] == 0xffff) 1548 if (axes[i] == 0xffff)
1520 axes[i] = -1; 1549 axes[i] = -1;
@@ -1552,6 +1581,8 @@ snd_azf3328_gameport(struct snd_azf3328 *chip, int dev)
1552 /* DISABLE legacy address: we don't need it! */ 1581 /* DISABLE legacy address: we don't need it! */
1553 snd_azf3328_gameport_legacy_address_enable(chip, 0); 1582 snd_azf3328_gameport_legacy_address_enable(chip, 0);
1554 1583
1584 snd_azf3328_gameport_set_counter_frequency(chip,
1585 GAME_HWCFG_ADC_COUNTER_FREQ_1_200);
1555 snd_azf3328_gameport_axis_circuit_enable(chip, 0); 1586 snd_azf3328_gameport_axis_circuit_enable(chip, 0);
1556 1587
1557 gameport_register_port(chip->gameport); 1588 gameport_register_port(chip->gameport);
@@ -1585,40 +1616,77 @@ snd_azf3328_gameport_interrupt(struct snd_azf3328 *chip)
1585static inline void 1616static inline void
1586snd_azf3328_irq_log_unknown_type(u8 which) 1617snd_azf3328_irq_log_unknown_type(u8 which)
1587{ 1618{
1588 snd_azf3328_dbgplay( 1619 snd_azf3328_dbgcodec(
1589 "azt3328: unknown IRQ type (%x) occurred, please report!\n", 1620 "azt3328: unknown IRQ type (%x) occurred, please report!\n",
1590 which 1621 which
1591 ); 1622 );
1592} 1623}
1593 1624
1625static inline void
1626snd_azf3328_codec_interrupt(struct snd_azf3328 *chip, u8 status)
1627{
1628 u8 which;
1629 enum snd_azf3328_codec_type codec_type;
1630 const struct snd_azf3328_codec_data *codec;
1631
1632 for (codec_type = AZF_CODEC_PLAYBACK;
1633 codec_type <= AZF_CODEC_I2S_OUT;
1634 ++codec_type) {
1635
1636 /* skip codec if there's no interrupt for it */
1637 if (!(status & (1 << codec_type)))
1638 continue;
1639
1640 codec = &chip->codecs[codec_type];
1641
1642 spin_lock(&chip->reg_lock);
1643 which = snd_azf3328_codec_inb(codec, IDX_IO_CODEC_IRQTYPE);
1644 /* ack all IRQ types immediately */
1645 snd_azf3328_codec_outb(codec, IDX_IO_CODEC_IRQTYPE, which);
1646 spin_unlock(&chip->reg_lock);
1647
1648 if ((chip->pcm[codec_type]) && (codec->substream)) {
1649 snd_pcm_period_elapsed(codec->substream);
1650 snd_azf3328_dbgcodec("%s period done (#%x), @ %x\n",
1651 codec->name,
1652 which,
1653 snd_azf3328_codec_inl(
1654 codec, IDX_IO_CODEC_DMA_CURRPOS
1655 )
1656 );
1657 } else
1658 printk(KERN_WARNING "azt3328: irq handler problem!\n");
1659 if (which & IRQ_SOMETHING)
1660 snd_azf3328_irq_log_unknown_type(which);
1661 }
1662}
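The status & (1 << codec_type) test above only works because the IRQ status bits match the codec enum order; azt3328.h defines IRQ_PLAYBACK, IRQ_RECORDING and IRQ_I2S_OUT as 0x0001, 0x0002 and 0x0004. Assuming the enum assigns 0, 1 and 2 to playback, capture and I2S out (its body is outside this hunk), that correspondence could be documented at compile time along these lines (a sketch, not part of the patch):

static inline void
snd_azf3328_check_irq_bit_layout(void)
{
	/* sketch: pins down the assumed enum <-> IRQ-status-bit mapping */
	BUILD_BUG_ON(IRQ_PLAYBACK  != (1 << AZF_CODEC_PLAYBACK));
	BUILD_BUG_ON(IRQ_RECORDING != (1 << AZF_CODEC_CAPTURE));
	BUILD_BUG_ON(IRQ_I2S_OUT   != (1 << AZF_CODEC_I2S_OUT));
}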
1663
1594static irqreturn_t 1664static irqreturn_t
1595snd_azf3328_interrupt(int irq, void *dev_id) 1665snd_azf3328_interrupt(int irq, void *dev_id)
1596{ 1666{
1597 struct snd_azf3328 *chip = dev_id; 1667 struct snd_azf3328 *chip = dev_id;
1598 u8 status, which; 1668 u8 status;
1599#if DEBUG_PLAY_REC 1669#if DEBUG_CODEC
1600 static unsigned long irq_count; 1670 static unsigned long irq_count;
1601#endif 1671#endif
1602 1672
1603 status = snd_azf3328_codec_inb(chip, IDX_IO_IRQSTATUS); 1673 status = snd_azf3328_ctrl_inb(chip, IDX_IO_IRQSTATUS);
1604 1674
1605 /* fast path out, to ease interrupt sharing */ 1675 /* fast path out, to ease interrupt sharing */
1606 if (!(status & 1676 if (!(status &
1607 (IRQ_PLAYBACK|IRQ_RECORDING|IRQ_GAMEPORT|IRQ_MPU401|IRQ_TIMER) 1677 (IRQ_PLAYBACK|IRQ_RECORDING|IRQ_I2S_OUT
1678 |IRQ_GAMEPORT|IRQ_MPU401|IRQ_TIMER)
1608 )) 1679 ))
1609 return IRQ_NONE; /* must be interrupt for another device */ 1680 return IRQ_NONE; /* must be interrupt for another device */
1610 1681
1611 snd_azf3328_dbgplay( 1682 snd_azf3328_dbgcodec(
1612 "irq_count %ld! IDX_IO_PLAY_FLAGS %04x, " 1683 "irq_count %ld! IDX_IO_IRQSTATUS %04x\n",
1613 "IDX_IO_PLAY_IRQTYPE %04x, IDX_IO_IRQSTATUS %04x\n",
1614 irq_count++ /* debug-only */, 1684 irq_count++ /* debug-only */,
1615 snd_azf3328_codec_inw(chip, IDX_IO_PLAY_FLAGS),
1616 snd_azf3328_codec_inw(chip, IDX_IO_PLAY_IRQTYPE),
1617 status 1685 status
1618 ); 1686 );
1619 1687
1620 if (status & IRQ_TIMER) { 1688 if (status & IRQ_TIMER) {
1621 /* snd_azf3328_dbgplay("timer %ld\n", 1689 /* snd_azf3328_dbgcodec("timer %ld\n",
1622 snd_azf3328_codec_inl(chip, IDX_IO_TIMER_VALUE) 1690 snd_azf3328_codec_inl(chip, IDX_IO_TIMER_VALUE)
1623 & TIMER_VALUE_MASK 1691 & TIMER_VALUE_MASK
1624 ); */ 1692 ); */
@@ -1626,71 +1694,36 @@ snd_azf3328_interrupt(int irq, void *dev_id)
1626 snd_timer_interrupt(chip->timer, chip->timer->sticks); 1694 snd_timer_interrupt(chip->timer, chip->timer->sticks);
1627 /* ACK timer */ 1695 /* ACK timer */
1628 spin_lock(&chip->reg_lock); 1696 spin_lock(&chip->reg_lock);
1629 snd_azf3328_codec_outb(chip, IDX_IO_TIMER_VALUE + 3, 0x07); 1697 snd_azf3328_ctrl_outb(chip, IDX_IO_TIMER_VALUE + 3, 0x07);
1630 spin_unlock(&chip->reg_lock); 1698 spin_unlock(&chip->reg_lock);
1631 snd_azf3328_dbgplay("azt3328: timer IRQ\n"); 1699 snd_azf3328_dbgcodec("azt3328: timer IRQ\n");
1632 } 1700 }
1633 if (status & IRQ_PLAYBACK) {
1634 spin_lock(&chip->reg_lock);
1635 which = snd_azf3328_codec_inb(chip, IDX_IO_PLAY_IRQTYPE);
1636 /* ack all IRQ types immediately */
1637 snd_azf3328_codec_outb(chip, IDX_IO_PLAY_IRQTYPE, which);
1638 spin_unlock(&chip->reg_lock);
1639 1701
1640 if (chip->pcm && chip->audio_stream[AZF_PLAYBACK].substream) { 1702 if (status & (IRQ_PLAYBACK|IRQ_RECORDING|IRQ_I2S_OUT))
1641 snd_pcm_period_elapsed( 1703 snd_azf3328_codec_interrupt(chip, status);
1642 chip->audio_stream[AZF_PLAYBACK].substream
1643 );
1644 snd_azf3328_dbgplay("PLAY period done (#%x), @ %x\n",
1645 which,
1646 snd_azf3328_codec_inl(
1647 chip, IDX_IO_PLAY_DMA_CURRPOS
1648 )
1649 );
1650 } else
1651 printk(KERN_WARNING "azt3328: irq handler problem!\n");
1652 if (which & IRQ_PLAY_SOMETHING)
1653 snd_azf3328_irq_log_unknown_type(which);
1654 }
1655 if (status & IRQ_RECORDING) {
1656 spin_lock(&chip->reg_lock);
1657 which = snd_azf3328_codec_inb(chip, IDX_IO_REC_IRQTYPE);
1658 /* ack all IRQ types immediately */
1659 snd_azf3328_codec_outb(chip, IDX_IO_REC_IRQTYPE, which);
1660 spin_unlock(&chip->reg_lock);
1661 1704
1662 if (chip->pcm && chip->audio_stream[AZF_CAPTURE].substream) {
1663 snd_pcm_period_elapsed(
1664 chip->audio_stream[AZF_CAPTURE].substream
1665 );
1666 snd_azf3328_dbgplay("REC period done (#%x), @ %x\n",
1667 which,
1668 snd_azf3328_codec_inl(
1669 chip, IDX_IO_REC_DMA_CURRPOS
1670 )
1671 );
1672 } else
1673 printk(KERN_WARNING "azt3328: irq handler problem!\n");
1674 if (which & IRQ_REC_SOMETHING)
1675 snd_azf3328_irq_log_unknown_type(which);
1676 }
1677 if (status & IRQ_GAMEPORT) 1705 if (status & IRQ_GAMEPORT)
1678 snd_azf3328_gameport_interrupt(chip); 1706 snd_azf3328_gameport_interrupt(chip);
1707
1679 /* MPU401 has less critical IRQ requirements 1708 /* MPU401 has less critical IRQ requirements
1680 * than timer and playback/recording, right? */ 1709 * than timer and playback/recording, right? */
1681 if (status & IRQ_MPU401) { 1710 if (status & IRQ_MPU401) {
1682 snd_mpu401_uart_interrupt(irq, chip->rmidi->private_data); 1711 snd_mpu401_uart_interrupt(irq, chip->rmidi->private_data);
1683 1712
1684 /* hmm, do we have to ack the IRQ here somehow? 1713 /* hmm, do we have to ack the IRQ here somehow?
1685 * If so, then I don't know how... */ 1714 * If so, then I don't know how yet... */
1686 snd_azf3328_dbgplay("azt3328: MPU401 IRQ\n"); 1715 snd_azf3328_dbgcodec("azt3328: MPU401 IRQ\n");
1687 } 1716 }
1688 return IRQ_HANDLED; 1717 return IRQ_HANDLED;
1689} 1718}
1690 1719
1691/*****************************************************************/ 1720/*****************************************************************/
1692 1721
1693static const struct snd_pcm_hardware snd_azf3328_playback = 1722/* as long as we think we have identical snd_pcm_hardware parameters
1723 for playback, capture and i2s out, we can use the same physical struct
1724 since the struct is simply being copied into a member.
1725*/
1726static const struct snd_pcm_hardware snd_azf3328_hardware =
1694{ 1727{
1695 /* FIXME!! Correct? */ 1728 /* FIXME!! Correct? */
1696 .info = SNDRV_PCM_INFO_MMAP | 1729 .info = SNDRV_PCM_INFO_MMAP |
@@ -1718,31 +1751,6 @@ static const struct snd_pcm_hardware snd_azf3328_playback =
1718 .fifo_size = 0, 1751 .fifo_size = 0,
1719}; 1752};
1720 1753
1721static const struct snd_pcm_hardware snd_azf3328_capture =
1722{
1723 /* FIXME */
1724 .info = SNDRV_PCM_INFO_MMAP |
1725 SNDRV_PCM_INFO_INTERLEAVED |
1726 SNDRV_PCM_INFO_MMAP_VALID,
1727 .formats = SNDRV_PCM_FMTBIT_S8 |
1728 SNDRV_PCM_FMTBIT_U8 |
1729 SNDRV_PCM_FMTBIT_S16_LE |
1730 SNDRV_PCM_FMTBIT_U16_LE,
1731 .rates = SNDRV_PCM_RATE_5512 |
1732 SNDRV_PCM_RATE_8000_48000 |
1733 SNDRV_PCM_RATE_KNOT,
1734 .rate_min = AZF_FREQ_4000,
1735 .rate_max = AZF_FREQ_66200,
1736 .channels_min = 1,
1737 .channels_max = 2,
1738 .buffer_bytes_max = 65536,
1739 .period_bytes_min = 64,
1740 .period_bytes_max = 65536,
1741 .periods_min = 1,
1742 .periods_max = 1024,
1743 .fifo_size = 0,
1744};
1745
1746 1754
1747static unsigned int snd_azf3328_fixed_rates[] = { 1755static unsigned int snd_azf3328_fixed_rates[] = {
1748 AZF_FREQ_4000, 1756 AZF_FREQ_4000,
@@ -1770,14 +1778,19 @@ static struct snd_pcm_hw_constraint_list snd_azf3328_hw_constraints_rates = {
1770/*****************************************************************/ 1778/*****************************************************************/
1771 1779
1772static int 1780static int
1773snd_azf3328_playback_open(struct snd_pcm_substream *substream) 1781snd_azf3328_pcm_open(struct snd_pcm_substream *substream,
1782 enum snd_azf3328_codec_type codec_type
1783)
1774{ 1784{
1775 struct snd_azf3328 *chip = snd_pcm_substream_chip(substream); 1785 struct snd_azf3328 *chip = snd_pcm_substream_chip(substream);
1776 struct snd_pcm_runtime *runtime = substream->runtime; 1786 struct snd_pcm_runtime *runtime = substream->runtime;
1777 1787
1778 snd_azf3328_dbgcallenter(); 1788 snd_azf3328_dbgcallenter();
1779 chip->audio_stream[AZF_PLAYBACK].substream = substream; 1789 chip->codecs[codec_type].substream = substream;
1780 runtime->hw = snd_azf3328_playback; 1790
1791 /* same parameters for all our codecs - at least we think so... */
1792 runtime->hw = snd_azf3328_hardware;
1793
1781 snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, 1794 snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
1782 &snd_azf3328_hw_constraints_rates); 1795 &snd_azf3328_hw_constraints_rates);
1783 snd_azf3328_dbgcallleave(); 1796 snd_azf3328_dbgcallleave();
@@ -1785,40 +1798,52 @@ snd_azf3328_playback_open(struct snd_pcm_substream *substream)
1785} 1798}
1786 1799
1787static int 1800static int
1801snd_azf3328_playback_open(struct snd_pcm_substream *substream)
1802{
1803 return snd_azf3328_pcm_open(substream, AZF_CODEC_PLAYBACK);
1804}
1805
1806static int
1788snd_azf3328_capture_open(struct snd_pcm_substream *substream) 1807snd_azf3328_capture_open(struct snd_pcm_substream *substream)
1789{ 1808{
1790 struct snd_azf3328 *chip = snd_pcm_substream_chip(substream); 1809 return snd_azf3328_pcm_open(substream, AZF_CODEC_CAPTURE);
1791 struct snd_pcm_runtime *runtime = substream->runtime; 1810}
1792 1811
1793 snd_azf3328_dbgcallenter(); 1812static int
1794 chip->audio_stream[AZF_CAPTURE].substream = substream; 1813snd_azf3328_i2s_out_open(struct snd_pcm_substream *substream)
1795 runtime->hw = snd_azf3328_capture; 1814{
1796 snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, 1815 return snd_azf3328_pcm_open(substream, AZF_CODEC_I2S_OUT);
1797 &snd_azf3328_hw_constraints_rates);
1798 snd_azf3328_dbgcallleave();
1799 return 0;
1800} 1816}
1801 1817
1802static int 1818static int
1803snd_azf3328_playback_close(struct snd_pcm_substream *substream) 1819snd_azf3328_pcm_close(struct snd_pcm_substream *substream,
1820 enum snd_azf3328_codec_type codec_type
1821)
1804{ 1822{
1805 struct snd_azf3328 *chip = snd_pcm_substream_chip(substream); 1823 struct snd_azf3328 *chip = snd_pcm_substream_chip(substream);
1806 1824
1807 snd_azf3328_dbgcallenter(); 1825 snd_azf3328_dbgcallenter();
1808 chip->audio_stream[AZF_PLAYBACK].substream = NULL; 1826 chip->codecs[codec_type].substream = NULL;
1809 snd_azf3328_dbgcallleave(); 1827 snd_azf3328_dbgcallleave();
1810 return 0; 1828 return 0;
1811} 1829}
1812 1830
1813static int 1831static int
1832snd_azf3328_playback_close(struct snd_pcm_substream *substream)
1833{
1834 return snd_azf3328_pcm_close(substream, AZF_CODEC_PLAYBACK);
1835}
1836
1837static int
1814snd_azf3328_capture_close(struct snd_pcm_substream *substream) 1838snd_azf3328_capture_close(struct snd_pcm_substream *substream)
1815{ 1839{
1816 struct snd_azf3328 *chip = snd_pcm_substream_chip(substream); 1840 return snd_azf3328_pcm_close(substream, AZF_CODEC_CAPTURE);
1841}
1817 1842
1818 snd_azf3328_dbgcallenter(); 1843static int
1819 chip->audio_stream[AZF_CAPTURE].substream = NULL; 1844snd_azf3328_i2s_out_close(struct snd_pcm_substream *substream)
1820 snd_azf3328_dbgcallleave(); 1845{
1821 return 0; 1846 return snd_azf3328_pcm_close(substream, AZF_CODEC_I2S_OUT);
1822} 1847}
1823 1848
1824/******************************************************************/ 1849/******************************************************************/
@@ -1829,9 +1854,9 @@ static struct snd_pcm_ops snd_azf3328_playback_ops = {
1829 .ioctl = snd_pcm_lib_ioctl, 1854 .ioctl = snd_pcm_lib_ioctl,
1830 .hw_params = snd_azf3328_hw_params, 1855 .hw_params = snd_azf3328_hw_params,
1831 .hw_free = snd_azf3328_hw_free, 1856 .hw_free = snd_azf3328_hw_free,
1832 .prepare = snd_azf3328_playback_prepare, 1857 .prepare = snd_azf3328_codec_prepare,
1833 .trigger = snd_azf3328_playback_trigger, 1858 .trigger = snd_azf3328_codec_playback_trigger,
1834 .pointer = snd_azf3328_playback_pointer 1859 .pointer = snd_azf3328_codec_playback_pointer
1835}; 1860};
1836 1861
1837static struct snd_pcm_ops snd_azf3328_capture_ops = { 1862static struct snd_pcm_ops snd_azf3328_capture_ops = {
@@ -1840,30 +1865,67 @@ static struct snd_pcm_ops snd_azf3328_capture_ops = {
1840 .ioctl = snd_pcm_lib_ioctl, 1865 .ioctl = snd_pcm_lib_ioctl,
1841 .hw_params = snd_azf3328_hw_params, 1866 .hw_params = snd_azf3328_hw_params,
1842 .hw_free = snd_azf3328_hw_free, 1867 .hw_free = snd_azf3328_hw_free,
1843 .prepare = snd_azf3328_capture_prepare, 1868 .prepare = snd_azf3328_codec_prepare,
1844 .trigger = snd_azf3328_capture_trigger, 1869 .trigger = snd_azf3328_codec_capture_trigger,
1845 .pointer = snd_azf3328_capture_pointer 1870 .pointer = snd_azf3328_codec_capture_pointer
1871};
1872
1873static struct snd_pcm_ops snd_azf3328_i2s_out_ops = {
1874 .open = snd_azf3328_i2s_out_open,
1875 .close = snd_azf3328_i2s_out_close,
1876 .ioctl = snd_pcm_lib_ioctl,
1877 .hw_params = snd_azf3328_hw_params,
1878 .hw_free = snd_azf3328_hw_free,
1879 .prepare = snd_azf3328_codec_prepare,
1880 .trigger = snd_azf3328_codec_i2s_out_trigger,
1881 .pointer = snd_azf3328_codec_i2s_out_pointer
1846}; 1882};
1847 1883
1848static int __devinit 1884static int __devinit
1849snd_azf3328_pcm(struct snd_azf3328 *chip, int device) 1885snd_azf3328_pcm(struct snd_azf3328 *chip)
1850{ 1886{
1887enum { AZF_PCMDEV_STD, AZF_PCMDEV_I2S_OUT, NUM_AZF_PCMDEVS }; /* pcm devices */
1888
1851 struct snd_pcm *pcm; 1889 struct snd_pcm *pcm;
1852 int err; 1890 int err;
1853 1891
1854 snd_azf3328_dbgcallenter(); 1892 snd_azf3328_dbgcallenter();
1855 if ((err = snd_pcm_new(chip->card, "AZF3328 DSP", device, 1, 1, &pcm)) < 0) 1893
1894 err = snd_pcm_new(chip->card, "AZF3328 DSP", AZF_PCMDEV_STD,
1895 1, 1, &pcm);
1896 if (err < 0)
1856 return err; 1897 return err;
1857 snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_azf3328_playback_ops); 1898 snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK,
1858 snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_azf3328_capture_ops); 1899 &snd_azf3328_playback_ops);
1900 snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
1901 &snd_azf3328_capture_ops);
1859 1902
1860 pcm->private_data = chip; 1903 pcm->private_data = chip;
1861 pcm->info_flags = 0; 1904 pcm->info_flags = 0;
1862 strcpy(pcm->name, chip->card->shortname); 1905 strcpy(pcm->name, chip->card->shortname);
1863 chip->pcm = pcm; 1906 /* same pcm object for playback/capture (see snd_pcm_new() above) */
1907 chip->pcm[AZF_CODEC_PLAYBACK] = pcm;
1908 chip->pcm[AZF_CODEC_CAPTURE] = pcm;
1864 1909
1865 snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, 1910 snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
1866 snd_dma_pci_data(chip->pci), 64*1024, 64*1024); 1911 snd_dma_pci_data(chip->pci),
1912 64*1024, 64*1024);
1913
1914 err = snd_pcm_new(chip->card, "AZF3328 I2S OUT", AZF_PCMDEV_I2S_OUT,
1915 1, 0, &pcm);
1916 if (err < 0)
1917 return err;
1918 snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK,
1919 &snd_azf3328_i2s_out_ops);
1920
1921 pcm->private_data = chip;
1922 pcm->info_flags = 0;
1923 strcpy(pcm->name, chip->card->shortname);
1924 chip->pcm[AZF_CODEC_I2S_OUT] = pcm;
1925
1926 snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
1927 snd_dma_pci_data(chip->pci),
1928 64*1024, 64*1024);
1867 1929
1868 snd_azf3328_dbgcallleave(); 1930 snd_azf3328_dbgcallleave();
1869 return 0; 1931 return 0;
@@ -1902,7 +1964,7 @@ snd_azf3328_timer_start(struct snd_timer *timer)
1902 snd_azf3328_dbgtimer("setting timer countdown value %d, add COUNTDOWN|IRQ\n", delay); 1964 snd_azf3328_dbgtimer("setting timer countdown value %d, add COUNTDOWN|IRQ\n", delay);
1903 delay |= TIMER_COUNTDOWN_ENABLE | TIMER_IRQ_ENABLE; 1965 delay |= TIMER_COUNTDOWN_ENABLE | TIMER_IRQ_ENABLE;
1904 spin_lock_irqsave(&chip->reg_lock, flags); 1966 spin_lock_irqsave(&chip->reg_lock, flags);
1905 snd_azf3328_codec_outl(chip, IDX_IO_TIMER_VALUE, delay); 1967 snd_azf3328_ctrl_outl(chip, IDX_IO_TIMER_VALUE, delay);
1906 spin_unlock_irqrestore(&chip->reg_lock, flags); 1968 spin_unlock_irqrestore(&chip->reg_lock, flags);
1907 snd_azf3328_dbgcallleave(); 1969 snd_azf3328_dbgcallleave();
1908 return 0; 1970 return 0;
@@ -1919,7 +1981,7 @@ snd_azf3328_timer_stop(struct snd_timer *timer)
1919 spin_lock_irqsave(&chip->reg_lock, flags); 1981 spin_lock_irqsave(&chip->reg_lock, flags);
1920 /* disable timer countdown and interrupt */ 1982 /* disable timer countdown and interrupt */
1921 /* FIXME: should we write TIMER_IRQ_ACK here? */ 1983 /* FIXME: should we write TIMER_IRQ_ACK here? */
1922 snd_azf3328_codec_outb(chip, IDX_IO_TIMER_VALUE + 3, 0); 1984 snd_azf3328_ctrl_outb(chip, IDX_IO_TIMER_VALUE + 3, 0);
1923 spin_unlock_irqrestore(&chip->reg_lock, flags); 1985 spin_unlock_irqrestore(&chip->reg_lock, flags);
1924 snd_azf3328_dbgcallleave(); 1986 snd_azf3328_dbgcallleave();
1925 return 0; 1987 return 0;
@@ -2035,7 +2097,7 @@ snd_azf3328_test_bit(unsigned unsigned reg, int bit)
2035 2097
2036 outb(val, reg); 2098 outb(val, reg);
2037 2099
2038 printk(KERN_ERR "reg %04x bit %d: %02x %02x %02x\n", 2100 printk(KERN_DEBUG "reg %04x bit %d: %02x %02x %02x\n",
2039 reg, bit, val, valoff, valon 2101 reg, bit, val, valoff, valon
2040 ); 2102 );
2041} 2103}
@@ -2048,9 +2110,9 @@ snd_azf3328_debug_show_ports(const struct snd_azf3328 *chip)
2048 u16 tmp; 2110 u16 tmp;
2049 2111
2050 snd_azf3328_dbgmisc( 2112 snd_azf3328_dbgmisc(
2051 "codec_io 0x%lx, game_io 0x%lx, mpu_io 0x%lx, " 2113 "ctrl_io 0x%lx, game_io 0x%lx, mpu_io 0x%lx, "
2052 "opl3_io 0x%lx, mixer_io 0x%lx, irq %d\n", 2114 "opl3_io 0x%lx, mixer_io 0x%lx, irq %d\n",
2053 chip->codec_io, chip->game_io, chip->mpu_io, 2115 chip->ctrl_io, chip->game_io, chip->mpu_io,
2054 chip->opl3_io, chip->mixer_io, chip->irq 2116 chip->opl3_io, chip->mixer_io, chip->irq
2055 ); 2117 );
2056 2118
@@ -2083,9 +2145,9 @@ snd_azf3328_debug_show_ports(const struct snd_azf3328 *chip)
2083 inb(0x38c + tmp) 2145 inb(0x38c + tmp)
2084 ); 2146 );
2085 2147
2086 for (tmp = 0; tmp < AZF_IO_SIZE_CODEC; tmp += 2) 2148 for (tmp = 0; tmp < AZF_IO_SIZE_CTRL; tmp += 2)
2087 snd_azf3328_dbgmisc("codec 0x%02x: 0x%04x\n", 2149 snd_azf3328_dbgmisc("ctrl 0x%02x: 0x%04x\n",
2088 tmp, snd_azf3328_codec_inw(chip, tmp) 2150 tmp, snd_azf3328_ctrl_inw(chip, tmp)
2089 ); 2151 );
2090 2152
2091 for (tmp = 0; tmp < AZF_IO_SIZE_MIXER; tmp += 2) 2153 for (tmp = 0; tmp < AZF_IO_SIZE_MIXER; tmp += 2)
@@ -2106,7 +2168,8 @@ snd_azf3328_create(struct snd_card *card,
2106 static struct snd_device_ops ops = { 2168 static struct snd_device_ops ops = {
2107 .dev_free = snd_azf3328_dev_free, 2169 .dev_free = snd_azf3328_dev_free,
2108 }; 2170 };
2109 u16 tmp; 2171 u8 dma_init;
2172 enum snd_azf3328_codec_type codec_type;
2110 2173
2111 *rchip = NULL; 2174 *rchip = NULL;
2112 2175
@@ -2138,14 +2201,21 @@ snd_azf3328_create(struct snd_card *card,
2138 if (err < 0) 2201 if (err < 0)
2139 goto out_err; 2202 goto out_err;
2140 2203
2141 chip->codec_io = pci_resource_start(pci, 0); 2204 chip->ctrl_io = pci_resource_start(pci, 0);
2142 chip->game_io = pci_resource_start(pci, 1); 2205 chip->game_io = pci_resource_start(pci, 1);
2143 chip->mpu_io = pci_resource_start(pci, 2); 2206 chip->mpu_io = pci_resource_start(pci, 2);
2144 chip->opl3_io = pci_resource_start(pci, 3); 2207 chip->opl3_io = pci_resource_start(pci, 3);
2145 chip->mixer_io = pci_resource_start(pci, 4); 2208 chip->mixer_io = pci_resource_start(pci, 4);
2146 2209
2147 chip->audio_stream[AZF_PLAYBACK].portbase = chip->codec_io + 0x00; 2210 chip->codecs[AZF_CODEC_PLAYBACK].io_base =
2148 chip->audio_stream[AZF_CAPTURE].portbase = chip->codec_io + 0x20; 2211 chip->ctrl_io + AZF_IO_OFFS_CODEC_PLAYBACK;
2212 chip->codecs[AZF_CODEC_PLAYBACK].name = "PLAYBACK";
2213 chip->codecs[AZF_CODEC_CAPTURE].io_base =
2214 chip->ctrl_io + AZF_IO_OFFS_CODEC_CAPTURE;
2215 chip->codecs[AZF_CODEC_CAPTURE].name = "CAPTURE";
2216 chip->codecs[AZF_CODEC_I2S_OUT].io_base =
2217 chip->ctrl_io + AZF_IO_OFFS_CODEC_I2S_OUT;
2218 chip->codecs[AZF_CODEC_I2S_OUT].name = "I2S_OUT";
2149 2219
2150 if (request_irq(pci->irq, snd_azf3328_interrupt, 2220 if (request_irq(pci->irq, snd_azf3328_interrupt,
2151 IRQF_SHARED, card->shortname, chip)) { 2221 IRQF_SHARED, card->shortname, chip)) {
@@ -2168,20 +2238,25 @@ snd_azf3328_create(struct snd_card *card,
2168 if (err < 0) 2238 if (err < 0)
2169 goto out_err; 2239 goto out_err;
2170 2240
2171 /* shutdown codecs to save power */ 2241 /* standard codec init stuff */
2172 /* have snd_azf3328_codec_activity() act properly */ 2242 /* default DMA init value */
2173 chip->audio_stream[AZF_PLAYBACK].running = 1; 2243 dma_init = DMA_RUN_SOMETHING2|DMA_EPILOGUE_SOMETHING|DMA_SOMETHING_ELSE;
2174 snd_azf3328_codec_activity(chip, AZF_PLAYBACK, 0);
2175 2244
2176 /* standard chip init stuff */ 2245 for (codec_type = AZF_CODEC_PLAYBACK;
2177 /* default IRQ init value */ 2246 codec_type <= AZF_CODEC_I2S_OUT; ++codec_type) {
2178 tmp = DMA_PLAY_SOMETHING2|DMA_EPILOGUE_SOMETHING|DMA_SOMETHING_ELSE; 2247 struct snd_azf3328_codec_data *codec =
2248 &chip->codecs[codec_type];
2179 2249
2180 spin_lock_irq(&chip->reg_lock); 2250 /* shutdown codecs to save power */
2181 snd_azf3328_codec_outb(chip, IDX_IO_PLAY_FLAGS, tmp); 2251 /* have ...ctrl_codec_activity() act properly */
2182 snd_azf3328_codec_outb(chip, IDX_IO_REC_FLAGS, tmp); 2252 codec->running = 1;
2183 snd_azf3328_codec_outb(chip, IDX_IO_SOMETHING_FLAGS, tmp); 2253 snd_azf3328_ctrl_codec_activity(chip, codec_type, 0);
2184 spin_unlock_irq(&chip->reg_lock); 2254
2255 spin_lock_irq(&chip->reg_lock);
2256 snd_azf3328_codec_outb(codec, IDX_IO_CODEC_DMA_FLAGS,
2257 dma_init);
2258 spin_unlock_irq(&chip->reg_lock);
2259 }
2185 2260
2186 snd_card_set_dev(card, &pci->dev); 2261 snd_card_set_dev(card, &pci->dev);
2187 2262
@@ -2229,8 +2304,11 @@ snd_azf3328_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
2229 2304
2230 card->private_data = chip; 2305 card->private_data = chip;
2231 2306
2307 /* chose to use MPU401_HW_AZT2320 ID instead of MPU401_HW_MPU401,
2308 since our hardware ought to be similar, thus use same ID. */
2232 err = snd_mpu401_uart_new( 2309 err = snd_mpu401_uart_new(
2233 card, 0, MPU401_HW_MPU401, chip->mpu_io, MPU401_INFO_INTEGRATED, 2310 card, 0,
2311 MPU401_HW_AZT2320, chip->mpu_io, MPU401_INFO_INTEGRATED,
2234 pci->irq, 0, &chip->rmidi 2312 pci->irq, 0, &chip->rmidi
2235 ); 2313 );
2236 if (err < 0) { 2314 if (err < 0) {
@@ -2244,7 +2322,7 @@ snd_azf3328_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
2244 if (err < 0) 2322 if (err < 0)
2245 goto out_err; 2323 goto out_err;
2246 2324
2247 err = snd_azf3328_pcm(chip, 0); 2325 err = snd_azf3328_pcm(chip);
2248 if (err < 0) 2326 if (err < 0)
2249 goto out_err; 2327 goto out_err;
2250 2328
@@ -2266,14 +2344,14 @@ snd_azf3328_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
2266 opl3->private_data = chip; 2344 opl3->private_data = chip;
2267 2345
2268 sprintf(card->longname, "%s at 0x%lx, irq %i", 2346 sprintf(card->longname, "%s at 0x%lx, irq %i",
2269 card->shortname, chip->codec_io, chip->irq); 2347 card->shortname, chip->ctrl_io, chip->irq);
2270 2348
2271 err = snd_card_register(card); 2349 err = snd_card_register(card);
2272 if (err < 0) 2350 if (err < 0)
2273 goto out_err; 2351 goto out_err;
2274 2352
2275#ifdef MODULE 2353#ifdef MODULE
2276 printk( 2354 printk(KERN_INFO
2277"azt3328: Sound driver for Aztech AZF3328-based soundcards such as PCI168.\n" 2355"azt3328: Sound driver for Aztech AZF3328-based soundcards such as PCI168.\n"
2278"azt3328: Hardware was completely undocumented, unfortunately.\n" 2356"azt3328: Hardware was completely undocumented, unfortunately.\n"
2279"azt3328: Feel free to contact andi AT lisas.de for bug reports etc.!\n" 2357"azt3328: Feel free to contact andi AT lisas.de for bug reports etc.!\n"
@@ -2308,36 +2386,52 @@ snd_azf3328_remove(struct pci_dev *pci)
2308} 2386}
2309 2387
2310#ifdef CONFIG_PM 2388#ifdef CONFIG_PM
2389static inline void
2390snd_azf3328_suspend_regs(unsigned long io_addr, unsigned count, u32 *saved_regs)
2391{
2392 unsigned reg;
2393
2394 for (reg = 0; reg < count; ++reg) {
2395 *saved_regs = inl(io_addr);
2396 snd_azf3328_dbgpm("suspend: io 0x%04lx: 0x%08x\n",
2397 io_addr, *saved_regs);
2398 ++saved_regs;
2399 io_addr += sizeof(*saved_regs);
2400 }
2401}
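Note that count here is a number of 32-bit words, not bytes: the callers below pass ARRAY_SIZE() of the saved_regs arrays and the loop steps io_addr by sizeof(u32). The array declarations themselves are outside this hunk; with the AZF_IO_SIZE_*_PM sizes from azt3328.h they would presumably look roughly like this (sketch only):

	/* e.g. AZF_IO_SIZE_CTRL_PM == 0x70 -> 0x70 / 4 == 28 saved words */
	u32 saved_regs_ctrl[AZF_IO_SIZE_CTRL_PM / sizeof(u32)];
	u32 saved_regs_mixer[AZF_IO_SIZE_MIXER_PM / sizeof(u32)];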
2402
2311static int 2403static int
2312snd_azf3328_suspend(struct pci_dev *pci, pm_message_t state) 2404snd_azf3328_suspend(struct pci_dev *pci, pm_message_t state)
2313{ 2405{
2314 struct snd_card *card = pci_get_drvdata(pci); 2406 struct snd_card *card = pci_get_drvdata(pci);
2315 struct snd_azf3328 *chip = card->private_data; 2407 struct snd_azf3328 *chip = card->private_data;
2316 unsigned reg; 2408 u16 *saved_regs_ctrl_u16;
2317 2409
2318 snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); 2410 snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
2319 2411
2320 snd_pcm_suspend_all(chip->pcm); 2412 snd_pcm_suspend_all(chip->pcm[AZF_CODEC_PLAYBACK]);
2413 snd_pcm_suspend_all(chip->pcm[AZF_CODEC_I2S_OUT]);
2321 2414
2322 for (reg = 0; reg < AZF_IO_SIZE_MIXER_PM / 2; ++reg) 2415 snd_azf3328_suspend_regs(chip->mixer_io,
2323 chip->saved_regs_mixer[reg] = inw(chip->mixer_io + reg * 2); 2416 ARRAY_SIZE(chip->saved_regs_mixer), chip->saved_regs_mixer);
2324 2417
2325 /* make sure to disable master volume etc. to prevent looping sound */ 2418 /* make sure to disable master volume etc. to prevent looping sound */
2326 snd_azf3328_mixer_set_mute(chip, IDX_MIXER_PLAY_MASTER, 1); 2419 snd_azf3328_mixer_set_mute(chip, IDX_MIXER_PLAY_MASTER, 1);
2327 snd_azf3328_mixer_set_mute(chip, IDX_MIXER_WAVEOUT, 1); 2420 snd_azf3328_mixer_set_mute(chip, IDX_MIXER_WAVEOUT, 1);
2328 2421
2329 for (reg = 0; reg < AZF_IO_SIZE_CODEC_PM / 2; ++reg) 2422 snd_azf3328_suspend_regs(chip->ctrl_io,
2330 chip->saved_regs_codec[reg] = inw(chip->codec_io + reg * 2); 2423 ARRAY_SIZE(chip->saved_regs_ctrl), chip->saved_regs_ctrl);
2331 2424
2332 /* manually store the one currently relevant write-only reg, too */ 2425 /* manually store the one currently relevant write-only reg, too */
2333 chip->saved_regs_codec[IDX_IO_6AH / 2] = chip->shadow_reg_codec_6AH; 2426 saved_regs_ctrl_u16 = (u16 *)chip->saved_regs_ctrl;
2427 saved_regs_ctrl_u16[IDX_IO_6AH / 2] = chip->shadow_reg_ctrl_6AH;
2334 2428
2335 for (reg = 0; reg < AZF_IO_SIZE_GAME_PM / 2; ++reg) 2429 snd_azf3328_suspend_regs(chip->game_io,
2336 chip->saved_regs_game[reg] = inw(chip->game_io + reg * 2); 2430 ARRAY_SIZE(chip->saved_regs_game), chip->saved_regs_game);
2337 for (reg = 0; reg < AZF_IO_SIZE_MPU_PM / 2; ++reg) 2431 snd_azf3328_suspend_regs(chip->mpu_io,
2338 chip->saved_regs_mpu[reg] = inw(chip->mpu_io + reg * 2); 2432 ARRAY_SIZE(chip->saved_regs_mpu), chip->saved_regs_mpu);
2339 for (reg = 0; reg < AZF_IO_SIZE_OPL3_PM / 2; ++reg) 2433 snd_azf3328_suspend_regs(chip->opl3_io,
2340 chip->saved_regs_opl3[reg] = inw(chip->opl3_io + reg * 2); 2434 ARRAY_SIZE(chip->saved_regs_opl3), chip->saved_regs_opl3);
2341 2435
2342 pci_disable_device(pci); 2436 pci_disable_device(pci);
2343 pci_save_state(pci); 2437 pci_save_state(pci);
@@ -2345,12 +2439,28 @@ snd_azf3328_suspend(struct pci_dev *pci, pm_message_t state)
2345 return 0; 2439 return 0;
2346} 2440}
2347 2441
2442static inline void
2443snd_azf3328_resume_regs(const u32 *saved_regs,
2444 unsigned long io_addr,
2445 unsigned count
2446)
2447{
2448 unsigned reg;
2449
2450 for (reg = 0; reg < count; ++reg) {
2451 outl(*saved_regs, io_addr);
2452 snd_azf3328_dbgpm("resume: io 0x%04lx: 0x%08x --> 0x%08x\n",
2453 io_addr, *saved_regs, inl(io_addr));
2454 ++saved_regs;
2455 io_addr += sizeof(*saved_regs);
2456 }
2457}
2458
2348static int 2459static int
2349snd_azf3328_resume(struct pci_dev *pci) 2460snd_azf3328_resume(struct pci_dev *pci)
2350{ 2461{
2351 struct snd_card *card = pci_get_drvdata(pci); 2462 struct snd_card *card = pci_get_drvdata(pci);
2352 struct snd_azf3328 *chip = card->private_data; 2463 const struct snd_azf3328 *chip = card->private_data;
2353 unsigned reg;
2354 2464
2355 pci_set_power_state(pci, PCI_D0); 2465 pci_set_power_state(pci, PCI_D0);
2356 pci_restore_state(pci); 2466 pci_restore_state(pci);
@@ -2362,16 +2472,24 @@ snd_azf3328_resume(struct pci_dev *pci)
2362 } 2472 }
2363 pci_set_master(pci); 2473 pci_set_master(pci);
2364 2474
2365 for (reg = 0; reg < AZF_IO_SIZE_GAME_PM / 2; ++reg) 2475 snd_azf3328_resume_regs(chip->saved_regs_game, chip->game_io,
2366 outw(chip->saved_regs_game[reg], chip->game_io + reg * 2); 2476 ARRAY_SIZE(chip->saved_regs_game));
2367 for (reg = 0; reg < AZF_IO_SIZE_MPU_PM / 2; ++reg) 2477 snd_azf3328_resume_regs(chip->saved_regs_mpu, chip->mpu_io,
2368 outw(chip->saved_regs_mpu[reg], chip->mpu_io + reg * 2); 2478 ARRAY_SIZE(chip->saved_regs_mpu));
2369 for (reg = 0; reg < AZF_IO_SIZE_OPL3_PM / 2; ++reg) 2479 snd_azf3328_resume_regs(chip->saved_regs_opl3, chip->opl3_io,
2370 outw(chip->saved_regs_opl3[reg], chip->opl3_io + reg * 2); 2480 ARRAY_SIZE(chip->saved_regs_opl3));
2371 for (reg = 0; reg < AZF_IO_SIZE_MIXER_PM / 2; ++reg) 2481
2372 outw(chip->saved_regs_mixer[reg], chip->mixer_io + reg * 2); 2482 snd_azf3328_resume_regs(chip->saved_regs_mixer, chip->mixer_io,
2373 for (reg = 0; reg < AZF_IO_SIZE_CODEC_PM / 2; ++reg) 2483 ARRAY_SIZE(chip->saved_regs_mixer));
2374 outw(chip->saved_regs_codec[reg], chip->codec_io + reg * 2); 2484
2485 /* unfortunately with 32bit transfers, IDX_MIXER_PLAY_MASTER (0x02)
2486 and IDX_MIXER_RESET (offset 0x00) get touched at the same time,
2487 resulting in a mixer reset condition persisting until _after_
2488 master vol was restored. Thus master vol needs an extra restore. */
2489 outw(((u16 *)chip->saved_regs_mixer)[1], chip->mixer_io + 2);
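	/* reading of the extra restore above: saved_regs_mixer[] was filled
	 * by 32-bit inl() reads, so on the little-endian machines this PCI
	 * card lives in, ((u16 *)saved_regs_mixer)[1] is the 16-bit value
	 * captured from mixer offset 0x02, i.e. IDX_MIXER_PLAY_MASTER,
	 * exactly the register being re-written here. */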
2490
2491 snd_azf3328_resume_regs(chip->saved_regs_ctrl, chip->ctrl_io,
2492 ARRAY_SIZE(chip->saved_regs_ctrl));
2375 2493
2376 snd_power_change_state(card, SNDRV_CTL_POWER_D0); 2494 snd_power_change_state(card, SNDRV_CTL_POWER_D0);
2377 return 0; 2495 return 0;
diff --git a/sound/pci/azt3328.h b/sound/pci/azt3328.h
index 974e05122f00..6f46b97650cc 100644
--- a/sound/pci/azt3328.h
+++ b/sound/pci/azt3328.h
@@ -6,50 +6,59 @@
6 6
7/*** main I/O area port indices ***/ 7/*** main I/O area port indices ***/
8/* (only 0x70 of 0x80 bytes saved/restored by Windows driver) */ 8/* (only 0x70 of 0x80 bytes saved/restored by Windows driver) */
9#define AZF_IO_SIZE_CODEC 0x80 9#define AZF_IO_SIZE_CTRL 0x80
10#define AZF_IO_SIZE_CODEC_PM 0x70 10#define AZF_IO_SIZE_CTRL_PM 0x70
11 11
12/* the driver initialisation suggests a layout of 4 main areas: 12/* the driver initialisation suggests a layout of 4 areas
13 * from 0x00 (playback), from 0x20 (recording) and from 0x40 (maybe MPU401??). 13 * within the main card control I/O:
14 * from 0x00 (playback codec), from 0x20 (recording codec)
15 * and from 0x40 (most certainly I2S out codec).
14 * And another area from 0x60 to 0x6f (DirectX timer, IRQ management, 16 * And another area from 0x60 to 0x6f (DirectX timer, IRQ management,
15 * power management etc.???). */ 17 * power management etc.???). */
16 18
17/** playback area **/ 19#define AZF_IO_OFFS_CODEC_PLAYBACK 0x00
18#define IDX_IO_PLAY_FLAGS 0x00 /* PU:0x0000 */ 20#define AZF_IO_OFFS_CODEC_CAPTURE 0x20
21#define AZF_IO_OFFS_CODEC_I2S_OUT 0x40
22
23#define IDX_IO_CODEC_DMA_FLAGS 0x00 /* PU:0x0000 */
19 /* able to reactivate output after output muting due to 8/16bit 24 /* able to reactivate output after output muting due to 8/16bit
20 * output change, just like 0x0002. 25 * output change, just like 0x0002.
21 * 0x0001 is the only bit that's able to start the DMA counter */ 26 * 0x0001 is the only bit that's able to start the DMA counter */
22 #define DMA_RESUME 0x0001 /* paused if cleared ? */ 27 #define DMA_RESUME 0x0001 /* paused if cleared? */
23 /* 0x0002 *temporarily* set during DMA stopping. hmm 28 /* 0x0002 *temporarily* set during DMA stopping. hmm
24 * both 0x0002 and 0x0004 set in playback setup. */ 29 * both 0x0002 and 0x0004 set in playback setup. */
25 /* able to reactivate output after output muting due to 8/16bit 30 /* able to reactivate output after output muting due to 8/16bit
26 * output change, just like 0x0001. */ 31 * output change, just like 0x0001. */
27 #define DMA_PLAY_SOMETHING1 0x0002 /* \ alternated (toggled) */ 32 #define DMA_RUN_SOMETHING1 0x0002 /* \ alternated (toggled) */
28 /* 0x0004: NOT able to reactivate output */ 33 /* 0x0004: NOT able to reactivate output */
29 #define DMA_PLAY_SOMETHING2 0x0004 /* / bits */ 34 #define DMA_RUN_SOMETHING2 0x0004 /* / bits */
30 #define SOMETHING_ALMOST_ALWAYS_SET 0x0008 /* ???; can be modified */ 35 #define SOMETHING_ALMOST_ALWAYS_SET 0x0008 /* ???; can be modified */
31 #define DMA_EPILOGUE_SOMETHING 0x0010 36 #define DMA_EPILOGUE_SOMETHING 0x0010
32 #define DMA_SOMETHING_ELSE 0x0020 /* ??? */ 37 #define DMA_SOMETHING_ELSE 0x0020 /* ??? */
33 #define SOMETHING_UNMODIFIABLE 0xffc0 /* unused ? not modifiable */ 38 #define SOMETHING_UNMODIFIABLE 0xffc0 /* unused? not modifiable */
34#define IDX_IO_PLAY_IRQTYPE 0x02 /* PU:0x0001 */ 39#define IDX_IO_CODEC_IRQTYPE 0x02 /* PU:0x0001 */
35 /* write back to flags in case flags are set, in order to ACK IRQ in handler 40 /* write back to flags in case flags are set, in order to ACK IRQ in handler
36 * (bit 1 of port 0x64 indicates interrupt for one of these three types) 41 * (bit 1 of port 0x64 indicates interrupt for one of these three types)
37 * sometimes in this case it just writes 0xffff to globally ACK all IRQs 42 * sometimes in this case it just writes 0xffff to globally ACK all IRQs
38 * settings written are not reflected when reading back, though. 43 * settings written are not reflected when reading back, though.
39 * seems to be IRQ, too (frequently used: port |= 0x07 !), but who knows ? */ 44 * seems to be IRQ, too (frequently used: port |= 0x07 !), but who knows? */
40 #define IRQ_PLAY_SOMETHING 0x0001 /* something & ACK */ 45 #define IRQ_SOMETHING 0x0001 /* something & ACK */
41 #define IRQ_FINISHED_PLAYBUF_1 0x0002 /* 1st dmabuf finished & ACK */ 46 #define IRQ_FINISHED_DMABUF_1 0x0002 /* 1st dmabuf finished & ACK */
42 #define IRQ_FINISHED_PLAYBUF_2 0x0004 /* 2nd dmabuf finished & ACK */ 47 #define IRQ_FINISHED_DMABUF_2 0x0004 /* 2nd dmabuf finished & ACK */
43 #define IRQMASK_SOME_STATUS_1 0x0008 /* \ related bits */ 48 #define IRQMASK_SOME_STATUS_1 0x0008 /* \ related bits */
44 #define IRQMASK_SOME_STATUS_2 0x0010 /* / (checked together in loop) */ 49 #define IRQMASK_SOME_STATUS_2 0x0010 /* / (checked together in loop) */
45 #define IRQMASK_UNMODIFIABLE 0xffe0 /* unused ? not modifiable */ 50 #define IRQMASK_UNMODIFIABLE 0xffe0 /* unused? not modifiable */
46#define IDX_IO_PLAY_DMA_START_1 0x04 /* start address of 1st DMA play area, PU:0x00000000 */ 51 /* start address of 1st DMA transfer area, PU:0x00000000 */
47#define IDX_IO_PLAY_DMA_START_2 0x08 /* start address of 2nd DMA play area, PU:0x00000000 */ 52#define IDX_IO_CODEC_DMA_START_1 0x04
48#define IDX_IO_PLAY_DMA_LEN_1 0x0c /* length of 1st DMA play area, PU:0x0000 */ 53 /* start address of 2nd DMA transfer area, PU:0x00000000 */
49#define IDX_IO_PLAY_DMA_LEN_2 0x0e /* length of 2nd DMA play area, PU:0x0000 */ 54#define IDX_IO_CODEC_DMA_START_2 0x08
50#define IDX_IO_PLAY_DMA_CURRPOS 0x10 /* current DMA position, PU:0x00000000 */ 55 /* both lengths of DMA transfer areas, PU:0x00000000
51#define IDX_IO_PLAY_DMA_CURROFS 0x14 /* offset within current DMA play area, PU:0x0000 */ 56 length1: offset 0x0c, length2: offset 0x0e */
52#define IDX_IO_PLAY_SOUNDFORMAT 0x16 /* PU:0x0010 */ 57#define IDX_IO_CODEC_DMA_LENGTHS 0x0c
58#define IDX_IO_CODEC_DMA_CURRPOS 0x10 /* current DMA position, PU:0x00000000 */
59 /* offset within current DMA transfer area, PU:0x0000 */
60#define IDX_IO_CODEC_DMA_CURROFS 0x14
61#define IDX_IO_CODEC_SOUNDFORMAT 0x16 /* PU:0x0010 */
53 /* all unspecified bits can't be modified */ 62 /* all unspecified bits can't be modified */
54 #define SOUNDFORMAT_FREQUENCY_MASK 0x000f 63 #define SOUNDFORMAT_FREQUENCY_MASK 0x000f
55 #define SOUNDFORMAT_XTAL1 0x00 64 #define SOUNDFORMAT_XTAL1 0x00
@@ -76,6 +85,7 @@
76 #define SOUNDFORMAT_FLAG_16BIT 0x0010 85 #define SOUNDFORMAT_FLAG_16BIT 0x0010
77 #define SOUNDFORMAT_FLAG_2CHANNELS 0x0020 86 #define SOUNDFORMAT_FLAG_2CHANNELS 0x0020
78 87
88
79/* define frequency helpers, for maximum value safety */ 89/* define frequency helpers, for maximum value safety */
80enum azf_freq_t { 90enum azf_freq_t {
81#define AZF_FREQ(rate) AZF_FREQ_##rate = rate 91#define AZF_FREQ(rate) AZF_FREQ_##rate = rate
@@ -96,29 +106,6 @@ enum azf_freq_t {
96#undef AZF_FREQ 106#undef AZF_FREQ
97}; 107};
98 108
99/** recording area (see also: playback bit flag definitions) **/
100#define IDX_IO_REC_FLAGS 0x20 /* ??, PU:0x0000 */
101#define IDX_IO_REC_IRQTYPE 0x22 /* ??, PU:0x0000 */
102 #define IRQ_REC_SOMETHING 0x0001 /* something & ACK */
103 #define IRQ_FINISHED_RECBUF_1 0x0002 /* 1st dmabuf finished & ACK */
104 #define IRQ_FINISHED_RECBUF_2 0x0004 /* 2nd dmabuf finished & ACK */
105 /* hmm, maybe these are just the corresponding *recording* flags ?
106 * but OTOH they are most likely at port 0x22 instead */
107 #define IRQMASK_SOME_STATUS_1 0x0008 /* \ related bits */
108 #define IRQMASK_SOME_STATUS_2 0x0010 /* / (checked together in loop) */
109#define IDX_IO_REC_DMA_START_1 0x24 /* PU:0x00000000 */
110#define IDX_IO_REC_DMA_START_2 0x28 /* PU:0x00000000 */
111#define IDX_IO_REC_DMA_LEN_1 0x2c /* PU:0x0000 */
112#define IDX_IO_REC_DMA_LEN_2 0x2e /* PU:0x0000 */
113#define IDX_IO_REC_DMA_CURRPOS 0x30 /* PU:0x00000000 */
114#define IDX_IO_REC_DMA_CURROFS 0x34 /* PU:0x00000000 */
115#define IDX_IO_REC_SOUNDFORMAT 0x36 /* PU:0x0000 */
116
117/** hmm, what is this I/O area for? MPU401?? or external DAC via I2S?? (after playback, recording, ???, timer) **/
118#define IDX_IO_SOMETHING_FLAGS 0x40 /* gets set to 0x34 just like port 0x0 and 0x20 on card init, PU:0x0000 */
119/* general */
120#define IDX_IO_42H 0x42 /* PU:0x0001 */
121
122/** DirectX timer, main interrupt area (FIXME: and something else?) **/ 109/** DirectX timer, main interrupt area (FIXME: and something else?) **/
123#define IDX_IO_TIMER_VALUE 0x60 /* found this timer area by pure luck :-) */ 110#define IDX_IO_TIMER_VALUE 0x60 /* found this timer area by pure luck :-) */
124 /* timer countdown value; triggers IRQ when timer is finished */ 111 /* timer countdown value; triggers IRQ when timer is finished */
@@ -133,17 +120,19 @@ enum azf_freq_t {
133#define IDX_IO_IRQSTATUS 0x64 120#define IDX_IO_IRQSTATUS 0x64
134 /* some IRQ bit in here might also be used to signal a power-management timer 121 /* some IRQ bit in here might also be used to signal a power-management timer
135 * timeout, to request shutdown of the chip (e.g. AD1815JS has such a thing). 122 * timeout, to request shutdown of the chip (e.g. AD1815JS has such a thing).
136 * Some OPL3 hardware (e.g. in LM4560) has some special timer hardware which 123 * OPL3 hardware contains several timers which confusingly in most cases
137 * can trigger an OPL3 timer IRQ, so maybe there's such a thing as well... */ 124 * are NOT routed to an IRQ, but some designs (e.g. LM4560) DO support that,
125 * so I wouldn't be surprised at all to discover that AZF3328
126 * supports that thing as well... */
138 127
139 #define IRQ_PLAYBACK 0x0001 128 #define IRQ_PLAYBACK 0x0001
140 #define IRQ_RECORDING 0x0002 129 #define IRQ_RECORDING 0x0002
141 #define IRQ_UNKNOWN1 0x0004 /* most probably I2S port */ 130 #define IRQ_I2S_OUT 0x0004 /* this IS I2S, right!? (untested) */
142 #define IRQ_GAMEPORT 0x0008 /* Interrupt of Digital(ly) Enhanced Game Port */ 131 #define IRQ_GAMEPORT 0x0008 /* Interrupt of Digital(ly) Enhanced Game Port */
143 #define IRQ_MPU401 0x0010 132 #define IRQ_MPU401 0x0010
144 #define IRQ_TIMER 0x0020 /* DirectX timer */ 133 #define IRQ_TIMER 0x0020 /* DirectX timer */
145 #define IRQ_UNKNOWN2 0x0040 /* probably unused, or possibly I2S port? */ 134 #define IRQ_UNKNOWN2 0x0040 /* probably unused, or possibly OPL3 timer? */
146 #define IRQ_UNKNOWN3 0x0080 /* probably unused, or possibly I2S port? */ 135 #define IRQ_UNKNOWN3 0x0080 /* probably unused, or possibly OPL3 timer? */
147#define IDX_IO_66H 0x66 /* writing 0xffff returns 0x0000 */ 136#define IDX_IO_66H 0x66 /* writing 0xffff returns 0x0000 */
148 /* this is set to e.g. 0x3ff or 0x300, and writable; 137 /* this is set to e.g. 0x3ff or 0x300, and writable;
149 * maybe some buffer limit, but I couldn't find out more, PU:0x00ff: */ 138 * maybe some buffer limit, but I couldn't find out more, PU:0x00ff: */
@@ -206,7 +195,7 @@ enum azf_freq_t {
206/*** Gameport area port indices ***/ 195/*** Gameport area port indices ***/
207/* (only 0x06 of 0x08 bytes saved/restored by Windows driver) */ 196/* (only 0x06 of 0x08 bytes saved/restored by Windows driver) */
208#define AZF_IO_SIZE_GAME 0x08 197#define AZF_IO_SIZE_GAME 0x08
209#define AZF_IO_SIZE_GAME_PM 0x06 198#define AZF_IO_SIZE_GAME_PM 0x06
210 199
211enum { 200enum {
212 AZF_GAME_LEGACY_IO_PORT = 0x200 201 AZF_GAME_LEGACY_IO_PORT = 0x200
@@ -272,6 +261,12 @@ enum {
272 * 11 --> 1/200: */ 261 * 11 --> 1/200: */
273 #define GAME_HWCFG_ADC_COUNTER_FREQ_MASK 0x06 262 #define GAME_HWCFG_ADC_COUNTER_FREQ_MASK 0x06
274 263
264 /* FIXME: these values might be reversed... */
265 #define GAME_HWCFG_ADC_COUNTER_FREQ_STD 0
266 #define GAME_HWCFG_ADC_COUNTER_FREQ_1_2 1
267 #define GAME_HWCFG_ADC_COUNTER_FREQ_1_20 2
268 #define GAME_HWCFG_ADC_COUNTER_FREQ_1_200 3
269
275 /* enable gameport legacy I/O address (0x200) 270 /* enable gameport legacy I/O address (0x200)
276 * I was unable to locate any configurability for a different address: */ 271 * I was unable to locate any configurability for a different address: */
277 #define GAME_HWCFG_LEGACY_ADDRESS_ENABLE 0x08 272 #define GAME_HWCFG_LEGACY_ADDRESS_ENABLE 0x08
@@ -281,6 +276,7 @@ enum {
281#define AZF_IO_SIZE_MPU_PM 0x04 276#define AZF_IO_SIZE_MPU_PM 0x04
282 277
283/*** OPL3 synth ***/ 278/*** OPL3 synth ***/
279/* (only 0x06 of 0x08 bytes saved/restored by Windows driver) */
284#define AZF_IO_SIZE_OPL3 0x08 280#define AZF_IO_SIZE_OPL3 0x08
285#define AZF_IO_SIZE_OPL3_PM 0x06 281#define AZF_IO_SIZE_OPL3_PM 0x06
286/* hmm, given that a standard OPL3 has 4 registers only, 282/* hmm, given that a standard OPL3 has 4 registers only,
@@ -340,4 +336,7 @@ enum {
340#define SET_CHAN_LEFT 1 336#define SET_CHAN_LEFT 1
341#define SET_CHAN_RIGHT 2 337#define SET_CHAN_RIGHT 2
342 338
339/* helper macro to align I/O port ranges to 32bit I/O width */
340#define AZF_ALIGN(x) (((x) + 3) & (~3))
341
343#endif /* __SOUND_AZT3328_H */ 342#endif /* __SOUND_AZT3328_H */
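
For reference, a minimal stand-alone sketch of the two helper macros touched in
this header: the AZF_FREQ token-pasting trick that names each enum constant
after its rate, and the new AZF_ALIGN macro that pads an I/O range length up to
4-byte (32-bit) granularity. The specific rates and the main() harness are
illustrative only, not taken from the driver.

#include <stdio.h>

/* same token-pasting helper as in the header; the rates shown are examples */
#define AZF_FREQ(rate) AZF_FREQ_##rate = rate
enum azf_freq_t {
	AZF_FREQ(4000),		/* expands to: AZF_FREQ_4000 = 4000 */
	AZF_FREQ(44100),	/* expands to: AZF_FREQ_44100 = 44100 */
	AZF_FREQ(48000),	/* expands to: AZF_FREQ_48000 = 48000 */
};
#undef AZF_FREQ

/* round an I/O range length up to the next multiple of 4 bytes */
#define AZF_ALIGN(x) (((x) + 3) & (~3))

int main(void)
{
	printf("%d\n", AZF_FREQ_44100);		/* 44100 */
	printf("0x%02x\n", AZF_ALIGN(0x06));	/* 0x08 */
	return 0;
}
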
diff --git a/sound/pci/cs46xx/cs46xx_lib.h b/sound/pci/cs46xx/cs46xx_lib.h
index 4eb55aa33612..b5189495d58a 100644
--- a/sound/pci/cs46xx/cs46xx_lib.h
+++ b/sound/pci/cs46xx/cs46xx_lib.h
@@ -35,7 +35,7 @@
35 35
36 36
37#ifdef CONFIG_SND_CS46XX_NEW_DSP 37#ifdef CONFIG_SND_CS46XX_NEW_DSP
38#define CS46XX_MIN_PERIOD_SIZE 1 38#define CS46XX_MIN_PERIOD_SIZE 64
39#define CS46XX_MAX_PERIOD_SIZE 1024*1024 39#define CS46XX_MAX_PERIOD_SIZE 1024*1024
40#else 40#else
41#define CS46XX_MIN_PERIOD_SIZE 2048 41#define CS46XX_MIN_PERIOD_SIZE 2048
diff --git a/sound/pci/ctxfi/ct20k2reg.h b/sound/pci/ctxfi/ct20k2reg.h
index 2d07986f57cc..e0394e3996e8 100644
--- a/sound/pci/ctxfi/ct20k2reg.h
+++ b/sound/pci/ctxfi/ct20k2reg.h
@@ -11,9 +11,12 @@
11 11
12 12
13/* Timer Registers */ 13/* Timer Registers */
14#define TIMER_TIMR 0x1B7004 14#define WC 0x1b7000
15#define INTERRUPT_GIP 0x1B7010 15#define TIMR 0x1b7004
16#define INTERRUPT_GIE 0x1B7014 16# define TIMR_IE (1<<15)
17# define TIMR_IP (1<<14)
18#define GIP 0x1b7010
19#define GIE 0x1b7014
17 20
18/* I2C Registers */ 21/* I2C Registers */
19#define I2C_IF_ADDRESS 0x1B9000 22#define I2C_IF_ADDRESS 0x1B9000
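
A compile-standalone sketch of how the renamed timer registers are meant to be
driven, mirroring the set_timer_tick()/set_timer_irq() helpers added to
cthw20k2.c later in this diff: the tick count goes into TIMR with the
interrupt-enable and interrupt-pending bits folded in, and timer interrupts are
gated globally through GIE. The reg_write() stand-in and the IT_INT bit value
are assumptions for illustration, not the driver's hw_write_20kx() or its real
constant.

#include <stdio.h>

#define TIMR		0x1b7004
#define  TIMR_IE	(1 << 15)	/* timer interrupt enable */
#define  TIMR_IP	(1 << 14)	/* interrupt pending; written back to ack */
#define GIE		0x1b7014
#define  IT_INT		(1 << 0)	/* assumed global timer-IRQ enable bit */

/* stand-in for the driver's hw_write_20kx() */
static void reg_write(unsigned int reg, unsigned int val)
{
	printf("write 0x%06x <- 0x%08x\n", reg, val);
}

static void set_timer_tick(unsigned int ticks)
{
	if (ticks)
		ticks |= TIMR_IE | TIMR_IP;	/* enable the IRQ, ack any pending one */
	reg_write(TIMR, ticks);
}

int main(void)
{
	reg_write(GIE, IT_INT);		/* allow timer interrupts globally */
	set_timer_tick(480);		/* program a 480-wallclock countdown */
	set_timer_tick(0);		/* stop: plain zero write, no enable bits */
	return 0;
}
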
diff --git a/sound/pci/ctxfi/ctamixer.c b/sound/pci/ctxfi/ctamixer.c
index a7f4a671f7b7..fee35cfc0c7f 100644
--- a/sound/pci/ctxfi/ctamixer.c
+++ b/sound/pci/ctxfi/ctamixer.c
@@ -63,7 +63,7 @@ static int amixer_set_input(struct amixer *amixer, struct rsc *rsc)
63 hw = amixer->rsc.hw; 63 hw = amixer->rsc.hw;
64 hw->amixer_set_mode(amixer->rsc.ctrl_blk, AMIXER_Y_IMMEDIATE); 64 hw->amixer_set_mode(amixer->rsc.ctrl_blk, AMIXER_Y_IMMEDIATE);
65 amixer->input = rsc; 65 amixer->input = rsc;
66 if (NULL == rsc) 66 if (!rsc)
67 hw->amixer_set_x(amixer->rsc.ctrl_blk, BLANK_SLOT); 67 hw->amixer_set_x(amixer->rsc.ctrl_blk, BLANK_SLOT);
68 else 68 else
69 hw->amixer_set_x(amixer->rsc.ctrl_blk, 69 hw->amixer_set_x(amixer->rsc.ctrl_blk,
@@ -99,7 +99,7 @@ static int amixer_set_sum(struct amixer *amixer, struct sum *sum)
99 99
100 hw = amixer->rsc.hw; 100 hw = amixer->rsc.hw;
101 amixer->sum = sum; 101 amixer->sum = sum;
102 if (NULL == sum) { 102 if (!sum) {
103 hw->amixer_set_se(amixer->rsc.ctrl_blk, 0); 103 hw->amixer_set_se(amixer->rsc.ctrl_blk, 0);
104 } else { 104 } else {
105 hw->amixer_set_se(amixer->rsc.ctrl_blk, 1); 105 hw->amixer_set_se(amixer->rsc.ctrl_blk, 1);
@@ -124,20 +124,20 @@ static int amixer_commit_write(struct amixer *amixer)
124 124
125 /* Program master and conjugate resources */ 125 /* Program master and conjugate resources */
126 amixer->rsc.ops->master(&amixer->rsc); 126 amixer->rsc.ops->master(&amixer->rsc);
127 if (NULL != input) 127 if (input)
128 input->ops->master(input); 128 input->ops->master(input);
129 129
130 if (NULL != sum) 130 if (sum)
131 sum->rsc.ops->master(&sum->rsc); 131 sum->rsc.ops->master(&sum->rsc);
132 132
133 for (i = 0; i < amixer->rsc.msr; i++) { 133 for (i = 0; i < amixer->rsc.msr; i++) {
134 hw->amixer_set_dirty_all(amixer->rsc.ctrl_blk); 134 hw->amixer_set_dirty_all(amixer->rsc.ctrl_blk);
135 if (NULL != input) { 135 if (input) {
136 hw->amixer_set_x(amixer->rsc.ctrl_blk, 136 hw->amixer_set_x(amixer->rsc.ctrl_blk,
137 input->ops->output_slot(input)); 137 input->ops->output_slot(input));
138 input->ops->next_conj(input); 138 input->ops->next_conj(input);
139 } 139 }
140 if (NULL != sum) { 140 if (sum) {
141 hw->amixer_set_sadr(amixer->rsc.ctrl_blk, 141 hw->amixer_set_sadr(amixer->rsc.ctrl_blk,
142 sum->rsc.ops->index(&sum->rsc)); 142 sum->rsc.ops->index(&sum->rsc));
143 sum->rsc.ops->next_conj(&sum->rsc); 143 sum->rsc.ops->next_conj(&sum->rsc);
@@ -147,10 +147,10 @@ static int amixer_commit_write(struct amixer *amixer)
147 amixer->rsc.ops->next_conj(&amixer->rsc); 147 amixer->rsc.ops->next_conj(&amixer->rsc);
148 } 148 }
149 amixer->rsc.ops->master(&amixer->rsc); 149 amixer->rsc.ops->master(&amixer->rsc);
150 if (NULL != input) 150 if (input)
151 input->ops->master(input); 151 input->ops->master(input);
152 152
153 if (NULL != sum) 153 if (sum)
154 sum->rsc.ops->master(&sum->rsc); 154 sum->rsc.ops->master(&sum->rsc);
155 155
156 return 0; 156 return 0;
@@ -303,7 +303,7 @@ int amixer_mgr_create(void *hw, struct amixer_mgr **ramixer_mgr)
303 303
304 *ramixer_mgr = NULL; 304 *ramixer_mgr = NULL;
305 amixer_mgr = kzalloc(sizeof(*amixer_mgr), GFP_KERNEL); 305 amixer_mgr = kzalloc(sizeof(*amixer_mgr), GFP_KERNEL);
306 if (NULL == amixer_mgr) 306 if (!amixer_mgr)
307 return -ENOMEM; 307 return -ENOMEM;
308 308
309 err = rsc_mgr_init(&amixer_mgr->mgr, AMIXER, AMIXER_RESOURCE_NUM, hw); 309 err = rsc_mgr_init(&amixer_mgr->mgr, AMIXER, AMIXER_RESOURCE_NUM, hw);
@@ -456,7 +456,7 @@ int sum_mgr_create(void *hw, struct sum_mgr **rsum_mgr)
456 456
457 *rsum_mgr = NULL; 457 *rsum_mgr = NULL;
458 sum_mgr = kzalloc(sizeof(*sum_mgr), GFP_KERNEL); 458 sum_mgr = kzalloc(sizeof(*sum_mgr), GFP_KERNEL);
459 if (NULL == sum_mgr) 459 if (!sum_mgr)
460 return -ENOMEM; 460 return -ENOMEM;
461 461
462 err = rsc_mgr_init(&sum_mgr->mgr, SUM, SUM_RESOURCE_NUM, hw); 462 err = rsc_mgr_init(&sum_mgr->mgr, SUM, SUM_RESOURCE_NUM, hw);
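
The rest of the ctxfi changes in this diff repeat the mechanical cleanup shown
in this hunk: kernel style tests pointers directly instead of comparing them
against NULL. A trivial user-space sketch of the idiom, with made-up names:

#include <stdlib.h>

struct thing { int x; };

static int create_thing(struct thing **out)
{
	struct thing *t = calloc(1, sizeof(*t));	/* user-space kzalloc() analogue */

	if (!t)			/* preferred over: if (NULL == t) */
		return -1;	/* the driver returns -ENOMEM here */
	*out = t;
	return 0;
}

int main(void)
{
	struct thing *t;

	return create_thing(&t);
}
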
diff --git a/sound/pci/ctxfi/ctatc.c b/sound/pci/ctxfi/ctatc.c
index a49c76647307..b1b3a644f738 100644
--- a/sound/pci/ctxfi/ctatc.c
+++ b/sound/pci/ctxfi/ctatc.c
@@ -136,7 +136,7 @@ static int ct_map_audio_buffer(struct ct_atc *atc, struct ct_atc_pcm *apcm)
136 struct snd_pcm_runtime *runtime; 136 struct snd_pcm_runtime *runtime;
137 struct ct_vm *vm; 137 struct ct_vm *vm;
138 138
139 if (NULL == apcm->substream) 139 if (!apcm->substream)
140 return 0; 140 return 0;
141 141
142 runtime = apcm->substream->runtime; 142 runtime = apcm->substream->runtime;
@@ -144,7 +144,7 @@ static int ct_map_audio_buffer(struct ct_atc *atc, struct ct_atc_pcm *apcm)
144 144
145 apcm->vm_block = vm->map(vm, apcm->substream, runtime->dma_bytes); 145 apcm->vm_block = vm->map(vm, apcm->substream, runtime->dma_bytes);
146 146
147 if (NULL == apcm->vm_block) 147 if (!apcm->vm_block)
148 return -ENOENT; 148 return -ENOENT;
149 149
150 return 0; 150 return 0;
@@ -154,7 +154,7 @@ static void ct_unmap_audio_buffer(struct ct_atc *atc, struct ct_atc_pcm *apcm)
154{ 154{
155 struct ct_vm *vm; 155 struct ct_vm *vm;
156 156
157 if (NULL == apcm->vm_block) 157 if (!apcm->vm_block)
158 return; 158 return;
159 159
160 vm = atc->vm; 160 vm = atc->vm;
@@ -231,16 +231,16 @@ atc_get_pitch(unsigned int input_rate, unsigned int output_rate)
231 231
232static int select_rom(unsigned int pitch) 232static int select_rom(unsigned int pitch)
233{ 233{
234 if ((pitch > 0x00428f5c) && (pitch < 0x01b851ec)) { 234 if (pitch > 0x00428f5c && pitch < 0x01b851ec) {
235 /* 0.26 <= pitch <= 1.72 */ 235 /* 0.26 <= pitch <= 1.72 */
236 return 1; 236 return 1;
237 } else if ((0x01d66666 == pitch) || (0x01d66667 == pitch)) { 237 } else if (pitch == 0x01d66666 || pitch == 0x01d66667) {
238 /* pitch == 1.8375 */ 238 /* pitch == 1.8375 */
239 return 2; 239 return 2;
240 } else if (0x02000000 == pitch) { 240 } else if (pitch == 0x02000000) {
241 /* pitch == 2 */ 241 /* pitch == 2 */
242 return 3; 242 return 3;
243 } else if ((pitch >= 0x0) && (pitch <= 0x08000000)) { 243 } else if (pitch >= 0x0 && pitch <= 0x08000000) {
244 /* 0 <= pitch <= 8 */ 244 /* 0 <= pitch <= 8 */
245 return 0; 245 return 0;
246 } else { 246 } else {
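
The magic numbers compared in select_rom() read naturally as 8.24 fixed point:
0x01000000 is 1.0, so 0x02000000 is 2.0, 0x01d66666 is roughly 1.8375 and
0x00428f5c roughly 0.26. A small sketch under that assumption; to_pitch() is
only an illustrative ratio-to-fixed-point helper, not the driver's
atc_get_pitch().

#include <stdio.h>

/* assumed conversion: sample-rate ratio scaled by 2^24 (8.24 fixed point) */
static unsigned int to_pitch(unsigned int input_rate, unsigned int output_rate)
{
	return (unsigned int)(((unsigned long long)input_rate << 24) / output_rate);
}

int main(void)
{
	/* 88200/48000 = 1.8375 -> 0x01d66666, the "return 2" case above */
	printf("0x%08x\n", to_pitch(88200, 48000));
	/* 96000/48000 = 2.0    -> 0x02000000, the "return 3" case above */
	printf("0x%08x\n", to_pitch(96000, 48000));
	return 0;
}
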
@@ -283,7 +283,7 @@ static int atc_pcm_playback_prepare(struct ct_atc *atc, struct ct_atc_pcm *apcm)
283 /* Get AMIXER resource */ 283 /* Get AMIXER resource */
284 n_amixer = (n_amixer < 2) ? 2 : n_amixer; 284 n_amixer = (n_amixer < 2) ? 2 : n_amixer;
285 apcm->amixers = kzalloc(sizeof(void *)*n_amixer, GFP_KERNEL); 285 apcm->amixers = kzalloc(sizeof(void *)*n_amixer, GFP_KERNEL);
286 if (NULL == apcm->amixers) { 286 if (!apcm->amixers) {
287 err = -ENOMEM; 287 err = -ENOMEM;
288 goto error1; 288 goto error1;
289 } 289 }
@@ -311,7 +311,7 @@ static int atc_pcm_playback_prepare(struct ct_atc *atc, struct ct_atc_pcm *apcm)
311 INIT_VOL, atc->pcm[i+device*2]); 311 INIT_VOL, atc->pcm[i+device*2]);
312 mutex_unlock(&atc->atc_mutex); 312 mutex_unlock(&atc->atc_mutex);
313 src = src->ops->next_interleave(src); 313 src = src->ops->next_interleave(src);
314 if (NULL == src) 314 if (!src)
315 src = apcm->src; 315 src = apcm->src;
316 } 316 }
317 317
@@ -334,7 +334,7 @@ atc_pcm_release_resources(struct ct_atc *atc, struct ct_atc_pcm *apcm)
334 struct srcimp *srcimp; 334 struct srcimp *srcimp;
335 int i; 335 int i;
336 336
337 if (NULL != apcm->srcimps) { 337 if (apcm->srcimps) {
338 for (i = 0; i < apcm->n_srcimp; i++) { 338 for (i = 0; i < apcm->n_srcimp; i++) {
339 srcimp = apcm->srcimps[i]; 339 srcimp = apcm->srcimps[i];
340 srcimp->ops->unmap(srcimp); 340 srcimp->ops->unmap(srcimp);
@@ -345,7 +345,7 @@ atc_pcm_release_resources(struct ct_atc *atc, struct ct_atc_pcm *apcm)
345 apcm->srcimps = NULL; 345 apcm->srcimps = NULL;
346 } 346 }
347 347
348 if (NULL != apcm->srccs) { 348 if (apcm->srccs) {
349 for (i = 0; i < apcm->n_srcc; i++) { 349 for (i = 0; i < apcm->n_srcc; i++) {
350 src_mgr->put_src(src_mgr, apcm->srccs[i]); 350 src_mgr->put_src(src_mgr, apcm->srccs[i]);
351 apcm->srccs[i] = NULL; 351 apcm->srccs[i] = NULL;
@@ -354,7 +354,7 @@ atc_pcm_release_resources(struct ct_atc *atc, struct ct_atc_pcm *apcm)
354 apcm->srccs = NULL; 354 apcm->srccs = NULL;
355 } 355 }
356 356
357 if (NULL != apcm->amixers) { 357 if (apcm->amixers) {
358 for (i = 0; i < apcm->n_amixer; i++) { 358 for (i = 0; i < apcm->n_amixer; i++) {
359 amixer_mgr->put_amixer(amixer_mgr, apcm->amixers[i]); 359 amixer_mgr->put_amixer(amixer_mgr, apcm->amixers[i]);
360 apcm->amixers[i] = NULL; 360 apcm->amixers[i] = NULL;
@@ -363,17 +363,17 @@ atc_pcm_release_resources(struct ct_atc *atc, struct ct_atc_pcm *apcm)
363 apcm->amixers = NULL; 363 apcm->amixers = NULL;
364 } 364 }
365 365
366 if (NULL != apcm->mono) { 366 if (apcm->mono) {
367 sum_mgr->put_sum(sum_mgr, apcm->mono); 367 sum_mgr->put_sum(sum_mgr, apcm->mono);
368 apcm->mono = NULL; 368 apcm->mono = NULL;
369 } 369 }
370 370
371 if (NULL != apcm->src) { 371 if (apcm->src) {
372 src_mgr->put_src(src_mgr, apcm->src); 372 src_mgr->put_src(src_mgr, apcm->src);
373 apcm->src = NULL; 373 apcm->src = NULL;
374 } 374 }
375 375
376 if (NULL != apcm->vm_block) { 376 if (apcm->vm_block) {
377 /* Undo device virtual mem map */ 377 /* Undo device virtual mem map */
378 ct_unmap_audio_buffer(atc, apcm); 378 ct_unmap_audio_buffer(atc, apcm);
379 apcm->vm_block = NULL; 379 apcm->vm_block = NULL;
@@ -419,7 +419,7 @@ static int atc_pcm_stop(struct ct_atc *atc, struct ct_atc_pcm *apcm)
419 src->ops->set_state(src, SRC_STATE_OFF); 419 src->ops->set_state(src, SRC_STATE_OFF);
420 src->ops->commit_write(src); 420 src->ops->commit_write(src);
421 421
422 if (NULL != apcm->srccs) { 422 if (apcm->srccs) {
423 for (i = 0; i < apcm->n_srcc; i++) { 423 for (i = 0; i < apcm->n_srcc; i++) {
424 src = apcm->srccs[i]; 424 src = apcm->srccs[i];
425 src->ops->set_bm(src, 0); 425 src->ops->set_bm(src, 0);
@@ -544,18 +544,18 @@ atc_pcm_capture_get_resources(struct ct_atc *atc, struct ct_atc_pcm *apcm)
544 544
545 if (n_srcc) { 545 if (n_srcc) {
546 apcm->srccs = kzalloc(sizeof(void *)*n_srcc, GFP_KERNEL); 546 apcm->srccs = kzalloc(sizeof(void *)*n_srcc, GFP_KERNEL);
547 if (NULL == apcm->srccs) 547 if (!apcm->srccs)
548 return -ENOMEM; 548 return -ENOMEM;
549 } 549 }
550 if (n_amixer) { 550 if (n_amixer) {
551 apcm->amixers = kzalloc(sizeof(void *)*n_amixer, GFP_KERNEL); 551 apcm->amixers = kzalloc(sizeof(void *)*n_amixer, GFP_KERNEL);
552 if (NULL == apcm->amixers) { 552 if (!apcm->amixers) {
553 err = -ENOMEM; 553 err = -ENOMEM;
554 goto error1; 554 goto error1;
555 } 555 }
556 } 556 }
557 apcm->srcimps = kzalloc(sizeof(void *)*n_srcimp, GFP_KERNEL); 557 apcm->srcimps = kzalloc(sizeof(void *)*n_srcimp, GFP_KERNEL);
558 if (NULL == apcm->srcimps) { 558 if (!apcm->srcimps) {
559 err = -ENOMEM; 559 err = -ENOMEM;
560 goto error1; 560 goto error1;
561 } 561 }
@@ -818,7 +818,7 @@ static int spdif_passthru_playback_get_resources(struct ct_atc *atc,
818 /* Get AMIXER resource */ 818 /* Get AMIXER resource */
819 n_amixer = (n_amixer < 2) ? 2 : n_amixer; 819 n_amixer = (n_amixer < 2) ? 2 : n_amixer;
820 apcm->amixers = kzalloc(sizeof(void *)*n_amixer, GFP_KERNEL); 820 apcm->amixers = kzalloc(sizeof(void *)*n_amixer, GFP_KERNEL);
821 if (NULL == apcm->amixers) { 821 if (!apcm->amixers) {
822 err = -ENOMEM; 822 err = -ENOMEM;
823 goto error1; 823 goto error1;
824 } 824 }
@@ -919,7 +919,7 @@ spdif_passthru_playback_prepare(struct ct_atc *atc, struct ct_atc_pcm *apcm)
919 amixer = apcm->amixers[i]; 919 amixer = apcm->amixers[i];
920 amixer->ops->setup(amixer, &src->rsc, INIT_VOL, NULL); 920 amixer->ops->setup(amixer, &src->rsc, INIT_VOL, NULL);
921 src = src->ops->next_interleave(src); 921 src = src->ops->next_interleave(src);
922 if (NULL == src) 922 if (!src)
923 src = apcm->src; 923 src = apcm->src;
924 } 924 }
925 /* Connect to SPDIFOO */ 925 /* Connect to SPDIFOO */
@@ -1121,7 +1121,7 @@ static int atc_release_resources(struct ct_atc *atc)
1121 struct ct_mixer *mixer = NULL; 1121 struct ct_mixer *mixer = NULL;
1122 1122
1123 /* disconnect internal mixer objects */ 1123 /* disconnect internal mixer objects */
1124 if (NULL != atc->mixer) { 1124 if (atc->mixer) {
1125 mixer = atc->mixer; 1125 mixer = atc->mixer;
1126 mixer->set_input_left(mixer, MIX_LINE_IN, NULL); 1126 mixer->set_input_left(mixer, MIX_LINE_IN, NULL);
1127 mixer->set_input_right(mixer, MIX_LINE_IN, NULL); 1127 mixer->set_input_right(mixer, MIX_LINE_IN, NULL);
@@ -1131,7 +1131,7 @@ static int atc_release_resources(struct ct_atc *atc)
1131 mixer->set_input_right(mixer, MIX_SPDIF_IN, NULL); 1131 mixer->set_input_right(mixer, MIX_SPDIF_IN, NULL);
1132 } 1132 }
1133 1133
1134 if (NULL != atc->daios) { 1134 if (atc->daios) {
1135 daio_mgr = (struct daio_mgr *)atc->rsc_mgrs[DAIO]; 1135 daio_mgr = (struct daio_mgr *)atc->rsc_mgrs[DAIO];
1136 for (i = 0; i < atc->n_daio; i++) { 1136 for (i = 0; i < atc->n_daio; i++) {
1137 daio = atc->daios[i]; 1137 daio = atc->daios[i];
@@ -1149,7 +1149,7 @@ static int atc_release_resources(struct ct_atc *atc)
1149 atc->daios = NULL; 1149 atc->daios = NULL;
1150 } 1150 }
1151 1151
1152 if (NULL != atc->pcm) { 1152 if (atc->pcm) {
1153 sum_mgr = atc->rsc_mgrs[SUM]; 1153 sum_mgr = atc->rsc_mgrs[SUM];
1154 for (i = 0; i < atc->n_pcm; i++) 1154 for (i = 0; i < atc->n_pcm; i++)
1155 sum_mgr->put_sum(sum_mgr, atc->pcm[i]); 1155 sum_mgr->put_sum(sum_mgr, atc->pcm[i]);
@@ -1158,7 +1158,7 @@ static int atc_release_resources(struct ct_atc *atc)
1158 atc->pcm = NULL; 1158 atc->pcm = NULL;
1159 } 1159 }
1160 1160
1161 if (NULL != atc->srcs) { 1161 if (atc->srcs) {
1162 src_mgr = atc->rsc_mgrs[SRC]; 1162 src_mgr = atc->rsc_mgrs[SRC];
1163 for (i = 0; i < atc->n_src; i++) 1163 for (i = 0; i < atc->n_src; i++)
1164 src_mgr->put_src(src_mgr, atc->srcs[i]); 1164 src_mgr->put_src(src_mgr, atc->srcs[i]);
@@ -1167,7 +1167,7 @@ static int atc_release_resources(struct ct_atc *atc)
1167 atc->srcs = NULL; 1167 atc->srcs = NULL;
1168 } 1168 }
1169 1169
1170 if (NULL != atc->srcimps) { 1170 if (atc->srcimps) {
1171 srcimp_mgr = atc->rsc_mgrs[SRCIMP]; 1171 srcimp_mgr = atc->rsc_mgrs[SRCIMP];
1172 for (i = 0; i < atc->n_srcimp; i++) { 1172 for (i = 0; i < atc->n_srcimp; i++) {
1173 srcimp = atc->srcimps[i]; 1173 srcimp = atc->srcimps[i];
@@ -1185,7 +1185,7 @@ static int ct_atc_destroy(struct ct_atc *atc)
1185{ 1185{
1186 int i = 0; 1186 int i = 0;
1187 1187
1188 if (NULL == atc) 1188 if (!atc)
1189 return 0; 1189 return 0;
1190 1190
1191 if (atc->timer) { 1191 if (atc->timer) {
@@ -1196,21 +1196,20 @@ static int ct_atc_destroy(struct ct_atc *atc)
1196 atc_release_resources(atc); 1196 atc_release_resources(atc);
1197 1197
1198 /* Destroy internal mixer objects */ 1198 /* Destroy internal mixer objects */
1199 if (NULL != atc->mixer) 1199 if (atc->mixer)
1200 ct_mixer_destroy(atc->mixer); 1200 ct_mixer_destroy(atc->mixer);
1201 1201
1202 for (i = 0; i < NUM_RSCTYP; i++) { 1202 for (i = 0; i < NUM_RSCTYP; i++) {
1203 if ((NULL != rsc_mgr_funcs[i].destroy) && 1203 if (rsc_mgr_funcs[i].destroy && atc->rsc_mgrs[i])
1204 (NULL != atc->rsc_mgrs[i]))
1205 rsc_mgr_funcs[i].destroy(atc->rsc_mgrs[i]); 1204 rsc_mgr_funcs[i].destroy(atc->rsc_mgrs[i]);
1206 1205
1207 } 1206 }
1208 1207
1209 if (NULL != atc->hw) 1208 if (atc->hw)
1210 destroy_hw_obj((struct hw *)atc->hw); 1209 destroy_hw_obj((struct hw *)atc->hw);
1211 1210
1212 /* Destroy device virtual memory manager object */ 1211 /* Destroy device virtual memory manager object */
1213 if (NULL != atc->vm) { 1212 if (atc->vm) {
1214 ct_vm_destroy(atc->vm); 1213 ct_vm_destroy(atc->vm);
1215 atc->vm = NULL; 1214 atc->vm = NULL;
1216 } 1215 }
@@ -1275,7 +1274,7 @@ int __devinit ct_atc_create_alsa_devs(struct ct_atc *atc)
1275 alsa_dev_funcs[MIXER].public_name = atc->chip_name; 1274 alsa_dev_funcs[MIXER].public_name = atc->chip_name;
1276 1275
1277 for (i = 0; i < NUM_CTALSADEVS; i++) { 1276 for (i = 0; i < NUM_CTALSADEVS; i++) {
1278 if (NULL == alsa_dev_funcs[i].create) 1277 if (!alsa_dev_funcs[i].create)
1279 continue; 1278 continue;
1280 1279
1281 err = alsa_dev_funcs[i].create(atc, i, 1280 err = alsa_dev_funcs[i].create(atc, i,
@@ -1312,7 +1311,7 @@ static int __devinit atc_create_hw_devs(struct ct_atc *atc)
1312 return err; 1311 return err;
1313 1312
1314 for (i = 0; i < NUM_RSCTYP; i++) { 1313 for (i = 0; i < NUM_RSCTYP; i++) {
1315 if (NULL == rsc_mgr_funcs[i].create) 1314 if (!rsc_mgr_funcs[i].create)
1316 continue; 1315 continue;
1317 1316
1318 err = rsc_mgr_funcs[i].create(atc->hw, &atc->rsc_mgrs[i]); 1317 err = rsc_mgr_funcs[i].create(atc->hw, &atc->rsc_mgrs[i]);
@@ -1339,19 +1338,19 @@ static int atc_get_resources(struct ct_atc *atc)
1339 int err, i; 1338 int err, i;
1340 1339
1341 atc->daios = kzalloc(sizeof(void *)*(DAIONUM), GFP_KERNEL); 1340 atc->daios = kzalloc(sizeof(void *)*(DAIONUM), GFP_KERNEL);
1342 if (NULL == atc->daios) 1341 if (!atc->daios)
1343 return -ENOMEM; 1342 return -ENOMEM;
1344 1343
1345 atc->srcs = kzalloc(sizeof(void *)*(2*2), GFP_KERNEL); 1344 atc->srcs = kzalloc(sizeof(void *)*(2*2), GFP_KERNEL);
1346 if (NULL == atc->srcs) 1345 if (!atc->srcs)
1347 return -ENOMEM; 1346 return -ENOMEM;
1348 1347
1349 atc->srcimps = kzalloc(sizeof(void *)*(2*2), GFP_KERNEL); 1348 atc->srcimps = kzalloc(sizeof(void *)*(2*2), GFP_KERNEL);
1350 if (NULL == atc->srcimps) 1349 if (!atc->srcimps)
1351 return -ENOMEM; 1350 return -ENOMEM;
1352 1351
1353 atc->pcm = kzalloc(sizeof(void *)*(2*4), GFP_KERNEL); 1352 atc->pcm = kzalloc(sizeof(void *)*(2*4), GFP_KERNEL);
1354 if (NULL == atc->pcm) 1353 if (!atc->pcm)
1355 return -ENOMEM; 1354 return -ENOMEM;
1356 1355
1357 daio_mgr = (struct daio_mgr *)atc->rsc_mgrs[DAIO]; 1356 daio_mgr = (struct daio_mgr *)atc->rsc_mgrs[DAIO];
@@ -1648,7 +1647,7 @@ int __devinit ct_atc_create(struct snd_card *card, struct pci_dev *pci,
1648 *ratc = NULL; 1647 *ratc = NULL;
1649 1648
1650 atc = kzalloc(sizeof(*atc), GFP_KERNEL); 1649 atc = kzalloc(sizeof(*atc), GFP_KERNEL);
1651 if (NULL == atc) 1650 if (!atc)
1652 return -ENOMEM; 1651 return -ENOMEM;
1653 1652
1654 /* Set operations */ 1653 /* Set operations */
diff --git a/sound/pci/ctxfi/ctdaio.c b/sound/pci/ctxfi/ctdaio.c
index deb6cfa73600..af56eb949bde 100644
--- a/sound/pci/ctxfi/ctdaio.c
+++ b/sound/pci/ctxfi/ctdaio.c
@@ -173,7 +173,7 @@ static int dao_set_left_input(struct dao *dao, struct rsc *input)
173 int i; 173 int i;
174 174
175 entry = kzalloc((sizeof(*entry) * daio->rscl.msr), GFP_KERNEL); 175 entry = kzalloc((sizeof(*entry) * daio->rscl.msr), GFP_KERNEL);
176 if (NULL == entry) 176 if (!entry)
177 return -ENOMEM; 177 return -ENOMEM;
178 178
179 /* Program master and conjugate resources */ 179 /* Program master and conjugate resources */
@@ -201,7 +201,7 @@ static int dao_set_right_input(struct dao *dao, struct rsc *input)
201 int i; 201 int i;
202 202
203 entry = kzalloc((sizeof(*entry) * daio->rscr.msr), GFP_KERNEL); 203 entry = kzalloc((sizeof(*entry) * daio->rscr.msr), GFP_KERNEL);
204 if (NULL == entry) 204 if (!entry)
205 return -ENOMEM; 205 return -ENOMEM;
206 206
207 /* Program master and conjugate resources */ 207 /* Program master and conjugate resources */
@@ -228,7 +228,7 @@ static int dao_clear_left_input(struct dao *dao)
228 struct daio *daio = &dao->daio; 228 struct daio *daio = &dao->daio;
229 int i; 229 int i;
230 230
231 if (NULL == dao->imappers[0]) 231 if (!dao->imappers[0])
232 return 0; 232 return 0;
233 233
234 entry = dao->imappers[0]; 234 entry = dao->imappers[0];
@@ -252,7 +252,7 @@ static int dao_clear_right_input(struct dao *dao)
252 struct daio *daio = &dao->daio; 252 struct daio *daio = &dao->daio;
253 int i; 253 int i;
254 254
255 if (NULL == dao->imappers[daio->rscl.msr]) 255 if (!dao->imappers[daio->rscl.msr])
256 return 0; 256 return 0;
257 257
258 entry = dao->imappers[daio->rscl.msr]; 258 entry = dao->imappers[daio->rscl.msr];
@@ -408,7 +408,7 @@ static int dao_rsc_init(struct dao *dao,
408 return err; 408 return err;
409 409
410 dao->imappers = kzalloc(sizeof(void *)*desc->msr*2, GFP_KERNEL); 410 dao->imappers = kzalloc(sizeof(void *)*desc->msr*2, GFP_KERNEL);
411 if (NULL == dao->imappers) { 411 if (!dao->imappers) {
412 err = -ENOMEM; 412 err = -ENOMEM;
413 goto error1; 413 goto error1;
414 } 414 }
@@ -442,11 +442,11 @@ error1:
442 442
443static int dao_rsc_uninit(struct dao *dao) 443static int dao_rsc_uninit(struct dao *dao)
444{ 444{
445 if (NULL != dao->imappers) { 445 if (dao->imappers) {
446 if (NULL != dao->imappers[0]) 446 if (dao->imappers[0])
447 dao_clear_left_input(dao); 447 dao_clear_left_input(dao);
448 448
449 if (NULL != dao->imappers[dao->daio.rscl.msr]) 449 if (dao->imappers[dao->daio.rscl.msr])
450 dao_clear_right_input(dao); 450 dao_clear_right_input(dao);
451 451
452 kfree(dao->imappers); 452 kfree(dao->imappers);
@@ -555,7 +555,7 @@ static int get_daio_rsc(struct daio_mgr *mgr,
555 /* Allocate mem for daio resource */ 555 /* Allocate mem for daio resource */
556 if (desc->type <= DAIO_OUT_MAX) { 556 if (desc->type <= DAIO_OUT_MAX) {
557 dao = kzalloc(sizeof(*dao), GFP_KERNEL); 557 dao = kzalloc(sizeof(*dao), GFP_KERNEL);
558 if (NULL == dao) { 558 if (!dao) {
559 err = -ENOMEM; 559 err = -ENOMEM;
560 goto error; 560 goto error;
561 } 561 }
@@ -566,7 +566,7 @@ static int get_daio_rsc(struct daio_mgr *mgr,
566 *rdaio = &dao->daio; 566 *rdaio = &dao->daio;
567 } else { 567 } else {
568 dai = kzalloc(sizeof(*dai), GFP_KERNEL); 568 dai = kzalloc(sizeof(*dai), GFP_KERNEL);
569 if (NULL == dai) { 569 if (!dai) {
570 err = -ENOMEM; 570 err = -ENOMEM;
571 goto error; 571 goto error;
572 } 572 }
@@ -583,9 +583,9 @@ static int get_daio_rsc(struct daio_mgr *mgr,
583 return 0; 583 return 0;
584 584
585error: 585error:
586 if (NULL != dao) 586 if (dao)
587 kfree(dao); 587 kfree(dao);
588 else if (NULL != dai) 588 else if (dai)
589 kfree(dai); 589 kfree(dai);
590 590
591 spin_lock_irqsave(&mgr->mgr_lock, flags); 591 spin_lock_irqsave(&mgr->mgr_lock, flags);
@@ -663,7 +663,7 @@ static int daio_imap_add(struct daio_mgr *mgr, struct imapper *entry)
663 int err; 663 int err;
664 664
665 spin_lock_irqsave(&mgr->imap_lock, flags); 665 spin_lock_irqsave(&mgr->imap_lock, flags);
666 if ((0 == entry->addr) && (mgr->init_imap_added)) { 666 if (!entry->addr && mgr->init_imap_added) {
667 input_mapper_delete(&mgr->imappers, mgr->init_imap, 667 input_mapper_delete(&mgr->imappers, mgr->init_imap,
668 daio_map_op, mgr); 668 daio_map_op, mgr);
669 mgr->init_imap_added = 0; 669 mgr->init_imap_added = 0;
@@ -707,7 +707,7 @@ int daio_mgr_create(void *hw, struct daio_mgr **rdaio_mgr)
707 707
708 *rdaio_mgr = NULL; 708 *rdaio_mgr = NULL;
709 daio_mgr = kzalloc(sizeof(*daio_mgr), GFP_KERNEL); 709 daio_mgr = kzalloc(sizeof(*daio_mgr), GFP_KERNEL);
710 if (NULL == daio_mgr) 710 if (!daio_mgr)
711 return -ENOMEM; 711 return -ENOMEM;
712 712
713 err = rsc_mgr_init(&daio_mgr->mgr, DAIO, DAIO_RESOURCE_NUM, hw); 713 err = rsc_mgr_init(&daio_mgr->mgr, DAIO, DAIO_RESOURCE_NUM, hw);
@@ -718,7 +718,7 @@ int daio_mgr_create(void *hw, struct daio_mgr **rdaio_mgr)
718 spin_lock_init(&daio_mgr->imap_lock); 718 spin_lock_init(&daio_mgr->imap_lock);
719 INIT_LIST_HEAD(&daio_mgr->imappers); 719 INIT_LIST_HEAD(&daio_mgr->imappers);
720 entry = kzalloc(sizeof(*entry), GFP_KERNEL); 720 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
721 if (NULL == entry) { 721 if (!entry) {
722 err = -ENOMEM; 722 err = -ENOMEM;
723 goto error2; 723 goto error2;
724 } 724 }
diff --git a/sound/pci/ctxfi/cthw20k1.c b/sound/pci/ctxfi/cthw20k1.c
index ad3e1d144464..0cf400f879f9 100644
--- a/sound/pci/ctxfi/cthw20k1.c
+++ b/sound/pci/ctxfi/cthw20k1.c
@@ -168,7 +168,7 @@ static int src_get_rsc_ctrl_blk(void **rblk)
168 168
169 *rblk = NULL; 169 *rblk = NULL;
170 blk = kzalloc(sizeof(*blk), GFP_KERNEL); 170 blk = kzalloc(sizeof(*blk), GFP_KERNEL);
171 if (NULL == blk) 171 if (!blk)
172 return -ENOMEM; 172 return -ENOMEM;
173 173
174 *rblk = blk; 174 *rblk = blk;
@@ -494,7 +494,7 @@ static int src_mgr_get_ctrl_blk(void **rblk)
494 494
495 *rblk = NULL; 495 *rblk = NULL;
496 blk = kzalloc(sizeof(*blk), GFP_KERNEL); 496 blk = kzalloc(sizeof(*blk), GFP_KERNEL);
497 if (NULL == blk) 497 if (!blk)
498 return -ENOMEM; 498 return -ENOMEM;
499 499
500 *rblk = blk; 500 *rblk = blk;
@@ -515,7 +515,7 @@ static int srcimp_mgr_get_ctrl_blk(void **rblk)
515 515
516 *rblk = NULL; 516 *rblk = NULL;
517 blk = kzalloc(sizeof(*blk), GFP_KERNEL); 517 blk = kzalloc(sizeof(*blk), GFP_KERNEL);
518 if (NULL == blk) 518 if (!blk)
519 return -ENOMEM; 519 return -ENOMEM;
520 520
521 *rblk = blk; 521 *rblk = blk;
@@ -702,7 +702,7 @@ static int amixer_rsc_get_ctrl_blk(void **rblk)
702 702
703 *rblk = NULL; 703 *rblk = NULL;
704 blk = kzalloc(sizeof(*blk), GFP_KERNEL); 704 blk = kzalloc(sizeof(*blk), GFP_KERNEL);
705 if (NULL == blk) 705 if (!blk)
706 return -ENOMEM; 706 return -ENOMEM;
707 707
708 *rblk = blk; 708 *rblk = blk;
@@ -723,7 +723,7 @@ static int amixer_mgr_get_ctrl_blk(void **rblk)
723 723
724 *rblk = NULL; 724 *rblk = NULL;
725 /*blk = kzalloc(sizeof(*blk), GFP_KERNEL); 725 /*blk = kzalloc(sizeof(*blk), GFP_KERNEL);
726 if (NULL == blk) 726 if (!blk)
727 return -ENOMEM; 727 return -ENOMEM;
728 728
729 *rblk = blk;*/ 729 *rblk = blk;*/
@@ -909,7 +909,7 @@ static int dai_get_ctrl_blk(void **rblk)
909 909
910 *rblk = NULL; 910 *rblk = NULL;
911 blk = kzalloc(sizeof(*blk), GFP_KERNEL); 911 blk = kzalloc(sizeof(*blk), GFP_KERNEL);
912 if (NULL == blk) 912 if (!blk)
913 return -ENOMEM; 913 return -ENOMEM;
914 914
915 *rblk = blk; 915 *rblk = blk;
@@ -958,7 +958,7 @@ static int dao_get_ctrl_blk(void **rblk)
958 958
959 *rblk = NULL; 959 *rblk = NULL;
960 blk = kzalloc(sizeof(*blk), GFP_KERNEL); 960 blk = kzalloc(sizeof(*blk), GFP_KERNEL);
961 if (NULL == blk) 961 if (!blk)
962 return -ENOMEM; 962 return -ENOMEM;
963 963
964 *rblk = blk; 964 *rblk = blk;
@@ -1152,7 +1152,7 @@ static int daio_mgr_get_ctrl_blk(struct hw *hw, void **rblk)
1152 1152
1153 *rblk = NULL; 1153 *rblk = NULL;
1154 blk = kzalloc(sizeof(*blk), GFP_KERNEL); 1154 blk = kzalloc(sizeof(*blk), GFP_KERNEL);
1155 if (NULL == blk) 1155 if (!blk)
1156 return -ENOMEM; 1156 return -ENOMEM;
1157 1157
1158 blk->i2sctl = hw_read_20kx(hw, I2SCTL); 1158 blk->i2sctl = hw_read_20kx(hw, I2SCTL);
@@ -1808,7 +1808,7 @@ static int uaa_to_xfi(struct pci_dev *pci)
1808 /* By default, Hendrix card UAA Bar0 should be using memory... */ 1808 /* By default, Hendrix card UAA Bar0 should be using memory... */
1809 io_base = pci_resource_start(pci, 0); 1809 io_base = pci_resource_start(pci, 0);
1810 mem_base = ioremap(io_base, pci_resource_len(pci, 0)); 1810 mem_base = ioremap(io_base, pci_resource_len(pci, 0));
1811 if (NULL == mem_base) 1811 if (!mem_base)
1812 return -ENOENT; 1812 return -ENOENT;
1813 1813
1814 /* Read current mode from Mode Change Register */ 1814 /* Read current mode from Mode Change Register */
@@ -1977,7 +1977,7 @@ static int hw_card_shutdown(struct hw *hw)
1977 1977
1978 hw->irq = -1; 1978 hw->irq = -1;
1979 1979
1980 if (NULL != ((void *)hw->mem_base)) 1980 if (hw->mem_base)
1981 iounmap((void *)hw->mem_base); 1981 iounmap((void *)hw->mem_base);
1982 1982
1983 hw->mem_base = (unsigned long)NULL; 1983 hw->mem_base = (unsigned long)NULL;
@@ -2274,7 +2274,7 @@ int __devinit create_20k1_hw_obj(struct hw **rhw)
2274 2274
2275 *rhw = NULL; 2275 *rhw = NULL;
2276 hw20k1 = kzalloc(sizeof(*hw20k1), GFP_KERNEL); 2276 hw20k1 = kzalloc(sizeof(*hw20k1), GFP_KERNEL);
2277 if (NULL == hw20k1) 2277 if (!hw20k1)
2278 return -ENOMEM; 2278 return -ENOMEM;
2279 2279
2280 spin_lock_init(&hw20k1->reg_20k1_lock); 2280 spin_lock_init(&hw20k1->reg_20k1_lock);
diff --git a/sound/pci/ctxfi/cthw20k2.c b/sound/pci/ctxfi/cthw20k2.c
index dec46d04b041..b6b11bfe7574 100644
--- a/sound/pci/ctxfi/cthw20k2.c
+++ b/sound/pci/ctxfi/cthw20k2.c
@@ -166,7 +166,7 @@ static int src_get_rsc_ctrl_blk(void **rblk)
166 166
167 *rblk = NULL; 167 *rblk = NULL;
168 blk = kzalloc(sizeof(*blk), GFP_KERNEL); 168 blk = kzalloc(sizeof(*blk), GFP_KERNEL);
169 if (NULL == blk) 169 if (!blk)
170 return -ENOMEM; 170 return -ENOMEM;
171 171
172 *rblk = blk; 172 *rblk = blk;
@@ -492,7 +492,7 @@ static int src_mgr_get_ctrl_blk(void **rblk)
492 492
493 *rblk = NULL; 493 *rblk = NULL;
494 blk = kzalloc(sizeof(*blk), GFP_KERNEL); 494 blk = kzalloc(sizeof(*blk), GFP_KERNEL);
495 if (NULL == blk) 495 if (!blk)
496 return -ENOMEM; 496 return -ENOMEM;
497 497
498 *rblk = blk; 498 *rblk = blk;
@@ -513,7 +513,7 @@ static int srcimp_mgr_get_ctrl_blk(void **rblk)
513 513
514 *rblk = NULL; 514 *rblk = NULL;
515 blk = kzalloc(sizeof(*blk), GFP_KERNEL); 515 blk = kzalloc(sizeof(*blk), GFP_KERNEL);
516 if (NULL == blk) 516 if (!blk)
517 return -ENOMEM; 517 return -ENOMEM;
518 518
519 *rblk = blk; 519 *rblk = blk;
@@ -702,7 +702,7 @@ static int amixer_rsc_get_ctrl_blk(void **rblk)
702 702
703 *rblk = NULL; 703 *rblk = NULL;
704 blk = kzalloc(sizeof(*blk), GFP_KERNEL); 704 blk = kzalloc(sizeof(*blk), GFP_KERNEL);
705 if (NULL == blk) 705 if (!blk)
706 return -ENOMEM; 706 return -ENOMEM;
707 707
708 *rblk = blk; 708 *rblk = blk;
@@ -891,7 +891,7 @@ static int dai_get_ctrl_blk(void **rblk)
891 891
892 *rblk = NULL; 892 *rblk = NULL;
893 blk = kzalloc(sizeof(*blk), GFP_KERNEL); 893 blk = kzalloc(sizeof(*blk), GFP_KERNEL);
894 if (NULL == blk) 894 if (!blk)
895 return -ENOMEM; 895 return -ENOMEM;
896 896
897 *rblk = blk; 897 *rblk = blk;
@@ -941,7 +941,7 @@ static int dao_get_ctrl_blk(void **rblk)
941 941
942 *rblk = NULL; 942 *rblk = NULL;
943 blk = kzalloc(sizeof(*blk), GFP_KERNEL); 943 blk = kzalloc(sizeof(*blk), GFP_KERNEL);
944 if (NULL == blk) 944 if (!blk)
945 return -ENOMEM; 945 return -ENOMEM;
946 946
947 *rblk = blk; 947 *rblk = blk;
@@ -1092,7 +1092,7 @@ static int daio_mgr_get_ctrl_blk(struct hw *hw, void **rblk)
1092 1092
1093 *rblk = NULL; 1093 *rblk = NULL;
1094 blk = kzalloc(sizeof(*blk), GFP_KERNEL); 1094 blk = kzalloc(sizeof(*blk), GFP_KERNEL);
1095 if (NULL == blk) 1095 if (!blk)
1096 return -ENOMEM; 1096 return -ENOMEM;
1097 1097
1098 for (i = 0; i < 8; i++) { 1098 for (i = 0; i < 8; i++) {
@@ -1112,6 +1112,26 @@ static int daio_mgr_put_ctrl_blk(void *blk)
1112 return 0; 1112 return 0;
1113} 1113}
1114 1114
1115/* Timer interrupt */
1116static int set_timer_irq(struct hw *hw, int enable)
1117{
1118 hw_write_20kx(hw, GIE, enable ? IT_INT : 0);
1119 return 0;
1120}
1121
1122static int set_timer_tick(struct hw *hw, unsigned int ticks)
1123{
1124 if (ticks)
1125 ticks |= TIMR_IE | TIMR_IP;
1126 hw_write_20kx(hw, TIMR, ticks);
1127 return 0;
1128}
1129
1130static unsigned int get_wc(struct hw *hw)
1131{
1132 return hw_read_20kx(hw, WC);
1133}
1134
1115/* Card hardware initialization block */ 1135/* Card hardware initialization block */
1116struct dac_conf { 1136struct dac_conf {
1117 unsigned int msr; /* master sample rate in rsrs */ 1137 unsigned int msr; /* master sample rate in rsrs */
@@ -1841,6 +1861,22 @@ static int hw_have_digit_io_switch(struct hw *hw)
1841 return 0; 1861 return 0;
1842} 1862}
1843 1863
1864static irqreturn_t ct_20k2_interrupt(int irq, void *dev_id)
1865{
1866 struct hw *hw = dev_id;
1867 unsigned int status;
1868
1869 status = hw_read_20kx(hw, GIP);
1870 if (!status)
1871 return IRQ_NONE;
1872
1873 if (hw->irq_callback)
1874 hw->irq_callback(hw->irq_callback_data, status);
1875
1876 hw_write_20kx(hw, GIP, status);
1877 return IRQ_HANDLED;
1878}
1879
1844static int hw_card_start(struct hw *hw) 1880static int hw_card_start(struct hw *hw)
1845{ 1881{
1846 int err = 0; 1882 int err = 0;
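
A reduced, user-space model of the interrupt path added above: read the global
interrupt-pending register, bail out with IRQ_NONE when nothing is pending (the
line is requested as shared further down), hand the raw status to whatever
callback is hooked up, then write the same bits back to acknowledge them. The
struct hw below keeps only the fields the handler touches and the register
access is simulated; this is a sketch, not driver code.

#include <stdio.h>

struct hw {
	void (*irq_callback)(void *data, unsigned int status);
	void *irq_callback_data;
	unsigned int gip;	/* models the GIP register contents */
};

static void dispatch(void *data, unsigned int status)
{
	printf("%s: pending bits 0x%x\n", (const char *)data, status);
}

static int fake_isr(struct hw *hw)
{
	unsigned int status = hw->gip;		/* hw_read_20kx(hw, GIP) */

	if (!status)
		return 0;			/* IRQ_NONE: not ours */
	if (hw->irq_callback)
		hw->irq_callback(hw->irq_callback_data, status);
	hw->gip = 0;				/* models the ack write to GIP */
	return 1;				/* IRQ_HANDLED */
}

int main(void)
{
	struct hw hw = {
		.irq_callback = dispatch,
		.irq_callback_data = "ctxfi",
		.gip = 0x1,
	};

	fake_isr(&hw);
	return 0;
}
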
@@ -1868,7 +1904,7 @@ static int hw_card_start(struct hw *hw)
1868 hw->io_base = pci_resource_start(hw->pci, 2); 1904 hw->io_base = pci_resource_start(hw->pci, 2);
1869 hw->mem_base = (unsigned long)ioremap(hw->io_base, 1905 hw->mem_base = (unsigned long)ioremap(hw->io_base,
1870 pci_resource_len(hw->pci, 2)); 1906 pci_resource_len(hw->pci, 2));
1871 if (NULL == (void *)hw->mem_base) { 1907 if (!hw->mem_base) {
1872 err = -ENOENT; 1908 err = -ENOENT;
1873 goto error2; 1909 goto error2;
1874 } 1910 }
@@ -1879,12 +1915,15 @@ static int hw_card_start(struct hw *hw)
1879 set_field(&gctl, GCTL_UAA, 0); 1915 set_field(&gctl, GCTL_UAA, 0);
1880 hw_write_20kx(hw, GLOBAL_CNTL_GCTL, gctl); 1916 hw_write_20kx(hw, GLOBAL_CNTL_GCTL, gctl);
1881 1917
1882 /*if ((err = request_irq(pci->irq, ct_atc_interrupt, IRQF_SHARED, 1918 if (hw->irq < 0) {
1883 atc->chip_details->nm_card, hw))) { 1919 err = request_irq(pci->irq, ct_20k2_interrupt, IRQF_SHARED,
1884 goto error3; 1920 "ctxfi", hw);
1921 if (err < 0) {
1922 printk(KERN_ERR "XFi: Cannot get irq %d\n", pci->irq);
1923 goto error2;
1924 }
1925 hw->irq = pci->irq;
1885 } 1926 }
1886 hw->irq = pci->irq;
1887 */
1888 1927
1889 pci_set_master(pci); 1928 pci_set_master(pci);
1890 1929
@@ -1923,7 +1962,7 @@ static int hw_card_shutdown(struct hw *hw)
1923 1962
1924 hw->irq = -1; 1963 hw->irq = -1;
1925 1964
1926 if (NULL != ((void *)hw->mem_base)) 1965 if (hw->mem_base)
1927 iounmap((void *)hw->mem_base); 1966 iounmap((void *)hw->mem_base);
1928 1967
1929 hw->mem_base = (unsigned long)NULL; 1968 hw->mem_base = (unsigned long)NULL;
@@ -1972,7 +2011,7 @@ static int hw_card_init(struct hw *hw, struct card_conf *info)
1972 hw_write_20kx(hw, GLOBAL_CNTL_GCTL, gctl); 2011 hw_write_20kx(hw, GLOBAL_CNTL_GCTL, gctl);
1973 2012
1974 /* Reset all global pending interrupts */ 2013 /* Reset all global pending interrupts */
1975 hw_write_20kx(hw, INTERRUPT_GIE, 0); 2014 hw_write_20kx(hw, GIE, 0);
1976 /* Reset all SRC pending interrupts */ 2015 /* Reset all SRC pending interrupts */
1977 hw_write_20kx(hw, SRC_IP, 0); 2016 hw_write_20kx(hw, SRC_IP, 0);
1978 2017
@@ -2149,6 +2188,10 @@ static struct hw ct20k2_preset __devinitdata = {
2149 .daio_mgr_set_imapnxt = daio_mgr_set_imapnxt, 2188 .daio_mgr_set_imapnxt = daio_mgr_set_imapnxt,
2150 .daio_mgr_set_imapaddr = daio_mgr_set_imapaddr, 2189 .daio_mgr_set_imapaddr = daio_mgr_set_imapaddr,
2151 .daio_mgr_commit_write = daio_mgr_commit_write, 2190 .daio_mgr_commit_write = daio_mgr_commit_write,
2191
2192 .set_timer_irq = set_timer_irq,
2193 .set_timer_tick = set_timer_tick,
2194 .get_wc = get_wc,
2152}; 2195};
2153 2196
2154int __devinit create_20k2_hw_obj(struct hw **rhw) 2197int __devinit create_20k2_hw_obj(struct hw **rhw)
diff --git a/sound/pci/ctxfi/ctmixer.c b/sound/pci/ctxfi/ctmixer.c
index f26d7cd9db9f..15c1e7271ea8 100644
--- a/sound/pci/ctxfi/ctmixer.c
+++ b/sound/pci/ctxfi/ctmixer.c
@@ -654,7 +654,7 @@ ct_mixer_kcontrol_new(struct ct_mixer *mixer, struct snd_kcontrol_new *new)
654 int err; 654 int err;
655 655
656 kctl = snd_ctl_new1(new, mixer->atc); 656 kctl = snd_ctl_new1(new, mixer->atc);
657 if (NULL == kctl) 657 if (!kctl)
658 return -ENOMEM; 658 return -ENOMEM;
659 659
660 if (SNDRV_CTL_ELEM_IFACE_PCM == kctl->id.iface) 660 if (SNDRV_CTL_ELEM_IFACE_PCM == kctl->id.iface)
@@ -837,17 +837,17 @@ static int ct_mixer_get_mem(struct ct_mixer **rmixer)
837 *rmixer = NULL; 837 *rmixer = NULL;
838 /* Allocate mem for mixer obj */ 838 /* Allocate mem for mixer obj */
839 mixer = kzalloc(sizeof(*mixer), GFP_KERNEL); 839 mixer = kzalloc(sizeof(*mixer), GFP_KERNEL);
840 if (NULL == mixer) 840 if (!mixer)
841 return -ENOMEM; 841 return -ENOMEM;
842 842
843 mixer->amixers = kzalloc(sizeof(void *)*(NUM_CT_AMIXERS*CHN_NUM), 843 mixer->amixers = kzalloc(sizeof(void *)*(NUM_CT_AMIXERS*CHN_NUM),
844 GFP_KERNEL); 844 GFP_KERNEL);
845 if (NULL == mixer->amixers) { 845 if (!mixer->amixers) {
846 err = -ENOMEM; 846 err = -ENOMEM;
847 goto error1; 847 goto error1;
848 } 848 }
849 mixer->sums = kzalloc(sizeof(void *)*(NUM_CT_SUMS*CHN_NUM), GFP_KERNEL); 849 mixer->sums = kzalloc(sizeof(void *)*(NUM_CT_SUMS*CHN_NUM), GFP_KERNEL);
850 if (NULL == mixer->sums) { 850 if (!mixer->sums) {
851 err = -ENOMEM; 851 err = -ENOMEM;
852 goto error2; 852 goto error2;
853 } 853 }
diff --git a/sound/pci/ctxfi/ctpcm.c b/sound/pci/ctxfi/ctpcm.c
index 60ea23180acb..d0dc227fbdd3 100644
--- a/sound/pci/ctxfi/ctpcm.c
+++ b/sound/pci/ctxfi/ctpcm.c
@@ -97,7 +97,7 @@ static void ct_atc_pcm_interrupt(struct ct_atc_pcm *atc_pcm)
97{ 97{
98 struct ct_atc_pcm *apcm = atc_pcm; 98 struct ct_atc_pcm *apcm = atc_pcm;
99 99
100 if (NULL == apcm->substream) 100 if (!apcm->substream)
101 return; 101 return;
102 102
103 snd_pcm_period_elapsed(apcm->substream); 103 snd_pcm_period_elapsed(apcm->substream);
@@ -123,7 +123,7 @@ static int ct_pcm_playback_open(struct snd_pcm_substream *substream)
123 int err; 123 int err;
124 124
125 apcm = kzalloc(sizeof(*apcm), GFP_KERNEL); 125 apcm = kzalloc(sizeof(*apcm), GFP_KERNEL);
126 if (NULL == apcm) 126 if (!apcm)
127 return -ENOMEM; 127 return -ENOMEM;
128 128
129 apcm->substream = substream; 129 apcm->substream = substream;
@@ -271,7 +271,7 @@ static int ct_pcm_capture_open(struct snd_pcm_substream *substream)
271 int err; 271 int err;
272 272
273 apcm = kzalloc(sizeof(*apcm), GFP_KERNEL); 273 apcm = kzalloc(sizeof(*apcm), GFP_KERNEL);
274 if (NULL == apcm) 274 if (!apcm)
275 return -ENOMEM; 275 return -ENOMEM;
276 276
277 apcm->started = 0; 277 apcm->started = 0;
diff --git a/sound/pci/ctxfi/ctresource.c b/sound/pci/ctxfi/ctresource.c
index 889c495bb7d1..7dfaf67344d4 100644
--- a/sound/pci/ctxfi/ctresource.c
+++ b/sound/pci/ctxfi/ctresource.c
@@ -144,7 +144,7 @@ int rsc_init(struct rsc *rsc, u32 idx, enum RSCTYP type, u32 msr, void *hw)
144 rsc->msr = msr; 144 rsc->msr = msr;
145 rsc->hw = hw; 145 rsc->hw = hw;
146 rsc->ops = &rsc_generic_ops; 146 rsc->ops = &rsc_generic_ops;
147 if (NULL == hw) { 147 if (!hw) {
148 rsc->ctrl_blk = NULL; 148 rsc->ctrl_blk = NULL;
149 return 0; 149 return 0;
150 } 150 }
@@ -216,7 +216,7 @@ int rsc_mgr_init(struct rsc_mgr *mgr, enum RSCTYP type,
216 mgr->type = NUM_RSCTYP; 216 mgr->type = NUM_RSCTYP;
217 217
218 mgr->rscs = kzalloc(((amount + 8 - 1) / 8), GFP_KERNEL); 218 mgr->rscs = kzalloc(((amount + 8 - 1) / 8), GFP_KERNEL);
219 if (NULL == mgr->rscs) 219 if (!mgr->rscs)
220 return -ENOMEM; 220 return -ENOMEM;
221 221
222 switch (type) { 222 switch (type) {
diff --git a/sound/pci/ctxfi/ctsrc.c b/sound/pci/ctxfi/ctsrc.c
index df43a5cd3938..c749fa720889 100644
--- a/sound/pci/ctxfi/ctsrc.c
+++ b/sound/pci/ctxfi/ctsrc.c
@@ -441,7 +441,7 @@ get_src_rsc(struct src_mgr *mgr, const struct src_desc *desc, struct src **rsrc)
441 else 441 else
442 src = kzalloc(sizeof(*src), GFP_KERNEL); 442 src = kzalloc(sizeof(*src), GFP_KERNEL);
443 443
444 if (NULL == src) { 444 if (!src) {
445 err = -ENOMEM; 445 err = -ENOMEM;
446 goto error1; 446 goto error1;
447 } 447 }
@@ -550,7 +550,7 @@ int src_mgr_create(void *hw, struct src_mgr **rsrc_mgr)
550 550
551 *rsrc_mgr = NULL; 551 *rsrc_mgr = NULL;
552 src_mgr = kzalloc(sizeof(*src_mgr), GFP_KERNEL); 552 src_mgr = kzalloc(sizeof(*src_mgr), GFP_KERNEL);
553 if (NULL == src_mgr) 553 if (!src_mgr)
554 return -ENOMEM; 554 return -ENOMEM;
555 555
556 err = rsc_mgr_init(&src_mgr->mgr, SRC, SRC_RESOURCE_NUM, hw); 556 err = rsc_mgr_init(&src_mgr->mgr, SRC, SRC_RESOURCE_NUM, hw);
@@ -679,7 +679,7 @@ static int srcimp_rsc_init(struct srcimp *srcimp,
679 /* Reserve memory for imapper nodes */ 679 /* Reserve memory for imapper nodes */
680 srcimp->imappers = kzalloc(sizeof(struct imapper)*desc->msr, 680 srcimp->imappers = kzalloc(sizeof(struct imapper)*desc->msr,
681 GFP_KERNEL); 681 GFP_KERNEL);
682 if (NULL == srcimp->imappers) { 682 if (!srcimp->imappers) {
683 err = -ENOMEM; 683 err = -ENOMEM;
684 goto error1; 684 goto error1;
685 } 685 }
@@ -833,7 +833,7 @@ int srcimp_mgr_create(void *hw, struct srcimp_mgr **rsrcimp_mgr)
833 833
834 *rsrcimp_mgr = NULL; 834 *rsrcimp_mgr = NULL;
835 srcimp_mgr = kzalloc(sizeof(*srcimp_mgr), GFP_KERNEL); 835 srcimp_mgr = kzalloc(sizeof(*srcimp_mgr), GFP_KERNEL);
836 if (NULL == srcimp_mgr) 836 if (!srcimp_mgr)
837 return -ENOMEM; 837 return -ENOMEM;
838 838
839 err = rsc_mgr_init(&srcimp_mgr->mgr, SRCIMP, SRCIMP_RESOURCE_NUM, hw); 839 err = rsc_mgr_init(&srcimp_mgr->mgr, SRCIMP, SRCIMP_RESOURCE_NUM, hw);
@@ -844,7 +844,7 @@ int srcimp_mgr_create(void *hw, struct srcimp_mgr **rsrcimp_mgr)
844 spin_lock_init(&srcimp_mgr->imap_lock); 844 spin_lock_init(&srcimp_mgr->imap_lock);
845 INIT_LIST_HEAD(&srcimp_mgr->imappers); 845 INIT_LIST_HEAD(&srcimp_mgr->imappers);
846 entry = kzalloc(sizeof(*entry), GFP_KERNEL); 846 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
847 if (NULL == entry) { 847 if (!entry) {
848 err = -ENOMEM; 848 err = -ENOMEM;
849 goto error2; 849 goto error2;
850 } 850 }
diff --git a/sound/pci/ctxfi/ctvmem.c b/sound/pci/ctxfi/ctvmem.c
index 67665a7e43c6..6b78752e9503 100644
--- a/sound/pci/ctxfi/ctvmem.c
+++ b/sound/pci/ctxfi/ctvmem.c
@@ -60,7 +60,7 @@ get_vm_block(struct ct_vm *vm, unsigned int size)
60 } 60 }
61 61
62 block = kzalloc(sizeof(*block), GFP_KERNEL); 62 block = kzalloc(sizeof(*block), GFP_KERNEL);
63 if (NULL == block) 63 if (!block)
64 goto out; 64 goto out;
65 65
66 block->addr = entry->addr; 66 block->addr = entry->addr;
@@ -181,7 +181,7 @@ int ct_vm_create(struct ct_vm **rvm)
181 *rvm = NULL; 181 *rvm = NULL;
182 182
183 vm = kzalloc(sizeof(*vm), GFP_KERNEL); 183 vm = kzalloc(sizeof(*vm), GFP_KERNEL);
184 if (NULL == vm) 184 if (!vm)
185 return -ENOMEM; 185 return -ENOMEM;
186 186
187 mutex_init(&vm->lock); 187 mutex_init(&vm->lock);
@@ -189,7 +189,7 @@ int ct_vm_create(struct ct_vm **rvm)
189 /* Allocate page table pages */ 189 /* Allocate page table pages */
190 for (i = 0; i < CT_PTP_NUM; i++) { 190 for (i = 0; i < CT_PTP_NUM; i++) {
191 vm->ptp[i] = kmalloc(PAGE_SIZE, GFP_KERNEL); 191 vm->ptp[i] = kmalloc(PAGE_SIZE, GFP_KERNEL);
192 if (NULL == vm->ptp[i]) 192 if (!vm->ptp[i])
193 break; 193 break;
194 } 194 }
195 if (!i) { 195 if (!i) {
diff --git a/sound/pci/hda/Kconfig b/sound/pci/hda/Kconfig
index 04438f1d682d..55545e0818b5 100644
--- a/sound/pci/hda/Kconfig
+++ b/sound/pci/hda/Kconfig
@@ -46,6 +46,20 @@ config SND_HDA_INPUT_JACK
46 Say Y here to enable the jack plugging notification via 46 Say Y here to enable the jack plugging notification via
47 input layer. 47 input layer.
48 48
49config SND_HDA_PATCH_LOADER
50 bool "Support initialization patch loading for HD-audio"
51 depends on EXPERIMENTAL
52 select FW_LOADER
53 select SND_HDA_HWDEP
54 select SND_HDA_RECONFIG
55 help
56 Say Y here to allow the HD-audio driver to load a pseudo
57 firmware file ("patch") for overriding the BIOS setup at
58 start up. The "patch" file can be specified via patch module
59 option, such as patch=hda-init.
60
61 This option turns on hwdep and reconfig features automatically.
62
49config SND_HDA_CODEC_REALTEK 63config SND_HDA_CODEC_REALTEK
50 bool "Build Realtek HD-audio codec support" 64 bool "Build Realtek HD-audio codec support"
51 default y 65 default y
@@ -134,6 +148,19 @@ config SND_HDA_ELD
134 def_bool y 148 def_bool y
135 depends on SND_HDA_CODEC_INTELHDMI 149 depends on SND_HDA_CODEC_INTELHDMI
136 150
151config SND_HDA_CODEC_CIRRUS
152 bool "Build Cirrus Logic codec support"
153 depends on SND_HDA_INTEL
154 default y
155 help
156 Say Y here to include Cirrus Logic codec support in
157 snd-hda-intel driver, such as CS4206.
158
159 When the HD-audio driver is built as a module, the codec
160 support code is also built as another module,
161 snd-hda-codec-cirrus.
162 This module is automatically loaded at probing.
163
137config SND_HDA_CODEC_CONEXANT 164config SND_HDA_CODEC_CONEXANT
138 bool "Build Conexant HD-audio codec support" 165 bool "Build Conexant HD-audio codec support"
139 default y 166 default y
diff --git a/sound/pci/hda/Makefile b/sound/pci/hda/Makefile
index e3081d4586cc..315a1c4f8998 100644
--- a/sound/pci/hda/Makefile
+++ b/sound/pci/hda/Makefile
@@ -13,6 +13,7 @@ snd-hda-codec-analog-objs := patch_analog.o
13snd-hda-codec-idt-objs := patch_sigmatel.o 13snd-hda-codec-idt-objs := patch_sigmatel.o
14snd-hda-codec-si3054-objs := patch_si3054.o 14snd-hda-codec-si3054-objs := patch_si3054.o
15snd-hda-codec-atihdmi-objs := patch_atihdmi.o 15snd-hda-codec-atihdmi-objs := patch_atihdmi.o
16snd-hda-codec-cirrus-objs := patch_cirrus.o
16snd-hda-codec-ca0110-objs := patch_ca0110.o 17snd-hda-codec-ca0110-objs := patch_ca0110.o
17snd-hda-codec-conexant-objs := patch_conexant.o 18snd-hda-codec-conexant-objs := patch_conexant.o
18snd-hda-codec-via-objs := patch_via.o 19snd-hda-codec-via-objs := patch_via.o
@@ -41,6 +42,9 @@ endif
41ifdef CONFIG_SND_HDA_CODEC_ATIHDMI 42ifdef CONFIG_SND_HDA_CODEC_ATIHDMI
42obj-$(CONFIG_SND_HDA_INTEL) += snd-hda-codec-atihdmi.o 43obj-$(CONFIG_SND_HDA_INTEL) += snd-hda-codec-atihdmi.o
43endif 44endif
45ifdef CONFIG_SND_HDA_CODEC_CIRRUS
46obj-$(CONFIG_SND_HDA_INTEL) += snd-hda-codec-cirrus.o
47endif
44ifdef CONFIG_SND_HDA_CODEC_CA0110 48ifdef CONFIG_SND_HDA_CODEC_CA0110
45obj-$(CONFIG_SND_HDA_INTEL) += snd-hda-codec-ca0110.o 49obj-$(CONFIG_SND_HDA_INTEL) += snd-hda-codec-ca0110.o
46endif 50endif
diff --git a/sound/pci/hda/hda_beep.c b/sound/pci/hda/hda_beep.c
index b0275a050870..3f51a981e604 100644
--- a/sound/pci/hda/hda_beep.c
+++ b/sound/pci/hda/hda_beep.c
@@ -24,6 +24,7 @@
24#include <linux/workqueue.h> 24#include <linux/workqueue.h>
25#include <sound/core.h> 25#include <sound/core.h>
26#include "hda_beep.h" 26#include "hda_beep.h"
27#include "hda_local.h"
27 28
28enum { 29enum {
29 DIGBEEP_HZ_STEP = 46875, /* 46.875 Hz */ 30 DIGBEEP_HZ_STEP = 46875, /* 46.875 Hz */
@@ -118,6 +119,9 @@ int snd_hda_attach_beep_device(struct hda_codec *codec, int nid)
118 struct hda_beep *beep; 119 struct hda_beep *beep;
119 int err; 120 int err;
120 121
122 if (!snd_hda_get_bool_hint(codec, "beep"))
123 return 0; /* disabled explicitly */
124
121 beep = kzalloc(sizeof(*beep), GFP_KERNEL); 125 beep = kzalloc(sizeof(*beep), GFP_KERNEL);
122 if (beep == NULL) 126 if (beep == NULL)
123 return -ENOMEM; 127 return -ENOMEM;
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index c7df01b72cac..af989f660cca 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -44,6 +44,7 @@ struct hda_vendor_id {
44/* codec vendor labels */ 44/* codec vendor labels */
45static struct hda_vendor_id hda_vendor_ids[] = { 45static struct hda_vendor_id hda_vendor_ids[] = {
46 { 0x1002, "ATI" }, 46 { 0x1002, "ATI" },
47 { 0x1013, "Cirrus Logic" },
47 { 0x1057, "Motorola" }, 48 { 0x1057, "Motorola" },
48 { 0x1095, "Silicon Image" }, 49 { 0x1095, "Silicon Image" },
49 { 0x10de, "Nvidia" }, 50 { 0x10de, "Nvidia" },
@@ -150,7 +151,14 @@ make_codec_cmd(struct hda_codec *codec, hda_nid_t nid, int direct,
150{ 151{
151 u32 val; 152 u32 val;
152 153
153 val = (u32)(codec->addr & 0x0f) << 28; 154 if ((codec->addr & ~0xf) || (direct & ~1) || (nid & ~0x7f) ||
155 (verb & ~0xfff) || (parm & ~0xffff)) {
156 printk(KERN_ERR "hda-codec: out of range cmd %x:%x:%x:%x:%x\n",
157 codec->addr, direct, nid, verb, parm);
158 return ~0;
159 }
160
161 val = (u32)codec->addr << 28;
154 val |= (u32)direct << 27; 162 val |= (u32)direct << 27;
155 val |= (u32)nid << 20; 163 val |= (u32)nid << 20;
156 val |= verb << 8; 164 val |= verb << 8;
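
The command word built by make_codec_cmd(), and guarded by the new range check,
packs five fields: a 4-bit codec address, a 1-bit direct flag, a 7-bit node id,
a 12-bit verb and a 16-bit parameter. A stand-alone sketch of that layout; the
sample verb and parameter values are illustrative.

#include <stdio.h>

/* same layout as make_codec_cmd(): addr:4 | direct:1 | nid:7 | verb:12 | parm */
static unsigned int pack_cmd(unsigned int addr, unsigned int direct,
			     unsigned int nid, unsigned int verb,
			     unsigned int parm)
{
	return (addr << 28) | (direct << 27) | (nid << 20) | (verb << 8) | parm;
}

int main(void)
{
	/* verb 0xf00 (get parameters), parameter id 0x04, NID 0x01, codec 0 */
	printf("0x%08x\n", pack_cmd(0, 0, 0x01, 0xf00, 0x04));	/* 0x001f0004 */
	return 0;
}
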
@@ -167,6 +175,9 @@ static int codec_exec_verb(struct hda_codec *codec, unsigned int cmd,
167 struct hda_bus *bus = codec->bus; 175 struct hda_bus *bus = codec->bus;
168 int err; 176 int err;
169 177
178 if (cmd == ~0)
179 return -1;
180
170 if (res) 181 if (res)
171 *res = -1; 182 *res = -1;
172 again: 183 again:
@@ -291,11 +302,20 @@ int snd_hda_get_connections(struct hda_codec *codec, hda_nid_t nid,
291 unsigned int parm; 302 unsigned int parm;
292 int i, conn_len, conns; 303 int i, conn_len, conns;
293 unsigned int shift, num_elems, mask; 304 unsigned int shift, num_elems, mask;
305 unsigned int wcaps;
294 hda_nid_t prev_nid; 306 hda_nid_t prev_nid;
295 307
296 if (snd_BUG_ON(!conn_list || max_conns <= 0)) 308 if (snd_BUG_ON(!conn_list || max_conns <= 0))
297 return -EINVAL; 309 return -EINVAL;
298 310
311 wcaps = get_wcaps(codec, nid);
312 if (!(wcaps & AC_WCAP_CONN_LIST) &&
313 get_wcaps_type(wcaps) != AC_WID_VOL_KNB) {
314 snd_printk(KERN_WARNING "hda_codec: "
315 "connection list not available for 0x%x\n", nid);
316 return -EINVAL;
317 }
318
299 parm = snd_hda_param_read(codec, nid, AC_PAR_CONNLIST_LEN); 319 parm = snd_hda_param_read(codec, nid, AC_PAR_CONNLIST_LEN);
300 if (parm & AC_CLIST_LONG) { 320 if (parm & AC_CLIST_LONG) {
301 /* long form */ 321 /* long form */
@@ -316,6 +336,8 @@ int snd_hda_get_connections(struct hda_codec *codec, hda_nid_t nid,
316 /* single connection */ 336 /* single connection */
317 parm = snd_hda_codec_read(codec, nid, 0, 337 parm = snd_hda_codec_read(codec, nid, 0,
318 AC_VERB_GET_CONNECT_LIST, 0); 338 AC_VERB_GET_CONNECT_LIST, 0);
339 if (parm == -1 && codec->bus->rirb_error)
340 return -EIO;
319 conn_list[0] = parm & mask; 341 conn_list[0] = parm & mask;
320 return 1; 342 return 1;
321 } 343 }
@@ -327,9 +349,12 @@ int snd_hda_get_connections(struct hda_codec *codec, hda_nid_t nid,
327 int range_val; 349 int range_val;
328 hda_nid_t val, n; 350 hda_nid_t val, n;
329 351
330 if (i % num_elems == 0) 352 if (i % num_elems == 0) {
331 parm = snd_hda_codec_read(codec, nid, 0, 353 parm = snd_hda_codec_read(codec, nid, 0,
332 AC_VERB_GET_CONNECT_LIST, i); 354 AC_VERB_GET_CONNECT_LIST, i);
355 if (parm == -1 && codec->bus->rirb_error)
356 return -EIO;
357 }
333 range_val = !!(parm & (1 << (shift-1))); /* ranges */ 358 range_val = !!(parm & (1 << (shift-1))); /* ranges */
334 val = parm & mask; 359 val = parm & mask;
335 if (val == 0) { 360 if (val == 0) {
@@ -727,8 +752,7 @@ static int read_pin_defaults(struct hda_codec *codec)
727 for (i = 0; i < codec->num_nodes; i++, nid++) { 752 for (i = 0; i < codec->num_nodes; i++, nid++) {
728 struct hda_pincfg *pin; 753 struct hda_pincfg *pin;
729 unsigned int wcaps = get_wcaps(codec, nid); 754 unsigned int wcaps = get_wcaps(codec, nid);
730 unsigned int wid_type = (wcaps & AC_WCAP_TYPE) >> 755 unsigned int wid_type = get_wcaps_type(wcaps);
731 AC_WCAP_TYPE_SHIFT;
732 if (wid_type != AC_WID_PIN) 756 if (wid_type != AC_WID_PIN)
733 continue; 757 continue;
734 pin = snd_array_new(&codec->init_pins); 758 pin = snd_array_new(&codec->init_pins);
@@ -891,7 +915,7 @@ static void hda_set_power_state(struct hda_codec *codec, hda_nid_t fg,
891 * Returns 0 if successful, or a negative error code. 915 * Returns 0 if successful, or a negative error code.
892 */ 916 */
893int /*__devinit*/ snd_hda_codec_new(struct hda_bus *bus, unsigned int codec_addr, 917int /*__devinit*/ snd_hda_codec_new(struct hda_bus *bus, unsigned int codec_addr,
894 int do_init, struct hda_codec **codecp) 918 struct hda_codec **codecp)
895{ 919{
896 struct hda_codec *codec; 920 struct hda_codec *codec;
897 char component[31]; 921 char component[31];
@@ -984,11 +1008,6 @@ int /*__devinit*/ snd_hda_codec_new(struct hda_bus *bus, unsigned int codec_addr
984 codec->afg ? codec->afg : codec->mfg, 1008 codec->afg ? codec->afg : codec->mfg,
985 AC_PWRST_D0); 1009 AC_PWRST_D0);
986 1010
987 if (do_init) {
988 err = snd_hda_codec_configure(codec);
989 if (err < 0)
990 goto error;
991 }
992 snd_hda_codec_proc_new(codec); 1011 snd_hda_codec_proc_new(codec);
993 1012
994 snd_hda_create_hwdep(codec); 1013 snd_hda_create_hwdep(codec);
@@ -1042,6 +1061,7 @@ int snd_hda_codec_configure(struct hda_codec *codec)
1042 err = init_unsol_queue(codec->bus); 1061 err = init_unsol_queue(codec->bus);
1043 return err; 1062 return err;
1044} 1063}
1064EXPORT_SYMBOL_HDA(snd_hda_codec_configure);
1045 1065
1046/** 1066/**
1047 * snd_hda_codec_setup_stream - set up the codec for streaming 1067 * snd_hda_codec_setup_stream - set up the codec for streaming
@@ -2356,16 +2376,20 @@ static void hda_set_power_state(struct hda_codec *codec, hda_nid_t fg,
2356 hda_nid_t nid; 2376 hda_nid_t nid;
2357 int i; 2377 int i;
2358 2378
2359 snd_hda_codec_write(codec, fg, 0, AC_VERB_SET_POWER_STATE, 2379 /* this delay seems necessary to avoid click noise at power-down */
2380 if (power_state == AC_PWRST_D3)
2381 msleep(100);
2382 snd_hda_codec_read(codec, fg, 0, AC_VERB_SET_POWER_STATE,
2360 power_state); 2383 power_state);
2361 msleep(10); /* partial workaround for "azx_get_response timeout" */ 2384 /* partial workaround for "azx_get_response timeout" */
2385 if (power_state == AC_PWRST_D0)
2386 msleep(10);
2362 2387
2363 nid = codec->start_nid; 2388 nid = codec->start_nid;
2364 for (i = 0; i < codec->num_nodes; i++, nid++) { 2389 for (i = 0; i < codec->num_nodes; i++, nid++) {
2365 unsigned int wcaps = get_wcaps(codec, nid); 2390 unsigned int wcaps = get_wcaps(codec, nid);
2366 if (wcaps & AC_WCAP_POWER) { 2391 if (wcaps & AC_WCAP_POWER) {
2367 unsigned int wid_type = (wcaps & AC_WCAP_TYPE) >> 2392 unsigned int wid_type = get_wcaps_type(wcaps);
2368 AC_WCAP_TYPE_SHIFT;
2369 if (power_state == AC_PWRST_D3 && 2393 if (power_state == AC_PWRST_D3 &&
2370 wid_type == AC_WID_PIN) { 2394 wid_type == AC_WID_PIN) {
2371 unsigned int pincap; 2395 unsigned int pincap;
@@ -2573,7 +2597,7 @@ unsigned int snd_hda_calc_stream_format(unsigned int rate,
2573 case 20: 2597 case 20:
2574 case 24: 2598 case 24:
2575 case 32: 2599 case 32:
2576 if (maxbps >= 32) 2600 if (maxbps >= 32 || format == SNDRV_PCM_FORMAT_FLOAT_LE)
2577 val |= 0x40; 2601 val |= 0x40;
2578 else if (maxbps >= 24) 2602 else if (maxbps >= 24)
2579 val |= 0x30; 2603 val |= 0x30;
@@ -2700,11 +2724,12 @@ static int snd_hda_query_supported_pcm(struct hda_codec *codec, hda_nid_t nid,
2700 bps = 20; 2724 bps = 20;
2701 } 2725 }
2702 } 2726 }
2703 else if (streams == AC_SUPFMT_FLOAT32) { 2727 if (streams & AC_SUPFMT_FLOAT32) {
2704 /* should be exclusive */
2705 formats |= SNDRV_PCM_FMTBIT_FLOAT_LE; 2728 formats |= SNDRV_PCM_FMTBIT_FLOAT_LE;
2706 bps = 32; 2729 if (!bps)
2707 } else if (streams == AC_SUPFMT_AC3) { 2730 bps = 32;
2731 }
2732 if (streams == AC_SUPFMT_AC3) {
2708 /* should be exclusive */ 2733 /* should be exclusive */
2709 /* temporary hack: we have still no proper support 2734 /* temporary hack: we have still no proper support
2710 * for the direct AC3 stream... 2735 * for the direct AC3 stream...
@@ -3102,7 +3127,7 @@ int snd_hda_check_board_codec_sid_config(struct hda_codec *codec,
3102 tbl = q; 3127 tbl = q;
3103 3128
3104 if (tbl->value >= 0 && tbl->value < num_configs) { 3129 if (tbl->value >= 0 && tbl->value < num_configs) {
3105#ifdef CONFIG_SND_DEBUG_DETECT 3130#ifdef CONFIG_SND_DEBUG_VERBOSE
3106 char tmp[10]; 3131 char tmp[10];
3107 const char *model = NULL; 3132 const char *model = NULL;
3108 if (models) 3133 if (models)
@@ -3655,8 +3680,7 @@ int snd_hda_parse_pin_def_config(struct hda_codec *codec,
3655 end_nid = codec->start_nid + codec->num_nodes; 3680 end_nid = codec->start_nid + codec->num_nodes;
3656 for (nid = codec->start_nid; nid < end_nid; nid++) { 3681 for (nid = codec->start_nid; nid < end_nid; nid++) {
3657 unsigned int wid_caps = get_wcaps(codec, nid); 3682 unsigned int wid_caps = get_wcaps(codec, nid);
3658 unsigned int wid_type = 3683 unsigned int wid_type = get_wcaps_type(wid_caps);
3659 (wid_caps & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT;
3660 unsigned int def_conf; 3684 unsigned int def_conf;
3661 short assoc, loc; 3685 short assoc, loc;
3662 3686
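
The range checks added to make_codec_cmd() above mirror the HD-audio command layout: 4 bits of codec address, one direct-NID flag, 7 bits of NID, then either a 12-bit verb with an 8-bit payload or a 4-bit verb with a 16-bit payload. Below is a minimal stand-alone sketch of that packing; it is not part of the patch, and compose_verb() with its sample values is purely illustrative.

#include <stdint.h>
#include <stdio.h>

/* pack a 32-bit HD-audio command word the same way make_codec_cmd() does,
 * rejecting out-of-range fields as in the hunk above */
static uint32_t compose_verb(unsigned addr, unsigned direct,
			     unsigned nid, unsigned verb, unsigned parm)
{
	if ((addr & ~0xfu) || (direct & ~1u) || (nid & ~0x7fu) ||
	    (verb & ~0xfffu) || (parm & ~0xffffu))
		return ~0u;	/* report "invalid", as the driver now does */
	return (uint32_t)addr << 28 | (uint32_t)direct << 27 |
	       (uint32_t)nid << 20 | (uint32_t)verb << 8 | parm;
}

int main(void)
{
	/* GET_PARAMETER (verb 0xf00) of parameter 0x00 on NID 0x00 of
	 * codec address 0 -> prints 0x000f0000 */
	printf("0x%08x\n", compose_verb(0, 0, 0x00, 0xf00, 0x00));
	return 0;
}
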
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index 1b75f28ed092..99552fb5f756 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -830,7 +830,8 @@ enum {
830int snd_hda_bus_new(struct snd_card *card, const struct hda_bus_template *temp, 830int snd_hda_bus_new(struct snd_card *card, const struct hda_bus_template *temp,
831 struct hda_bus **busp); 831 struct hda_bus **busp);
832int snd_hda_codec_new(struct hda_bus *bus, unsigned int codec_addr, 832int snd_hda_codec_new(struct hda_bus *bus, unsigned int codec_addr,
833 int do_init, struct hda_codec **codecp); 833 struct hda_codec **codecp);
834int snd_hda_codec_configure(struct hda_codec *codec);
834 835
835/* 836/*
836 * low level functions 837 * low level functions
@@ -938,6 +939,13 @@ static inline void snd_hda_power_down(struct hda_codec *codec) {}
938#define snd_hda_codec_needs_resume(codec) 1 939#define snd_hda_codec_needs_resume(codec) 1
939#endif 940#endif
940 941
942#ifdef CONFIG_SND_HDA_PATCH_LOADER
943/*
944 * patch firmware
945 */
946int snd_hda_load_patch(struct hda_bus *bus, const char *patch);
947#endif
948
941/* 949/*
942 * Codec modularization 950 * Codec modularization
943 */ 951 */
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 1d5797a96682..b36f6c5a92df 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -121,11 +121,17 @@ static int add_new_node(struct hda_codec *codec, struct hda_gspec *spec, hda_nid
121 if (node == NULL) 121 if (node == NULL)
122 return -ENOMEM; 122 return -ENOMEM;
123 node->nid = nid; 123 node->nid = nid;
124 nconns = snd_hda_get_connections(codec, nid, conn_list, 124 node->wid_caps = get_wcaps(codec, nid);
125 HDA_MAX_CONNECTIONS); 125 node->type = get_wcaps_type(node->wid_caps);
126 if (nconns < 0) { 126 if (node->wid_caps & AC_WCAP_CONN_LIST) {
127 kfree(node); 127 nconns = snd_hda_get_connections(codec, nid, conn_list,
128 return nconns; 128 HDA_MAX_CONNECTIONS);
129 if (nconns < 0) {
130 kfree(node);
131 return nconns;
132 }
133 } else {
134 nconns = 0;
129 } 135 }
130 if (nconns <= ARRAY_SIZE(node->slist)) 136 if (nconns <= ARRAY_SIZE(node->slist))
131 node->conn_list = node->slist; 137 node->conn_list = node->slist;
@@ -140,8 +146,6 @@ static int add_new_node(struct hda_codec *codec, struct hda_gspec *spec, hda_nid
140 } 146 }
141 memcpy(node->conn_list, conn_list, nconns * sizeof(hda_nid_t)); 147 memcpy(node->conn_list, conn_list, nconns * sizeof(hda_nid_t));
142 node->nconns = nconns; 148 node->nconns = nconns;
143 node->wid_caps = get_wcaps(codec, nid);
144 node->type = (node->wid_caps & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT;
145 149
146 if (node->type == AC_WID_PIN) { 150 if (node->type == AC_WID_PIN) {
147 node->pin_caps = snd_hda_query_pin_caps(codec, node->nid); 151 node->pin_caps = snd_hda_query_pin_caps(codec, node->nid);
diff --git a/sound/pci/hda/hda_hwdep.c b/sound/pci/hda/hda_hwdep.c
index 6812fbe80fa4..cc24e6721d74 100644
--- a/sound/pci/hda/hda_hwdep.c
+++ b/sound/pci/hda/hda_hwdep.c
@@ -24,6 +24,7 @@
24#include <linux/compat.h> 24#include <linux/compat.h>
25#include <linux/mutex.h> 25#include <linux/mutex.h>
26#include <linux/ctype.h> 26#include <linux/ctype.h>
27#include <linux/firmware.h>
27#include <sound/core.h> 28#include <sound/core.h>
28#include "hda_codec.h" 29#include "hda_codec.h"
29#include "hda_local.h" 30#include "hda_local.h"
@@ -312,12 +313,8 @@ static ssize_t init_verbs_show(struct device *dev,
312 return len; 313 return len;
313} 314}
314 315
315static ssize_t init_verbs_store(struct device *dev, 316static int parse_init_verbs(struct hda_codec *codec, const char *buf)
316 struct device_attribute *attr,
317 const char *buf, size_t count)
318{ 317{
319 struct snd_hwdep *hwdep = dev_get_drvdata(dev);
320 struct hda_codec *codec = hwdep->private_data;
321 struct hda_verb *v; 318 struct hda_verb *v;
322 int nid, verb, param; 319 int nid, verb, param;
323 320
@@ -331,6 +328,18 @@ static ssize_t init_verbs_store(struct device *dev,
331 v->nid = nid; 328 v->nid = nid;
332 v->verb = verb; 329 v->verb = verb;
333 v->param = param; 330 v->param = param;
331 return 0;
332}
333
334static ssize_t init_verbs_store(struct device *dev,
335 struct device_attribute *attr,
336 const char *buf, size_t count)
337{
338 struct snd_hwdep *hwdep = dev_get_drvdata(dev);
339 struct hda_codec *codec = hwdep->private_data;
340 int err = parse_init_verbs(codec, buf);
341 if (err < 0)
342 return err;
334 return count; 343 return count;
335} 344}
336 345
@@ -376,19 +385,15 @@ static void remove_trail_spaces(char *str)
376 385
377#define MAX_HINTS 1024 386#define MAX_HINTS 1024
378 387
379static ssize_t hints_store(struct device *dev, 388static int parse_hints(struct hda_codec *codec, const char *buf)
380 struct device_attribute *attr,
381 const char *buf, size_t count)
382{ 389{
383 struct snd_hwdep *hwdep = dev_get_drvdata(dev);
384 struct hda_codec *codec = hwdep->private_data;
385 char *key, *val; 390 char *key, *val;
386 struct hda_hint *hint; 391 struct hda_hint *hint;
387 392
388 while (isspace(*buf)) 393 while (isspace(*buf))
389 buf++; 394 buf++;
390 if (!*buf || *buf == '#' || *buf == '\n') 395 if (!*buf || *buf == '#' || *buf == '\n')
391 return count; 396 return 0;
392 if (*buf == '=') 397 if (*buf == '=')
393 return -EINVAL; 398 return -EINVAL;
394 key = kstrndup_noeol(buf, 1024); 399 key = kstrndup_noeol(buf, 1024);
@@ -411,7 +416,7 @@ static ssize_t hints_store(struct device *dev,
411 kfree(hint->key); 416 kfree(hint->key);
412 hint->key = key; 417 hint->key = key;
413 hint->val = val; 418 hint->val = val;
414 return count; 419 return 0;
415 } 420 }
416 /* allocate a new hint entry */ 421 /* allocate a new hint entry */
417 if (codec->hints.used >= MAX_HINTS) 422 if (codec->hints.used >= MAX_HINTS)
@@ -424,6 +429,18 @@ static ssize_t hints_store(struct device *dev,
424 } 429 }
425 hint->key = key; 430 hint->key = key;
426 hint->val = val; 431 hint->val = val;
432 return 0;
433}
434
435static ssize_t hints_store(struct device *dev,
436 struct device_attribute *attr,
437 const char *buf, size_t count)
438{
439 struct snd_hwdep *hwdep = dev_get_drvdata(dev);
440 struct hda_codec *codec = hwdep->private_data;
441 int err = parse_hints(codec, buf);
442 if (err < 0)
443 return err;
427 return count; 444 return count;
428} 445}
429 446
@@ -469,20 +486,24 @@ static ssize_t driver_pin_configs_show(struct device *dev,
469 486
470#define MAX_PIN_CONFIGS 32 487#define MAX_PIN_CONFIGS 32
471 488
472static ssize_t user_pin_configs_store(struct device *dev, 489static int parse_user_pin_configs(struct hda_codec *codec, const char *buf)
473 struct device_attribute *attr,
474 const char *buf, size_t count)
475{ 490{
476 struct snd_hwdep *hwdep = dev_get_drvdata(dev);
477 struct hda_codec *codec = hwdep->private_data;
478 int nid, cfg; 491 int nid, cfg;
479 int err;
480 492
481 if (sscanf(buf, "%i %i", &nid, &cfg) != 2) 493 if (sscanf(buf, "%i %i", &nid, &cfg) != 2)
482 return -EINVAL; 494 return -EINVAL;
483 if (!nid) 495 if (!nid)
484 return -EINVAL; 496 return -EINVAL;
485 err = snd_hda_add_pincfg(codec, &codec->user_pins, nid, cfg); 497 return snd_hda_add_pincfg(codec, &codec->user_pins, nid, cfg);
498}
499
500static ssize_t user_pin_configs_store(struct device *dev,
501 struct device_attribute *attr,
502 const char *buf, size_t count)
503{
504 struct snd_hwdep *hwdep = dev_get_drvdata(dev);
505 struct hda_codec *codec = hwdep->private_data;
506 int err = parse_user_pin_configs(codec, buf);
486 if (err < 0) 507 if (err < 0)
487 return err; 508 return err;
488 return count; 509 return count;
@@ -553,3 +574,180 @@ int snd_hda_get_bool_hint(struct hda_codec *codec, const char *key)
553EXPORT_SYMBOL_HDA(snd_hda_get_bool_hint); 574EXPORT_SYMBOL_HDA(snd_hda_get_bool_hint);
554 575
555#endif /* CONFIG_SND_HDA_RECONFIG */ 576#endif /* CONFIG_SND_HDA_RECONFIG */
577
578#ifdef CONFIG_SND_HDA_PATCH_LOADER
579
580/* parser mode */
581enum {
582 LINE_MODE_NONE,
583 LINE_MODE_CODEC,
584 LINE_MODE_MODEL,
585 LINE_MODE_PINCFG,
586 LINE_MODE_VERB,
587 LINE_MODE_HINT,
588 NUM_LINE_MODES,
589};
590
591static inline int strmatch(const char *a, const char *b)
592{
593 return strnicmp(a, b, strlen(b)) == 0;
594}
595
596/* parse the contents after the line "[codec]"
597 * accept only the line with three numbers, and assign the current codec
598 */
599static void parse_codec_mode(char *buf, struct hda_bus *bus,
600 struct hda_codec **codecp)
601{
602 unsigned int vendorid, subid, caddr;
603 struct hda_codec *codec;
604
605 *codecp = NULL;
606 if (sscanf(buf, "%i %i %i", &vendorid, &subid, &caddr) == 3) {
607 list_for_each_entry(codec, &bus->codec_list, list) {
608 if (codec->addr == caddr) {
609 *codecp = codec;
610 break;
611 }
612 }
613 }
614}
615
616/* parse the contents after the other command tags, [pincfg], [verb],
617 * [hint] and [model]
618 * just pass to the sysfs helper (only when any codec was specified)
619 */
620static void parse_pincfg_mode(char *buf, struct hda_bus *bus,
621 struct hda_codec **codecp)
622{
623 if (!*codecp)
624 return;
625 parse_user_pin_configs(*codecp, buf);
626}
627
628static void parse_verb_mode(char *buf, struct hda_bus *bus,
629 struct hda_codec **codecp)
630{
631 if (!*codecp)
632 return;
633 parse_init_verbs(*codecp, buf);
634}
635
636static void parse_hint_mode(char *buf, struct hda_bus *bus,
637 struct hda_codec **codecp)
638{
639 if (!*codecp)
640 return;
641 parse_hints(*codecp, buf);
642}
643
644static void parse_model_mode(char *buf, struct hda_bus *bus,
645 struct hda_codec **codecp)
646{
647 if (!*codecp)
648 return;
649 kfree((*codecp)->modelname);
650 (*codecp)->modelname = kstrdup(buf, GFP_KERNEL);
651}
652
653struct hda_patch_item {
654 const char *tag;
655 void (*parser)(char *buf, struct hda_bus *bus, struct hda_codec **retc);
656};
657
658static struct hda_patch_item patch_items[NUM_LINE_MODES] = {
659 [LINE_MODE_CODEC] = { "[codec]", parse_codec_mode },
660 [LINE_MODE_MODEL] = { "[model]", parse_model_mode },
661 [LINE_MODE_VERB] = { "[verb]", parse_verb_mode },
662 [LINE_MODE_PINCFG] = { "[pincfg]", parse_pincfg_mode },
663 [LINE_MODE_HINT] = { "[hint]", parse_hint_mode },
664};
665
 666/* check the line starting with '[' -- change the parser mode accordingly */
667static int parse_line_mode(char *buf, struct hda_bus *bus)
668{
669 int i;
670 for (i = 0; i < ARRAY_SIZE(patch_items); i++) {
671 if (!patch_items[i].tag)
672 continue;
673 if (strmatch(buf, patch_items[i].tag))
674 return i;
675 }
676 return LINE_MODE_NONE;
677}
678
679/* copy one line from the buffer in fw, and update the fields in fw
 680 * return zero if it reaches the end of the buffer, or non-zero
681 * if successfully copied a line
682 *
683 * the spaces at the beginning and the end of the line are stripped
684 */
685static int get_line_from_fw(char *buf, int size, struct firmware *fw)
686{
687 int len;
688 const char *p = fw->data;
689 while (isspace(*p) && fw->size) {
690 p++;
691 fw->size--;
692 }
693 if (!fw->size)
694 return 0;
695 if (size < fw->size)
696 size = fw->size;
697
698 for (len = 0; len < fw->size; len++) {
699 if (!*p)
700 break;
701 if (*p == '\n') {
702 p++;
703 len++;
704 break;
705 }
706 if (len < size)
707 *buf++ = *p++;
708 }
709 *buf = 0;
710 fw->size -= len;
711 fw->data = p;
712 remove_trail_spaces(buf);
713 return 1;
714}
715
716/*
717 * load a "patch" firmware file and parse it
718 */
719int snd_hda_load_patch(struct hda_bus *bus, const char *patch)
720{
721 int err;
722 const struct firmware *fw;
723 struct firmware tmp;
724 char buf[128];
725 struct hda_codec *codec;
726 int line_mode;
727 struct device *dev = bus->card->dev;
728
729 if (snd_BUG_ON(!dev))
730 return -ENODEV;
731 err = request_firmware(&fw, patch, dev);
732 if (err < 0) {
733 printk(KERN_ERR "hda-codec: Cannot load the patch '%s'\n",
734 patch);
735 return err;
736 }
737
738 tmp = *fw;
739 line_mode = LINE_MODE_NONE;
740 codec = NULL;
741 while (get_line_from_fw(buf, sizeof(buf) - 1, &tmp)) {
742 if (!*buf || *buf == '#' || *buf == '\n')
743 continue;
744 if (*buf == '[')
745 line_mode = parse_line_mode(buf, bus);
746 else if (patch_items[line_mode].parser)
747 patch_items[line_mode].parser(buf, bus, &codec);
748 }
749 release_firmware(fw);
750 return 0;
751}
752EXPORT_SYMBOL_HDA(snd_hda_load_patch);
753#endif /* CONFIG_SND_HDA_PATCH_LOADER */
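
The loader above recognizes only the five section tags listed in patch_items[]; empty lines and lines starting with '#' are skipped, and the [pincfg], [verb] and [hint] sections reuse the same parsers as the sysfs reconfiguration interface. As a rough illustration of the format it accepts, a hypothetical patch file could look like the following (every ID and value here is made up for the example); it would be fed in through the patch= module option added to hda_intel.c further below.

# example firmware patch - all values are illustrative only
[codec]
0x10134206 0x106b0100 0

[model]
auto

[pincfg]
0x0a 0x02214030

[verb]
0x01 0x715 0x02

[hint]
hp_detect = yes
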
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 175f07a381ba..20a66f85f0a4 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -61,6 +61,9 @@ static int probe_mask[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS-1)] = -1};
61static int probe_only[SNDRV_CARDS]; 61static int probe_only[SNDRV_CARDS];
62static int single_cmd; 62static int single_cmd;
63static int enable_msi; 63static int enable_msi;
64#ifdef CONFIG_SND_HDA_PATCH_LOADER
65static char *patch[SNDRV_CARDS];
66#endif
64 67
65module_param_array(index, int, NULL, 0444); 68module_param_array(index, int, NULL, 0444);
66MODULE_PARM_DESC(index, "Index value for Intel HD audio interface."); 69MODULE_PARM_DESC(index, "Index value for Intel HD audio interface.");
@@ -84,6 +87,10 @@ MODULE_PARM_DESC(single_cmd, "Use single command to communicate with codecs "
84 "(for debugging only)."); 87 "(for debugging only).");
85module_param(enable_msi, int, 0444); 88module_param(enable_msi, int, 0444);
86MODULE_PARM_DESC(enable_msi, "Enable Message Signaled Interrupt (MSI)"); 89MODULE_PARM_DESC(enable_msi, "Enable Message Signaled Interrupt (MSI)");
90#ifdef CONFIG_SND_HDA_PATCH_LOADER
91module_param_array(patch, charp, NULL, 0444);
92MODULE_PARM_DESC(patch, "Patch file for Intel HD audio interface.");
93#endif
87 94
88#ifdef CONFIG_SND_HDA_POWER_SAVE 95#ifdef CONFIG_SND_HDA_POWER_SAVE
89static int power_save = CONFIG_SND_HDA_POWER_SAVE_DEFAULT; 96static int power_save = CONFIG_SND_HDA_POWER_SAVE_DEFAULT;
@@ -1331,8 +1338,7 @@ static unsigned int azx_max_codecs[AZX_NUM_DRIVERS] __devinitdata = {
1331 [AZX_DRIVER_TERA] = 1, 1338 [AZX_DRIVER_TERA] = 1,
1332}; 1339};
1333 1340
1334static int __devinit azx_codec_create(struct azx *chip, const char *model, 1341static int __devinit azx_codec_create(struct azx *chip, const char *model)
1335 int no_init)
1336{ 1342{
1337 struct hda_bus_template bus_temp; 1343 struct hda_bus_template bus_temp;
1338 int c, codecs, err; 1344 int c, codecs, err;
@@ -1391,7 +1397,7 @@ static int __devinit azx_codec_create(struct azx *chip, const char *model,
1391 for (c = 0; c < max_slots; c++) { 1397 for (c = 0; c < max_slots; c++) {
1392 if ((chip->codec_mask & (1 << c)) & chip->codec_probe_mask) { 1398 if ((chip->codec_mask & (1 << c)) & chip->codec_probe_mask) {
1393 struct hda_codec *codec; 1399 struct hda_codec *codec;
1394 err = snd_hda_codec_new(chip->bus, c, !no_init, &codec); 1400 err = snd_hda_codec_new(chip->bus, c, &codec);
1395 if (err < 0) 1401 if (err < 0)
1396 continue; 1402 continue;
1397 codecs++; 1403 codecs++;
@@ -1401,7 +1407,16 @@ static int __devinit azx_codec_create(struct azx *chip, const char *model,
1401 snd_printk(KERN_ERR SFX "no codecs initialized\n"); 1407 snd_printk(KERN_ERR SFX "no codecs initialized\n");
1402 return -ENXIO; 1408 return -ENXIO;
1403 } 1409 }
1410 return 0;
1411}
1404 1412
1413/* configure each codec instance */
1414static int __devinit azx_codec_configure(struct azx *chip)
1415{
1416 struct hda_codec *codec;
1417 list_for_each_entry(codec, &chip->bus->codec_list, list) {
1418 snd_hda_codec_configure(codec);
1419 }
1405 return 0; 1420 return 0;
1406} 1421}
1407 1422
@@ -2284,6 +2299,30 @@ static void __devinit check_probe_mask(struct azx *chip, int dev)
2284 } 2299 }
2285} 2300}
2286 2301
2302/*
2303 * white-list for enable_msi
2304 */
2305static struct snd_pci_quirk msi_white_list[] __devinitdata = {
2306 SND_PCI_QUIRK(0x103c, 0x3607, "HP Compaq CQ40", 1),
2307 {}
2308};
2309
2310static void __devinit check_msi(struct azx *chip)
2311{
2312 const struct snd_pci_quirk *q;
2313
2314 chip->msi = enable_msi;
2315 if (chip->msi)
2316 return;
2317 q = snd_pci_quirk_lookup(chip->pci, msi_white_list);
2318 if (q) {
2319 printk(KERN_INFO
2320 "hda_intel: msi for device %04x:%04x set to %d\n",
2321 q->subvendor, q->subdevice, q->value);
2322 chip->msi = q->value;
2323 }
2324}
2325
2287 2326
2288/* 2327/*
2289 * constructor 2328 * constructor
@@ -2318,7 +2357,7 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
2318 chip->pci = pci; 2357 chip->pci = pci;
2319 chip->irq = -1; 2358 chip->irq = -1;
2320 chip->driver_type = driver_type; 2359 chip->driver_type = driver_type;
2321 chip->msi = enable_msi; 2360 check_msi(chip);
2322 chip->dev_index = dev; 2361 chip->dev_index = dev;
2323 INIT_WORK(&chip->irq_pending_work, azx_irq_pending_work); 2362 INIT_WORK(&chip->irq_pending_work, azx_irq_pending_work);
2324 2363
@@ -2526,15 +2565,32 @@ static int __devinit azx_probe(struct pci_dev *pci,
2526 return err; 2565 return err;
2527 } 2566 }
2528 2567
2568 /* set this here since it's referred in snd_hda_load_patch() */
2569 snd_card_set_dev(card, &pci->dev);
2570
2529 err = azx_create(card, pci, dev, pci_id->driver_data, &chip); 2571 err = azx_create(card, pci, dev, pci_id->driver_data, &chip);
2530 if (err < 0) 2572 if (err < 0)
2531 goto out_free; 2573 goto out_free;
2532 card->private_data = chip; 2574 card->private_data = chip;
2533 2575
2534 /* create codec instances */ 2576 /* create codec instances */
2535 err = azx_codec_create(chip, model[dev], probe_only[dev]); 2577 err = azx_codec_create(chip, model[dev]);
2536 if (err < 0) 2578 if (err < 0)
2537 goto out_free; 2579 goto out_free;
2580#ifdef CONFIG_SND_HDA_PATCH_LOADER
2581 if (patch[dev]) {
2582 snd_printk(KERN_ERR SFX "Applying patch firmware '%s'\n",
2583 patch[dev]);
2584 err = snd_hda_load_patch(chip->bus, patch[dev]);
2585 if (err < 0)
2586 goto out_free;
2587 }
2588#endif
2589 if (!probe_only[dev]) {
2590 err = azx_codec_configure(chip);
2591 if (err < 0)
2592 goto out_free;
2593 }
2538 2594
2539 /* create PCM streams */ 2595 /* create PCM streams */
2540 err = snd_hda_build_pcms(chip->bus); 2596 err = snd_hda_build_pcms(chip->bus);
@@ -2546,8 +2602,6 @@ static int __devinit azx_probe(struct pci_dev *pci,
2546 if (err < 0) 2602 if (err < 0)
2547 goto out_free; 2603 goto out_free;
2548 2604
2549 snd_card_set_dev(card, &pci->dev);
2550
2551 err = snd_card_register(card); 2605 err = snd_card_register(card);
2552 if (err < 0) 2606 if (err < 0)
2553 goto out_free; 2607 goto out_free;
@@ -2649,11 +2703,15 @@ static struct pci_device_id azx_ids[] = {
2649 /* this entry seems still valid -- i.e. without emu20kx chip */ 2703 /* this entry seems still valid -- i.e. without emu20kx chip */
2650 { PCI_DEVICE(0x1102, 0x0009), .driver_data = AZX_DRIVER_GENERIC }, 2704 { PCI_DEVICE(0x1102, 0x0009), .driver_data = AZX_DRIVER_GENERIC },
2651#endif 2705#endif
2652 /* AMD Generic, PCI class code and Vendor ID for HD Audio */ 2706 /* AMD/ATI Generic, PCI class code and Vendor ID for HD Audio */
2653 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_ANY_ID), 2707 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_ANY_ID),
2654 .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8, 2708 .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8,
2655 .class_mask = 0xffffff, 2709 .class_mask = 0xffffff,
2656 .driver_data = AZX_DRIVER_GENERIC }, 2710 .driver_data = AZX_DRIVER_GENERIC },
2711 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_ANY_ID),
2712 .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8,
2713 .class_mask = 0xffffff,
2714 .driver_data = AZX_DRIVER_GENERIC },
2657 { 0, } 2715 { 0, }
2658}; 2716};
2659MODULE_DEVICE_TABLE(pci, azx_ids); 2717MODULE_DEVICE_TABLE(pci, azx_ids);
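
With the hunks above, a per-card patch file is pulled in via request_firmware(), so it is looked up in the normal firmware search path (typically /lib/firmware), and it is applied after the codec instances are created but before they are configured; probe_only= now only skips the configuration step rather than the whole codec setup. A usage sketch with a made-up file name:

# /etc/modprobe.d/hda-patch.conf (example only)
options snd-hda-intel patch=hda-patch.fw
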
diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h
index 83349013b4df..5f1dcc59002b 100644
--- a/sound/pci/hda/hda_local.h
+++ b/sound/pci/hda/hda_local.h
@@ -99,7 +99,6 @@ struct snd_kcontrol *snd_hda_find_mixer_ctl(struct hda_codec *codec,
99int snd_hda_add_vmaster(struct hda_codec *codec, char *name, 99int snd_hda_add_vmaster(struct hda_codec *codec, char *name,
100 unsigned int *tlv, const char **slaves); 100 unsigned int *tlv, const char **slaves);
101int snd_hda_codec_reset(struct hda_codec *codec); 101int snd_hda_codec_reset(struct hda_codec *codec);
102int snd_hda_codec_configure(struct hda_codec *codec);
103 102
104/* amp value bits */ 103/* amp value bits */
105#define HDA_AMP_MUTE 0x80 104#define HDA_AMP_MUTE 0x80
@@ -408,6 +407,19 @@ static inline u32 get_wcaps(struct hda_codec *codec, hda_nid_t nid)
408 return codec->wcaps[nid - codec->start_nid]; 407 return codec->wcaps[nid - codec->start_nid];
409} 408}
410 409
410/* get the widget type from widget capability bits */
411#define get_wcaps_type(wcaps) (((wcaps) & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT)
412
413static inline unsigned int get_wcaps_channels(u32 wcaps)
414{
415 unsigned int chans;
416
417 chans = (wcaps & AC_WCAP_CHAN_CNT_EXT) >> 13;
418 chans = ((chans << 1) | 1) + 1;
419
420 return chans;
421}
422
411u32 query_amp_caps(struct hda_codec *codec, hda_nid_t nid, int direction); 423u32 query_amp_caps(struct hda_codec *codec, hda_nid_t nid, int direction);
412int snd_hda_override_amp_caps(struct hda_codec *codec, hda_nid_t nid, int dir, 424int snd_hda_override_amp_caps(struct hda_codec *codec, hda_nid_t nid, int dir,
413 unsigned int caps); 425 unsigned int caps);
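
The two helpers added above centralize widget-capability decoding that several hunks in this series previously open-coded (hda_codec.c, hda_generic.c, hda_proc.c, patch_atihdmi.c, patch_ca0110.c). A small stand-alone sketch of the same bit arithmetic, assuming the usual AC_WCAP_* layout (widget type in bits 23:20, extended channel count in bits 15:13) and, like get_wcaps_channels(), treating the low channel-count bit as set:

#include <stdio.h>

#define WCAP_TYPE_MASK		0x00f00000	/* assumed AC_WCAP_TYPE */
#define WCAP_TYPE_SHIFT		20
#define WCAP_CHAN_EXT_MASK	0x0000e000	/* assumed AC_WCAP_CHAN_CNT_EXT */
#define WCAP_CHAN_EXT_SHIFT	13

static unsigned int wcaps_type(unsigned int wcaps)
{
	return (wcaps & WCAP_TYPE_MASK) >> WCAP_TYPE_SHIFT;
}

/* mirrors get_wcaps_channels(): result is 2 * ext_count + 2 */
static unsigned int wcaps_channels(unsigned int wcaps)
{
	unsigned int chans = (wcaps & WCAP_CHAN_EXT_MASK) >> WCAP_CHAN_EXT_SHIFT;
	return ((chans << 1) | 1) + 1;
}

int main(void)
{
	unsigned int wcaps = 0x00400201;	/* hypothetical stereo widget caps */
	/* prints "type=4 channels=2" */
	printf("type=%u channels=%u\n", wcaps_type(wcaps), wcaps_channels(wcaps));
	return 0;
}
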
diff --git a/sound/pci/hda/hda_proc.c b/sound/pci/hda/hda_proc.c
index 418c5d1badaa..95f24e4729f8 100644
--- a/sound/pci/hda/hda_proc.c
+++ b/sound/pci/hda/hda_proc.c
@@ -508,17 +508,14 @@ static void print_codec_info(struct snd_info_entry *entry,
508 unsigned int wid_caps = 508 unsigned int wid_caps =
509 snd_hda_param_read(codec, nid, 509 snd_hda_param_read(codec, nid,
510 AC_PAR_AUDIO_WIDGET_CAP); 510 AC_PAR_AUDIO_WIDGET_CAP);
511 unsigned int wid_type = 511 unsigned int wid_type = get_wcaps_type(wid_caps);
512 (wid_caps & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT;
513 hda_nid_t conn[HDA_MAX_CONNECTIONS]; 512 hda_nid_t conn[HDA_MAX_CONNECTIONS];
514 int conn_len = 0; 513 int conn_len = 0;
515 514
516 snd_iprintf(buffer, "Node 0x%02x [%s] wcaps 0x%x:", nid, 515 snd_iprintf(buffer, "Node 0x%02x [%s] wcaps 0x%x:", nid,
517 get_wid_type_name(wid_type), wid_caps); 516 get_wid_type_name(wid_type), wid_caps);
518 if (wid_caps & AC_WCAP_STEREO) { 517 if (wid_caps & AC_WCAP_STEREO) {
519 unsigned int chans; 518 unsigned int chans = get_wcaps_channels(wid_caps);
520 chans = (wid_caps & AC_WCAP_CHAN_CNT_EXT) >> 13;
521 chans = ((chans << 1) | 1) + 1;
522 if (chans == 2) 519 if (chans == 2)
523 snd_iprintf(buffer, " Stereo"); 520 snd_iprintf(buffer, " Stereo");
524 else 521 else
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index 403588c6e3f6..215e72a87113 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -2982,7 +2982,8 @@ static int patch_ad1988(struct hda_codec *codec)
2982 board_config = snd_hda_check_board_config(codec, AD1988_MODEL_LAST, 2982 board_config = snd_hda_check_board_config(codec, AD1988_MODEL_LAST,
2983 ad1988_models, ad1988_cfg_tbl); 2983 ad1988_models, ad1988_cfg_tbl);
2984 if (board_config < 0) { 2984 if (board_config < 0) {
2985 printk(KERN_INFO "hda_codec: Unknown model for AD1988, trying auto-probe from BIOS...\n"); 2985 printk(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
2986 codec->chip_name);
2986 board_config = AD1988_AUTO; 2987 board_config = AD1988_AUTO;
2987 } 2988 }
2988 2989
@@ -3702,19 +3703,29 @@ static struct hda_amp_list ad1884a_loopbacks[] = {
3702 * Port F: Internal speakers 3703 * Port F: Internal speakers
3703 */ 3704 */
3704 3705
3705static struct hda_input_mux ad1884a_laptop_capture_source = { 3706static int ad1884a_mobile_master_sw_put(struct snd_kcontrol *kcontrol,
3706 .num_items = 4, 3707 struct snd_ctl_elem_value *ucontrol)
3707 .items = { 3708{
3708 { "Mic", 0x0 }, /* port-B */ 3709 struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
3709 { "Internal Mic", 0x1 }, /* port-C */ 3710 int ret = snd_hda_mixer_amp_switch_put(kcontrol, ucontrol);
3710 { "Dock Mic", 0x4 }, /* port-E */ 3711 int mute = (!ucontrol->value.integer.value[0] &&
3711 { "Mix", 0x3 }, 3712 !ucontrol->value.integer.value[1]);
3712 }, 3713 /* toggle GPIO1 according to the mute state */
3713}; 3714 snd_hda_codec_write_cache(codec, 0x01, 0, AC_VERB_SET_GPIO_DATA,
3715 mute ? 0x02 : 0x0);
3716 return ret;
3717}
3714 3718
3715static struct snd_kcontrol_new ad1884a_laptop_mixers[] = { 3719static struct snd_kcontrol_new ad1884a_laptop_mixers[] = {
3716 HDA_CODEC_VOLUME("Master Playback Volume", 0x21, 0x0, HDA_OUTPUT), 3720 HDA_CODEC_VOLUME("Master Playback Volume", 0x21, 0x0, HDA_OUTPUT),
3717 HDA_CODEC_MUTE("Master Playback Switch", 0x21, 0x0, HDA_OUTPUT), 3721 {
3722 .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
3723 .name = "Master Playback Switch",
3724 .info = snd_hda_mixer_amp_switch_info,
3725 .get = snd_hda_mixer_amp_switch_get,
3726 .put = ad1884a_mobile_master_sw_put,
3727 .private_value = HDA_COMPOSE_AMP_VAL(0x21, 3, 0, HDA_OUTPUT),
3728 },
3718 HDA_CODEC_MUTE("Dock Playback Switch", 0x12, 0x0, HDA_OUTPUT), 3729 HDA_CODEC_MUTE("Dock Playback Switch", 0x12, 0x0, HDA_OUTPUT),
3719 HDA_CODEC_VOLUME("PCM Playback Volume", 0x20, 0x5, HDA_INPUT), 3730 HDA_CODEC_VOLUME("PCM Playback Volume", 0x20, 0x5, HDA_INPUT),
3720 HDA_CODEC_MUTE("PCM Playback Switch", 0x20, 0x5, HDA_INPUT), 3731 HDA_CODEC_MUTE("PCM Playback Switch", 0x20, 0x5, HDA_INPUT),
@@ -3729,36 +3740,9 @@ static struct snd_kcontrol_new ad1884a_laptop_mixers[] = {
3729 HDA_CODEC_VOLUME("Dock Mic Boost", 0x25, 0x0, HDA_OUTPUT), 3740 HDA_CODEC_VOLUME("Dock Mic Boost", 0x25, 0x0, HDA_OUTPUT),
3730 HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT), 3741 HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT),
3731 HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT), 3742 HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT),
3732 HDA_CODEC_VOLUME_IDX("Capture Volume", 1, 0x0d, 0x0, HDA_OUTPUT),
3733 HDA_CODEC_MUTE_IDX("Capture Switch", 1, 0x0d, 0x0, HDA_OUTPUT),
3734 {
3735 .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
3736 /* The multiple "Capture Source" controls confuse alsamixer
3737 * So call somewhat different..
3738 */
3739 /* .name = "Capture Source", */
3740 .name = "Input Source",
3741 .count = 2,
3742 .info = ad198x_mux_enum_info,
3743 .get = ad198x_mux_enum_get,
3744 .put = ad198x_mux_enum_put,
3745 },
3746 { } /* end */ 3743 { } /* end */
3747}; 3744};
3748 3745
3749static int ad1884a_mobile_master_sw_put(struct snd_kcontrol *kcontrol,
3750 struct snd_ctl_elem_value *ucontrol)
3751{
3752 struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
3753 int ret = snd_hda_mixer_amp_switch_put(kcontrol, ucontrol);
3754 int mute = (!ucontrol->value.integer.value[0] &&
3755 !ucontrol->value.integer.value[1]);
3756 /* toggle GPIO1 according to the mute state */
3757 snd_hda_codec_write_cache(codec, 0x01, 0, AC_VERB_SET_GPIO_DATA,
3758 mute ? 0x02 : 0x0);
3759 return ret;
3760}
3761
3762static struct snd_kcontrol_new ad1884a_mobile_mixers[] = { 3746static struct snd_kcontrol_new ad1884a_mobile_mixers[] = {
3763 HDA_CODEC_VOLUME("Master Playback Volume", 0x21, 0x0, HDA_OUTPUT), 3747 HDA_CODEC_VOLUME("Master Playback Volume", 0x21, 0x0, HDA_OUTPUT),
3764 /*HDA_CODEC_MUTE("Master Playback Switch", 0x21, 0x0, HDA_OUTPUT),*/ 3748 /*HDA_CODEC_MUTE("Master Playback Switch", 0x21, 0x0, HDA_OUTPUT),*/
@@ -3828,6 +3812,63 @@ static int ad1884a_hp_init(struct hda_codec *codec)
3828 return 0; 3812 return 0;
3829} 3813}
3830 3814
3815/* mute internal speaker if HP or docking HP is plugged */
3816static void ad1884a_laptop_automute(struct hda_codec *codec)
3817{
3818 unsigned int present;
3819
3820 present = snd_hda_codec_read(codec, 0x11, 0, AC_VERB_GET_PIN_SENSE, 0);
3821 present &= AC_PINSENSE_PRESENCE;
3822 if (!present) {
3823 present = snd_hda_codec_read(codec, 0x12, 0,
3824 AC_VERB_GET_PIN_SENSE, 0);
3825 present &= AC_PINSENSE_PRESENCE;
3826 }
3827 snd_hda_codec_amp_stereo(codec, 0x16, HDA_OUTPUT, 0,
3828 HDA_AMP_MUTE, present ? HDA_AMP_MUTE : 0);
3829 snd_hda_codec_write(codec, 0x16, 0, AC_VERB_SET_EAPD_BTLENABLE,
3830 present ? 0x00 : 0x02);
3831}
3832
3833/* switch to external mic if plugged */
3834static void ad1884a_laptop_automic(struct hda_codec *codec)
3835{
3836 unsigned int idx;
3837
3838 if (snd_hda_codec_read(codec, 0x14, 0, AC_VERB_GET_PIN_SENSE, 0) &
3839 AC_PINSENSE_PRESENCE)
3840 idx = 0;
3841 else if (snd_hda_codec_read(codec, 0x1c, 0, AC_VERB_GET_PIN_SENSE, 0) &
3842 AC_PINSENSE_PRESENCE)
3843 idx = 4;
3844 else
3845 idx = 1;
3846 snd_hda_codec_write(codec, 0x0c, 0, AC_VERB_SET_CONNECT_SEL, idx);
3847}
3848
3849/* unsolicited event for HP jack sensing */
3850static void ad1884a_laptop_unsol_event(struct hda_codec *codec,
3851 unsigned int res)
3852{
3853 switch (res >> 26) {
3854 case AD1884A_HP_EVENT:
3855 ad1884a_laptop_automute(codec);
3856 break;
3857 case AD1884A_MIC_EVENT:
3858 ad1884a_laptop_automic(codec);
3859 break;
3860 }
3861}
3862
3863/* initialize jack-sensing, too */
3864static int ad1884a_laptop_init(struct hda_codec *codec)
3865{
3866 ad198x_init(codec);
3867 ad1884a_laptop_automute(codec);
3868 ad1884a_laptop_automic(codec);
3869 return 0;
3870}
3871
3831/* additional verbs for laptop model */ 3872/* additional verbs for laptop model */
3832static struct hda_verb ad1884a_laptop_verbs[] = { 3873static struct hda_verb ad1884a_laptop_verbs[] = {
3833 /* Port-A (HP) pin - always unmuted */ 3874 /* Port-A (HP) pin - always unmuted */
@@ -3844,11 +3885,19 @@ static struct hda_verb ad1884a_laptop_verbs[] = {
3844 {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80}, 3885 {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
3845 {0x14, AC_VERB_SET_AMP_GAIN_MUTE, 0x7002}, /* raise mic as default */ 3886 {0x14, AC_VERB_SET_AMP_GAIN_MUTE, 0x7002}, /* raise mic as default */
3846 {0x15, AC_VERB_SET_AMP_GAIN_MUTE, 0x7002}, /* raise mic as default */ 3887 {0x15, AC_VERB_SET_AMP_GAIN_MUTE, 0x7002}, /* raise mic as default */
3888 /* Port-D (docking line-out) pin - default unmuted */
3889 {0x12, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
3847 /* analog mix */ 3890 /* analog mix */
3848 {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)}, 3891 {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
3849 /* unsolicited event for pin-sense */ 3892 /* unsolicited event for pin-sense */
3850 {0x11, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | AD1884A_HP_EVENT}, 3893 {0x11, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | AD1884A_HP_EVENT},
3894 {0x12, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | AD1884A_HP_EVENT},
3851 {0x14, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | AD1884A_MIC_EVENT}, 3895 {0x14, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | AD1884A_MIC_EVENT},
3896 {0x1c, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | AD1884A_MIC_EVENT},
3897 /* allow to touch GPIO1 (for mute control) */
3898 {0x01, AC_VERB_SET_GPIO_MASK, 0x02},
3899 {0x01, AC_VERB_SET_GPIO_DIRECTION, 0x02},
3900 {0x01, AC_VERB_SET_GPIO_DATA, 0x02}, /* first muted */
3852 { } /* end */ 3901 { } /* end */
3853}; 3902};
3854 3903
@@ -4008,6 +4057,7 @@ static struct snd_pci_quirk ad1884a_cfg_tbl[] = {
4008 SND_PCI_QUIRK_MASK(0x103c, 0xfff0, 0x30d0, "HP laptop", AD1884A_LAPTOP), 4057 SND_PCI_QUIRK_MASK(0x103c, 0xfff0, 0x30d0, "HP laptop", AD1884A_LAPTOP),
4009 SND_PCI_QUIRK_MASK(0x103c, 0xfff0, 0x30e0, "HP laptop", AD1884A_LAPTOP), 4058 SND_PCI_QUIRK_MASK(0x103c, 0xfff0, 0x30e0, "HP laptop", AD1884A_LAPTOP),
4010 SND_PCI_QUIRK_MASK(0x103c, 0xff00, 0x3600, "HP laptop", AD1884A_LAPTOP), 4059 SND_PCI_QUIRK_MASK(0x103c, 0xff00, 0x3600, "HP laptop", AD1884A_LAPTOP),
4060 SND_PCI_QUIRK_MASK(0x103c, 0xfff0, 0x7010, "HP laptop", AD1884A_MOBILE),
4011 SND_PCI_QUIRK(0x17aa, 0x20ac, "Thinkpad X300", AD1884A_THINKPAD), 4061 SND_PCI_QUIRK(0x17aa, 0x20ac, "Thinkpad X300", AD1884A_THINKPAD),
4012 {} 4062 {}
4013}; 4063};
@@ -4057,9 +4107,8 @@ static int patch_ad1884a(struct hda_codec *codec)
4057 spec->mixers[0] = ad1884a_laptop_mixers; 4107 spec->mixers[0] = ad1884a_laptop_mixers;
4058 spec->init_verbs[spec->num_init_verbs++] = ad1884a_laptop_verbs; 4108 spec->init_verbs[spec->num_init_verbs++] = ad1884a_laptop_verbs;
4059 spec->multiout.dig_out_nid = 0; 4109 spec->multiout.dig_out_nid = 0;
4060 spec->input_mux = &ad1884a_laptop_capture_source; 4110 codec->patch_ops.unsol_event = ad1884a_laptop_unsol_event;
4061 codec->patch_ops.unsol_event = ad1884a_hp_unsol_event; 4111 codec->patch_ops.init = ad1884a_laptop_init;
4062 codec->patch_ops.init = ad1884a_hp_init;
4063 /* set the upper-limit for mixer amp to 0dB for avoiding the 4112 /* set the upper-limit for mixer amp to 0dB for avoiding the
4064 * possible damage by overloading 4113 * possible damage by overloading
4065 */ 4114 */
diff --git a/sound/pci/hda/patch_atihdmi.c b/sound/pci/hda/patch_atihdmi.c
index 233e4778bba9..fb684f00156b 100644
--- a/sound/pci/hda/patch_atihdmi.c
+++ b/sound/pci/hda/patch_atihdmi.c
@@ -141,8 +141,7 @@ static int atihdmi_build_pcms(struct hda_codec *codec)
141 /* FIXME: we must check ELD and change the PCM parameters dynamically 141 /* FIXME: we must check ELD and change the PCM parameters dynamically
142 */ 142 */
143 chans = get_wcaps(codec, CVT_NID); 143 chans = get_wcaps(codec, CVT_NID);
144 chans = (chans & AC_WCAP_CHAN_CNT_EXT) >> 13; 144 chans = get_wcaps_channels(chans);
145 chans = ((chans << 1) | 1) + 1;
146 info->stream[SNDRV_PCM_STREAM_PLAYBACK].channels_max = chans; 145 info->stream[SNDRV_PCM_STREAM_PLAYBACK].channels_max = chans;
147 146
148 return 0; 147 return 0;
diff --git a/sound/pci/hda/patch_ca0110.c b/sound/pci/hda/patch_ca0110.c
index 019ca7cb56d7..d08353d3bb7f 100644
--- a/sound/pci/hda/patch_ca0110.c
+++ b/sound/pci/hda/patch_ca0110.c
@@ -459,8 +459,7 @@ static void parse_input(struct hda_codec *codec)
459 nid = codec->start_nid; 459 nid = codec->start_nid;
460 for (i = 0; i < codec->num_nodes; i++, nid++) { 460 for (i = 0; i < codec->num_nodes; i++, nid++) {
461 unsigned int wcaps = get_wcaps(codec, nid); 461 unsigned int wcaps = get_wcaps(codec, nid);
462 unsigned int type = (wcaps & AC_WCAP_TYPE) >> 462 unsigned int type = get_wcaps_type(wcaps);
463 AC_WCAP_TYPE_SHIFT;
464 if (type != AC_WID_AUD_IN) 463 if (type != AC_WID_AUD_IN)
465 continue; 464 continue;
466 if (snd_hda_get_connections(codec, nid, &pin, 1) != 1) 465 if (snd_hda_get_connections(codec, nid, &pin, 1) != 1)
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
new file mode 100644
index 000000000000..8ba306856d38
--- /dev/null
+++ b/sound/pci/hda/patch_cirrus.c
@@ -0,0 +1,1194 @@
1/*
2 * HD audio interface patch for Cirrus Logic CS420x chip
3 *
4 * Copyright (c) 2009 Takashi Iwai <tiwai@suse.de>
5 *
6 * This driver is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This driver is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#include <linux/init.h>
22#include <linux/delay.h>
23#include <linux/slab.h>
24#include <linux/pci.h>
25#include <sound/core.h>
26#include "hda_codec.h"
27#include "hda_local.h"
28
29/*
30 */
31
32struct cs_spec {
33 int board_config;
34 struct auto_pin_cfg autocfg;
35 struct hda_multi_out multiout;
36 struct snd_kcontrol *vmaster_sw;
37 struct snd_kcontrol *vmaster_vol;
38
39 hda_nid_t dac_nid[AUTO_CFG_MAX_OUTS];
40 hda_nid_t slave_dig_outs[2];
41
42 unsigned int input_idx[AUTO_PIN_LAST];
43 unsigned int capsrc_idx[AUTO_PIN_LAST];
44 hda_nid_t adc_nid[AUTO_PIN_LAST];
45 unsigned int adc_idx[AUTO_PIN_LAST];
46 unsigned int num_inputs;
47 unsigned int cur_input;
48 unsigned int automic_idx;
49 hda_nid_t cur_adc;
50 unsigned int cur_adc_stream_tag;
51 unsigned int cur_adc_format;
52 hda_nid_t dig_in;
53
54 struct hda_bind_ctls *capture_bind[2];
55
56 unsigned int gpio_mask;
57 unsigned int gpio_dir;
58 unsigned int gpio_data;
59
60 struct hda_pcm pcm_rec[2]; /* PCM information */
61
62 unsigned int hp_detect:1;
63 unsigned int mic_detect:1;
64};
65
66/* available models */
67enum {
68 CS420X_MBP55,
69 CS420X_AUTO,
70 CS420X_MODELS
71};
72
73/* Vendor-specific processing widget */
74#define CS420X_VENDOR_NID 0x11
75#define CS_DIG_OUT1_PIN_NID 0x10
76#define CS_DIG_OUT2_PIN_NID 0x15
77#define CS_DMIC1_PIN_NID 0x12
78#define CS_DMIC2_PIN_NID 0x0e
79
80/* coef indices */
81#define IDX_SPDIF_STAT 0x0000
82#define IDX_SPDIF_CTL 0x0001
83#define IDX_ADC_CFG 0x0002
84/* SZC bitmask, 4 modes below:
85 * 0 = immediate,
86 * 1 = digital immediate, analog zero-cross
 87 * 2 = digital & analog soft-ramp
88 * 3 = digital soft-ramp, analog zero-cross
89 */
90#define CS_COEF_ADC_SZC_MASK (3 << 0)
91#define CS_COEF_ADC_MIC_SZC_MODE (3 << 0) /* SZC setup for mic */
92#define CS_COEF_ADC_LI_SZC_MODE (3 << 0) /* SZC setup for line-in */
 93/* PGA mode: 0 = differential, 1 = single-ended */
94#define CS_COEF_ADC_MIC_PGA_MODE (1 << 5) /* PGA setup for mic */
95#define CS_COEF_ADC_LI_PGA_MODE (1 << 6) /* PGA setup for line-in */
96#define IDX_DAC_CFG 0x0003
97/* SZC bitmask, 4 modes below:
98 * 0 = Immediate
99 * 1 = zero-cross
100 * 2 = soft-ramp
101 * 3 = soft-ramp on zero-cross
102 */
103#define CS_COEF_DAC_HP_SZC_MODE (3 << 0) /* nid 0x02 */
104#define CS_COEF_DAC_LO_SZC_MODE (3 << 2) /* nid 0x03 */
105#define CS_COEF_DAC_SPK_SZC_MODE (3 << 4) /* nid 0x04 */
106
107#define IDX_BEEP_CFG 0x0004
108/* 0x0008 - test reg key */
109/* 0x0009 - 0x0014 -> 12 test regs */
110/* 0x0015 - visibility reg */
111
112
113static inline int cs_vendor_coef_get(struct hda_codec *codec, unsigned int idx)
114{
115 snd_hda_codec_write(codec, CS420X_VENDOR_NID, 0,
116 AC_VERB_SET_COEF_INDEX, idx);
117 return snd_hda_codec_read(codec, CS420X_VENDOR_NID, 0,
118 AC_VERB_GET_PROC_COEF, 0);
119}
120
121static inline void cs_vendor_coef_set(struct hda_codec *codec, unsigned int idx,
122 unsigned int coef)
123{
124 snd_hda_codec_write(codec, CS420X_VENDOR_NID, 0,
125 AC_VERB_SET_COEF_INDEX, idx);
126 snd_hda_codec_write(codec, CS420X_VENDOR_NID, 0,
127 AC_VERB_SET_PROC_COEF, coef);
128}
129
130
131#define HP_EVENT 1
132#define MIC_EVENT 2
133
134/*
135 * PCM callbacks
136 */
137static int cs_playback_pcm_open(struct hda_pcm_stream *hinfo,
138 struct hda_codec *codec,
139 struct snd_pcm_substream *substream)
140{
141 struct cs_spec *spec = codec->spec;
142 return snd_hda_multi_out_analog_open(codec, &spec->multiout, substream,
143 hinfo);
144}
145
146static int cs_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
147 struct hda_codec *codec,
148 unsigned int stream_tag,
149 unsigned int format,
150 struct snd_pcm_substream *substream)
151{
152 struct cs_spec *spec = codec->spec;
153 return snd_hda_multi_out_analog_prepare(codec, &spec->multiout,
154 stream_tag, format, substream);
155}
156
157static int cs_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
158 struct hda_codec *codec,
159 struct snd_pcm_substream *substream)
160{
161 struct cs_spec *spec = codec->spec;
162 return snd_hda_multi_out_analog_cleanup(codec, &spec->multiout);
163}
164
165/*
166 * Digital out
167 */
168static int cs_dig_playback_pcm_open(struct hda_pcm_stream *hinfo,
169 struct hda_codec *codec,
170 struct snd_pcm_substream *substream)
171{
172 struct cs_spec *spec = codec->spec;
173 return snd_hda_multi_out_dig_open(codec, &spec->multiout);
174}
175
176static int cs_dig_playback_pcm_close(struct hda_pcm_stream *hinfo,
177 struct hda_codec *codec,
178 struct snd_pcm_substream *substream)
179{
180 struct cs_spec *spec = codec->spec;
181 return snd_hda_multi_out_dig_close(codec, &spec->multiout);
182}
183
184static int cs_dig_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
185 struct hda_codec *codec,
186 unsigned int stream_tag,
187 unsigned int format,
188 struct snd_pcm_substream *substream)
189{
190 struct cs_spec *spec = codec->spec;
191 return snd_hda_multi_out_dig_prepare(codec, &spec->multiout, stream_tag,
192 format, substream);
193}
194
195static int cs_dig_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
196 struct hda_codec *codec,
197 struct snd_pcm_substream *substream)
198{
199 struct cs_spec *spec = codec->spec;
200 return snd_hda_multi_out_dig_cleanup(codec, &spec->multiout);
201}
202
203/*
204 * Analog capture
205 */
206static int cs_capture_pcm_prepare(struct hda_pcm_stream *hinfo,
207 struct hda_codec *codec,
208 unsigned int stream_tag,
209 unsigned int format,
210 struct snd_pcm_substream *substream)
211{
212 struct cs_spec *spec = codec->spec;
213 spec->cur_adc = spec->adc_nid[spec->cur_input];
214 spec->cur_adc_stream_tag = stream_tag;
215 spec->cur_adc_format = format;
216 snd_hda_codec_setup_stream(codec, spec->cur_adc, stream_tag, 0, format);
217 return 0;
218}
219
220static int cs_capture_pcm_cleanup(struct hda_pcm_stream *hinfo,
221 struct hda_codec *codec,
222 struct snd_pcm_substream *substream)
223{
224 struct cs_spec *spec = codec->spec;
225 snd_hda_codec_cleanup_stream(codec, spec->cur_adc);
226 spec->cur_adc = 0;
227 return 0;
228}
229
230/*
231 */
232static struct hda_pcm_stream cs_pcm_analog_playback = {
233 .substreams = 1,
234 .channels_min = 2,
235 .channels_max = 2,
236 .ops = {
237 .open = cs_playback_pcm_open,
238 .prepare = cs_playback_pcm_prepare,
239 .cleanup = cs_playback_pcm_cleanup
240 },
241};
242
243static struct hda_pcm_stream cs_pcm_analog_capture = {
244 .substreams = 1,
245 .channels_min = 2,
246 .channels_max = 2,
247 .ops = {
248 .prepare = cs_capture_pcm_prepare,
249 .cleanup = cs_capture_pcm_cleanup
250 },
251};
252
253static struct hda_pcm_stream cs_pcm_digital_playback = {
254 .substreams = 1,
255 .channels_min = 2,
256 .channels_max = 2,
257 .ops = {
258 .open = cs_dig_playback_pcm_open,
259 .close = cs_dig_playback_pcm_close,
260 .prepare = cs_dig_playback_pcm_prepare,
261 .cleanup = cs_dig_playback_pcm_cleanup
262 },
263};
264
265static struct hda_pcm_stream cs_pcm_digital_capture = {
266 .substreams = 1,
267 .channels_min = 2,
268 .channels_max = 2,
269};
270
271static int cs_build_pcms(struct hda_codec *codec)
272{
273 struct cs_spec *spec = codec->spec;
274 struct hda_pcm *info = spec->pcm_rec;
275
276 codec->pcm_info = info;
277 codec->num_pcms = 0;
278
279 info->name = "Cirrus Analog";
280 info->stream[SNDRV_PCM_STREAM_PLAYBACK] = cs_pcm_analog_playback;
281 info->stream[SNDRV_PCM_STREAM_PLAYBACK].nid = spec->dac_nid[0];
282 info->stream[SNDRV_PCM_STREAM_PLAYBACK].channels_max =
283 spec->multiout.max_channels;
284 info->stream[SNDRV_PCM_STREAM_CAPTURE] = cs_pcm_analog_capture;
285 info->stream[SNDRV_PCM_STREAM_CAPTURE].nid =
286 spec->adc_nid[spec->cur_input];
287 codec->num_pcms++;
288
289 if (!spec->multiout.dig_out_nid && !spec->dig_in)
290 return 0;
291
292 info++;
293 info->name = "Cirrus Digital";
294 info->pcm_type = spec->autocfg.dig_out_type[0];
295 if (!info->pcm_type)
296 info->pcm_type = HDA_PCM_TYPE_SPDIF;
297 if (spec->multiout.dig_out_nid) {
298 info->stream[SNDRV_PCM_STREAM_PLAYBACK] =
299 cs_pcm_digital_playback;
300 info->stream[SNDRV_PCM_STREAM_PLAYBACK].nid =
301 spec->multiout.dig_out_nid;
302 }
303 if (spec->dig_in) {
304 info->stream[SNDRV_PCM_STREAM_CAPTURE] =
305 cs_pcm_digital_capture;
306 info->stream[SNDRV_PCM_STREAM_CAPTURE].nid = spec->dig_in;
307 }
308 codec->num_pcms++;
309
310 return 0;
311}
312
313/*
314 * parse codec topology
315 */
316
317static hda_nid_t get_dac(struct hda_codec *codec, hda_nid_t pin)
318{
319 hda_nid_t dac;
320 if (!pin)
321 return 0;
322 if (snd_hda_get_connections(codec, pin, &dac, 1) != 1)
323 return 0;
324 return dac;
325}
326
327static int is_ext_mic(struct hda_codec *codec, unsigned int idx)
328{
329 struct cs_spec *spec = codec->spec;
330 struct auto_pin_cfg *cfg = &spec->autocfg;
331 hda_nid_t pin = cfg->input_pins[idx];
332 unsigned int val = snd_hda_query_pin_caps(codec, pin);
333 if (!(val & AC_PINCAP_PRES_DETECT))
334 return 0;
335 val = snd_hda_codec_get_pincfg(codec, pin);
336 return (get_defcfg_connect(val) == AC_JACK_PORT_COMPLEX);
337}
338
339static hda_nid_t get_adc(struct hda_codec *codec, hda_nid_t pin,
340 unsigned int *idxp)
341{
342 int i;
343 hda_nid_t nid;
344
345 nid = codec->start_nid;
346 for (i = 0; i < codec->num_nodes; i++, nid++) {
347 hda_nid_t pins[2];
348 unsigned int type;
349 int j, nums;
350 type = (get_wcaps(codec, nid) & AC_WCAP_TYPE)
351 >> AC_WCAP_TYPE_SHIFT;
352 if (type != AC_WID_AUD_IN)
353 continue;
354 nums = snd_hda_get_connections(codec, nid, pins,
355 ARRAY_SIZE(pins));
356 if (nums <= 0)
357 continue;
358 for (j = 0; j < nums; j++) {
359 if (pins[j] == pin) {
360 *idxp = j;
361 return nid;
362 }
363 }
364 }
365 return 0;
366}
367
368static int is_active_pin(struct hda_codec *codec, hda_nid_t nid)
369{
370 unsigned int val;
371 val = snd_hda_codec_get_pincfg(codec, nid);
372 return (get_defcfg_connect(val) != AC_JACK_PORT_NONE);
373}
374
375static int parse_output(struct hda_codec *codec)
376{
377 struct cs_spec *spec = codec->spec;
378 struct auto_pin_cfg *cfg = &spec->autocfg;
379 int i, extra_nids;
380 hda_nid_t dac;
381
382 for (i = 0; i < cfg->line_outs; i++) {
383 dac = get_dac(codec, cfg->line_out_pins[i]);
384 if (!dac)
385 break;
386 spec->dac_nid[i] = dac;
387 }
388 spec->multiout.num_dacs = i;
389 spec->multiout.dac_nids = spec->dac_nid;
390 spec->multiout.max_channels = i * 2;
391
392 /* add HP and speakers */
393 extra_nids = 0;
394 for (i = 0; i < cfg->hp_outs; i++) {
395 dac = get_dac(codec, cfg->hp_pins[i]);
396 if (!dac)
397 break;
398 if (!i)
399 spec->multiout.hp_nid = dac;
400 else
401 spec->multiout.extra_out_nid[extra_nids++] = dac;
402 }
403 for (i = 0; i < cfg->speaker_outs; i++) {
404 dac = get_dac(codec, cfg->speaker_pins[i]);
405 if (!dac)
406 break;
407 spec->multiout.extra_out_nid[extra_nids++] = dac;
408 }
409
410 if (cfg->line_out_type == AUTO_PIN_SPEAKER_OUT) {
411 cfg->speaker_outs = cfg->line_outs;
412 memcpy(cfg->speaker_pins, cfg->line_out_pins,
413 sizeof(cfg->speaker_pins));
414 cfg->line_outs = 0;
415 }
416
417 return 0;
418}
419
420static int parse_input(struct hda_codec *codec)
421{
422 struct cs_spec *spec = codec->spec;
423 struct auto_pin_cfg *cfg = &spec->autocfg;
424 int i;
425
426 for (i = 0; i < AUTO_PIN_LAST; i++) {
427 hda_nid_t pin = cfg->input_pins[i];
428 if (!pin)
429 continue;
430 spec->input_idx[spec->num_inputs] = i;
431 spec->capsrc_idx[i] = spec->num_inputs++;
432 spec->cur_input = i;
433 spec->adc_nid[i] = get_adc(codec, pin, &spec->adc_idx[i]);
434 }
435 if (!spec->num_inputs)
436 return 0;
437
438 /* check whether the automatic mic switch is available */
439 if (spec->num_inputs == 2 &&
440 spec->adc_nid[AUTO_PIN_MIC] && spec->adc_nid[AUTO_PIN_FRONT_MIC]) {
441 if (is_ext_mic(codec, cfg->input_pins[AUTO_PIN_FRONT_MIC])) {
442 if (!is_ext_mic(codec, cfg->input_pins[AUTO_PIN_MIC])) {
443 spec->mic_detect = 1;
444 spec->automic_idx = AUTO_PIN_FRONT_MIC;
445 }
446 } else {
447 if (is_ext_mic(codec, cfg->input_pins[AUTO_PIN_MIC])) {
448 spec->mic_detect = 1;
449 spec->automic_idx = AUTO_PIN_MIC;
450 }
451 }
452 }
453 return 0;
454}
455
456
457static int parse_digital_output(struct hda_codec *codec)
458{
459 struct cs_spec *spec = codec->spec;
460 struct auto_pin_cfg *cfg = &spec->autocfg;
461 hda_nid_t nid;
462
463 if (!cfg->dig_outs)
464 return 0;
465 if (snd_hda_get_connections(codec, cfg->dig_out_pins[0], &nid, 1) < 1)
466 return 0;
467 spec->multiout.dig_out_nid = nid;
468 spec->multiout.share_spdif = 1;
469 if (cfg->dig_outs > 1 &&
470 snd_hda_get_connections(codec, cfg->dig_out_pins[1], &nid, 1) > 0) {
471 spec->slave_dig_outs[0] = nid;
472 codec->slave_dig_outs = spec->slave_dig_outs;
473 }
474 return 0;
475}
476
477static int parse_digital_input(struct hda_codec *codec)
478{
479 struct cs_spec *spec = codec->spec;
480 struct auto_pin_cfg *cfg = &spec->autocfg;
481 int idx;
482
483 if (cfg->dig_in_pin)
484 spec->dig_in = get_adc(codec, cfg->dig_in_pin, &idx);
485 return 0;
486}
487
488/*
489 * create mixer controls
490 */
491
492static const char *dir_sfx[2] = { "Playback", "Capture" };
493
494static int add_mute(struct hda_codec *codec, const char *name, int index,
495 unsigned int pval, int dir, struct snd_kcontrol **kctlp)
496{
497 char tmp[44];
498 struct snd_kcontrol_new knew =
499 HDA_CODEC_MUTE_IDX(tmp, index, 0, 0, HDA_OUTPUT);
500 knew.private_value = pval;
501 snprintf(tmp, sizeof(tmp), "%s %s Switch", name, dir_sfx[dir]);
502 *kctlp = snd_ctl_new1(&knew, codec);
503 return snd_hda_ctl_add(codec, *kctlp);
504}
505
506static int add_volume(struct hda_codec *codec, const char *name,
507 int index, unsigned int pval, int dir,
508 struct snd_kcontrol **kctlp)
509{
510 char tmp[32];
511 struct snd_kcontrol_new knew =
512 HDA_CODEC_VOLUME_IDX(tmp, index, 0, 0, HDA_OUTPUT);
513 knew.private_value = pval;
514 snprintf(tmp, sizeof(tmp), "%s %s Volume", name, dir_sfx[dir]);
515 *kctlp = snd_ctl_new1(&knew, codec);
516 return snd_hda_ctl_add(codec, *kctlp);
517}
518
519static void fix_volume_caps(struct hda_codec *codec, hda_nid_t dac)
520{
521 unsigned int caps;
522
523 /* set the upper-limit for mixer amp to 0dB */
524 caps = query_amp_caps(codec, dac, HDA_OUTPUT);
525 caps &= ~(0x7f << AC_AMPCAP_NUM_STEPS_SHIFT);
526 caps |= ((caps >> AC_AMPCAP_OFFSET_SHIFT) & 0x7f)
527 << AC_AMPCAP_NUM_STEPS_SHIFT;
528 snd_hda_override_amp_caps(codec, dac, HDA_OUTPUT, caps);
529}
530
531static int add_vmaster(struct hda_codec *codec, hda_nid_t dac)
532{
533 struct cs_spec *spec = codec->spec;
534 unsigned int tlv[4];
535 int err;
536
537 spec->vmaster_sw =
538 snd_ctl_make_virtual_master("Master Playback Switch", NULL);
539 err = snd_hda_ctl_add(codec, spec->vmaster_sw);
540 if (err < 0)
541 return err;
542
543 snd_hda_set_vmaster_tlv(codec, dac, HDA_OUTPUT, tlv);
544 spec->vmaster_vol =
545 snd_ctl_make_virtual_master("Master Playback Volume", tlv);
546 err = snd_hda_ctl_add(codec, spec->vmaster_vol);
547 if (err < 0)
548 return err;
549 return 0;
550}
551
552static int add_output(struct hda_codec *codec, hda_nid_t dac, int idx,
553 int num_ctls, int type)
554{
555 struct cs_spec *spec = codec->spec;
556 const char *name;
557 int err, index;
558 struct snd_kcontrol *kctl;
559 static char *speakers[] = {
560 "Front Speaker", "Surround Speaker", "Bass Speaker"
561 };
562 static char *line_outs[] = {
563 "Front Line-Out", "Surround Line-Out", "Bass Line-Out"
564 };
565
566 fix_volume_caps(codec, dac);
567 if (!spec->vmaster_sw) {
568 err = add_vmaster(codec, dac);
569 if (err < 0)
570 return err;
571 }
572
573 index = 0;
574 switch (type) {
575 case AUTO_PIN_HP_OUT:
576 name = "Headphone";
577 index = idx;
578 break;
579 case AUTO_PIN_SPEAKER_OUT:
580 if (num_ctls > 1)
581 name = speakers[idx];
582 else
583 name = "Speaker";
584 break;
585 default:
586 if (num_ctls > 1)
587 name = line_outs[idx];
588 else
589 name = "Line-Out";
590 break;
591 }
592
593 err = add_mute(codec, name, index,
594 HDA_COMPOSE_AMP_VAL(dac, 3, 0, HDA_OUTPUT), 0, &kctl);
595 if (err < 0)
596 return err;
597 err = snd_ctl_add_slave(spec->vmaster_sw, kctl);
598 if (err < 0)
599 return err;
600
601 err = add_volume(codec, name, index,
602 HDA_COMPOSE_AMP_VAL(dac, 3, 0, HDA_OUTPUT), 0, &kctl);
603 if (err < 0)
604 return err;
605 err = snd_ctl_add_slave(spec->vmaster_vol, kctl);
606 if (err < 0)
607 return err;
608
609 return 0;
610}
611
612static int build_output(struct hda_codec *codec)
613{
614 struct cs_spec *spec = codec->spec;
615 struct auto_pin_cfg *cfg = &spec->autocfg;
616 int i, err;
617
618 for (i = 0; i < cfg->line_outs; i++) {
619 err = add_output(codec, get_dac(codec, cfg->line_out_pins[i]),
620 i, cfg->line_outs, cfg->line_out_type);
621 if (err < 0)
622 return err;
623 }
624 for (i = 0; i < cfg->hp_outs; i++) {
625 err = add_output(codec, get_dac(codec, cfg->hp_pins[i]),
626 i, cfg->hp_outs, AUTO_PIN_HP_OUT);
627 if (err < 0)
628 return err;
629 }
630 for (i = 0; i < cfg->speaker_outs; i++) {
631 err = add_output(codec, get_dac(codec, cfg->speaker_pins[i]),
632 i, cfg->speaker_outs, AUTO_PIN_SPEAKER_OUT);
633 if (err < 0)
634 return err;
635 }
636 return 0;
637}
638
639/*
640 */
641
642static struct snd_kcontrol_new cs_capture_ctls[] = {
643 HDA_BIND_SW("Capture Switch", 0),
644 HDA_BIND_VOL("Capture Volume", 0),
645};
646
647static int change_cur_input(struct hda_codec *codec, unsigned int idx,
648 int force)
649{
650 struct cs_spec *spec = codec->spec;
651
652 if (spec->cur_input == idx && !force)
653 return 0;
654 if (spec->cur_adc && spec->cur_adc != spec->adc_nid[idx]) {
655 /* stream is running, let's swap the current ADC */
656 snd_hda_codec_cleanup_stream(codec, spec->cur_adc);
657 spec->cur_adc = spec->adc_nid[idx];
658 snd_hda_codec_setup_stream(codec, spec->cur_adc,
659 spec->cur_adc_stream_tag, 0,
660 spec->cur_adc_format);
661 }
662 snd_hda_codec_write(codec, spec->cur_adc, 0,
663 AC_VERB_SET_CONNECT_SEL,
664 spec->adc_idx[idx]);
665 spec->cur_input = idx;
666 return 1;
667}
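/*
 * Returning 1 here tells the caller (cs_capture_source_put() below) that
 * the selection actually changed, so a control notification is generated
 * only on a real switch; repeating the current selection with force == 0
 * is a no-op.
 */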
668
669static int cs_capture_source_info(struct snd_kcontrol *kcontrol,
670 struct snd_ctl_elem_info *uinfo)
671{
672 struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
673 struct cs_spec *spec = codec->spec;
674 unsigned int idx;
675
676 uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
677 uinfo->count = 1;
678 uinfo->value.enumerated.items = spec->num_inputs;
679 if (uinfo->value.enumerated.item >= spec->num_inputs)
680 uinfo->value.enumerated.item = spec->num_inputs - 1;
681 idx = spec->input_idx[uinfo->value.enumerated.item];
682 strcpy(uinfo->value.enumerated.name, auto_pin_cfg_labels[idx]);
683 return 0;
684}
685
686static int cs_capture_source_get(struct snd_kcontrol *kcontrol,
687 struct snd_ctl_elem_value *ucontrol)
688{
689 struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
690 struct cs_spec *spec = codec->spec;
691 ucontrol->value.enumerated.item[0] = spec->capsrc_idx[spec->cur_input];
692 return 0;
693}
694
695static int cs_capture_source_put(struct snd_kcontrol *kcontrol,
696 struct snd_ctl_elem_value *ucontrol)
697{
698 struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
699 struct cs_spec *spec = codec->spec;
700 unsigned int idx = ucontrol->value.enumerated.item[0];
701
702 if (idx >= spec->num_inputs)
703 return -EINVAL;
704 idx = spec->input_idx[idx];
705 return change_cur_input(codec, idx, 0);
706}
707
708static struct snd_kcontrol_new cs_capture_source = {
709 .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
710 .name = "Capture Source",
711 .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
712 .info = cs_capture_source_info,
713 .get = cs_capture_source_get,
714 .put = cs_capture_source_put,
715};
716
717static struct hda_bind_ctls *make_bind_capture(struct hda_codec *codec,
718 struct hda_ctl_ops *ops)
719{
720 struct cs_spec *spec = codec->spec;
721 struct hda_bind_ctls *bind;
722 int i, n;
723
724 bind = kzalloc(sizeof(*bind) + sizeof(long) * (spec->num_inputs + 1),
725 GFP_KERNEL);
726 if (!bind)
727 return NULL;
728 bind->ops = ops;
729 n = 0;
730 for (i = 0; i < AUTO_PIN_LAST; i++) {
731 if (!spec->adc_nid[i])
732 continue;
733 bind->values[n++] =
734 HDA_COMPOSE_AMP_VAL(spec->adc_nid[i], 3,
735 spec->adc_idx[i], HDA_INPUT);
736 }
737 return bind;
738}
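/*
 * The values[] array is sized for num_inputs + 1 entries and comes from
 * kzalloc(), so it is implicitly zero-terminated -- the terminator that
 * the hda_bind_ctls helpers expect.
 */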
739
740static int build_input(struct hda_codec *codec)
741{
742 struct cs_spec *spec = codec->spec;
743 int i, err;
744
745 if (!spec->num_inputs)
746 return 0;
747
748 /* make bind-capture */
749 spec->capture_bind[0] = make_bind_capture(codec, &snd_hda_bind_sw);
750 spec->capture_bind[1] = make_bind_capture(codec, &snd_hda_bind_vol);
751 for (i = 0; i < 2; i++) {
752 struct snd_kcontrol *kctl;
753 if (!spec->capture_bind[i])
754 return -ENOMEM;
755 kctl = snd_ctl_new1(&cs_capture_ctls[i], codec);
756 if (!kctl)
757 return -ENOMEM;
758 kctl->private_value = (long)spec->capture_bind[i];
759 err = snd_hda_ctl_add(codec, kctl);
760 if (err < 0)
761 return err;
762 }
763
764 if (spec->num_inputs > 1 && !spec->mic_detect) {
765 err = snd_hda_ctl_add(codec,
766 snd_ctl_new1(&cs_capture_source, codec));
767 if (err < 0)
768 return err;
769 }
770
771 return 0;
772}
773
774/*
775 */
776
777static int build_digital_output(struct hda_codec *codec)
778{
779 struct cs_spec *spec = codec->spec;
780 int err;
781
782 if (!spec->multiout.dig_out_nid)
783 return 0;
784
785 err = snd_hda_create_spdif_out_ctls(codec, spec->multiout.dig_out_nid);
786 if (err < 0)
787 return err;
788 err = snd_hda_create_spdif_share_sw(codec, &spec->multiout);
789 if (err < 0)
790 return err;
791 return 0;
792}
793
794static int build_digital_input(struct hda_codec *codec)
795{
796 struct cs_spec *spec = codec->spec;
797 if (spec->dig_in)
798 return snd_hda_create_spdif_in_ctls(codec, spec->dig_in);
799 return 0;
800}
801
802/*
803 * auto-mute and auto-mic switching
804 */
805
806static void cs_automute(struct hda_codec *codec)
807{
808 struct cs_spec *spec = codec->spec;
809 struct auto_pin_cfg *cfg = &spec->autocfg;
810 unsigned int caps, present, hp_present;
811 hda_nid_t nid;
812 int i;
813
814 hp_present = 0;
815 for (i = 0; i < cfg->hp_outs; i++) {
816 nid = cfg->hp_pins[i];
817 caps = snd_hda_query_pin_caps(codec, nid);
818 if (!(caps & AC_PINCAP_PRES_DETECT))
819 continue;
820 if (caps & AC_PINCAP_TRIG_REQ)
821 snd_hda_codec_read(codec, nid, 0,
822 AC_VERB_SET_PIN_SENSE, 0);
823 present = snd_hda_codec_read(codec, nid, 0,
824 AC_VERB_GET_PIN_SENSE, 0);
825 hp_present |= (present & AC_PINSENSE_PRESENCE) != 0;
826 if (hp_present)
827 break;
828 }
829 for (i = 0; i < cfg->speaker_outs; i++) {
830 nid = cfg->speaker_pins[i];
831 snd_hda_codec_write(codec, nid, 0,
832 AC_VERB_SET_PIN_WIDGET_CONTROL,
833 hp_present ? 0 : PIN_OUT);
834 }
835 if (spec->board_config == CS420X_MBP55) {
836 unsigned int gpio = hp_present ? 0x02 : 0x08;
837 snd_hda_codec_write(codec, 0x01, 0,
838 AC_VERB_SET_GPIO_DATA, gpio);
839 }
840}
841
842static void cs_automic(struct hda_codec *codec)
843{
844 struct cs_spec *spec = codec->spec;
845 struct auto_pin_cfg *cfg = &spec->autocfg;
846 hda_nid_t nid;
847 unsigned int caps, present;
848
849 nid = cfg->input_pins[spec->automic_idx];
850 caps = snd_hda_query_pin_caps(codec, nid);
851 if (caps & AC_PINCAP_TRIG_REQ)
852 snd_hda_codec_read(codec, nid, 0, AC_VERB_SET_PIN_SENSE, 0);
853 present = snd_hda_codec_read(codec, nid, 0,
854 AC_VERB_GET_PIN_SENSE, 0);
855 if (present & AC_PINSENSE_PRESENCE)
856 change_cur_input(codec, spec->automic_idx, 0);
857 else {
858 unsigned int imic = (spec->automic_idx == AUTO_PIN_MIC) ?
859 AUTO_PIN_FRONT_MIC : AUTO_PIN_MIC;
860 change_cur_input(codec, imic, 0);
861 }
862}
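/*
 * Fallback rule: when the jack chosen for auto-switching reports nothing
 * plugged, capture falls back to the "other" microphone pin --
 * FRONT_MIC when automic_idx is MIC, and MIC otherwise.
 */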
863
864/*
865 */
866
867static void init_output(struct hda_codec *codec)
868{
869 struct cs_spec *spec = codec->spec;
870 struct auto_pin_cfg *cfg = &spec->autocfg;
871 int i;
872
873 /* mute first */
874 for (i = 0; i < spec->multiout.num_dacs; i++)
875 snd_hda_codec_write(codec, spec->multiout.dac_nids[i], 0,
876 AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
877 if (spec->multiout.hp_nid)
878 snd_hda_codec_write(codec, spec->multiout.hp_nid, 0,
879 AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
880 for (i = 0; i < ARRAY_SIZE(spec->multiout.extra_out_nid); i++) {
881 if (!spec->multiout.extra_out_nid[i])
882 break;
883 snd_hda_codec_write(codec, spec->multiout.extra_out_nid[i], 0,
884 AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
885 }
886
887 /* set appropriate pin controls */
888 for (i = 0; i < cfg->line_outs; i++)
889 snd_hda_codec_write(codec, cfg->line_out_pins[i], 0,
890 AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
891 for (i = 0; i < cfg->hp_outs; i++) {
892 hda_nid_t nid = cfg->hp_pins[i];
893 snd_hda_codec_write(codec, nid, 0,
894 AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP);
895 if (!cfg->speaker_outs)
896 continue;
897 if (get_wcaps(codec, nid) & AC_WCAP_UNSOL_CAP) {
898 snd_hda_codec_write(codec, nid, 0,
899 AC_VERB_SET_UNSOLICITED_ENABLE,
900 AC_USRSP_EN | HP_EVENT);
901 spec->hp_detect = 1;
902 }
903 }
904 for (i = 0; i < cfg->speaker_outs; i++)
905 snd_hda_codec_write(codec, cfg->speaker_pins[i], 0,
906 AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
907 if (spec->hp_detect)
908 cs_automute(codec);
909}
910
911static void init_input(struct hda_codec *codec)
912{
913 struct cs_spec *spec = codec->spec;
914 struct auto_pin_cfg *cfg = &spec->autocfg;
915 unsigned int coef;
916 int i;
917
918 for (i = 0; i < AUTO_PIN_LAST; i++) {
919 unsigned int ctl;
920 hda_nid_t pin = cfg->input_pins[i];
921 if (!pin || !spec->adc_nid[i])
922 continue;
923 /* set appropriate pin control and mute first */
924 ctl = PIN_IN;
925 if (i <= AUTO_PIN_FRONT_MIC) {
926 unsigned int caps = snd_hda_query_pin_caps(codec, pin);
927 caps >>= AC_PINCAP_VREF_SHIFT;
928 if (caps & AC_PINCAP_VREF_80)
929 ctl = PIN_VREF80;
930 }
931 snd_hda_codec_write(codec, pin, 0,
932 AC_VERB_SET_PIN_WIDGET_CONTROL, ctl);
933 snd_hda_codec_write(codec, spec->adc_nid[i], 0,
934 AC_VERB_SET_AMP_GAIN_MUTE,
935 AMP_IN_MUTE(spec->adc_idx[i]));
936 if (spec->mic_detect && spec->automic_idx == i)
937 snd_hda_codec_write(codec, pin, 0,
938 AC_VERB_SET_UNSOLICITED_ENABLE,
939 AC_USRSP_EN | MIC_EVENT);
940 }
941 change_cur_input(codec, spec->cur_input, 1);
942 if (spec->mic_detect)
943 cs_automic(codec);
944
945 coef = 0x000a; /* ADC1/2 - Digital and Analog Soft Ramp */
946 if (is_active_pin(codec, CS_DMIC2_PIN_NID))
947 coef |= 0x0500; /* DMIC2 enable 2 channels, disable GPIO1 */
948 if (is_active_pin(codec, CS_DMIC1_PIN_NID))
949 coef |= 0x1800; /* DMIC1 enable 2 channels, disable GPIO0
950				 * No effect if SPDIF_OUT2 is selected in
951 * IDX_SPDIF_CTL.
952 */
953 cs_vendor_coef_set(codec, IDX_ADC_CFG, coef);
954}
955
956static struct hda_verb cs_coef_init_verbs[] = {
957 {0x11, AC_VERB_SET_PROC_STATE, 1},
958 {0x11, AC_VERB_SET_COEF_INDEX, IDX_DAC_CFG},
959 {0x11, AC_VERB_SET_PROC_COEF,
960 (0x002a /* DAC1/2/3 SZCMode Soft Ramp */
961 | 0x0040 /* Mute DACs on FIFO error */
962 | 0x1000 /* Enable DACs High Pass Filter */
963 | 0x0400 /* Disable Coefficient Auto increment */
964 )},
965 /* Beep */
966 {0x11, AC_VERB_SET_COEF_INDEX, IDX_DAC_CFG},
967 {0x11, AC_VERB_SET_PROC_COEF, 0x0007}, /* Enable Beep thru DAC1/2/3 */
968
969 {} /* terminator */
970};
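/*
 * Vendor coefficients on this codec are addressed indirectly: each write
 * above first selects a coefficient index on the vendor widget 0x11 with
 * AC_VERB_SET_COEF_INDEX and then stores the value with
 * AC_VERB_SET_PROC_COEF -- the same index-then-value sequence that
 * cs_vendor_coef_set() performs for the run-time writes in init_input()
 * and init_digital().
 */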
971
972/* SPDIF setup */
973static void init_digital(struct hda_codec *codec)
974{
975 unsigned int coef;
976
977 coef = 0x0002; /* SRC_MUTE soft-mute on SPDIF (if no lock) */
978 coef |= 0x0008; /* Replace with mute on error */
979 if (is_active_pin(codec, CS_DIG_OUT2_PIN_NID))
980 coef |= 0x4000; /* RX to TX1 or TX2 Loopthru / SPDIF2
981 * SPDIF_OUT2 is shared with GPIO1 and
982 * DMIC_SDA2.
983 */
984 cs_vendor_coef_set(codec, IDX_SPDIF_CTL, coef);
985}
986
987static int cs_init(struct hda_codec *codec)
988{
989 struct cs_spec *spec = codec->spec;
990
991 snd_hda_sequence_write(codec, cs_coef_init_verbs);
992
993 if (spec->gpio_mask) {
994 snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_MASK,
995 spec->gpio_mask);
996 snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DIRECTION,
997 spec->gpio_dir);
998 snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DATA,
999 spec->gpio_data);
1000 }
1001
1002 init_output(codec);
1003 init_input(codec);
1004 init_digital(codec);
1005 return 0;
1006}
1007
1008static int cs_build_controls(struct hda_codec *codec)
1009{
1010 int err;
1011
1012 err = build_output(codec);
1013 if (err < 0)
1014 return err;
1015 err = build_input(codec);
1016 if (err < 0)
1017 return err;
1018 err = build_digital_output(codec);
1019 if (err < 0)
1020 return err;
1021 err = build_digital_input(codec);
1022 if (err < 0)
1023 return err;
1024 return cs_init(codec);
1025}
1026
1027static void cs_free(struct hda_codec *codec)
1028{
1029 struct cs_spec *spec = codec->spec;
1030 kfree(spec->capture_bind[0]);
1031 kfree(spec->capture_bind[1]);
1032 kfree(codec->spec);
1033}
1034
1035static void cs_unsol_event(struct hda_codec *codec, unsigned int res)
1036{
1037 switch ((res >> 26) & 0x7f) {
1038 case HP_EVENT:
1039 cs_automute(codec);
1040 break;
1041 case MIC_EVENT:
1042 cs_automic(codec);
1043 break;
1044 }
1045}
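/*
 * The tag matched against HP_EVENT/MIC_EVENT is the one programmed into
 * AC_VERB_SET_UNSOLICITED_ENABLE (together with AC_USRSP_EN) in
 * init_output() and init_input(); the codec echoes that tag back in the
 * top bits of the unsolicited response, hence the shift by 26 before
 * masking.
 */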
1046
1047static struct hda_codec_ops cs_patch_ops = {
1048 .build_controls = cs_build_controls,
1049 .build_pcms = cs_build_pcms,
1050 .init = cs_init,
1051 .free = cs_free,
1052 .unsol_event = cs_unsol_event,
1053};
1054
1055static int cs_parse_auto_config(struct hda_codec *codec)
1056{
1057 struct cs_spec *spec = codec->spec;
1058 int err;
1059
1060 err = snd_hda_parse_pin_def_config(codec, &spec->autocfg, NULL);
1061 if (err < 0)
1062 return err;
1063
1064 err = parse_output(codec);
1065 if (err < 0)
1066 return err;
1067 err = parse_input(codec);
1068 if (err < 0)
1069 return err;
1070 err = parse_digital_output(codec);
1071 if (err < 0)
1072 return err;
1073 err = parse_digital_input(codec);
1074 if (err < 0)
1075 return err;
1076 return 0;
1077}
1078
1079static const char *cs420x_models[CS420X_MODELS] = {
1080 [CS420X_MBP55] = "mbp55",
1081 [CS420X_AUTO] = "auto",
1082};
1083
1084
1085static struct snd_pci_quirk cs420x_cfg_tbl[] = {
1086 SND_PCI_QUIRK(0x10de, 0xcb79, "MacBookPro 5,5", CS420X_MBP55),
1087 {} /* terminator */
1088};
1089
1090struct cs_pincfg {
1091 hda_nid_t nid;
1092 u32 val;
1093};
1094
1095static struct cs_pincfg mbp55_pincfgs[] = {
1096 { 0x09, 0x012b4030 },
1097 { 0x0a, 0x90100121 },
1098 { 0x0b, 0x90100120 },
1099 { 0x0c, 0x400000f0 },
1100 { 0x0d, 0x90a00110 },
1101 { 0x0e, 0x400000f0 },
1102 { 0x0f, 0x400000f0 },
1103 { 0x10, 0x014be040 },
1104 { 0x12, 0x400000f0 },
1105 { 0x15, 0x400000f0 },
1106 {} /* terminator */
1107};
1108
1109static struct cs_pincfg *cs_pincfgs[CS420X_MODELS] = {
1110 [CS420X_MBP55] = mbp55_pincfgs,
1111};
1112
1113static void fix_pincfg(struct hda_codec *codec, int model)
1114{
1115 const struct cs_pincfg *cfg = cs_pincfgs[model];
1116 if (!cfg)
1117 return;
1118 for (; cfg->nid; cfg++)
1119 snd_hda_codec_set_pincfg(codec, cfg->nid, cfg->val);
1120}
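/*
 * Each cs_pincfg entry overrides the BIOS-provided default configuration
 * of one pin widget before the auto-parser runs, so a board whose
 * firmware pin defaults need correction (here the MacBookPro 5,5 table
 * above) still ends up with sensible output/input assignments.
 */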
1121
1122
1123static int patch_cs420x(struct hda_codec *codec)
1124{
1125 struct cs_spec *spec;
1126 int err;
1127
1128 spec = kzalloc(sizeof(*spec), GFP_KERNEL);
1129 if (!spec)
1130 return -ENOMEM;
1131 codec->spec = spec;
1132
1133 spec->board_config =
1134 snd_hda_check_board_config(codec, CS420X_MODELS,
1135 cs420x_models, cs420x_cfg_tbl);
1136 if (spec->board_config >= 0)
1137 fix_pincfg(codec, spec->board_config);
1138
1139 switch (spec->board_config) {
1140 case CS420X_MBP55:
1141 /* GPIO1 = headphones */
1142 /* GPIO3 = speakers */
1143 spec->gpio_mask = 0x0a;
1144 spec->gpio_dir = 0x0a;
1145 break;
1146 }
1147
1148 err = cs_parse_auto_config(codec);
1149 if (err < 0)
1150 goto error;
1151
1152 codec->patch_ops = cs_patch_ops;
1153
1154 return 0;
1155
1156 error:
1157 kfree(codec->spec);
1158 codec->spec = NULL;
1159 return err;
1160}
1161
1162
1163/*
1164 * patch entries
1165 */
1166static struct hda_codec_preset snd_hda_preset_cirrus[] = {
1167 { .id = 0x10134206, .name = "CS4206", .patch = patch_cs420x },
1168 { .id = 0x10134207, .name = "CS4207", .patch = patch_cs420x },
1169 {} /* terminator */
1170};
1171
1172MODULE_ALIAS("snd-hda-codec-id:10134206");
1173MODULE_ALIAS("snd-hda-codec-id:10134207");
1174
1175MODULE_LICENSE("GPL");
1176MODULE_DESCRIPTION("Cirrus Logic HD-audio codec");
1177
1178static struct hda_codec_preset_list cirrus_list = {
1179 .preset = snd_hda_preset_cirrus,
1180 .owner = THIS_MODULE,
1181};
1182
1183static int __init patch_cirrus_init(void)
1184{
1185 return snd_hda_add_codec_preset(&cirrus_list);
1186}
1187
1188static void __exit patch_cirrus_exit(void)
1189{
1190 snd_hda_delete_codec_preset(&cirrus_list);
1191}
1192
1193module_init(patch_cirrus_init)
1194module_exit(patch_cirrus_exit)
diff --git a/sound/pci/hda/patch_cmedia.c b/sound/pci/hda/patch_cmedia.c
index c921264bbd71..780e1a72114a 100644
--- a/sound/pci/hda/patch_cmedia.c
+++ b/sound/pci/hda/patch_cmedia.c
@@ -635,7 +635,8 @@ static int patch_cmi9880(struct hda_codec *codec)
635 cmi9880_models, 635 cmi9880_models,
636 cmi9880_cfg_tbl); 636 cmi9880_cfg_tbl);
637 if (spec->board_config < 0) { 637 if (spec->board_config < 0) {
638 snd_printdd(KERN_INFO "hda_codec: Unknown model for CMI9880\n"); 638 snd_printdd(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
639 codec->chip_name);
639 spec->board_config = CMI_AUTO; /* try everything */ 640 spec->board_config = CMI_AUTO; /* try everything */
640 } 641 }
641 642
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index ac868c59f9e3..9d899eda44d7 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -108,6 +108,8 @@ struct conexant_spec {
108 struct hda_input_mux private_imux; 108 struct hda_input_mux private_imux;
109 hda_nid_t private_dac_nids[AUTO_CFG_MAX_OUTS]; 109 hda_nid_t private_dac_nids[AUTO_CFG_MAX_OUTS];
110 110
111 unsigned int dell_automute;
112 unsigned int port_d_mode;
111}; 113};
112 114
113static int conexant_playback_pcm_open(struct hda_pcm_stream *hinfo, 115static int conexant_playback_pcm_open(struct hda_pcm_stream *hinfo,
@@ -1908,6 +1910,480 @@ static int patch_cxt5051(struct hda_codec *codec)
1908 return 0; 1910 return 0;
1909} 1911}
1910 1912
1913/* Conexant 5066 specific */
1914
1915static hda_nid_t cxt5066_dac_nids[1] = { 0x10 };
1916static hda_nid_t cxt5066_adc_nids[3] = { 0x14, 0x15, 0x16 };
1917static hda_nid_t cxt5066_capsrc_nids[1] = { 0x17 };
1918#define CXT5066_SPDIF_OUT 0x21
1919
1920static struct hda_channel_mode cxt5066_modes[1] = {
1921 { 2, NULL },
1922};
1923
1924static void cxt5066_update_speaker(struct hda_codec *codec)
1925{
1926 struct conexant_spec *spec = codec->spec;
1927 unsigned int pinctl;
1928
1929 snd_printdd("CXT5066: update speaker, hp_present=%d\n",
1930 spec->hp_present);
1931
1932 /* Port A (HP) */
1933 pinctl = ((spec->hp_present & 1) && spec->cur_eapd) ? PIN_HP : 0;
1934 snd_hda_codec_write(codec, 0x19, 0, AC_VERB_SET_PIN_WIDGET_CONTROL,
1935 pinctl);
1936
1937 /* Port D (HP/LO) */
1938 pinctl = ((spec->hp_present & 2) && spec->cur_eapd)
1939 ? spec->port_d_mode : 0;
1940 snd_hda_codec_write(codec, 0x1c, 0, AC_VERB_SET_PIN_WIDGET_CONTROL,
1941 pinctl);
1942
1943 /* CLASS_D AMP */
1944 pinctl = (!spec->hp_present && spec->cur_eapd) ? PIN_OUT : 0;
1945 snd_hda_codec_write(codec, 0x1f, 0, AC_VERB_SET_PIN_WIDGET_CONTROL,
1946 pinctl);
1947
1948 if (spec->dell_automute) {
1949 /* DELL AIO Port Rule: PortA > PortD > IntSpk */
1950 pinctl = (!(spec->hp_present & 1) && spec->cur_eapd)
1951 ? PIN_OUT : 0;
1952 snd_hda_codec_write(codec, 0x1c, 0,
1953 AC_VERB_SET_PIN_WIDGET_CONTROL, pinctl);
1954 }
1955}
1956
1957/* turn on/off EAPD (+ mute HP) as a master switch */
1958static int cxt5066_hp_master_sw_put(struct snd_kcontrol *kcontrol,
1959 struct snd_ctl_elem_value *ucontrol)
1960{
1961 struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
1962
1963 if (!cxt_eapd_put(kcontrol, ucontrol))
1964 return 0;
1965
1966 cxt5066_update_speaker(codec);
1967 return 1;
1968}
1969
1970/* toggle input of built-in and mic jack appropriately */
1971static void cxt5066_automic(struct hda_codec *codec)
1972{
1973 static struct hda_verb ext_mic_present[] = {
1974 /* enable external mic, port B */
1975 {0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
1976
1977 /* switch to external mic input */
1978 {0x17, AC_VERB_SET_CONNECT_SEL, 0},
1979
1980 /* disable internal mic, port C */
1981 {0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
1982 {}
1983 };
1984 static struct hda_verb ext_mic_absent[] = {
1985 /* enable internal mic, port C */
1986 {0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
1987
1988 /* switch to internal mic input */
1989 {0x17, AC_VERB_SET_CONNECT_SEL, 1},
1990
1991 /* disable external mic, port B */
1992 {0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
1993 {}
1994 };
1995 unsigned int present;
1996
1997 present = snd_hda_codec_read(codec, 0x1a, 0,
1998 AC_VERB_GET_PIN_SENSE, 0) & 0x80000000;
1999 if (present) {
2000 snd_printdd("CXT5066: external microphone detected\n");
2001 snd_hda_sequence_write(codec, ext_mic_present);
2002 } else {
2003 snd_printdd("CXT5066: external microphone absent\n");
2004 snd_hda_sequence_write(codec, ext_mic_absent);
2005 }
2006}
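/*
 * The two verb tables above implement the jack-based mic switch: with an
 * external mic sensed on port B (0x1a) the selector 0x17 is pointed at
 * connection 0 and port C (0x1b) has its pin control cleared; without it
 * the roles are reversed and the internal mic on port C takes
 * connection 1.
 */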
2007
2008/* mute internal speaker if HP is plugged */
2009static void cxt5066_hp_automute(struct hda_codec *codec)
2010{
2011 struct conexant_spec *spec = codec->spec;
2012 unsigned int portA, portD;
2013
2014 /* Port A */
2015 portA = snd_hda_codec_read(codec, 0x19, 0, AC_VERB_GET_PIN_SENSE, 0)
2016 & AC_PINSENSE_PRESENCE;
2017
2018 /* Port D */
2019 portD = (snd_hda_codec_read(codec, 0x1c, 0, AC_VERB_GET_PIN_SENSE, 0)
2020 & AC_PINSENSE_PRESENCE) << 1;
2021
2022 spec->hp_present = !!(portA | portD);
2023 snd_printdd("CXT5066: hp automute portA=%x portD=%x present=%d\n",
2024 portA, portD, spec->hp_present);
2025 cxt5066_update_speaker(codec);
2026}
2027
2028/* unsolicited event for jack sensing */
2029static void cxt5066_unsol_event(struct hda_codec *codec, unsigned int res)
2030{
2031 snd_printdd("CXT5066: unsol event %x (%x)\n", res, res >> 26);
2032 switch (res >> 26) {
2033 case CONEXANT_HP_EVENT:
2034 cxt5066_hp_automute(codec);
2035 break;
2036 case CONEXANT_MIC_EVENT:
2037 cxt5066_automic(codec);
2038 break;
2039 }
2040}
2041
2042static const struct hda_input_mux cxt5066_analog_mic_boost = {
2043 .num_items = 5,
2044 .items = {
2045 { "0dB", 0 },
2046 { "10dB", 1 },
2047 { "20dB", 2 },
2048 { "30dB", 3 },
2049 { "40dB", 4 },
2050 },
2051};
2052
2053static int cxt5066_mic_boost_mux_enum_info(struct snd_kcontrol *kcontrol,
2054 struct snd_ctl_elem_info *uinfo)
2055{
2056 return snd_hda_input_mux_info(&cxt5066_analog_mic_boost, uinfo);
2057}
2058
2059static int cxt5066_mic_boost_mux_enum_get(struct snd_kcontrol *kcontrol,
2060 struct snd_ctl_elem_value *ucontrol)
2061{
2062 struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
2063 int val;
2064
2065 val = snd_hda_codec_read(codec, 0x17, 0,
2066 AC_VERB_GET_AMP_GAIN_MUTE, AC_AMP_GET_OUTPUT);
2067
2068 ucontrol->value.enumerated.item[0] = val & AC_AMP_GAIN;
2069 return 0;
2070}
2071
2072static int cxt5066_mic_boost_mux_enum_put(struct snd_kcontrol *kcontrol,
2073 struct snd_ctl_elem_value *ucontrol)
2074{
2075 struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
2076 const struct hda_input_mux *imux = &cxt5066_analog_mic_boost;
2077 unsigned int idx;
2078
2079 if (!imux->num_items)
2080 return 0;
2081 idx = ucontrol->value.enumerated.item[0];
2082 if (idx >= imux->num_items)
2083 idx = imux->num_items - 1;
2084
2085 snd_hda_codec_write_cache(codec, 0x17, 0,
2086 AC_VERB_SET_AMP_GAIN_MUTE,
2087 AC_AMP_SET_RIGHT | AC_AMP_SET_LEFT | AC_AMP_SET_OUTPUT |
2088 imux->items[idx].index);
2089
2090 return 1;
2091}
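/*
 * Despite being presented as an enum, "Analog Mic Boost Capture Enum" is
 * not a real input mux: the get/put callbacks above read and write the
 * output-amp gain of node 0x17 directly, using the raw gain values 0-4
 * that cxt5066_analog_mic_boost labels as 0dB..40dB.
 */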
2092
2093static struct hda_input_mux cxt5066_capture_source = {
2094 .num_items = 4,
2095 .items = {
2096 { "Mic B", 0 },
2097 { "Mic C", 1 },
2098 { "Mic E", 2 },
2099 { "Mic F", 3 },
2100 },
2101};
2102
2103static struct hda_bind_ctls cxt5066_bind_capture_vol_others = {
2104 .ops = &snd_hda_bind_vol,
2105 .values = {
2106 HDA_COMPOSE_AMP_VAL(0x14, 3, 0, HDA_INPUT),
2107 HDA_COMPOSE_AMP_VAL(0x14, 3, 2, HDA_INPUT),
2108 0
2109 },
2110};
2111
2112static struct hda_bind_ctls cxt5066_bind_capture_sw_others = {
2113 .ops = &snd_hda_bind_sw,
2114 .values = {
2115 HDA_COMPOSE_AMP_VAL(0x14, 3, 0, HDA_INPUT),
2116 HDA_COMPOSE_AMP_VAL(0x14, 3, 2, HDA_INPUT),
2117 0
2118 },
2119};
2120
2121static struct snd_kcontrol_new cxt5066_mixer_master[] = {
2122 HDA_CODEC_VOLUME("Master Playback Volume", 0x10, 0x00, HDA_OUTPUT),
2123 {}
2124};
2125
2126static struct snd_kcontrol_new cxt5066_mixer_master_olpc[] = {
2127 {
2128 .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
2129 .name = "Master Playback Volume",
2130 .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
2131 SNDRV_CTL_ELEM_ACCESS_TLV_READ |
2132 SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK,
2133 .info = snd_hda_mixer_amp_volume_info,
2134 .get = snd_hda_mixer_amp_volume_get,
2135 .put = snd_hda_mixer_amp_volume_put,
2136 .tlv = { .c = snd_hda_mixer_amp_tlv },
2137 /* offset by 28 volume steps to limit minimum gain to -46dB */
2138 .private_value =
2139 HDA_COMPOSE_AMP_VAL_OFS(0x10, 3, 0, HDA_OUTPUT, 28),
2140 },
2141 {}
2142};
2143
2144static struct snd_kcontrol_new cxt5066_mixers[] = {
2145 {
2146 .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
2147 .name = "Master Playback Switch",
2148 .info = cxt_eapd_info,
2149 .get = cxt_eapd_get,
2150 .put = cxt5066_hp_master_sw_put,
2151 .private_value = 0x1d,
2152 },
2153
2154 {
2155 .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
2156 .name = "Analog Mic Boost Capture Enum",
2157 .info = cxt5066_mic_boost_mux_enum_info,
2158 .get = cxt5066_mic_boost_mux_enum_get,
2159 .put = cxt5066_mic_boost_mux_enum_put,
2160 },
2161
2162 HDA_BIND_VOL("Capture Volume", &cxt5066_bind_capture_vol_others),
2163 HDA_BIND_SW("Capture Switch", &cxt5066_bind_capture_sw_others),
2164 {}
2165};
2166
2167static struct hda_verb cxt5066_init_verbs[] = {
2168 {0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80}, /* Port B */
2169 {0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80}, /* Port C */
2170 {0x1e, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN}, /* Port F */
2171 {0x1d, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN}, /* Port E */
2172
2173 /* Speakers */
2174 {0x1f, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
2175 {0x1f, AC_VERB_SET_CONNECT_SEL, 0x00}, /* DAC1 */
2176
2177 /* HP, Amp */
2178 {0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
2179 {0x19, AC_VERB_SET_CONNECT_SEL, 0x00}, /* DAC1 */
2180
2181 {0x1c, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
2182 {0x1c, AC_VERB_SET_CONNECT_SEL, 0x00}, /* DAC1 */
2183
2184 /* DAC1 */
2185 {0x10, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
2186
2187 /* Node 14 connections: 0x17 0x18 0x23 0x24 0x27 */
2188 {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0) | 0x50},
2189 {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
2190 {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(2) | 0x50},
2191 {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
2192 {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
2193
2194 /* no digital microphone support yet */
2195 {0x23, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
2196
2197 /* Audio input selector */
2198 {0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE | 0x3},
2199
2200 /* SPDIF route: PCM */
2201 {0x20, AC_VERB_SET_CONNECT_SEL, 0x0},
2202 {0x22, AC_VERB_SET_CONNECT_SEL, 0x0},
2203
2204 {0x20, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
2205 {0x22, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
2206
2207 /* EAPD */
2208 {0x1d, AC_VERB_SET_EAPD_BTLENABLE, 0x2}, /* default on */
2209
2210 /* not handling these yet */
2211 {0x19, AC_VERB_SET_UNSOLICITED_ENABLE, 0},
2212 {0x1a, AC_VERB_SET_UNSOLICITED_ENABLE, 0},
2213 {0x1b, AC_VERB_SET_UNSOLICITED_ENABLE, 0},
2214 {0x1c, AC_VERB_SET_UNSOLICITED_ENABLE, 0},
2215 {0x1d, AC_VERB_SET_UNSOLICITED_ENABLE, 0},
2216 {0x1e, AC_VERB_SET_UNSOLICITED_ENABLE, 0},
2217 {0x20, AC_VERB_SET_UNSOLICITED_ENABLE, 0},
2218 {0x22, AC_VERB_SET_UNSOLICITED_ENABLE, 0},
2219 { } /* end */
2220};
2221
2222static struct hda_verb cxt5066_init_verbs_olpc[] = {
2223 /* Port A: headphones */
2224 {0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
2225 {0x19, AC_VERB_SET_CONNECT_SEL, 0x00}, /* DAC1 */
2226
2227 /* Port B: external microphone */
2228 {0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
2229
2230 /* Port C: internal microphone */
2231 {0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
2232
2233 /* Port D: unused */
2234 {0x1c, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
2235
2236 /* Port E: unused, but has primary EAPD */
2237 {0x1d, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
2238 {0x1d, AC_VERB_SET_EAPD_BTLENABLE, 0x2}, /* default on */
2239
2240 /* Port F: unused */
2241 {0x1e, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
2242
2243 /* Port G: internal speakers */
2244 {0x1f, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
2245 {0x1f, AC_VERB_SET_CONNECT_SEL, 0x00}, /* DAC1 */
2246
2247 /* DAC1 */
2248 {0x10, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
2249
2250 /* DAC2: unused */
2251 {0x11, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
2252
2253 {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0) | 0x50},
2254 {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
2255 {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
2256 {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
2257 {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
2258 {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
2259 {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
2260 {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
2261 {0x16, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
2262 {0x16, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
2263 {0x16, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
2264 {0x16, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
2265
2266 /* Disable digital microphone port */
2267 {0x23, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
2268
2269 /* Audio input selectors */
2270 {0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE | 0x3},
2271 {0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
2272
2273 /* Disable SPDIF */
2274 {0x20, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
2275 {0x22, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
2276
2277 /* enable unsolicited events for Port A and B */
2278 {0x19, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | CONEXANT_HP_EVENT},
2279 {0x1a, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | CONEXANT_MIC_EVENT},
2280 { } /* end */
2281};
2282
2283static struct hda_verb cxt5066_init_verbs_portd_lo[] = {
2284 {0x1c, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
2285 { } /* end */
2286};
2287
2288/* initialize jack-sensing, too */
2289static int cxt5066_init(struct hda_codec *codec)
2290{
2291 snd_printdd("CXT5066: init\n");
2292 conexant_init(codec);
2293 if (codec->patch_ops.unsol_event) {
2294 cxt5066_hp_automute(codec);
2295 cxt5066_automic(codec);
2296 }
2297 return 0;
2298}
2299
2300enum {
2301 CXT5066_LAPTOP, /* Laptops w/ EAPD support */
2302 CXT5066_DELL_LAPTOP, /* Dell Laptop */
2303 CXT5066_OLPC_XO_1_5, /* OLPC XO 1.5 */
2304 CXT5066_MODELS
2305};
2306
2307static const char *cxt5066_models[CXT5066_MODELS] = {
2308 [CXT5066_LAPTOP] = "laptop",
2309 [CXT5066_DELL_LAPTOP] = "dell-laptop",
2310 [CXT5066_OLPC_XO_1_5] = "olpc-xo-1_5",
2311};
2312
2313static struct snd_pci_quirk cxt5066_cfg_tbl[] = {
2314 SND_PCI_QUIRK(0x14f1, 0x0101, "Conexant Reference board",
2315 CXT5066_LAPTOP),
2316 SND_PCI_QUIRK(0x1028, 0x02f5, "Dell",
2317 CXT5066_DELL_LAPTOP),
2318 {}
2319};
2320
2321static int patch_cxt5066(struct hda_codec *codec)
2322{
2323 struct conexant_spec *spec;
2324 int board_config;
2325
2326 spec = kzalloc(sizeof(*spec), GFP_KERNEL);
2327 if (!spec)
2328 return -ENOMEM;
2329 codec->spec = spec;
2330
2331 codec->patch_ops = conexant_patch_ops;
2332 codec->patch_ops.init = cxt5066_init;
2333
2334 spec->dell_automute = 0;
2335 spec->multiout.max_channels = 2;
2336 spec->multiout.num_dacs = ARRAY_SIZE(cxt5066_dac_nids);
2337 spec->multiout.dac_nids = cxt5066_dac_nids;
2338 spec->multiout.dig_out_nid = CXT5066_SPDIF_OUT;
2339 spec->num_adc_nids = 1;
2340 spec->adc_nids = cxt5066_adc_nids;
2341 spec->capsrc_nids = cxt5066_capsrc_nids;
2342 spec->input_mux = &cxt5066_capture_source;
2343
2344 spec->port_d_mode = PIN_HP;
2345
2346 spec->num_init_verbs = 1;
2347 spec->init_verbs[0] = cxt5066_init_verbs;
2348 spec->num_channel_mode = ARRAY_SIZE(cxt5066_modes);
2349 spec->channel_mode = cxt5066_modes;
2350 spec->cur_adc = 0;
2351 spec->cur_adc_idx = 0;
2352
2353 board_config = snd_hda_check_board_config(codec, CXT5066_MODELS,
2354 cxt5066_models, cxt5066_cfg_tbl);
2355 switch (board_config) {
2356 default:
2357 case CXT5066_LAPTOP:
2358 spec->mixers[spec->num_mixers++] = cxt5066_mixer_master;
2359 spec->mixers[spec->num_mixers++] = cxt5066_mixers;
2360 break;
2361 case CXT5066_DELL_LAPTOP:
2362 spec->mixers[spec->num_mixers++] = cxt5066_mixer_master;
2363 spec->mixers[spec->num_mixers++] = cxt5066_mixers;
2364
2365 spec->port_d_mode = PIN_OUT;
2366 spec->init_verbs[spec->num_init_verbs] = cxt5066_init_verbs_portd_lo;
2367 spec->num_init_verbs++;
2368 spec->dell_automute = 1;
2369 break;
2370 case CXT5066_OLPC_XO_1_5:
2371 codec->patch_ops.unsol_event = cxt5066_unsol_event;
2372 spec->init_verbs[0] = cxt5066_init_verbs_olpc;
2373 spec->mixers[spec->num_mixers++] = cxt5066_mixer_master_olpc;
2374 spec->mixers[spec->num_mixers++] = cxt5066_mixers;
2375 spec->port_d_mode = 0;
2376
2377 /* no S/PDIF out */
2378 spec->multiout.dig_out_nid = 0;
2379
2380 /* input source automatically selected */
2381 spec->input_mux = NULL;
2382 break;
2383 }
2384
2385 return 0;
2386}
1911 2387
1912/* 2388/*
1913 */ 2389 */
@@ -1919,12 +2395,15 @@ static struct hda_codec_preset snd_hda_preset_conexant[] = {
1919 .patch = patch_cxt5047 }, 2395 .patch = patch_cxt5047 },
1920 { .id = 0x14f15051, .name = "CX20561 (Hermosa)", 2396 { .id = 0x14f15051, .name = "CX20561 (Hermosa)",
1921 .patch = patch_cxt5051 }, 2397 .patch = patch_cxt5051 },
2398 { .id = 0x14f15066, .name = "CX20582 (Pebble)",
2399 .patch = patch_cxt5066 },
1922 {} /* terminator */ 2400 {} /* terminator */
1923}; 2401};
1924 2402
1925MODULE_ALIAS("snd-hda-codec-id:14f15045"); 2403MODULE_ALIAS("snd-hda-codec-id:14f15045");
1926MODULE_ALIAS("snd-hda-codec-id:14f15047"); 2404MODULE_ALIAS("snd-hda-codec-id:14f15047");
1927MODULE_ALIAS("snd-hda-codec-id:14f15051"); 2405MODULE_ALIAS("snd-hda-codec-id:14f15051");
2406MODULE_ALIAS("snd-hda-codec-id:14f15066");
1928 2407
1929MODULE_LICENSE("GPL"); 2408MODULE_LICENSE("GPL");
1930MODULE_DESCRIPTION("Conexant HD-audio codec"); 2409MODULE_DESCRIPTION("Conexant HD-audio codec");
diff --git a/sound/pci/hda/patch_intelhdmi.c b/sound/pci/hda/patch_intelhdmi.c
index fcc77fec4487..01a18ed475ac 100644
--- a/sound/pci/hda/patch_intelhdmi.c
+++ b/sound/pci/hda/patch_intelhdmi.c
@@ -33,8 +33,8 @@
33#include "hda_codec.h" 33#include "hda_codec.h"
34#include "hda_local.h" 34#include "hda_local.h"
35 35
36#define CVT_NID 0x02 /* audio converter */ 36static hda_nid_t cvt_nid; /* audio converter */
37#define PIN_NID 0x03 /* HDMI output pin */ 37static hda_nid_t pin_nid; /* HDMI output pin */
38 38
39#define INTEL_HDMI_EVENT_TAG 0x08 39#define INTEL_HDMI_EVENT_TAG 0x08
40 40
@@ -44,30 +44,6 @@ struct intel_hdmi_spec {
44 struct hdmi_eld sink_eld; 44 struct hdmi_eld sink_eld;
45}; 45};
46 46
47static struct hda_verb pinout_enable_verb[] = {
48 {PIN_NID, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
49 {} /* terminator */
50};
51
52static struct hda_verb unsolicited_response_verb[] = {
53 {PIN_NID, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN |
54 INTEL_HDMI_EVENT_TAG},
55 {}
56};
57
58static struct hda_verb def_chan_map[] = {
59 {CVT_NID, AC_VERB_SET_HDMI_CHAN_SLOT, 0x00},
60 {CVT_NID, AC_VERB_SET_HDMI_CHAN_SLOT, 0x11},
61 {CVT_NID, AC_VERB_SET_HDMI_CHAN_SLOT, 0x22},
62 {CVT_NID, AC_VERB_SET_HDMI_CHAN_SLOT, 0x33},
63 {CVT_NID, AC_VERB_SET_HDMI_CHAN_SLOT, 0x44},
64 {CVT_NID, AC_VERB_SET_HDMI_CHAN_SLOT, 0x55},
65 {CVT_NID, AC_VERB_SET_HDMI_CHAN_SLOT, 0x66},
66 {CVT_NID, AC_VERB_SET_HDMI_CHAN_SLOT, 0x77},
67 {}
68};
69
70
71struct hdmi_audio_infoframe { 47struct hdmi_audio_infoframe {
72 u8 type; /* 0x84 */ 48 u8 type; /* 0x84 */
73 u8 ver; /* 0x01 */ 49 u8 ver; /* 0x01 */
@@ -244,11 +220,12 @@ static void hdmi_write_dip_byte(struct hda_codec *codec, hda_nid_t nid,
244static void hdmi_enable_output(struct hda_codec *codec) 220static void hdmi_enable_output(struct hda_codec *codec)
245{ 221{
246 /* Unmute */ 222 /* Unmute */
247 if (get_wcaps(codec, PIN_NID) & AC_WCAP_OUT_AMP) 223 if (get_wcaps(codec, pin_nid) & AC_WCAP_OUT_AMP)
248 snd_hda_codec_write(codec, PIN_NID, 0, 224 snd_hda_codec_write(codec, pin_nid, 0,
249 AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE); 225 AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE);
250 /* Enable pin out */ 226 /* Enable pin out */
251 snd_hda_sequence_write(codec, pinout_enable_verb); 227 snd_hda_codec_write(codec, pin_nid, 0,
228 AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
252} 229}
253 230
254/* 231/*
@@ -256,8 +233,8 @@ static void hdmi_enable_output(struct hda_codec *codec)
256 */ 233 */
257static void hdmi_start_infoframe_trans(struct hda_codec *codec) 234static void hdmi_start_infoframe_trans(struct hda_codec *codec)
258{ 235{
259 hdmi_set_dip_index(codec, PIN_NID, 0x0, 0x0); 236 hdmi_set_dip_index(codec, pin_nid, 0x0, 0x0);
260 snd_hda_codec_write(codec, PIN_NID, 0, AC_VERB_SET_HDMI_DIP_XMIT, 237 snd_hda_codec_write(codec, pin_nid, 0, AC_VERB_SET_HDMI_DIP_XMIT,
261 AC_DIPXMIT_BEST); 238 AC_DIPXMIT_BEST);
262} 239}
263 240
@@ -266,20 +243,20 @@ static void hdmi_start_infoframe_trans(struct hda_codec *codec)
266 */ 243 */
267static void hdmi_stop_infoframe_trans(struct hda_codec *codec) 244static void hdmi_stop_infoframe_trans(struct hda_codec *codec)
268{ 245{
269 hdmi_set_dip_index(codec, PIN_NID, 0x0, 0x0); 246 hdmi_set_dip_index(codec, pin_nid, 0x0, 0x0);
270 snd_hda_codec_write(codec, PIN_NID, 0, AC_VERB_SET_HDMI_DIP_XMIT, 247 snd_hda_codec_write(codec, pin_nid, 0, AC_VERB_SET_HDMI_DIP_XMIT,
271 AC_DIPXMIT_DISABLE); 248 AC_DIPXMIT_DISABLE);
272} 249}
273 250
274static int hdmi_get_channel_count(struct hda_codec *codec) 251static int hdmi_get_channel_count(struct hda_codec *codec)
275{ 252{
276 return 1 + snd_hda_codec_read(codec, CVT_NID, 0, 253 return 1 + snd_hda_codec_read(codec, cvt_nid, 0,
277 AC_VERB_GET_CVT_CHAN_COUNT, 0); 254 AC_VERB_GET_CVT_CHAN_COUNT, 0);
278} 255}
279 256
280static void hdmi_set_channel_count(struct hda_codec *codec, int chs) 257static void hdmi_set_channel_count(struct hda_codec *codec, int chs)
281{ 258{
282 snd_hda_codec_write(codec, CVT_NID, 0, 259 snd_hda_codec_write(codec, cvt_nid, 0,
283 AC_VERB_SET_CVT_CHAN_COUNT, chs - 1); 260 AC_VERB_SET_CVT_CHAN_COUNT, chs - 1);
284 261
285 if (chs != hdmi_get_channel_count(codec)) 262 if (chs != hdmi_get_channel_count(codec))
@@ -294,7 +271,7 @@ static void hdmi_debug_channel_mapping(struct hda_codec *codec)
294 int slot; 271 int slot;
295 272
296 for (i = 0; i < 8; i++) { 273 for (i = 0; i < 8; i++) {
297 slot = snd_hda_codec_read(codec, CVT_NID, 0, 274 slot = snd_hda_codec_read(codec, cvt_nid, 0,
298 AC_VERB_GET_HDMI_CHAN_SLOT, i); 275 AC_VERB_GET_HDMI_CHAN_SLOT, i);
299 printk(KERN_DEBUG "HDMI: ASP channel %d => slot %d\n", 276 printk(KERN_DEBUG "HDMI: ASP channel %d => slot %d\n",
300 slot >> 4, slot & 0x7); 277 slot >> 4, slot & 0x7);
@@ -307,7 +284,7 @@ static void hdmi_parse_eld(struct hda_codec *codec)
307 struct intel_hdmi_spec *spec = codec->spec; 284 struct intel_hdmi_spec *spec = codec->spec;
308 struct hdmi_eld *eld = &spec->sink_eld; 285 struct hdmi_eld *eld = &spec->sink_eld;
309 286
310 if (!snd_hdmi_get_eld(eld, codec, PIN_NID)) 287 if (!snd_hdmi_get_eld(eld, codec, pin_nid))
311 snd_hdmi_show_eld(eld); 288 snd_hdmi_show_eld(eld);
312} 289}
313 290
@@ -322,11 +299,11 @@ static void hdmi_debug_dip_size(struct hda_codec *codec)
322 int i; 299 int i;
323 int size; 300 int size;
324 301
325 size = snd_hdmi_get_eld_size(codec, PIN_NID); 302 size = snd_hdmi_get_eld_size(codec, pin_nid);
326 printk(KERN_DEBUG "HDMI: ELD buf size is %d\n", size); 303 printk(KERN_DEBUG "HDMI: ELD buf size is %d\n", size);
327 304
328 for (i = 0; i < 8; i++) { 305 for (i = 0; i < 8; i++) {
329 size = snd_hda_codec_read(codec, PIN_NID, 0, 306 size = snd_hda_codec_read(codec, pin_nid, 0,
330 AC_VERB_GET_HDMI_DIP_SIZE, i); 307 AC_VERB_GET_HDMI_DIP_SIZE, i);
331 printk(KERN_DEBUG "HDMI: DIP GP[%d] buf size is %d\n", i, size); 308 printk(KERN_DEBUG "HDMI: DIP GP[%d] buf size is %d\n", i, size);
332 } 309 }
@@ -340,15 +317,15 @@ static void hdmi_clear_dip_buffers(struct hda_codec *codec)
340 int size; 317 int size;
341 int pi, bi; 318 int pi, bi;
342 for (i = 0; i < 8; i++) { 319 for (i = 0; i < 8; i++) {
343 size = snd_hda_codec_read(codec, PIN_NID, 0, 320 size = snd_hda_codec_read(codec, pin_nid, 0,
344 AC_VERB_GET_HDMI_DIP_SIZE, i); 321 AC_VERB_GET_HDMI_DIP_SIZE, i);
345 if (size == 0) 322 if (size == 0)
346 continue; 323 continue;
347 324
348 hdmi_set_dip_index(codec, PIN_NID, i, 0x0); 325 hdmi_set_dip_index(codec, pin_nid, i, 0x0);
349 for (j = 1; j < 1000; j++) { 326 for (j = 1; j < 1000; j++) {
350 hdmi_write_dip_byte(codec, PIN_NID, 0x0); 327 hdmi_write_dip_byte(codec, pin_nid, 0x0);
351 hdmi_get_dip_index(codec, PIN_NID, &pi, &bi); 328 hdmi_get_dip_index(codec, pin_nid, &pi, &bi);
352 if (pi != i) 329 if (pi != i)
353 snd_printd(KERN_INFO "dip index %d: %d != %d\n", 330 snd_printd(KERN_INFO "dip index %d: %d != %d\n",
354 bi, pi, i); 331 bi, pi, i);
@@ -376,9 +353,9 @@ static void hdmi_fill_audio_infoframe(struct hda_codec *codec,
376 sum += params[i]; 353 sum += params[i];
377 ai->checksum = - sum; 354 ai->checksum = - sum;
378 355
379 hdmi_set_dip_index(codec, PIN_NID, 0x0, 0x0); 356 hdmi_set_dip_index(codec, pin_nid, 0x0, 0x0);
380 for (i = 0; i < sizeof(ai); i++) 357 for (i = 0; i < sizeof(ai); i++)
381 hdmi_write_dip_byte(codec, PIN_NID, params[i]); 358 hdmi_write_dip_byte(codec, pin_nid, params[i]);
382} 359}
383 360
384/* 361/*
@@ -465,6 +442,8 @@ static int hdmi_setup_channel_allocation(struct hda_codec *codec,
465static void hdmi_setup_channel_mapping(struct hda_codec *codec, 442static void hdmi_setup_channel_mapping(struct hda_codec *codec,
466 struct hdmi_audio_infoframe *ai) 443 struct hdmi_audio_infoframe *ai)
467{ 444{
445 int i;
446
468 if (!ai->CA) 447 if (!ai->CA)
469 return; 448 return;
470 449
@@ -473,7 +452,11 @@ static void hdmi_setup_channel_mapping(struct hda_codec *codec,
473 * ALSA sequence is front/surr/clfe/side? 452 * ALSA sequence is front/surr/clfe/side?
474 */ 453 */
475 454
476 snd_hda_sequence_write(codec, def_chan_map); 455 for (i = 0; i < 8; i++)
456 snd_hda_codec_write(codec, cvt_nid, 0,
457 AC_VERB_SET_HDMI_CHAN_SLOT,
458 (i << 4) | i);
459
477 hdmi_debug_channel_mapping(codec); 460 hdmi_debug_channel_mapping(codec);
478} 461}
479 462
@@ -597,7 +580,6 @@ static struct hda_pcm_stream intel_hdmi_pcm_playback = {
597 .substreams = 1, 580 .substreams = 1,
598 .channels_min = 2, 581 .channels_min = 2,
599 .channels_max = 8, 582 .channels_max = 8,
600 .nid = CVT_NID, /* NID to query formats and rates and setup streams */
601 .ops = { 583 .ops = {
602 .open = intel_hdmi_playback_pcm_open, 584 .open = intel_hdmi_playback_pcm_open,
603 .close = intel_hdmi_playback_pcm_close, 585 .close = intel_hdmi_playback_pcm_close,
@@ -613,6 +595,9 @@ static int intel_hdmi_build_pcms(struct hda_codec *codec)
613 codec->num_pcms = 1; 595 codec->num_pcms = 1;
614 codec->pcm_info = info; 596 codec->pcm_info = info;
615 597
598 /* NID to query formats and rates and setup streams */
599 intel_hdmi_pcm_playback.nid = cvt_nid;
600
616 info->name = "INTEL HDMI"; 601 info->name = "INTEL HDMI";
617 info->pcm_type = HDA_PCM_TYPE_HDMI; 602 info->pcm_type = HDA_PCM_TYPE_HDMI;
618 info->stream[SNDRV_PCM_STREAM_PLAYBACK] = intel_hdmi_pcm_playback; 603 info->stream[SNDRV_PCM_STREAM_PLAYBACK] = intel_hdmi_pcm_playback;
@@ -636,8 +621,9 @@ static int intel_hdmi_init(struct hda_codec *codec)
636{ 621{
637 hdmi_enable_output(codec); 622 hdmi_enable_output(codec);
638 623
639 snd_hda_sequence_write(codec, unsolicited_response_verb); 624 snd_hda_codec_write(codec, pin_nid, 0,
640 625 AC_VERB_SET_UNSOLICITED_ENABLE,
626 AC_USRSP_EN | INTEL_HDMI_EVENT_TAG);
641 return 0; 627 return 0;
642} 628}
643 629
@@ -657,7 +643,7 @@ static struct hda_codec_ops intel_hdmi_patch_ops = {
657 .unsol_event = intel_hdmi_unsol_event, 643 .unsol_event = intel_hdmi_unsol_event,
658}; 644};
659 645
660static int patch_intel_hdmi(struct hda_codec *codec) 646static int do_patch_intel_hdmi(struct hda_codec *codec)
661{ 647{
662 struct intel_hdmi_spec *spec; 648 struct intel_hdmi_spec *spec;
663 649
@@ -667,7 +653,7 @@ static int patch_intel_hdmi(struct hda_codec *codec)
667 653
668 spec->multiout.num_dacs = 0; /* no analog */ 654 spec->multiout.num_dacs = 0; /* no analog */
669 spec->multiout.max_channels = 8; 655 spec->multiout.max_channels = 8;
670 spec->multiout.dig_out_nid = CVT_NID; 656 spec->multiout.dig_out_nid = cvt_nid;
671 657
672 codec->spec = spec; 658 codec->spec = spec;
673 codec->patch_ops = intel_hdmi_patch_ops; 659 codec->patch_ops = intel_hdmi_patch_ops;
@@ -679,12 +665,27 @@ static int patch_intel_hdmi(struct hda_codec *codec)
679 return 0; 665 return 0;
680} 666}
681 667
668static int patch_intel_hdmi(struct hda_codec *codec)
669{
670 cvt_nid = 0x02;
671 pin_nid = 0x03;
672 return do_patch_intel_hdmi(codec);
673}
674
675static int patch_intel_hdmi_ibexpeak(struct hda_codec *codec)
676{
677 cvt_nid = 0x02;
678 pin_nid = 0x04;
679 return do_patch_intel_hdmi(codec);
680}
681
682static struct hda_codec_preset snd_hda_preset_intelhdmi[] = { 682static struct hda_codec_preset snd_hda_preset_intelhdmi[] = {
683 { .id = 0x808629fb, .name = "G45 DEVCL", .patch = patch_intel_hdmi }, 683 { .id = 0x808629fb, .name = "G45 DEVCL", .patch = patch_intel_hdmi },
684 { .id = 0x80862801, .name = "G45 DEVBLC", .patch = patch_intel_hdmi }, 684 { .id = 0x80862801, .name = "G45 DEVBLC", .patch = patch_intel_hdmi },
685 { .id = 0x80862802, .name = "G45 DEVCTG", .patch = patch_intel_hdmi }, 685 { .id = 0x80862802, .name = "G45 DEVCTG", .patch = patch_intel_hdmi },
686 { .id = 0x80862803, .name = "G45 DEVELK", .patch = patch_intel_hdmi }, 686 { .id = 0x80862803, .name = "G45 DEVELK", .patch = patch_intel_hdmi },
687 { .id = 0x80862804, .name = "G45 DEVIBX", .patch = patch_intel_hdmi }, 687 { .id = 0x80862804, .name = "G45 DEVIBX", .patch = patch_intel_hdmi },
688 { .id = 0x80860054, .name = "Q57 DEVIBX", .patch = patch_intel_hdmi_ibexpeak },
688 { .id = 0x10951392, .name = "SiI1392 HDMI", .patch = patch_intel_hdmi }, 689 { .id = 0x10951392, .name = "SiI1392 HDMI", .patch = patch_intel_hdmi },
689 {} /* terminator */ 690 {} /* terminator */
690}; 691};
@@ -694,6 +695,7 @@ MODULE_ALIAS("snd-hda-codec-id:80862801");
694MODULE_ALIAS("snd-hda-codec-id:80862802"); 695MODULE_ALIAS("snd-hda-codec-id:80862802");
695MODULE_ALIAS("snd-hda-codec-id:80862803"); 696MODULE_ALIAS("snd-hda-codec-id:80862803");
696MODULE_ALIAS("snd-hda-codec-id:80862804"); 697MODULE_ALIAS("snd-hda-codec-id:80862804");
698MODULE_ALIAS("snd-hda-codec-id:80860054");
697MODULE_ALIAS("snd-hda-codec-id:10951392"); 699MODULE_ALIAS("snd-hda-codec-id:10951392");
698 700
699MODULE_LICENSE("GPL"); 701MODULE_LICENSE("GPL");
diff --git a/sound/pci/hda/patch_nvhdmi.c b/sound/pci/hda/patch_nvhdmi.c
index f5792e2eea82..c8435c9a97f9 100644
--- a/sound/pci/hda/patch_nvhdmi.c
+++ b/sound/pci/hda/patch_nvhdmi.c
@@ -377,6 +377,7 @@ static int patch_nvhdmi_2ch(struct hda_codec *codec)
377 */ 377 */
378static struct hda_codec_preset snd_hda_preset_nvhdmi[] = { 378static struct hda_codec_preset snd_hda_preset_nvhdmi[] = {
379 { .id = 0x10de0002, .name = "MCP78 HDMI", .patch = patch_nvhdmi_8ch }, 379 { .id = 0x10de0002, .name = "MCP78 HDMI", .patch = patch_nvhdmi_8ch },
380 { .id = 0x10de0003, .name = "MCP78 HDMI", .patch = patch_nvhdmi_8ch },
380 { .id = 0x10de0006, .name = "MCP78 HDMI", .patch = patch_nvhdmi_8ch }, 381 { .id = 0x10de0006, .name = "MCP78 HDMI", .patch = patch_nvhdmi_8ch },
381 { .id = 0x10de0007, .name = "MCP7A HDMI", .patch = patch_nvhdmi_8ch }, 382 { .id = 0x10de0007, .name = "MCP7A HDMI", .patch = patch_nvhdmi_8ch },
382 { .id = 0x10de0067, .name = "MCP67 HDMI", .patch = patch_nvhdmi_2ch }, 383 { .id = 0x10de0067, .name = "MCP67 HDMI", .patch = patch_nvhdmi_2ch },
@@ -385,6 +386,7 @@ static struct hda_codec_preset snd_hda_preset_nvhdmi[] = {
385}; 386};
386 387
387MODULE_ALIAS("snd-hda-codec-id:10de0002"); 388MODULE_ALIAS("snd-hda-codec-id:10de0002");
389MODULE_ALIAS("snd-hda-codec-id:10de0003");
388MODULE_ALIAS("snd-hda-codec-id:10de0006"); 390MODULE_ALIAS("snd-hda-codec-id:10de0006");
389MODULE_ALIAS("snd-hda-codec-id:10de0007"); 391MODULE_ALIAS("snd-hda-codec-id:10de0007");
390MODULE_ALIAS("snd-hda-codec-id:10de0067"); 392MODULE_ALIAS("snd-hda-codec-id:10de0067");
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 30eeb304351c..7ed47f66ddd1 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -208,12 +208,6 @@ enum {
208 ALC885_MBP3, 208 ALC885_MBP3,
209 ALC885_MB5, 209 ALC885_MB5,
210 ALC885_IMAC24, 210 ALC885_IMAC24,
211 ALC882_AUTO,
212 ALC882_MODEL_LAST,
213};
214
215/* ALC883 models */
216enum {
217 ALC883_3ST_2ch_DIG, 211 ALC883_3ST_2ch_DIG,
218 ALC883_3ST_6ch_DIG, 212 ALC883_3ST_6ch_DIG,
219 ALC883_3ST_6ch, 213 ALC883_3ST_6ch,
@@ -226,6 +220,7 @@ enum {
226 ALC888_ACER_ASPIRE_4930G, 220 ALC888_ACER_ASPIRE_4930G,
227 ALC888_ACER_ASPIRE_6530G, 221 ALC888_ACER_ASPIRE_6530G,
228 ALC888_ACER_ASPIRE_8930G, 222 ALC888_ACER_ASPIRE_8930G,
223 ALC888_ACER_ASPIRE_7730G,
229 ALC883_MEDION, 224 ALC883_MEDION,
230 ALC883_MEDION_MD2, 225 ALC883_MEDION_MD2,
231 ALC883_LAPTOP_EAPD, 226 ALC883_LAPTOP_EAPD,
@@ -237,17 +232,20 @@ enum {
237 ALC888_3ST_HP, 232 ALC888_3ST_HP,
238 ALC888_6ST_DELL, 233 ALC888_6ST_DELL,
239 ALC883_MITAC, 234 ALC883_MITAC,
235 ALC883_CLEVO_M540R,
240 ALC883_CLEVO_M720, 236 ALC883_CLEVO_M720,
241 ALC883_FUJITSU_PI2515, 237 ALC883_FUJITSU_PI2515,
242 ALC888_FUJITSU_XA3530, 238 ALC888_FUJITSU_XA3530,
243 ALC883_3ST_6ch_INTEL, 239 ALC883_3ST_6ch_INTEL,
240 ALC889A_INTEL,
241 ALC889_INTEL,
244 ALC888_ASUS_M90V, 242 ALC888_ASUS_M90V,
245 ALC888_ASUS_EEE1601, 243 ALC888_ASUS_EEE1601,
246 ALC889A_MB31, 244 ALC889A_MB31,
247 ALC1200_ASUS_P5Q, 245 ALC1200_ASUS_P5Q,
248 ALC883_SONY_VAIO_TT, 246 ALC883_SONY_VAIO_TT,
249 ALC883_AUTO, 247 ALC882_AUTO,
250 ALC883_MODEL_LAST, 248 ALC882_MODEL_LAST,
251}; 249};
252 250
253/* for GPIO Poll */ 251/* for GPIO Poll */
@@ -262,6 +260,14 @@ enum {
262 ALC_INIT_GPIO3, 260 ALC_INIT_GPIO3,
263}; 261};
264 262
263struct alc_mic_route {
264 hda_nid_t pin;
265 unsigned char mux_idx;
266 unsigned char amix_idx;
267};
268
269#define MUX_IDX_UNDEF ((unsigned char)-1)
270
265struct alc_spec { 271struct alc_spec {
266 /* codec parameterization */ 272 /* codec parameterization */
267 struct snd_kcontrol_new *mixers[5]; /* mixer arrays */ 273 struct snd_kcontrol_new *mixers[5]; /* mixer arrays */
@@ -304,6 +310,8 @@ struct alc_spec {
304 unsigned int num_mux_defs; 310 unsigned int num_mux_defs;
305 const struct hda_input_mux *input_mux; 311 const struct hda_input_mux *input_mux;
306 unsigned int cur_mux[3]; 312 unsigned int cur_mux[3];
313 struct alc_mic_route ext_mic;
314 struct alc_mic_route int_mic;
307 315
308 /* channel model */ 316 /* channel model */
309 const struct hda_channel_mode *channel_mode; 317 const struct hda_channel_mode *channel_mode;
@@ -320,6 +328,8 @@ struct alc_spec {
320 struct snd_array kctls; 328 struct snd_array kctls;
321 struct hda_input_mux private_imux[3]; 329 struct hda_input_mux private_imux[3];
322 hda_nid_t private_dac_nids[AUTO_CFG_MAX_OUTS]; 330 hda_nid_t private_dac_nids[AUTO_CFG_MAX_OUTS];
331 hda_nid_t private_adc_nids[AUTO_CFG_MAX_OUTS];
332 hda_nid_t private_capsrc_nids[AUTO_CFG_MAX_OUTS];
323 333
324 /* hooks */ 334 /* hooks */
325 void (*init_hook)(struct hda_codec *codec); 335 void (*init_hook)(struct hda_codec *codec);
@@ -329,6 +339,7 @@ struct alc_spec {
329 unsigned int sense_updated: 1; 339 unsigned int sense_updated: 1;
330 unsigned int jack_present: 1; 340 unsigned int jack_present: 1;
331 unsigned int master_sw: 1; 341 unsigned int master_sw: 1;
342 unsigned int auto_mic:1;
332 343
333 /* other flags */ 344 /* other flags */
334 unsigned int no_analog :1; /* digital I/O only */ 345 unsigned int no_analog :1; /* digital I/O only */
@@ -370,6 +381,7 @@ struct alc_config_preset {
370 unsigned int num_mux_defs; 381 unsigned int num_mux_defs;
371 const struct hda_input_mux *input_mux; 382 const struct hda_input_mux *input_mux;
372 void (*unsol_event)(struct hda_codec *, unsigned int); 383 void (*unsol_event)(struct hda_codec *, unsigned int);
384 void (*setup)(struct hda_codec *);
373 void (*init_hook)(struct hda_codec *); 385 void (*init_hook)(struct hda_codec *);
374#ifdef CONFIG_SND_HDA_POWER_SAVE 386#ifdef CONFIG_SND_HDA_POWER_SAVE
375 struct hda_amp_list *loopbacks; 387 struct hda_amp_list *loopbacks;
@@ -417,7 +429,7 @@ static int alc_mux_enum_put(struct snd_kcontrol *kcontrol,
417 mux_idx = adc_idx >= spec->num_mux_defs ? 0 : adc_idx; 429 mux_idx = adc_idx >= spec->num_mux_defs ? 0 : adc_idx;
418 imux = &spec->input_mux[mux_idx]; 430 imux = &spec->input_mux[mux_idx];
419 431
420 type = (get_wcaps(codec, nid) & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT; 432 type = get_wcaps_type(get_wcaps(codec, nid));
421 if (type == AC_WID_AUD_MIX) { 433 if (type == AC_WID_AUD_MIX) {
422 /* Matrix-mixer style (e.g. ALC882) */ 434 /* Matrix-mixer style (e.g. ALC882) */
423 unsigned int *cur_val = &spec->cur_mux[adc_idx]; 435 unsigned int *cur_val = &spec->cur_mux[adc_idx];
@@ -842,9 +854,10 @@ static void print_realtek_coef(struct snd_info_buffer *buffer,
842/* 854/*
843 * set up from the preset table 855 * set up from the preset table
844 */ 856 */
845static void setup_preset(struct alc_spec *spec, 857static void setup_preset(struct hda_codec *codec,
846 const struct alc_config_preset *preset) 858 const struct alc_config_preset *preset)
847{ 859{
860 struct alc_spec *spec = codec->spec;
848 int i; 861 int i;
849 862
850 for (i = 0; i < ARRAY_SIZE(preset->mixers) && preset->mixers[i]; i++) 863 for (i = 0; i < ARRAY_SIZE(preset->mixers) && preset->mixers[i]; i++)
@@ -886,6 +899,9 @@ static void setup_preset(struct alc_spec *spec,
886#ifdef CONFIG_SND_HDA_POWER_SAVE 899#ifdef CONFIG_SND_HDA_POWER_SAVE
887 spec->loopback.amplist = preset->loopbacks; 900 spec->loopback.amplist = preset->loopbacks;
888#endif 901#endif
902
903 if (preset->setup)
904 preset->setup(codec);
889} 905}
890 906
891/* Enable GPIO mask and set output */ 907/* Enable GPIO mask and set output */
@@ -965,30 +981,64 @@ static void alc_automute_pin(struct hda_codec *codec)
965 } 981 }
966} 982}
967 983
968#if 0 /* it's broken in some cases -- temporarily disabled */ 984static int get_connection_index(struct hda_codec *codec, hda_nid_t mux,
985 hda_nid_t nid)
986{
987 hda_nid_t conn[HDA_MAX_NUM_INPUTS];
988 int i, nums;
989
990 nums = snd_hda_get_connections(codec, mux, conn, ARRAY_SIZE(conn));
991 for (i = 0; i < nums; i++)
992 if (conn[i] == nid)
993 return i;
994 return -1;
995}
996
969static void alc_mic_automute(struct hda_codec *codec) 997static void alc_mic_automute(struct hda_codec *codec)
970{ 998{
971 struct alc_spec *spec = codec->spec; 999 struct alc_spec *spec = codec->spec;
972 unsigned int present; 1000 struct alc_mic_route *dead, *alive;
973 unsigned int mic_nid = spec->autocfg.input_pins[AUTO_PIN_MIC]; 1001 unsigned int present, type;
974 unsigned int fmic_nid = spec->autocfg.input_pins[AUTO_PIN_FRONT_MIC]; 1002 hda_nid_t cap_nid;
975 unsigned int mix_nid = spec->capsrc_nids[0]; 1003
976 unsigned int capsrc_idx_mic, capsrc_idx_fmic; 1004 if (!spec->auto_mic)
977 1005 return;
978 capsrc_idx_mic = mic_nid - 0x18; 1006 if (!spec->int_mic.pin || !spec->ext_mic.pin)
979 capsrc_idx_fmic = fmic_nid - 0x18; 1007 return;
980 present = snd_hda_codec_read(codec, mic_nid, 0, 1008 if (snd_BUG_ON(!spec->adc_nids))
981 AC_VERB_GET_PIN_SENSE, 0) & 0x80000000; 1009 return;
982 snd_hda_codec_write(codec, mix_nid, 0, AC_VERB_SET_AMP_GAIN_MUTE, 1010
983 0x7000 | (capsrc_idx_mic << 8) | (present ? 0 : 0x80)); 1011 cap_nid = spec->capsrc_nids ? spec->capsrc_nids[0] : spec->adc_nids[0];
984 snd_hda_codec_write(codec, mix_nid, 0, AC_VERB_SET_AMP_GAIN_MUTE, 1012
985 0x7000 | (capsrc_idx_fmic << 8) | (present ? 0x80 : 0)); 1013 present = snd_hda_codec_read(codec, spec->ext_mic.pin, 0,
986 snd_hda_codec_amp_stereo(codec, 0x0b, HDA_INPUT, capsrc_idx_fmic, 1014 AC_VERB_GET_PIN_SENSE, 0);
987 HDA_AMP_MUTE, present ? HDA_AMP_MUTE : 0); 1015 present &= AC_PINSENSE_PRESENCE;
1016 if (present) {
1017 alive = &spec->ext_mic;
1018 dead = &spec->int_mic;
1019 } else {
1020 alive = &spec->int_mic;
1021 dead = &spec->ext_mic;
1022 }
1023
1024 type = get_wcaps_type(get_wcaps(codec, cap_nid));
1025 if (type == AC_WID_AUD_MIX) {
1026 /* Matrix-mixer style (e.g. ALC882) */
1027 snd_hda_codec_amp_stereo(codec, cap_nid, HDA_INPUT,
1028 alive->mux_idx,
1029 HDA_AMP_MUTE, 0);
1030 snd_hda_codec_amp_stereo(codec, cap_nid, HDA_INPUT,
1031 dead->mux_idx,
1032 HDA_AMP_MUTE, HDA_AMP_MUTE);
1033 } else {
1034 /* MUX style (e.g. ALC880) */
1035 snd_hda_codec_write_cache(codec, cap_nid, 0,
1036 AC_VERB_SET_CONNECT_SEL,
1037 alive->mux_idx);
1038 }
1039
1040 /* FIXME: analog mixer */
988} 1041}
989#else
990#define alc_mic_automute(codec) do {} while(0) /* NOP */
991#endif /* disabled */
992 1042
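/*
 * The rewritten alc_mic_automute() keys off the external-mic jack: whichever
 * of spec->ext_mic/int_mic is currently plugged becomes "alive", the other
 * "dead".  For a matrix-mixer capture widget (AC_WID_AUD_MIX, e.g. ALC882)
 * both inputs stay connected and the dead one is muted via its input amp;
 * for a MUX-style widget (e.g. ALC880) the alive input is simply selected
 * with AC_VERB_SET_CONNECT_SEL.  The mux_idx values used here are filled in
 * later by fixup_automic_adc() further down in this patch.
 */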
993/* unsolicited event for HP jack sensing */ 1043/* unsolicited event for HP jack sensing */
994static void alc_sku_unsol_event(struct hda_codec *codec, unsigned int res) 1044static void alc_sku_unsol_event(struct hda_codec *codec, unsigned int res)
@@ -1031,6 +1081,16 @@ static void alc888_coef_init(struct hda_codec *codec)
1031 AC_VERB_SET_PROC_COEF, 0x3030); 1081 AC_VERB_SET_PROC_COEF, 0x3030);
1032} 1082}
1033 1083
1084static void alc889_coef_init(struct hda_codec *codec)
1085{
1086 unsigned int tmp;
1087
1088 snd_hda_codec_write(codec, 0x20, 0, AC_VERB_SET_COEF_INDEX, 7);
1089 tmp = snd_hda_codec_read(codec, 0x20, 0, AC_VERB_GET_PROC_COEF, 0);
1090 snd_hda_codec_write(codec, 0x20, 0, AC_VERB_SET_COEF_INDEX, 7);
1091 snd_hda_codec_write(codec, 0x20, 0, AC_VERB_SET_PROC_COEF, tmp|0x2010);
1092}
1093
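/*
 * alc889_coef_init() factors out the coefficient read-modify-write that used
 * to be open-coded in alc_auto_init_amp(): select processing coefficient
 * index 7 on NID 0x20, read the current value, select index 7 again and write
 * the value back with bits 0x2010 ORed in -- presumably to power up the
 * output amplifiers, much like the "EAPD mode" coefficient write used for the
 * other ALC88x variants in this file.
 */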
1034static void alc_auto_init_amp(struct hda_codec *codec, int type) 1094static void alc_auto_init_amp(struct hda_codec *codec, int type)
1035{ 1095{
1036 unsigned int tmp; 1096 unsigned int tmp;
@@ -1088,15 +1148,7 @@ static void alc_auto_init_amp(struct hda_codec *codec, int type)
1088 case 0x10ec0885: 1148 case 0x10ec0885:
1089 case 0x10ec0887: 1149 case 0x10ec0887:
1090 case 0x10ec0889: 1150 case 0x10ec0889:
1091 snd_hda_codec_write(codec, 0x20, 0, 1151 alc889_coef_init(codec);
1092 AC_VERB_SET_COEF_INDEX, 7);
1093 tmp = snd_hda_codec_read(codec, 0x20, 0,
1094 AC_VERB_GET_PROC_COEF, 0);
1095 snd_hda_codec_write(codec, 0x20, 0,
1096 AC_VERB_SET_COEF_INDEX, 7);
1097 snd_hda_codec_write(codec, 0x20, 0,
1098 AC_VERB_SET_PROC_COEF,
1099 tmp | 0x2010);
1100 break; 1152 break;
1101 case 0x10ec0888: 1153 case 0x10ec0888:
1102 alc888_coef_init(codec); 1154 alc888_coef_init(codec);
@@ -1142,6 +1194,55 @@ static void alc_init_auto_hp(struct hda_codec *codec)
1142 spec->unsol_event = alc_sku_unsol_event; 1194 spec->unsol_event = alc_sku_unsol_event;
1143} 1195}
1144 1196
1197static void alc_init_auto_mic(struct hda_codec *codec)
1198{
1199 struct alc_spec *spec = codec->spec;
1200 struct auto_pin_cfg *cfg = &spec->autocfg;
1201 hda_nid_t fixed, ext;
1202 int i;
1203
 1204	/* there must be exactly two mic inputs and no other input pins */
1205 for (i = AUTO_PIN_LINE; i < AUTO_PIN_LAST; i++)
1206 if (cfg->input_pins[i])
1207 return;
1208
1209 fixed = ext = 0;
1210 for (i = AUTO_PIN_MIC; i <= AUTO_PIN_FRONT_MIC; i++) {
1211 hda_nid_t nid = cfg->input_pins[i];
1212 unsigned int defcfg;
1213 if (!nid)
1214 return;
1215 defcfg = snd_hda_codec_get_pincfg(codec, nid);
1216 switch (get_defcfg_connect(defcfg)) {
1217 case AC_JACK_PORT_FIXED:
1218 if (fixed)
1219 return; /* already occupied */
1220 fixed = nid;
1221 break;
1222 case AC_JACK_PORT_COMPLEX:
1223 if (ext)
1224 return; /* already occupied */
1225 ext = nid;
1226 break;
1227 default:
1228 return; /* invalid entry */
1229 }
1230 }
1231 if (!(get_wcaps(codec, ext) & AC_WCAP_UNSOL_CAP))
1232 return; /* no unsol support */
1233 snd_printdd("realtek: Enable auto-mic switch on NID 0x%x/0x%x\n",
1234 ext, fixed);
1235 spec->ext_mic.pin = ext;
1236 spec->int_mic.pin = fixed;
1237 spec->ext_mic.mux_idx = MUX_IDX_UNDEF; /* set later */
1238 spec->int_mic.mux_idx = MUX_IDX_UNDEF; /* set later */
1239 spec->auto_mic = 1;
1240 snd_hda_codec_write_cache(codec, spec->ext_mic.pin, 0,
1241 AC_VERB_SET_UNSOLICITED_ENABLE,
1242 AC_USRSP_EN | ALC880_MIC_EVENT);
1243 spec->unsol_event = alc_sku_unsol_event;
1244}
1245
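/*
 * alc_init_auto_mic() enables the automatic mic switch only for the simple
 * two-mic case: the BIOS pin config must expose no line/CD/aux inputs,
 * exactly one fixed (built-in) mic and one jack-type (complex) mic, and the
 * external jack must be capable of unsolicited events.  When all of that
 * holds, ext_mic/int_mic are recorded, their mux indices are left as
 * MUX_IDX_UNDEF to be resolved later, and jack detection is routed through
 * alc_sku_unsol_event() via ALC880_MIC_EVENT.
 */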
1145/* check subsystem ID and set up device-specific initialization; 1246/* check subsystem ID and set up device-specific initialization;
1146 * return 1 if initialized, 0 if invalid SSID 1247 * return 1 if initialized, 0 if invalid SSID
1147 */ 1248 */
@@ -1243,6 +1344,7 @@ do_sku:
1243 } 1344 }
1244 1345
1245 alc_init_auto_hp(codec); 1346 alc_init_auto_hp(codec);
1347 alc_init_auto_mic(codec);
1246 return 1; 1348 return 1;
1247} 1349}
1248 1350
@@ -1255,6 +1357,7 @@ static void alc_ssid_check(struct hda_codec *codec,
1255 "Enable default setup for auto mode as fallback\n"); 1357 "Enable default setup for auto mode as fallback\n");
1256 spec->init_amp = ALC_INIT_DEFAULT; 1358 spec->init_amp = ALC_INIT_DEFAULT;
1257 alc_init_auto_hp(codec); 1359 alc_init_auto_hp(codec);
1360 alc_init_auto_mic(codec);
1258 } 1361 }
1259} 1362}
1260 1363
@@ -1436,7 +1539,25 @@ static void alc_automute_amp_unsol_event(struct hda_codec *codec,
1436 alc_automute_amp(codec); 1539 alc_automute_amp(codec);
1437} 1540}
1438 1541
1439static void alc888_fujitsu_xa3530_init_hook(struct hda_codec *codec) 1542static void alc889_automute_setup(struct hda_codec *codec)
1543{
1544 struct alc_spec *spec = codec->spec;
1545
1546 spec->autocfg.hp_pins[0] = 0x15;
1547 spec->autocfg.speaker_pins[0] = 0x14;
1548 spec->autocfg.speaker_pins[1] = 0x16;
1549 spec->autocfg.speaker_pins[2] = 0x17;
1550 spec->autocfg.speaker_pins[3] = 0x19;
1551 spec->autocfg.speaker_pins[4] = 0x1a;
1552}
1553
1554static void alc889_intel_init_hook(struct hda_codec *codec)
1555{
1556 alc889_coef_init(codec);
1557 alc_automute_amp(codec);
1558}
1559
1560static void alc888_fujitsu_xa3530_setup(struct hda_codec *codec)
1440{ 1561{
1441 struct alc_spec *spec = codec->spec; 1562 struct alc_spec *spec = codec->spec;
1442 1563
@@ -1444,7 +1565,6 @@ static void alc888_fujitsu_xa3530_init_hook(struct hda_codec *codec)
1444 spec->autocfg.hp_pins[1] = 0x1b; /* hp */ 1565 spec->autocfg.hp_pins[1] = 0x1b; /* hp */
1445 spec->autocfg.speaker_pins[0] = 0x14; /* speaker */ 1566 spec->autocfg.speaker_pins[0] = 0x14; /* speaker */
1446 spec->autocfg.speaker_pins[1] = 0x15; /* bass */ 1567 spec->autocfg.speaker_pins[1] = 0x15; /* bass */
1447 alc_automute_amp(codec);
1448} 1568}
1449 1569
1450/* 1570/*
@@ -1643,16 +1763,15 @@ static struct snd_kcontrol_new alc888_base_mixer[] = {
1643 { } /* end */ 1763 { } /* end */
1644}; 1764};
1645 1765
1646static void alc888_acer_aspire_4930g_init_hook(struct hda_codec *codec) 1766static void alc888_acer_aspire_4930g_setup(struct hda_codec *codec)
1647{ 1767{
1648 struct alc_spec *spec = codec->spec; 1768 struct alc_spec *spec = codec->spec;
1649 1769
1650 spec->autocfg.hp_pins[0] = 0x15; 1770 spec->autocfg.hp_pins[0] = 0x15;
1651 spec->autocfg.speaker_pins[0] = 0x14; 1771 spec->autocfg.speaker_pins[0] = 0x14;
1652 alc_automute_amp(codec);
1653} 1772}
1654 1773
1655static void alc888_acer_aspire_6530g_init_hook(struct hda_codec *codec) 1774static void alc888_acer_aspire_6530g_setup(struct hda_codec *codec)
1656{ 1775{
1657 struct alc_spec *spec = codec->spec; 1776 struct alc_spec *spec = codec->spec;
1658 1777
@@ -1660,10 +1779,9 @@ static void alc888_acer_aspire_6530g_init_hook(struct hda_codec *codec)
1660 spec->autocfg.speaker_pins[0] = 0x14; 1779 spec->autocfg.speaker_pins[0] = 0x14;
1661 spec->autocfg.speaker_pins[1] = 0x16; 1780 spec->autocfg.speaker_pins[1] = 0x16;
1662 spec->autocfg.speaker_pins[2] = 0x17; 1781 spec->autocfg.speaker_pins[2] = 0x17;
1663 alc_automute_amp(codec);
1664} 1782}
1665 1783
1666static void alc889_acer_aspire_8930g_init_hook(struct hda_codec *codec) 1784static void alc889_acer_aspire_8930g_setup(struct hda_codec *codec)
1667{ 1785{
1668 struct alc_spec *spec = codec->spec; 1786 struct alc_spec *spec = codec->spec;
1669 1787
@@ -1671,7 +1789,6 @@ static void alc889_acer_aspire_8930g_init_hook(struct hda_codec *codec)
1671 spec->autocfg.speaker_pins[0] = 0x14; 1789 spec->autocfg.speaker_pins[0] = 0x14;
1672 spec->autocfg.speaker_pins[1] = 0x16; 1790 spec->autocfg.speaker_pins[1] = 0x16;
1673 spec->autocfg.speaker_pins[2] = 0x1b; 1791 spec->autocfg.speaker_pins[2] = 0x1b;
1674 alc_automute_amp(codec);
1675} 1792}
1676 1793
1677/* 1794/*
@@ -2651,13 +2768,17 @@ static void alc880_uniwill_mic_automute(struct hda_codec *codec)
2651 snd_hda_codec_amp_stereo(codec, 0x0b, HDA_INPUT, 1, HDA_AMP_MUTE, bits); 2768 snd_hda_codec_amp_stereo(codec, 0x0b, HDA_INPUT, 1, HDA_AMP_MUTE, bits);
2652} 2769}
2653 2770
2654static void alc880_uniwill_init_hook(struct hda_codec *codec) 2771static void alc880_uniwill_setup(struct hda_codec *codec)
2655{ 2772{
2656 struct alc_spec *spec = codec->spec; 2773 struct alc_spec *spec = codec->spec;
2657 2774
2658 spec->autocfg.hp_pins[0] = 0x14; 2775 spec->autocfg.hp_pins[0] = 0x14;
2659 spec->autocfg.speaker_pins[0] = 0x15; 2776 spec->autocfg.speaker_pins[0] = 0x15;
2660 spec->autocfg.speaker_pins[0] = 0x16; 2777 spec->autocfg.speaker_pins[0] = 0x16;
2778}
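/*
 * Note that alc880_uniwill_setup() assigns speaker_pins[0] twice (0x15, then
 * 0x16), so the 0x15 entry is overwritten; the second store was very likely
 * meant to be speaker_pins[1].
 */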
2779
2780static void alc880_uniwill_init_hook(struct hda_codec *codec)
2781{
2661 alc_automute_amp(codec); 2782 alc_automute_amp(codec);
2662 alc880_uniwill_mic_automute(codec); 2783 alc880_uniwill_mic_automute(codec);
2663} 2784}
@@ -2678,13 +2799,12 @@ static void alc880_uniwill_unsol_event(struct hda_codec *codec,
2678 } 2799 }
2679} 2800}
2680 2801
2681static void alc880_uniwill_p53_init_hook(struct hda_codec *codec) 2802static void alc880_uniwill_p53_setup(struct hda_codec *codec)
2682{ 2803{
2683 struct alc_spec *spec = codec->spec; 2804 struct alc_spec *spec = codec->spec;
2684 2805
2685 spec->autocfg.hp_pins[0] = 0x14; 2806 spec->autocfg.hp_pins[0] = 0x14;
2686 spec->autocfg.speaker_pins[0] = 0x15; 2807 spec->autocfg.speaker_pins[0] = 0x15;
2687 alc_automute_amp(codec);
2688} 2808}
2689 2809
2690static void alc880_uniwill_p53_dcvol_automute(struct hda_codec *codec) 2810static void alc880_uniwill_p53_dcvol_automute(struct hda_codec *codec)
@@ -2947,13 +3067,12 @@ static struct hda_verb alc880_lg_init_verbs[] = {
2947}; 3067};
2948 3068
2949/* toggle speaker-output according to the hp-jack state */ 3069/* toggle speaker-output according to the hp-jack state */
2950static void alc880_lg_init_hook(struct hda_codec *codec) 3070static void alc880_lg_setup(struct hda_codec *codec)
2951{ 3071{
2952 struct alc_spec *spec = codec->spec; 3072 struct alc_spec *spec = codec->spec;
2953 3073
2954 spec->autocfg.hp_pins[0] = 0x1b; 3074 spec->autocfg.hp_pins[0] = 0x1b;
2955 spec->autocfg.speaker_pins[0] = 0x17; 3075 spec->autocfg.speaker_pins[0] = 0x17;
2956 alc_automute_amp(codec);
2957} 3076}
2958 3077
2959/* 3078/*
@@ -3032,13 +3151,12 @@ static struct hda_verb alc880_lg_lw_init_verbs[] = {
3032}; 3151};
3033 3152
3034/* toggle speaker-output according to the hp-jack state */ 3153/* toggle speaker-output according to the hp-jack state */
3035static void alc880_lg_lw_init_hook(struct hda_codec *codec) 3154static void alc880_lg_lw_setup(struct hda_codec *codec)
3036{ 3155{
3037 struct alc_spec *spec = codec->spec; 3156 struct alc_spec *spec = codec->spec;
3038 3157
3039 spec->autocfg.hp_pins[0] = 0x1b; 3158 spec->autocfg.hp_pins[0] = 0x1b;
3040 spec->autocfg.speaker_pins[0] = 0x14; 3159 spec->autocfg.speaker_pins[0] = 0x14;
3041 alc_automute_amp(codec);
3042} 3160}
3043 3161
3044static struct snd_kcontrol_new alc880_medion_rim_mixer[] = { 3162static struct snd_kcontrol_new alc880_medion_rim_mixer[] = {
@@ -3104,13 +3222,12 @@ static void alc880_medion_rim_unsol_event(struct hda_codec *codec,
3104 alc880_medion_rim_automute(codec); 3222 alc880_medion_rim_automute(codec);
3105} 3223}
3106 3224
3107static void alc880_medion_rim_init_hook(struct hda_codec *codec) 3225static void alc880_medion_rim_setup(struct hda_codec *codec)
3108{ 3226{
3109 struct alc_spec *spec = codec->spec; 3227 struct alc_spec *spec = codec->spec;
3110 3228
3111 spec->autocfg.hp_pins[0] = 0x14; 3229 spec->autocfg.hp_pins[0] = 0x14;
3112 spec->autocfg.speaker_pins[0] = 0x1b; 3230 spec->autocfg.speaker_pins[0] = 0x1b;
3113 alc880_medion_rim_automute(codec);
3114} 3231}
3115 3232
3116#ifdef CONFIG_SND_HDA_POWER_SAVE 3233#ifdef CONFIG_SND_HDA_POWER_SAVE
@@ -3977,7 +4094,8 @@ static struct alc_config_preset alc880_presets[] = {
3977 .channel_mode = alc880_2_jack_modes, 4094 .channel_mode = alc880_2_jack_modes,
3978 .input_mux = &alc880_f1734_capture_source, 4095 .input_mux = &alc880_f1734_capture_source,
3979 .unsol_event = alc880_uniwill_p53_unsol_event, 4096 .unsol_event = alc880_uniwill_p53_unsol_event,
3980 .init_hook = alc880_uniwill_p53_init_hook, 4097 .setup = alc880_uniwill_p53_setup,
4098 .init_hook = alc_automute_amp,
3981 }, 4099 },
3982 [ALC880_ASUS] = { 4100 [ALC880_ASUS] = {
3983 .mixers = { alc880_asus_mixer }, 4101 .mixers = { alc880_asus_mixer },
@@ -4054,6 +4172,7 @@ static struct alc_config_preset alc880_presets[] = {
4054 .need_dac_fix = 1, 4172 .need_dac_fix = 1,
4055 .input_mux = &alc880_capture_source, 4173 .input_mux = &alc880_capture_source,
4056 .unsol_event = alc880_uniwill_unsol_event, 4174 .unsol_event = alc880_uniwill_unsol_event,
4175 .setup = alc880_uniwill_setup,
4057 .init_hook = alc880_uniwill_init_hook, 4176 .init_hook = alc880_uniwill_init_hook,
4058 }, 4177 },
4059 [ALC880_UNIWILL_P53] = { 4178 [ALC880_UNIWILL_P53] = {
@@ -4066,7 +4185,8 @@ static struct alc_config_preset alc880_presets[] = {
4066 .channel_mode = alc880_threestack_modes, 4185 .channel_mode = alc880_threestack_modes,
4067 .input_mux = &alc880_capture_source, 4186 .input_mux = &alc880_capture_source,
4068 .unsol_event = alc880_uniwill_p53_unsol_event, 4187 .unsol_event = alc880_uniwill_p53_unsol_event,
4069 .init_hook = alc880_uniwill_p53_init_hook, 4188 .setup = alc880_uniwill_p53_setup,
4189 .init_hook = alc_automute_amp,
4070 }, 4190 },
4071 [ALC880_FUJITSU] = { 4191 [ALC880_FUJITSU] = {
4072 .mixers = { alc880_fujitsu_mixer }, 4192 .mixers = { alc880_fujitsu_mixer },
@@ -4080,7 +4200,8 @@ static struct alc_config_preset alc880_presets[] = {
4080 .channel_mode = alc880_2_jack_modes, 4200 .channel_mode = alc880_2_jack_modes,
4081 .input_mux = &alc880_capture_source, 4201 .input_mux = &alc880_capture_source,
4082 .unsol_event = alc880_uniwill_p53_unsol_event, 4202 .unsol_event = alc880_uniwill_p53_unsol_event,
4083 .init_hook = alc880_uniwill_p53_init_hook, 4203 .setup = alc880_uniwill_p53_setup,
4204 .init_hook = alc_automute_amp,
4084 }, 4205 },
4085 [ALC880_CLEVO] = { 4206 [ALC880_CLEVO] = {
4086 .mixers = { alc880_three_stack_mixer }, 4207 .mixers = { alc880_three_stack_mixer },
@@ -4106,7 +4227,8 @@ static struct alc_config_preset alc880_presets[] = {
4106 .need_dac_fix = 1, 4227 .need_dac_fix = 1,
4107 .input_mux = &alc880_lg_capture_source, 4228 .input_mux = &alc880_lg_capture_source,
4108 .unsol_event = alc_automute_amp_unsol_event, 4229 .unsol_event = alc_automute_amp_unsol_event,
4109 .init_hook = alc880_lg_init_hook, 4230 .setup = alc880_lg_setup,
4231 .init_hook = alc_automute_amp,
4110#ifdef CONFIG_SND_HDA_POWER_SAVE 4232#ifdef CONFIG_SND_HDA_POWER_SAVE
4111 .loopbacks = alc880_lg_loopbacks, 4233 .loopbacks = alc880_lg_loopbacks,
4112#endif 4234#endif
@@ -4122,7 +4244,8 @@ static struct alc_config_preset alc880_presets[] = {
4122 .channel_mode = alc880_lg_lw_modes, 4244 .channel_mode = alc880_lg_lw_modes,
4123 .input_mux = &alc880_lg_lw_capture_source, 4245 .input_mux = &alc880_lg_lw_capture_source,
4124 .unsol_event = alc_automute_amp_unsol_event, 4246 .unsol_event = alc_automute_amp_unsol_event,
4125 .init_hook = alc880_lg_lw_init_hook, 4247 .setup = alc880_lg_lw_setup,
4248 .init_hook = alc_automute_amp,
4126 }, 4249 },
4127 [ALC880_MEDION_RIM] = { 4250 [ALC880_MEDION_RIM] = {
4128 .mixers = { alc880_medion_rim_mixer }, 4251 .mixers = { alc880_medion_rim_mixer },
@@ -4136,7 +4259,8 @@ static struct alc_config_preset alc880_presets[] = {
4136 .channel_mode = alc880_2_jack_modes, 4259 .channel_mode = alc880_2_jack_modes,
4137 .input_mux = &alc880_medion_rim_capture_source, 4260 .input_mux = &alc880_medion_rim_capture_source,
4138 .unsol_event = alc880_medion_rim_unsol_event, 4261 .unsol_event = alc880_medion_rim_unsol_event,
4139 .init_hook = alc880_medion_rim_init_hook, 4262 .setup = alc880_medion_rim_setup,
4263 .init_hook = alc880_medion_rim_automute,
4140 }, 4264 },
4141#ifdef CONFIG_SND_DEBUG 4265#ifdef CONFIG_SND_DEBUG
4142 [ALC880_TEST] = { 4266 [ALC880_TEST] = {
@@ -4189,8 +4313,6 @@ static int add_control(struct alc_spec *spec, int type, const char *name,
4189#define alc880_fixed_pin_idx(nid) ((nid) - 0x14) 4313#define alc880_fixed_pin_idx(nid) ((nid) - 0x14)
4190#define alc880_is_multi_pin(nid) ((nid) >= 0x18) 4314#define alc880_is_multi_pin(nid) ((nid) >= 0x18)
4191#define alc880_multi_pin_idx(nid) ((nid) - 0x18) 4315#define alc880_multi_pin_idx(nid) ((nid) - 0x18)
4192#define alc880_is_input_pin(nid) ((nid) >= 0x18)
4193#define alc880_input_pin_idx(nid) ((nid) - 0x18)
4194#define alc880_idx_to_dac(nid) ((nid) + 0x02) 4316#define alc880_idx_to_dac(nid) ((nid) + 0x02)
4195#define alc880_dac_to_idx(nid) ((nid) - 0x02) 4317#define alc880_dac_to_idx(nid) ((nid) - 0x02)
4196#define alc880_idx_to_mixer(nid) ((nid) + 0x0c) 4318#define alc880_idx_to_mixer(nid) ((nid) + 0x0c)
@@ -4278,13 +4400,19 @@ static int alc880_auto_create_multi_out_ctls(struct alc_spec *spec,
4278 if (err < 0) 4400 if (err < 0)
4279 return err; 4401 return err;
4280 } else { 4402 } else {
4281 sprintf(name, "%s Playback Volume", chname[i]); 4403 const char *pfx;
4404 if (cfg->line_outs == 1 &&
4405 cfg->line_out_type == AUTO_PIN_SPEAKER_OUT)
4406 pfx = "Speaker";
4407 else
4408 pfx = chname[i];
4409 sprintf(name, "%s Playback Volume", pfx);
4282 err = add_control(spec, ALC_CTL_WIDGET_VOL, name, 4410 err = add_control(spec, ALC_CTL_WIDGET_VOL, name,
4283 HDA_COMPOSE_AMP_VAL(nid, 3, 0, 4411 HDA_COMPOSE_AMP_VAL(nid, 3, 0,
4284 HDA_OUTPUT)); 4412 HDA_OUTPUT));
4285 if (err < 0) 4413 if (err < 0)
4286 return err; 4414 return err;
4287 sprintf(name, "%s Playback Switch", chname[i]); 4415 sprintf(name, "%s Playback Switch", pfx);
4288 err = add_control(spec, ALC_CTL_BIND_MUTE, name, 4416 err = add_control(spec, ALC_CTL_BIND_MUTE, name,
4289 HDA_COMPOSE_AMP_VAL(nid, 3, 2, 4417 HDA_COMPOSE_AMP_VAL(nid, 3, 2,
4290 HDA_INPUT)); 4418 HDA_INPUT));
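/*
 * Control-naming tweak: when the parsed config has a single line-out that the
 * BIOS marks as a speaker (AUTO_PIN_SPEAKER_OUT), the volume/switch controls
 * are labelled "Speaker Playback ..." instead of the generic "Front Playback
 * ...", so the mixer name matches the physical output.
 */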
@@ -4358,31 +4486,61 @@ static int new_analog_input(struct alc_spec *spec, hda_nid_t pin,
4358 return 0; 4486 return 0;
4359} 4487}
4360 4488
4489static int alc_is_input_pin(struct hda_codec *codec, hda_nid_t nid)
4490{
4491 unsigned int pincap = snd_hda_query_pin_caps(codec, nid);
4492 return (pincap & AC_PINCAP_IN) != 0;
4493}
4494
4361/* create playback/capture controls for input pins */ 4495/* create playback/capture controls for input pins */
4362static int alc880_auto_create_analog_input_ctls(struct alc_spec *spec, 4496static int alc_auto_create_input_ctls(struct hda_codec *codec,
4363 const struct auto_pin_cfg *cfg) 4497 const struct auto_pin_cfg *cfg,
4498 hda_nid_t mixer,
4499 hda_nid_t cap1, hda_nid_t cap2)
4364{ 4500{
4501 struct alc_spec *spec = codec->spec;
4365 struct hda_input_mux *imux = &spec->private_imux[0]; 4502 struct hda_input_mux *imux = &spec->private_imux[0];
4366 int i, err, idx; 4503 int i, err, idx;
4367 4504
4368 for (i = 0; i < AUTO_PIN_LAST; i++) { 4505 for (i = 0; i < AUTO_PIN_LAST; i++) {
4369 if (alc880_is_input_pin(cfg->input_pins[i])) { 4506 hda_nid_t pin;
4370 idx = alc880_input_pin_idx(cfg->input_pins[i]); 4507
4371 err = new_analog_input(spec, cfg->input_pins[i], 4508 pin = cfg->input_pins[i];
4372 auto_pin_cfg_labels[i], 4509 if (!alc_is_input_pin(codec, pin))
4373 idx, 0x0b); 4510 continue;
4374 if (err < 0) 4511
4375 return err; 4512 if (mixer) {
4513 idx = get_connection_index(codec, mixer, pin);
4514 if (idx >= 0) {
4515 err = new_analog_input(spec, pin,
4516 auto_pin_cfg_labels[i],
4517 idx, mixer);
4518 if (err < 0)
4519 return err;
4520 }
4521 }
4522
4523 if (!cap1)
4524 continue;
4525 idx = get_connection_index(codec, cap1, pin);
4526 if (idx < 0 && cap2)
4527 idx = get_connection_index(codec, cap2, pin);
4528 if (idx >= 0) {
4376 imux->items[imux->num_items].label = 4529 imux->items[imux->num_items].label =
4377 auto_pin_cfg_labels[i]; 4530 auto_pin_cfg_labels[i];
4378 imux->items[imux->num_items].index = 4531 imux->items[imux->num_items].index = idx;
4379 alc880_input_pin_idx(cfg->input_pins[i]);
4380 imux->num_items++; 4532 imux->num_items++;
4381 } 4533 }
4382 } 4534 }
4383 return 0; 4535 return 0;
4384} 4536}
4385 4537
4538static int alc880_auto_create_input_ctls(struct hda_codec *codec,
4539 const struct auto_pin_cfg *cfg)
4540{
4541 return alc_auto_create_input_ctls(codec, cfg, 0x0b, 0x08, 0x09);
4542}
4543
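/*
 * alc_auto_create_input_ctls() generalizes the per-codec input-control
 * builders: instead of deriving indices from hard-coded NID arithmetic, it
 * checks AC_PINCAP_IN on each configured pin and looks the pin up in the
 * connection lists of the analog mixer and of up to two capture-source
 * widgets.  Each codec family then only passes its widget NIDs, e.g.
 * 0x0b/0x08/0x09 for ALC880 here and 0x07/0x04/0x05 for ALC260 later in
 * this patch.
 */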
4386static void alc_set_pin_output(struct hda_codec *codec, hda_nid_t nid, 4544static void alc_set_pin_output(struct hda_codec *codec, hda_nid_t nid,
4387 unsigned int pin_type) 4545 unsigned int pin_type)
4388{ 4546{
@@ -4448,7 +4606,7 @@ static void alc880_auto_init_analog_input(struct hda_codec *codec)
4448 4606
4449 for (i = 0; i < AUTO_PIN_LAST; i++) { 4607 for (i = 0; i < AUTO_PIN_LAST; i++) {
4450 hda_nid_t nid = spec->autocfg.input_pins[i]; 4608 hda_nid_t nid = spec->autocfg.input_pins[i];
4451 if (alc880_is_input_pin(nid)) { 4609 if (alc_is_input_pin(codec, nid)) {
4452 alc_set_input_pin(codec, nid, i); 4610 alc_set_input_pin(codec, nid, i);
4453 if (nid != ALC880_PIN_CD_NID && 4611 if (nid != ALC880_PIN_CD_NID &&
4454 (get_wcaps(codec, nid) & AC_WCAP_OUT_AMP)) 4612 (get_wcaps(codec, nid) & AC_WCAP_OUT_AMP))
@@ -4491,7 +4649,7 @@ static int alc880_parse_auto_config(struct hda_codec *codec)
4491 "Headphone"); 4649 "Headphone");
4492 if (err < 0) 4650 if (err < 0)
4493 return err; 4651 return err;
4494 err = alc880_auto_create_analog_input_ctls(spec, &spec->autocfg); 4652 err = alc880_auto_create_input_ctls(codec, &spec->autocfg);
4495 if (err < 0) 4653 if (err < 0)
4496 return err; 4654 return err;
4497 4655
@@ -4505,12 +4663,6 @@ static int alc880_parse_auto_config(struct hda_codec *codec)
4505 &dig_nid, 1); 4663 &dig_nid, 1);
4506 if (err < 0) 4664 if (err < 0)
4507 continue; 4665 continue;
4508 if (dig_nid > 0x7f) {
4509 printk(KERN_ERR "alc880_auto: invalid dig_nid "
4510 "connection 0x%x for NID 0x%x\n", dig_nid,
4511 spec->autocfg.dig_out_pins[i]);
4512 continue;
4513 }
4514 if (!i) 4666 if (!i)
4515 spec->multiout.dig_out_nid = dig_nid; 4667 spec->multiout.dig_out_nid = dig_nid;
4516 else { 4668 else {
@@ -4547,8 +4699,42 @@ static void alc880_auto_init(struct hda_codec *codec)
4547 alc_inithook(codec); 4699 alc_inithook(codec);
4548} 4700}
4549 4701
4550static void set_capture_mixer(struct alc_spec *spec) 4702/* check the ADC/MUX contains all input pins; some ADC/MUX contains only
4703 * one of two digital mic pins, e.g. on ALC272
4704 */
4705static void fixup_automic_adc(struct hda_codec *codec)
4551{ 4706{
4707 struct alc_spec *spec = codec->spec;
4708 int i;
4709
4710 for (i = 0; i < spec->num_adc_nids; i++) {
4711 hda_nid_t cap = spec->capsrc_nids ?
4712 spec->capsrc_nids[i] : spec->adc_nids[i];
4713 int iidx, eidx;
4714
4715 iidx = get_connection_index(codec, cap, spec->int_mic.pin);
4716 if (iidx < 0)
4717 continue;
4718 eidx = get_connection_index(codec, cap, spec->ext_mic.pin);
4719 if (eidx < 0)
4720 continue;
4721 spec->int_mic.mux_idx = iidx;
4722 spec->ext_mic.mux_idx = eidx;
4723 if (spec->capsrc_nids)
4724 spec->capsrc_nids += i;
4725 spec->adc_nids += i;
4726 spec->num_adc_nids = 1;
4727 return;
4728 }
4729 snd_printd(KERN_INFO "hda_codec: %s: "
4730 "No ADC/MUX containing both 0x%x and 0x%x pins\n",
4731 codec->chip_name, spec->int_mic.pin, spec->ext_mic.pin);
4732 spec->auto_mic = 0; /* disable auto-mic to be sure */
4733}
4734
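/*
 * fixup_automic_adc() runs when auto-mic is active: it walks the ADC (or
 * capture-source) widgets until it finds one whose connection list contains
 * both the internal and the external mic pin, records those indices in
 * int_mic/ext_mic.mux_idx, and narrows adc_nids/capsrc_nids to that single
 * entry.  If no such widget exists (e.g. an ALC272 where each ADC sees only
 * one of the two digital mic pins), auto-mic is disabled again.
 */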
4735static void set_capture_mixer(struct hda_codec *codec)
4736{
4737 struct alc_spec *spec = codec->spec;
4552 static struct snd_kcontrol_new *caps[2][3] = { 4738 static struct snd_kcontrol_new *caps[2][3] = {
4553 { alc_capture_mixer_nosrc1, 4739 { alc_capture_mixer_nosrc1,
4554 alc_capture_mixer_nosrc2, 4740 alc_capture_mixer_nosrc2,
@@ -4559,7 +4745,10 @@ static void set_capture_mixer(struct alc_spec *spec)
4559 }; 4745 };
4560 if (spec->num_adc_nids > 0 && spec->num_adc_nids <= 3) { 4746 if (spec->num_adc_nids > 0 && spec->num_adc_nids <= 3) {
4561 int mux; 4747 int mux;
4562 if (spec->input_mux && spec->input_mux->num_items > 1) 4748 if (spec->auto_mic) {
4749 mux = 0;
4750 fixup_automic_adc(codec);
4751 } else if (spec->input_mux && spec->input_mux->num_items > 1)
4563 mux = 1; 4752 mux = 1;
4564 else 4753 else
4565 mux = 0; 4754 mux = 0;
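/*
 * With auto-mic active the input source is switched inside the driver, so
 * set_capture_mixer() selects the capture-mixer variant without a source
 * selector (mux = 0) and calls fixup_automic_adc() to settle on the one
 * usable ADC; otherwise a source-selector control is only exposed when the
 * input mux actually offers more than one item.
 */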
@@ -4590,8 +4779,8 @@ static int patch_alc880(struct hda_codec *codec)
4590 alc880_models, 4779 alc880_models,
4591 alc880_cfg_tbl); 4780 alc880_cfg_tbl);
4592 if (board_config < 0) { 4781 if (board_config < 0) {
4593 printk(KERN_INFO "hda_codec: Unknown model for %s, " 4782 printk(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
4594 "trying auto-probe from BIOS...\n", codec->chip_name); 4783 codec->chip_name);
4595 board_config = ALC880_AUTO; 4784 board_config = ALC880_AUTO;
4596 } 4785 }
4597 4786
@@ -4616,7 +4805,7 @@ static int patch_alc880(struct hda_codec *codec)
4616 } 4805 }
4617 4806
4618 if (board_config != ALC880_AUTO) 4807 if (board_config != ALC880_AUTO)
4619 setup_preset(spec, &alc880_presets[board_config]); 4808 setup_preset(codec, &alc880_presets[board_config]);
4620 4809
4621 spec->stream_analog_playback = &alc880_pcm_analog_playback; 4810 spec->stream_analog_playback = &alc880_pcm_analog_playback;
4622 spec->stream_analog_capture = &alc880_pcm_analog_capture; 4811 spec->stream_analog_capture = &alc880_pcm_analog_capture;
@@ -4629,7 +4818,7 @@ static int patch_alc880(struct hda_codec *codec)
4629 /* check whether NID 0x07 is valid */ 4818 /* check whether NID 0x07 is valid */
4630 unsigned int wcap = get_wcaps(codec, alc880_adc_nids[0]); 4819 unsigned int wcap = get_wcaps(codec, alc880_adc_nids[0]);
4631 /* get type */ 4820 /* get type */
4632 wcap = (wcap & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT; 4821 wcap = get_wcaps_type(wcap);
4633 if (wcap != AC_WID_AUD_IN) { 4822 if (wcap != AC_WID_AUD_IN) {
4634 spec->adc_nids = alc880_adc_nids_alt; 4823 spec->adc_nids = alc880_adc_nids_alt;
4635 spec->num_adc_nids = ARRAY_SIZE(alc880_adc_nids_alt); 4824 spec->num_adc_nids = ARRAY_SIZE(alc880_adc_nids_alt);
@@ -4638,7 +4827,7 @@ static int patch_alc880(struct hda_codec *codec)
4638 spec->num_adc_nids = ARRAY_SIZE(alc880_adc_nids); 4827 spec->num_adc_nids = ARRAY_SIZE(alc880_adc_nids);
4639 } 4828 }
4640 } 4829 }
4641 set_capture_mixer(spec); 4830 set_capture_mixer(codec);
4642 set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT); 4831 set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT);
4643 4832
4644 spec->vmaster_nid = 0x0c; 4833 spec->vmaster_nid = 0x0c;
@@ -5830,7 +6019,14 @@ static int alc260_auto_create_multi_out_ctls(struct alc_spec *spec,
5830 6019
5831 nid = cfg->line_out_pins[0]; 6020 nid = cfg->line_out_pins[0];
5832 if (nid) { 6021 if (nid) {
5833 err = alc260_add_playback_controls(spec, nid, "Front", &vols); 6022 const char *pfx;
6023 if (!cfg->speaker_pins[0] && !cfg->hp_pins[0])
6024 pfx = "Master";
6025 else if (cfg->line_out_type == AUTO_PIN_SPEAKER_OUT)
6026 pfx = "Speaker";
6027 else
6028 pfx = "Front";
6029 err = alc260_add_playback_controls(spec, nid, pfx, &vols);
5834 if (err < 0) 6030 if (err < 0)
5835 return err; 6031 return err;
5836 } 6032 }
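/*
 * Same naming logic for the ALC260 auto-parser: the single line-out becomes
 * "Master" when it is the only output at all, "Speaker" when the BIOS marks
 * it as a speaker pin, and the traditional "Front" otherwise.
 */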
@@ -5853,39 +6049,10 @@ static int alc260_auto_create_multi_out_ctls(struct alc_spec *spec,
5853} 6049}
5854 6050
5855/* create playback/capture controls for input pins */ 6051/* create playback/capture controls for input pins */
5856static int alc260_auto_create_analog_input_ctls(struct alc_spec *spec, 6052static int alc260_auto_create_input_ctls(struct hda_codec *codec,
5857 const struct auto_pin_cfg *cfg) 6053 const struct auto_pin_cfg *cfg)
5858{ 6054{
5859 struct hda_input_mux *imux = &spec->private_imux[0]; 6055 return alc_auto_create_input_ctls(codec, cfg, 0x07, 0x04, 0x05);
5860 int i, err, idx;
5861
5862 for (i = 0; i < AUTO_PIN_LAST; i++) {
5863 if (cfg->input_pins[i] >= 0x12) {
5864 idx = cfg->input_pins[i] - 0x12;
5865 err = new_analog_input(spec, cfg->input_pins[i],
5866 auto_pin_cfg_labels[i], idx,
5867 0x07);
5868 if (err < 0)
5869 return err;
5870 imux->items[imux->num_items].label =
5871 auto_pin_cfg_labels[i];
5872 imux->items[imux->num_items].index = idx;
5873 imux->num_items++;
5874 }
5875 if (cfg->input_pins[i] >= 0x0f && cfg->input_pins[i] <= 0x10){
5876 idx = cfg->input_pins[i] - 0x09;
5877 err = new_analog_input(spec, cfg->input_pins[i],
5878 auto_pin_cfg_labels[i], idx,
5879 0x07);
5880 if (err < 0)
5881 return err;
5882 imux->items[imux->num_items].label =
5883 auto_pin_cfg_labels[i];
5884 imux->items[imux->num_items].index = idx;
5885 imux->num_items++;
5886 }
5887 }
5888 return 0;
5889} 6056}
5890 6057
5891static void alc260_auto_set_output_and_unmute(struct hda_codec *codec, 6058static void alc260_auto_set_output_and_unmute(struct hda_codec *codec,
@@ -5999,7 +6166,7 @@ static int alc260_parse_auto_config(struct hda_codec *codec)
5999 return err; 6166 return err;
6000 if (!spec->kctls.list) 6167 if (!spec->kctls.list)
6001 return 0; /* can't find valid BIOS pin config */ 6168 return 0; /* can't find valid BIOS pin config */
6002 err = alc260_auto_create_analog_input_ctls(spec, &spec->autocfg); 6169 err = alc260_auto_create_input_ctls(codec, &spec->autocfg);
6003 if (err < 0) 6170 if (err < 0)
6004 return err; 6171 return err;
6005 6172
@@ -6234,8 +6401,7 @@ static int patch_alc260(struct hda_codec *codec)
6234 alc260_models, 6401 alc260_models,
6235 alc260_cfg_tbl); 6402 alc260_cfg_tbl);
6236 if (board_config < 0) { 6403 if (board_config < 0) {
6237 snd_printd(KERN_INFO "hda_codec: Unknown model for %s, " 6404 snd_printd(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
6238 "trying auto-probe from BIOS...\n",
6239 codec->chip_name); 6405 codec->chip_name);
6240 board_config = ALC260_AUTO; 6406 board_config = ALC260_AUTO;
6241 } 6407 }
@@ -6261,7 +6427,7 @@ static int patch_alc260(struct hda_codec *codec)
6261 } 6427 }
6262 6428
6263 if (board_config != ALC260_AUTO) 6429 if (board_config != ALC260_AUTO)
6264 setup_preset(spec, &alc260_presets[board_config]); 6430 setup_preset(codec, &alc260_presets[board_config]);
6265 6431
6266 spec->stream_analog_playback = &alc260_pcm_analog_playback; 6432 spec->stream_analog_playback = &alc260_pcm_analog_playback;
6267 spec->stream_analog_capture = &alc260_pcm_analog_capture; 6433 spec->stream_analog_capture = &alc260_pcm_analog_capture;
@@ -6272,7 +6438,7 @@ static int patch_alc260(struct hda_codec *codec)
6272 if (!spec->adc_nids && spec->input_mux) { 6438 if (!spec->adc_nids && spec->input_mux) {
6273 /* check whether NID 0x04 is valid */ 6439 /* check whether NID 0x04 is valid */
6274 unsigned int wcap = get_wcaps(codec, 0x04); 6440 unsigned int wcap = get_wcaps(codec, 0x04);
6275 wcap = (wcap & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT; 6441 wcap = get_wcaps_type(wcap);
6276 /* get type */ 6442 /* get type */
6277 if (wcap != AC_WID_AUD_IN || spec->input_mux->num_items == 1) { 6443 if (wcap != AC_WID_AUD_IN || spec->input_mux->num_items == 1) {
6278 spec->adc_nids = alc260_adc_nids_alt; 6444 spec->adc_nids = alc260_adc_nids_alt;
@@ -6282,7 +6448,7 @@ static int patch_alc260(struct hda_codec *codec)
6282 spec->num_adc_nids = ARRAY_SIZE(alc260_adc_nids); 6448 spec->num_adc_nids = ARRAY_SIZE(alc260_adc_nids);
6283 } 6449 }
6284 } 6450 }
6285 set_capture_mixer(spec); 6451 set_capture_mixer(codec);
6286 set_beep_amp(spec, 0x07, 0x05, HDA_INPUT); 6452 set_beep_amp(spec, 0x07, 0x05, HDA_INPUT);
6287 6453
6288 spec->vmaster_nid = 0x08; 6454 spec->vmaster_nid = 0x08;
@@ -6301,7 +6467,7 @@ static int patch_alc260(struct hda_codec *codec)
6301 6467
6302 6468
6303/* 6469/*
6304 * ALC882 support 6470 * ALC882/883/885/888/889 support
6305 * 6471 *
6306 * ALC882 is almost identical with ALC880 but has cleaner and more flexible 6472 * ALC882 is almost identical with ALC880 but has cleaner and more flexible
6307 * configuration. Each pin widget can choose any input DACs and a mixer. 6473 * configuration. Each pin widget can choose any input DACs and a mixer.
@@ -6313,22 +6479,35 @@ static int patch_alc260(struct hda_codec *codec)
6313 */ 6479 */
6314#define ALC882_DIGOUT_NID 0x06 6480#define ALC882_DIGOUT_NID 0x06
6315#define ALC882_DIGIN_NID 0x0a 6481#define ALC882_DIGIN_NID 0x0a
6482#define ALC883_DIGOUT_NID ALC882_DIGOUT_NID
6483#define ALC883_DIGIN_NID ALC882_DIGIN_NID
6484#define ALC1200_DIGOUT_NID 0x10
6485
6316 6486
6317static struct hda_channel_mode alc882_ch_modes[1] = { 6487static struct hda_channel_mode alc882_ch_modes[1] = {
6318 { 8, NULL } 6488 { 8, NULL }
6319}; 6489};
6320 6490
6491/* DACs */
6321static hda_nid_t alc882_dac_nids[4] = { 6492static hda_nid_t alc882_dac_nids[4] = {
6322 /* front, rear, clfe, rear_surr */ 6493 /* front, rear, clfe, rear_surr */
6323 0x02, 0x03, 0x04, 0x05 6494 0x02, 0x03, 0x04, 0x05
6324}; 6495};
6496#define alc883_dac_nids alc882_dac_nids
6325 6497
6326/* identical with ALC880 */ 6498/* ADCs */
6327#define alc882_adc_nids alc880_adc_nids 6499#define alc882_adc_nids alc880_adc_nids
6328#define alc882_adc_nids_alt alc880_adc_nids_alt 6500#define alc882_adc_nids_alt alc880_adc_nids_alt
6501#define alc883_adc_nids alc882_adc_nids_alt
6502static hda_nid_t alc883_adc_nids_alt[1] = { 0x08 };
6503static hda_nid_t alc883_adc_nids_rev[2] = { 0x09, 0x08 };
6504#define alc889_adc_nids alc880_adc_nids
6329 6505
6330static hda_nid_t alc882_capsrc_nids[3] = { 0x24, 0x23, 0x22 }; 6506static hda_nid_t alc882_capsrc_nids[3] = { 0x24, 0x23, 0x22 };
6331static hda_nid_t alc882_capsrc_nids_alt[2] = { 0x23, 0x22 }; 6507static hda_nid_t alc882_capsrc_nids_alt[2] = { 0x23, 0x22 };
6508#define alc883_capsrc_nids alc882_capsrc_nids_alt
6509static hda_nid_t alc883_capsrc_nids_rev[2] = { 0x22, 0x23 };
6510#define alc889_capsrc_nids alc882_capsrc_nids
6332 6511
6333/* input MUX */ 6512/* input MUX */
6334/* FIXME: should be a matrix-type input source selection */ 6513/* FIXME: should be a matrix-type input source selection */
@@ -6343,6 +6522,17 @@ static struct hda_input_mux alc882_capture_source = {
6343 }, 6522 },
6344}; 6523};
6345 6524
6525#define alc883_capture_source alc882_capture_source
6526
6527static struct hda_input_mux alc889_capture_source = {
6528 .num_items = 3,
6529 .items = {
6530 { "Front Mic", 0x0 },
6531 { "Mic", 0x3 },
6532 { "Line", 0x2 },
6533 },
6534};
6535
6346static struct hda_input_mux mb5_capture_source = { 6536static struct hda_input_mux mb5_capture_source = {
6347 .num_items = 3, 6537 .num_items = 3,
6348 .items = { 6538 .items = {
@@ -6352,6 +6542,77 @@ static struct hda_input_mux mb5_capture_source = {
6352 }, 6542 },
6353}; 6543};
6354 6544
6545static struct hda_input_mux alc883_3stack_6ch_intel = {
6546 .num_items = 4,
6547 .items = {
6548 { "Mic", 0x1 },
6549 { "Front Mic", 0x0 },
6550 { "Line", 0x2 },
6551 { "CD", 0x4 },
6552 },
6553};
6554
6555static struct hda_input_mux alc883_lenovo_101e_capture_source = {
6556 .num_items = 2,
6557 .items = {
6558 { "Mic", 0x1 },
6559 { "Line", 0x2 },
6560 },
6561};
6562
6563static struct hda_input_mux alc883_lenovo_nb0763_capture_source = {
6564 .num_items = 4,
6565 .items = {
6566 { "Mic", 0x0 },
6567 { "iMic", 0x1 },
6568 { "Line", 0x2 },
6569 { "CD", 0x4 },
6570 },
6571};
6572
6573static struct hda_input_mux alc883_fujitsu_pi2515_capture_source = {
6574 .num_items = 2,
6575 .items = {
6576 { "Mic", 0x0 },
6577 { "Int Mic", 0x1 },
6578 },
6579};
6580
6581static struct hda_input_mux alc883_lenovo_sky_capture_source = {
6582 .num_items = 3,
6583 .items = {
6584 { "Mic", 0x0 },
6585 { "Front Mic", 0x1 },
6586 { "Line", 0x4 },
6587 },
6588};
6589
6590static struct hda_input_mux alc883_asus_eee1601_capture_source = {
6591 .num_items = 2,
6592 .items = {
6593 { "Mic", 0x0 },
6594 { "Line", 0x2 },
6595 },
6596};
6597
6598static struct hda_input_mux alc889A_mb31_capture_source = {
6599 .num_items = 2,
6600 .items = {
6601 { "Mic", 0x0 },
6602 /* Front Mic (0x01) unused */
6603 { "Line", 0x2 },
6604 /* Line 2 (0x03) unused */
 6605		/* CD (0x04) unused? */
6606 },
6607};
6608
6609/*
6610 * 2ch mode
6611 */
6612static struct hda_channel_mode alc883_3ST_2ch_modes[1] = {
6613 { 2, NULL }
6614};
6615
6355/* 6616/*
6356 * 2ch mode 6617 * 2ch mode
6357 */ 6618 */
@@ -6364,6 +6625,18 @@ static struct hda_verb alc882_3ST_ch2_init[] = {
6364}; 6625};
6365 6626
6366/* 6627/*
6628 * 4ch mode
6629 */
6630static struct hda_verb alc882_3ST_ch4_init[] = {
6631 { 0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80 },
6632 { 0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
6633 { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
6634 { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
6635 { 0x1a, AC_VERB_SET_CONNECT_SEL, 0x01 },
6636 { } /* end */
6637};
6638
6639/*
6367 * 6ch mode 6640 * 6ch mode
6368 */ 6641 */
6369static struct hda_verb alc882_3ST_ch6_init[] = { 6642static struct hda_verb alc882_3ST_ch6_init[] = {
@@ -6376,11 +6649,60 @@ static struct hda_verb alc882_3ST_ch6_init[] = {
6376 { } /* end */ 6649 { } /* end */
6377}; 6650};
6378 6651
6379static struct hda_channel_mode alc882_3ST_6ch_modes[2] = { 6652static struct hda_channel_mode alc882_3ST_6ch_modes[3] = {
6380 { 2, alc882_3ST_ch2_init }, 6653 { 2, alc882_3ST_ch2_init },
6654 { 4, alc882_3ST_ch4_init },
6381 { 6, alc882_3ST_ch6_init }, 6655 { 6, alc882_3ST_ch6_init },
6382}; 6656};
6383 6657
6658#define alc883_3ST_6ch_modes alc882_3ST_6ch_modes
6659
6660/*
6661 * 2ch mode
6662 */
6663static struct hda_verb alc883_3ST_ch2_clevo_init[] = {
6664 { 0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP },
6665 { 0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80 },
6666 { 0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
6667 { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN },
6668 { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
6669 { } /* end */
6670};
6671
6672/*
6673 * 4ch mode
6674 */
6675static struct hda_verb alc883_3ST_ch4_clevo_init[] = {
6676 { 0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
6677 { 0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80 },
6678 { 0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
6679 { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
6680 { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
6681 { 0x1a, AC_VERB_SET_CONNECT_SEL, 0x01 },
6682 { } /* end */
6683};
6684
6685/*
6686 * 6ch mode
6687 */
6688static struct hda_verb alc883_3ST_ch6_clevo_init[] = {
6689 { 0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
6690 { 0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
6691 { 0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
6692 { 0x18, AC_VERB_SET_CONNECT_SEL, 0x02 },
6693 { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
6694 { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
6695 { 0x1a, AC_VERB_SET_CONNECT_SEL, 0x01 },
6696 { } /* end */
6697};
6698
6699static struct hda_channel_mode alc883_3ST_6ch_clevo_modes[3] = {
6700 { 2, alc883_3ST_ch2_clevo_init },
6701 { 4, alc883_3ST_ch4_clevo_init },
6702 { 6, alc883_3ST_ch6_clevo_init },
6703};
6704
6705
6384/* 6706/*
6385 * 6ch mode 6707 * 6ch mode
6386 */ 6708 */
@@ -6468,6 +6790,189 @@ static struct hda_channel_mode alc885_mb5_6ch_modes[2] = {
6468 { 6, alc885_mb5_ch6_init }, 6790 { 6, alc885_mb5_ch6_init },
6469}; 6791};
6470 6792
6793
6794/*
6795 * 2ch mode
6796 */
6797static struct hda_verb alc883_4ST_ch2_init[] = {
6798 { 0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
6799 { 0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
6800 { 0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80 },
6801 { 0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
6802 { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN },
6803 { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
6804 { } /* end */
6805};
6806
6807/*
6808 * 4ch mode
6809 */
6810static struct hda_verb alc883_4ST_ch4_init[] = {
6811 { 0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
6812 { 0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
6813 { 0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80 },
6814 { 0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
6815 { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
6816 { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
6817 { 0x1a, AC_VERB_SET_CONNECT_SEL, 0x01 },
6818 { } /* end */
6819};
6820
6821/*
6822 * 6ch mode
6823 */
6824static struct hda_verb alc883_4ST_ch6_init[] = {
6825 { 0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
6826 { 0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
6827 { 0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
6828 { 0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
6829 { 0x18, AC_VERB_SET_CONNECT_SEL, 0x02 },
6830 { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
6831 { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
6832 { 0x1a, AC_VERB_SET_CONNECT_SEL, 0x01 },
6833 { } /* end */
6834};
6835
6836/*
6837 * 8ch mode
6838 */
6839static struct hda_verb alc883_4ST_ch8_init[] = {
6840 { 0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
6841 { 0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
6842 { 0x17, AC_VERB_SET_CONNECT_SEL, 0x03 },
6843 { 0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
6844 { 0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
6845 { 0x18, AC_VERB_SET_CONNECT_SEL, 0x02 },
6846 { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
6847 { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
6848 { 0x1a, AC_VERB_SET_CONNECT_SEL, 0x01 },
6849 { } /* end */
6850};
6851
6852static struct hda_channel_mode alc883_4ST_8ch_modes[4] = {
6853 { 2, alc883_4ST_ch2_init },
6854 { 4, alc883_4ST_ch4_init },
6855 { 6, alc883_4ST_ch6_init },
6856 { 8, alc883_4ST_ch8_init },
6857};
6858
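/*
 * The alc883_4ST_* tables follow the usual hda_channel_mode pattern: each
 * entry pairs a channel count with the verb sequence that re-tasks the shared
 * jacks (0x17/0x18/0x1a) between input and output duty and routes them to the
 * proper DAC via AC_VERB_SET_CONNECT_SEL; switching the channel-mode control
 * replays the verb list of the selected entry.
 */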
6859
6860/*
6861 * 2ch mode
6862 */
6863static struct hda_verb alc883_3ST_ch2_intel_init[] = {
6864 { 0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80 },
6865 { 0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
6866 { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN },
6867 { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
6868 { } /* end */
6869};
6870
6871/*
6872 * 4ch mode
6873 */
6874static struct hda_verb alc883_3ST_ch4_intel_init[] = {
6875 { 0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80 },
6876 { 0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
6877 { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
6878 { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
6879 { 0x1a, AC_VERB_SET_CONNECT_SEL, 0x01 },
6880 { } /* end */
6881};
6882
6883/*
6884 * 6ch mode
6885 */
6886static struct hda_verb alc883_3ST_ch6_intel_init[] = {
6887 { 0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
6888 { 0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
6889 { 0x19, AC_VERB_SET_CONNECT_SEL, 0x02 },
6890 { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
6891 { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
6892 { 0x1a, AC_VERB_SET_CONNECT_SEL, 0x01 },
6893 { } /* end */
6894};
6895
6896static struct hda_channel_mode alc883_3ST_6ch_intel_modes[3] = {
6897 { 2, alc883_3ST_ch2_intel_init },
6898 { 4, alc883_3ST_ch4_intel_init },
6899 { 6, alc883_3ST_ch6_intel_init },
6900};
6901
6902/*
6903 * 2ch mode
6904 */
6905static struct hda_verb alc889_ch2_intel_init[] = {
6906 { 0x14, AC_VERB_SET_CONNECT_SEL, 0x00 },
6907 { 0x19, AC_VERB_SET_CONNECT_SEL, 0x00 },
6908 { 0x16, AC_VERB_SET_CONNECT_SEL, 0x00 },
6909 { 0x17, AC_VERB_SET_CONNECT_SEL, 0x00 },
6910 { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN },
6911 { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
6912 { } /* end */
6913};
6914
6915/*
6916 * 6ch mode
6917 */
6918static struct hda_verb alc889_ch6_intel_init[] = {
6919 { 0x14, AC_VERB_SET_CONNECT_SEL, 0x00 },
6920 { 0x19, AC_VERB_SET_CONNECT_SEL, 0x01 },
6921 { 0x16, AC_VERB_SET_CONNECT_SEL, 0x02 },
6922 { 0x17, AC_VERB_SET_CONNECT_SEL, 0x03 },
6923 { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN },
6924 { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
6925 { } /* end */
6926};
6927
6928/*
6929 * 8ch mode
6930 */
6931static struct hda_verb alc889_ch8_intel_init[] = {
6932 { 0x14, AC_VERB_SET_CONNECT_SEL, 0x00 },
6933 { 0x19, AC_VERB_SET_CONNECT_SEL, 0x01 },
6934 { 0x16, AC_VERB_SET_CONNECT_SEL, 0x02 },
6935 { 0x17, AC_VERB_SET_CONNECT_SEL, 0x03 },
6936 { 0x1a, AC_VERB_SET_CONNECT_SEL, 0x03 },
6937 { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
6938 { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
6939 { } /* end */
6940};
6941
6942static struct hda_channel_mode alc889_8ch_intel_modes[3] = {
6943 { 2, alc889_ch2_intel_init },
6944 { 6, alc889_ch6_intel_init },
6945 { 8, alc889_ch8_intel_init },
6946};
6947
6948/*
6949 * 6ch mode
6950 */
6951static struct hda_verb alc883_sixstack_ch6_init[] = {
6952 { 0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x00 },
6953 { 0x16, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
6954 { 0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
6955 { 0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
6956 { } /* end */
6957};
6958
6959/*
6960 * 8ch mode
6961 */
6962static struct hda_verb alc883_sixstack_ch8_init[] = {
6963 { 0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
6964 { 0x16, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
6965 { 0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
6966 { 0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
6967 { } /* end */
6968};
6969
6970static struct hda_channel_mode alc883_sixstack_modes[2] = {
6971 { 6, alc883_sixstack_ch6_init },
6972 { 8, alc883_sixstack_ch8_init },
6973};
6974
6975
6471/* Pin assignment: Front=0x14, Rear=0x15, CLFE=0x16, Side=0x17 6976/* Pin assignment: Front=0x14, Rear=0x15, CLFE=0x16, Side=0x17
6472 * Mic=0x18, Front Mic=0x19, Line-In=0x1a, HP=0x1b 6977 * Mic=0x18, Front Mic=0x19, Line-In=0x1a, HP=0x1b
6473 */ 6978 */
@@ -6604,7 +7109,7 @@ static struct snd_kcontrol_new alc882_chmode_mixer[] = {
6604 { } /* end */ 7109 { } /* end */
6605}; 7110};
6606 7111
6607static struct hda_verb alc882_init_verbs[] = { 7112static struct hda_verb alc882_base_init_verbs[] = {
6608 /* Front mixer: unmute input/output amp left and right (volume = 0) */ 7113 /* Front mixer: unmute input/output amp left and right (volume = 0) */
6609 {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO}, 7114 {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
6610 {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)}, 7115 {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
@@ -6622,6 +7127,13 @@ static struct hda_verb alc882_init_verbs[] = {
6622 {0x0f, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)}, 7127 {0x0f, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
6623 {0x0f, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)}, 7128 {0x0f, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
6624 7129
7130 /* mute analog input loopbacks */
7131 {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
7132 {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
7133 {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
7134 {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
7135 {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
7136
6625 /* Front Pin: output 0 (0x0c) */ 7137 /* Front Pin: output 0 (0x0c) */
6626 {0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT}, 7138 {0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
6627 {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE}, 7139 {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
@@ -6656,11 +7168,6 @@ static struct hda_verb alc882_init_verbs[] = {
6656 7168
6657 /* FIXME: use matrix-type input source selection */ 7169 /* FIXME: use matrix-type input source selection */
6658 /* Mixer elements: 0x18, 19, 1a, 1b, 1c, 1d, 14, 15, 16, 17, 0b */ 7170 /* Mixer elements: 0x18, 19, 1a, 1b, 1c, 1d, 14, 15, 16, 17, 0b */
6659 /* Input mixer1: unmute Mic, F-Mic, Line, CD inputs */
6660 {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
6661 {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
6662 {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
6663 {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
6664 /* Input mixer2 */ 7171 /* Input mixer2 */
6665 {0x23, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)}, 7172 {0x23, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
6666 {0x23, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)}, 7173 {0x23, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
@@ -6671,9 +7178,6 @@ static struct hda_verb alc882_init_verbs[] = {
6671 {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)}, 7178 {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
6672 {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)}, 7179 {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
6673 {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)}, 7180 {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
6674 /* ADC1: mute amp left and right */
6675 {0x07, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
6676 {0x07, AC_VERB_SET_CONNECT_SEL, 0x00},
6677 /* ADC2: mute amp left and right */ 7181 /* ADC2: mute amp left and right */
6678 {0x08, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)}, 7182 {0x08, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
6679 {0x08, AC_VERB_SET_CONNECT_SEL, 0x00}, 7183 {0x08, AC_VERB_SET_CONNECT_SEL, 0x00},
@@ -6684,6 +7188,18 @@ static struct hda_verb alc882_init_verbs[] = {
6684 { } 7188 { }
6685}; 7189};
6686 7190
7191static struct hda_verb alc882_adc1_init_verbs[] = {
7192 /* Input mixer1: unmute Mic, F-Mic, Line, CD inputs */
7193 {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
7194 {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
7195 {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
7196 {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
7197 /* ADC1: mute amp left and right */
7198 {0x07, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
7199 {0x07, AC_VERB_SET_CONNECT_SEL, 0x00},
7200 { }
7201};
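/*
 * The ADC1 (NID 0x07) and "Input mixer1" (NID 0x24) verbs are split out of
 * the shared base list into alc882_adc1_init_verbs, presumably so that the
 * codecs lacking those widgets -- the ALC883 family reuses the base list via
 * the alc883_init_verbs alias below -- no longer touch them unconditionally,
 * while the ALC882/885 models that still need them can add this table
 * explicitly.
 */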
7202
6687static struct hda_verb alc882_eapd_verbs[] = { 7203static struct hda_verb alc882_eapd_verbs[] = {
6688 /* change to EAPD mode */ 7204 /* change to EAPD mode */
6689 {0x20, AC_VERB_SET_COEF_INDEX, 0x07}, 7205 {0x20, AC_VERB_SET_COEF_INDEX, 0x07},
@@ -6691,6 +7207,110 @@ static struct hda_verb alc882_eapd_verbs[] = {
6691 { } 7207 { }
6692}; 7208};
6693 7209
7210static struct hda_verb alc889_eapd_verbs[] = {
7211 {0x14, AC_VERB_SET_EAPD_BTLENABLE, 2},
7212 {0x15, AC_VERB_SET_EAPD_BTLENABLE, 2},
7213 { }
7214};
7215
7216static struct hda_verb alc_hp15_unsol_verbs[] = {
7217 {0x15, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | ALC880_HP_EVENT},
7218 {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
7219 {}
7220};
7221
7222static struct hda_verb alc885_init_verbs[] = {
7223 /* Front mixer: unmute input/output amp left and right (volume = 0) */
7224 {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
7225 {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
7226 {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
7227 /* Rear mixer */
7228 {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
7229 {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
7230 {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
7231 /* CLFE mixer */
7232 {0x0e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
7233 {0x0e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
7234 {0x0e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
7235 /* Side mixer */
7236 {0x0f, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
7237 {0x0f, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
7238 {0x0f, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
7239
7240 /* mute analog input loopbacks */
7241 {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
7242 {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
7243 {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
7244
7245 /* Front HP Pin: output 0 (0x0c) */
7246 {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
7247 {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
7248 {0x15, AC_VERB_SET_CONNECT_SEL, 0x00},
7249 /* Front Pin: output 0 (0x0c) */
7250 {0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
7251 {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
7252 {0x14, AC_VERB_SET_CONNECT_SEL, 0x00},
7253 /* Rear Pin: output 1 (0x0d) */
7254 {0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
7255 {0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
7256 {0x19, AC_VERB_SET_CONNECT_SEL, 0x01},
7257 /* CLFE Pin: output 2 (0x0e) */
7258 {0x16, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
7259 {0x16, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
7260 {0x16, AC_VERB_SET_CONNECT_SEL, 0x02},
7261 /* Side Pin: output 3 (0x0f) */
7262 {0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
7263 {0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
7264 {0x17, AC_VERB_SET_CONNECT_SEL, 0x03},
7265 /* Mic (rear) pin: input vref at 80% */
7266 {0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
7267 {0x1b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
7268 /* Front Mic pin: input vref at 80% */
7269 {0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
7270 {0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
7271 /* Line In pin: input */
7272 {0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
7273 {0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
7274
7275 /* Mixer elements: 0x18, , 0x1a, 0x1b */
7276 /* Input mixer1 */
7277 {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(3)},
7278 {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
7279 {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
7280 /* Input mixer2 */
7281 {0x23, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
7282 {0x23, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
7283 {0x23, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
7284 /* Input mixer3 */
7285 {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(2)},
7286 {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
7287 {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
7288 /* ADC2: mute amp left and right */
7289 {0x08, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
7290 /* ADC3: mute amp left and right */
7291 {0x09, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
7292
7293 { }
7294};
7295
7296static struct hda_verb alc885_init_input_verbs[] = {
7297 {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
7298 {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(2)},
7299 {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(3)},
7300 { }
7301};
7302
7303
 7304/* Unmute selector NID 0x24 and set the default input to front mic */
7305static struct hda_verb alc889_init_input_verbs[] = {
7306 {0x24, AC_VERB_SET_CONNECT_SEL, 0x00},
7307 {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
7308 { }
7309};
7310
7311
7312#define alc883_init_verbs alc882_base_init_verbs
7313
6694/* Mac Pro test */ 7314/* Mac Pro test */
6695static struct snd_kcontrol_new alc882_macpro_mixer[] = { 7315static struct snd_kcontrol_new alc882_macpro_mixer[] = {
6696 HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT), 7316 HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT),
@@ -6898,23 +7518,21 @@ static struct hda_verb alc885_imac24_init_verbs[] = {
6898}; 7518};
6899 7519
6900/* Toggle speaker-output according to the hp-jack state */ 7520/* Toggle speaker-output according to the hp-jack state */
6901static void alc885_imac24_automute_init_hook(struct hda_codec *codec) 7521static void alc885_imac24_setup(struct hda_codec *codec)
6902{ 7522{
6903 struct alc_spec *spec = codec->spec; 7523 struct alc_spec *spec = codec->spec;
6904 7524
6905 spec->autocfg.hp_pins[0] = 0x14; 7525 spec->autocfg.hp_pins[0] = 0x14;
6906 spec->autocfg.speaker_pins[0] = 0x18; 7526 spec->autocfg.speaker_pins[0] = 0x18;
6907 spec->autocfg.speaker_pins[1] = 0x1a; 7527 spec->autocfg.speaker_pins[1] = 0x1a;
6908 alc_automute_amp(codec);
6909} 7528}
6910 7529
6911static void alc885_mbp3_init_hook(struct hda_codec *codec) 7530static void alc885_mbp3_setup(struct hda_codec *codec)
6912{ 7531{
6913 struct alc_spec *spec = codec->spec; 7532 struct alc_spec *spec = codec->spec;
6914 7533
6915 spec->autocfg.hp_pins[0] = 0x15; 7534 spec->autocfg.hp_pins[0] = 0x15;
6916 spec->autocfg.speaker_pins[0] = 0x14; 7535 spec->autocfg.speaker_pins[0] = 0x14;
6917 alc_automute_amp(codec);
6918} 7536}
6919 7537
6920 7538
@@ -6942,13 +7560,12 @@ static void alc882_targa_automute(struct hda_codec *codec)
6942 spec->jack_present ? 1 : 3); 7560 spec->jack_present ? 1 : 3);
6943} 7561}
6944 7562
6945static void alc882_targa_init_hook(struct hda_codec *codec) 7563static void alc882_targa_setup(struct hda_codec *codec)
6946{ 7564{
6947 struct alc_spec *spec = codec->spec; 7565 struct alc_spec *spec = codec->spec;
6948 7566
6949 spec->autocfg.hp_pins[0] = 0x14; 7567 spec->autocfg.hp_pins[0] = 0x14;
6950 spec->autocfg.speaker_pins[0] = 0x1b; 7568 spec->autocfg.speaker_pins[0] = 0x1b;
6951 alc882_targa_automute(codec);
6952} 7569}
6953 7570
6954static void alc882_targa_unsol_event(struct hda_codec *codec, unsigned int res) 7571static void alc882_targa_unsol_event(struct hda_codec *codec, unsigned int res)
@@ -7036,18 +7653,16 @@ static void alc885_macpro_init_hook(struct hda_codec *codec)
 static void alc885_imac24_init_hook(struct hda_codec *codec)
 {
 	alc885_macpro_init_hook(codec);
-	alc885_imac24_automute_init_hook(codec);
+	alc_automute_amp(codec);
 }
 
 /*
  * generic initialization of ADC, input mixers and output mixers
  */
-static struct hda_verb alc882_auto_init_verbs[] = {
+static struct hda_verb alc883_auto_init_verbs[] = {
 	/*
 	 * Unmute ADC0-2 and set the default input to mic-in
 	 */
-	{0x07, AC_VERB_SET_CONNECT_SEL, 0x00},
-	{0x07, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
 	{0x08, AC_VERB_SET_CONNECT_SEL, 0x00},
 	{0x08, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
 	{0x09, AC_VERB_SET_CONNECT_SEL, 0x00},
@@ -7088,11 +7703,6 @@ static struct hda_verb alc882_auto_init_verbs[] = {
 
 	/* FIXME: use matrix-type input source selection */
 	/* Mixer elements: 0x18, 19, 1a, 1b, 1c, 1d, 14, 15, 16, 17, 0b */
-	/* Input mixer1: unmute Mic, F-Mic, Line, CD inputs */
-	{0x24, AC_VERB_SET_AMP_GAIN_MUTE, (0x7000 | (0x00 << 8))},
-	{0x24, AC_VERB_SET_AMP_GAIN_MUTE, (0x7080 | (0x03 << 8))},
-	{0x24, AC_VERB_SET_AMP_GAIN_MUTE, (0x7080 | (0x02 << 8))},
-	{0x24, AC_VERB_SET_AMP_GAIN_MUTE, (0x7080 | (0x04 << 8))},
 	/* Input mixer2 */
 	{0x23, AC_VERB_SET_AMP_GAIN_MUTE, (0x7000 | (0x00 << 8))},
 	{0x23, AC_VERB_SET_AMP_GAIN_MUTE, (0x7080 | (0x03 << 8))},
@@ -7107,821 +7717,6 @@ static struct hda_verb alc882_auto_init_verbs[] = {
 	{ }
 };
 
7110#ifdef CONFIG_SND_HDA_POWER_SAVE
7111#define alc882_loopbacks alc880_loopbacks
7112#endif
7113
7114/* pcm configuration: identical with ALC880 */
7115#define alc882_pcm_analog_playback alc880_pcm_analog_playback
7116#define alc882_pcm_analog_capture alc880_pcm_analog_capture
7117#define alc882_pcm_digital_playback alc880_pcm_digital_playback
7118#define alc882_pcm_digital_capture alc880_pcm_digital_capture
7119
7120/*
7121 * configuration and preset
7122 */
7123static const char *alc882_models[ALC882_MODEL_LAST] = {
7124 [ALC882_3ST_DIG] = "3stack-dig",
7125 [ALC882_6ST_DIG] = "6stack-dig",
7126 [ALC882_ARIMA] = "arima",
7127 [ALC882_W2JC] = "w2jc",
7128 [ALC882_TARGA] = "targa",
7129 [ALC882_ASUS_A7J] = "asus-a7j",
7130 [ALC882_ASUS_A7M] = "asus-a7m",
7131 [ALC885_MACPRO] = "macpro",
7132 [ALC885_MB5] = "mb5",
7133 [ALC885_MBP3] = "mbp3",
7134 [ALC885_IMAC24] = "imac24",
7135 [ALC882_AUTO] = "auto",
7136};
7137
7138static struct snd_pci_quirk alc882_cfg_tbl[] = {
7139 SND_PCI_QUIRK(0x1019, 0x6668, "ECS", ALC882_6ST_DIG),
7140 SND_PCI_QUIRK(0x1043, 0x060d, "Asus A7J", ALC882_ASUS_A7J),
7141 SND_PCI_QUIRK(0x1043, 0x1243, "Asus A7J", ALC882_ASUS_A7J),
7142 SND_PCI_QUIRK(0x1043, 0x13c2, "Asus A7M", ALC882_ASUS_A7M),
7143 SND_PCI_QUIRK(0x1043, 0x1971, "Asus W2JC", ALC882_W2JC),
7144 SND_PCI_QUIRK(0x1043, 0x817f, "Asus P5LD2", ALC882_6ST_DIG),
7145 SND_PCI_QUIRK(0x1043, 0x81d8, "Asus P5WD", ALC882_6ST_DIG),
7146 SND_PCI_QUIRK(0x105b, 0x6668, "Foxconn", ALC882_6ST_DIG),
7147 SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte P35 DS3R", ALC882_6ST_DIG),
7148 SND_PCI_QUIRK(0x1462, 0x28fb, "Targa T8", ALC882_TARGA), /* MSI-1049 T8 */
7149 SND_PCI_QUIRK(0x1462, 0x6668, "MSI", ALC882_6ST_DIG),
7150 SND_PCI_QUIRK(0x161f, 0x2054, "Arima W820", ALC882_ARIMA),
7151 {}
7152};
7153
7154static struct alc_config_preset alc882_presets[] = {
7155 [ALC882_3ST_DIG] = {
7156 .mixers = { alc882_base_mixer },
7157 .init_verbs = { alc882_init_verbs },
7158 .num_dacs = ARRAY_SIZE(alc882_dac_nids),
7159 .dac_nids = alc882_dac_nids,
7160 .dig_out_nid = ALC882_DIGOUT_NID,
7161 .dig_in_nid = ALC882_DIGIN_NID,
7162 .num_channel_mode = ARRAY_SIZE(alc882_ch_modes),
7163 .channel_mode = alc882_ch_modes,
7164 .need_dac_fix = 1,
7165 .input_mux = &alc882_capture_source,
7166 },
7167 [ALC882_6ST_DIG] = {
7168 .mixers = { alc882_base_mixer, alc882_chmode_mixer },
7169 .init_verbs = { alc882_init_verbs },
7170 .num_dacs = ARRAY_SIZE(alc882_dac_nids),
7171 .dac_nids = alc882_dac_nids,
7172 .dig_out_nid = ALC882_DIGOUT_NID,
7173 .dig_in_nid = ALC882_DIGIN_NID,
7174 .num_channel_mode = ARRAY_SIZE(alc882_sixstack_modes),
7175 .channel_mode = alc882_sixstack_modes,
7176 .input_mux = &alc882_capture_source,
7177 },
7178 [ALC882_ARIMA] = {
7179 .mixers = { alc882_base_mixer, alc882_chmode_mixer },
7180 .init_verbs = { alc882_init_verbs, alc882_eapd_verbs },
7181 .num_dacs = ARRAY_SIZE(alc882_dac_nids),
7182 .dac_nids = alc882_dac_nids,
7183 .num_channel_mode = ARRAY_SIZE(alc882_sixstack_modes),
7184 .channel_mode = alc882_sixstack_modes,
7185 .input_mux = &alc882_capture_source,
7186 },
7187 [ALC882_W2JC] = {
7188 .mixers = { alc882_w2jc_mixer, alc882_chmode_mixer },
7189 .init_verbs = { alc882_init_verbs, alc882_eapd_verbs,
7190 alc880_gpio1_init_verbs },
7191 .num_dacs = ARRAY_SIZE(alc882_dac_nids),
7192 .dac_nids = alc882_dac_nids,
7193 .num_channel_mode = ARRAY_SIZE(alc880_threestack_modes),
7194 .channel_mode = alc880_threestack_modes,
7195 .need_dac_fix = 1,
7196 .input_mux = &alc882_capture_source,
7197 .dig_out_nid = ALC882_DIGOUT_NID,
7198 },
7199 [ALC885_MBP3] = {
7200 .mixers = { alc885_mbp3_mixer, alc882_chmode_mixer },
7201 .init_verbs = { alc885_mbp3_init_verbs,
7202 alc880_gpio1_init_verbs },
7203 .num_dacs = 2,
7204 .dac_nids = alc882_dac_nids,
7205 .hp_nid = 0x04,
7206 .channel_mode = alc885_mbp_4ch_modes,
7207 .num_channel_mode = ARRAY_SIZE(alc885_mbp_4ch_modes),
7208 .input_mux = &alc882_capture_source,
7209 .dig_out_nid = ALC882_DIGOUT_NID,
7210 .dig_in_nid = ALC882_DIGIN_NID,
7211 .unsol_event = alc_automute_amp_unsol_event,
7212 .init_hook = alc885_mbp3_init_hook,
7213 },
7214 [ALC885_MB5] = {
7215 .mixers = { alc885_mb5_mixer, alc882_chmode_mixer },
7216 .init_verbs = { alc885_mb5_init_verbs,
7217 alc880_gpio1_init_verbs },
7218 .num_dacs = ARRAY_SIZE(alc882_dac_nids),
7219 .dac_nids = alc882_dac_nids,
7220 .channel_mode = alc885_mb5_6ch_modes,
7221 .num_channel_mode = ARRAY_SIZE(alc885_mb5_6ch_modes),
7222 .input_mux = &mb5_capture_source,
7223 .dig_out_nid = ALC882_DIGOUT_NID,
7224 .dig_in_nid = ALC882_DIGIN_NID,
7225 },
7226 [ALC885_MACPRO] = {
7227 .mixers = { alc882_macpro_mixer },
7228 .init_verbs = { alc882_macpro_init_verbs },
7229 .num_dacs = ARRAY_SIZE(alc882_dac_nids),
7230 .dac_nids = alc882_dac_nids,
7231 .dig_out_nid = ALC882_DIGOUT_NID,
7232 .dig_in_nid = ALC882_DIGIN_NID,
7233 .num_channel_mode = ARRAY_SIZE(alc882_ch_modes),
7234 .channel_mode = alc882_ch_modes,
7235 .input_mux = &alc882_capture_source,
7236 .init_hook = alc885_macpro_init_hook,
7237 },
7238 [ALC885_IMAC24] = {
7239 .mixers = { alc885_imac24_mixer },
7240 .init_verbs = { alc885_imac24_init_verbs },
7241 .num_dacs = ARRAY_SIZE(alc882_dac_nids),
7242 .dac_nids = alc882_dac_nids,
7243 .dig_out_nid = ALC882_DIGOUT_NID,
7244 .dig_in_nid = ALC882_DIGIN_NID,
7245 .num_channel_mode = ARRAY_SIZE(alc882_ch_modes),
7246 .channel_mode = alc882_ch_modes,
7247 .input_mux = &alc882_capture_source,
7248 .unsol_event = alc_automute_amp_unsol_event,
7249 .init_hook = alc885_imac24_init_hook,
7250 },
7251 [ALC882_TARGA] = {
7252 .mixers = { alc882_targa_mixer, alc882_chmode_mixer },
7253 .init_verbs = { alc882_init_verbs, alc880_gpio3_init_verbs,
7254 alc882_targa_verbs},
7255 .num_dacs = ARRAY_SIZE(alc882_dac_nids),
7256 .dac_nids = alc882_dac_nids,
7257 .dig_out_nid = ALC882_DIGOUT_NID,
7258 .num_adc_nids = ARRAY_SIZE(alc882_adc_nids),
7259 .adc_nids = alc882_adc_nids,
7260 .capsrc_nids = alc882_capsrc_nids,
7261 .num_channel_mode = ARRAY_SIZE(alc882_3ST_6ch_modes),
7262 .channel_mode = alc882_3ST_6ch_modes,
7263 .need_dac_fix = 1,
7264 .input_mux = &alc882_capture_source,
7265 .unsol_event = alc882_targa_unsol_event,
7266 .init_hook = alc882_targa_init_hook,
7267 },
7268 [ALC882_ASUS_A7J] = {
7269 .mixers = { alc882_asus_a7j_mixer, alc882_chmode_mixer },
7270 .init_verbs = { alc882_init_verbs, alc882_asus_a7j_verbs},
7271 .num_dacs = ARRAY_SIZE(alc882_dac_nids),
7272 .dac_nids = alc882_dac_nids,
7273 .dig_out_nid = ALC882_DIGOUT_NID,
7274 .num_adc_nids = ARRAY_SIZE(alc882_adc_nids),
7275 .adc_nids = alc882_adc_nids,
7276 .capsrc_nids = alc882_capsrc_nids,
7277 .num_channel_mode = ARRAY_SIZE(alc882_3ST_6ch_modes),
7278 .channel_mode = alc882_3ST_6ch_modes,
7279 .need_dac_fix = 1,
7280 .input_mux = &alc882_capture_source,
7281 },
7282 [ALC882_ASUS_A7M] = {
7283 .mixers = { alc882_asus_a7m_mixer, alc882_chmode_mixer },
7284 .init_verbs = { alc882_init_verbs, alc882_eapd_verbs,
7285 alc880_gpio1_init_verbs,
7286 alc882_asus_a7m_verbs },
7287 .num_dacs = ARRAY_SIZE(alc882_dac_nids),
7288 .dac_nids = alc882_dac_nids,
7289 .dig_out_nid = ALC882_DIGOUT_NID,
7290 .num_channel_mode = ARRAY_SIZE(alc880_threestack_modes),
7291 .channel_mode = alc880_threestack_modes,
7292 .need_dac_fix = 1,
7293 .input_mux = &alc882_capture_source,
7294 },
7295};
7296
7297
7298/*
7299 * Pin config fixes
7300 */
7301enum {
7302 PINFIX_ABIT_AW9D_MAX
7303};
7304
7305static struct alc_pincfg alc882_abit_aw9d_pinfix[] = {
7306 { 0x15, 0x01080104 }, /* side */
7307 { 0x16, 0x01011012 }, /* rear */
7308 { 0x17, 0x01016011 }, /* clfe */
7309 { }
7310};
7311
7312static const struct alc_pincfg *alc882_pin_fixes[] = {
7313 [PINFIX_ABIT_AW9D_MAX] = alc882_abit_aw9d_pinfix,
7314};
7315
7316static struct snd_pci_quirk alc882_pinfix_tbl[] = {
7317 SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", PINFIX_ABIT_AW9D_MAX),
7318 {}
7319};
7320
7321/*
7322 * BIOS auto configuration
7323 */
7324static void alc882_auto_set_output_and_unmute(struct hda_codec *codec,
7325 hda_nid_t nid, int pin_type,
7326 int dac_idx)
7327{
7328 /* set as output */
7329 struct alc_spec *spec = codec->spec;
7330 int idx;
7331
7332 alc_set_pin_output(codec, nid, pin_type);
7333 if (spec->multiout.dac_nids[dac_idx] == 0x25)
7334 idx = 4;
7335 else
7336 idx = spec->multiout.dac_nids[dac_idx] - 2;
7337 snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_CONNECT_SEL, idx);
7338
7339}
7340
7341static void alc882_auto_init_multi_out(struct hda_codec *codec)
7342{
7343 struct alc_spec *spec = codec->spec;
7344 int i;
7345
7346 for (i = 0; i <= HDA_SIDE; i++) {
7347 hda_nid_t nid = spec->autocfg.line_out_pins[i];
7348 int pin_type = get_pin_type(spec->autocfg.line_out_type);
7349 if (nid)
7350 alc882_auto_set_output_and_unmute(codec, nid, pin_type,
7351 i);
7352 }
7353}
7354
7355static void alc882_auto_init_hp_out(struct hda_codec *codec)
7356{
7357 struct alc_spec *spec = codec->spec;
7358 hda_nid_t pin;
7359
7360 pin = spec->autocfg.hp_pins[0];
7361 if (pin) /* connect to front */
7362 /* use dac 0 */
7363 alc882_auto_set_output_and_unmute(codec, pin, PIN_HP, 0);
7364 pin = spec->autocfg.speaker_pins[0];
7365 if (pin)
7366 alc882_auto_set_output_and_unmute(codec, pin, PIN_OUT, 0);
7367}
7368
7369#define alc882_is_input_pin(nid) alc880_is_input_pin(nid)
7370#define ALC882_PIN_CD_NID ALC880_PIN_CD_NID
7371
7372static void alc882_auto_init_analog_input(struct hda_codec *codec)
7373{
7374 struct alc_spec *spec = codec->spec;
7375 int i;
7376
7377 for (i = 0; i < AUTO_PIN_LAST; i++) {
7378 hda_nid_t nid = spec->autocfg.input_pins[i];
7379 if (!nid)
7380 continue;
7381 alc_set_input_pin(codec, nid, AUTO_PIN_FRONT_MIC /*i*/);
7382 if (get_wcaps(codec, nid) & AC_WCAP_OUT_AMP)
7383 snd_hda_codec_write(codec, nid, 0,
7384 AC_VERB_SET_AMP_GAIN_MUTE,
7385 AMP_OUT_MUTE);
7386 }
7387}
7388
7389static void alc882_auto_init_input_src(struct hda_codec *codec)
7390{
7391 struct alc_spec *spec = codec->spec;
7392 int c;
7393
7394 for (c = 0; c < spec->num_adc_nids; c++) {
7395 hda_nid_t conn_list[HDA_MAX_NUM_INPUTS];
7396 hda_nid_t nid = spec->capsrc_nids[c];
7397 unsigned int mux_idx;
7398 const struct hda_input_mux *imux;
7399 int conns, mute, idx, item;
7400
7401 conns = snd_hda_get_connections(codec, nid, conn_list,
7402 ARRAY_SIZE(conn_list));
7403 if (conns < 0)
7404 continue;
7405 mux_idx = c >= spec->num_mux_defs ? 0 : c;
7406 imux = &spec->input_mux[mux_idx];
7407 for (idx = 0; idx < conns; idx++) {
7408 /* if the current connection is the selected one,
7409 * unmute it as default - otherwise mute it
7410 */
7411 mute = AMP_IN_MUTE(idx);
7412 for (item = 0; item < imux->num_items; item++) {
7413 if (imux->items[item].index == idx) {
7414 if (spec->cur_mux[c] == item)
7415 mute = AMP_IN_UNMUTE(idx);
7416 break;
7417 }
7418 }
7419 /* check if we have a selector or mixer
7420 * we could check for the widget type instead, but
7421 * just check for Amp-In presence (in case of mixer
7422 * without amp-in there is something wrong, this
7423 * function shouldn't be used or capsrc nid is wrong)
7424 */
7425 if (get_wcaps(codec, nid) & AC_WCAP_IN_AMP)
7426 snd_hda_codec_write(codec, nid, 0,
7427 AC_VERB_SET_AMP_GAIN_MUTE,
7428 mute);
7429 else if (mute != AMP_IN_MUTE(idx))
7430 snd_hda_codec_write(codec, nid, 0,
7431 AC_VERB_SET_CONNECT_SEL,
7432 idx);
7433 }
7434 }
7435}
7436
7437/* add mic boosts if needed */
7438static int alc_auto_add_mic_boost(struct hda_codec *codec)
7439{
7440 struct alc_spec *spec = codec->spec;
7441 int err;
7442 hda_nid_t nid;
7443
7444 nid = spec->autocfg.input_pins[AUTO_PIN_MIC];
7445 if (nid && (get_wcaps(codec, nid) & AC_WCAP_IN_AMP)) {
7446 err = add_control(spec, ALC_CTL_WIDGET_VOL,
7447 "Mic Boost",
7448 HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_INPUT));
7449 if (err < 0)
7450 return err;
7451 }
7452 nid = spec->autocfg.input_pins[AUTO_PIN_FRONT_MIC];
7453 if (nid && (get_wcaps(codec, nid) & AC_WCAP_IN_AMP)) {
7454 err = add_control(spec, ALC_CTL_WIDGET_VOL,
7455 "Front Mic Boost",
7456 HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_INPUT));
7457 if (err < 0)
7458 return err;
7459 }
7460 return 0;
7461}
7462
7463/* almost identical with ALC880 parser... */
7464static int alc882_parse_auto_config(struct hda_codec *codec)
7465{
7466 struct alc_spec *spec = codec->spec;
7467 int err = alc880_parse_auto_config(codec);
7468
7469 if (err < 0)
7470 return err;
7471 else if (!err)
7472 return 0; /* no config found */
7473
7474 err = alc_auto_add_mic_boost(codec);
7475 if (err < 0)
7476 return err;
7477
7478 /* hack - override the init verbs */
7479 spec->init_verbs[0] = alc882_auto_init_verbs;
7480
7481 return 1; /* config found */
7482}
7483
7484/* additional initialization for auto-configuration model */
7485static void alc882_auto_init(struct hda_codec *codec)
7486{
7487 struct alc_spec *spec = codec->spec;
7488 alc882_auto_init_multi_out(codec);
7489 alc882_auto_init_hp_out(codec);
7490 alc882_auto_init_analog_input(codec);
7491 alc882_auto_init_input_src(codec);
7492 if (spec->unsol_event)
7493 alc_inithook(codec);
7494}
7495
7496static int patch_alc883(struct hda_codec *codec); /* called in patch_alc882() */
7497
7498static int patch_alc882(struct hda_codec *codec)
7499{
7500 struct alc_spec *spec;
7501 int err, board_config;
7502
7503 spec = kzalloc(sizeof(*spec), GFP_KERNEL);
7504 if (spec == NULL)
7505 return -ENOMEM;
7506
7507 codec->spec = spec;
7508
7509 board_config = snd_hda_check_board_config(codec, ALC882_MODEL_LAST,
7510 alc882_models,
7511 alc882_cfg_tbl);
7512
7513 if (board_config < 0 || board_config >= ALC882_MODEL_LAST) {
7514 /* Pick up systems that don't supply PCI SSID */
7515 switch (codec->subsystem_id) {
7516 case 0x106b0c00: /* Mac Pro */
7517 board_config = ALC885_MACPRO;
7518 break;
7519 case 0x106b1000: /* iMac 24 */
7520 case 0x106b2800: /* AppleTV */
7521 case 0x106b3e00: /* iMac 24 Aluminium */
7522 board_config = ALC885_IMAC24;
7523 break;
7524 case 0x106b00a0: /* MacBookPro3,1 - Another revision */
7525 case 0x106b00a1: /* Macbook (might be wrong - PCI SSID?) */
7526 case 0x106b00a4: /* MacbookPro4,1 */
7527 case 0x106b2c00: /* Macbook Pro rev3 */
7528 /* Macbook 3.1 (0x106b3600) is handled by patch_alc883() */
7529 case 0x106b3800: /* MacbookPro4,1 - latter revision */
7530 board_config = ALC885_MBP3;
7531 break;
7532 case 0x106b3f00: /* Macbook 5,1 */
7533 case 0x106b4000: /* Macbook Pro 5,1 - FIXME: HP jack sense
7534 * seems not working, so apparently
7535 * no perfect solution yet
7536 */
7537 board_config = ALC885_MB5;
7538 break;
7539 default:
7540 /* ALC889A is handled better as ALC888-compatible */
7541 if (codec->revision_id == 0x100101 ||
7542 codec->revision_id == 0x100103) {
7543 alc_free(codec);
7544 return patch_alc883(codec);
7545 }
7546 printk(KERN_INFO "hda_codec: Unknown model for %s, "
7547 "trying auto-probe from BIOS...\n",
7548 codec->chip_name);
7549 board_config = ALC882_AUTO;
7550 }
7551 }
7552
7553 alc_fix_pincfg(codec, alc882_pinfix_tbl, alc882_pin_fixes);
7554
7555 if (board_config == ALC882_AUTO) {
7556 /* automatic parse from the BIOS config */
7557 err = alc882_parse_auto_config(codec);
7558 if (err < 0) {
7559 alc_free(codec);
7560 return err;
7561 } else if (!err) {
7562 printk(KERN_INFO
7563 "hda_codec: Cannot set up configuration "
7564 "from BIOS. Using base mode...\n");
7565 board_config = ALC882_3ST_DIG;
7566 }
7567 }
7568
7569 err = snd_hda_attach_beep_device(codec, 0x1);
7570 if (err < 0) {
7571 alc_free(codec);
7572 return err;
7573 }
7574
7575 if (board_config != ALC882_AUTO)
7576 setup_preset(spec, &alc882_presets[board_config]);
7577
7578 spec->stream_analog_playback = &alc882_pcm_analog_playback;
7579 spec->stream_analog_capture = &alc882_pcm_analog_capture;
7580 /* FIXME: setup DAC5 */
7581 /*spec->stream_analog_alt_playback = &alc880_pcm_analog_alt_playback;*/
7582 spec->stream_analog_alt_capture = &alc880_pcm_analog_alt_capture;
7583
7584 spec->stream_digital_playback = &alc882_pcm_digital_playback;
7585 spec->stream_digital_capture = &alc882_pcm_digital_capture;
7586
7587 if (!spec->adc_nids && spec->input_mux) {
7588 /* check whether NID 0x07 is valid */
7589 unsigned int wcap = get_wcaps(codec, 0x07);
7590 /* get type */
7591 wcap = (wcap & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT;
7592 if (wcap != AC_WID_AUD_IN) {
7593 spec->adc_nids = alc882_adc_nids_alt;
7594 spec->num_adc_nids = ARRAY_SIZE(alc882_adc_nids_alt);
7595 spec->capsrc_nids = alc882_capsrc_nids_alt;
7596 } else {
7597 spec->adc_nids = alc882_adc_nids;
7598 spec->num_adc_nids = ARRAY_SIZE(alc882_adc_nids);
7599 spec->capsrc_nids = alc882_capsrc_nids;
7600 }
7601 }
7602 set_capture_mixer(spec);
7603 set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT);
7604
7605 spec->vmaster_nid = 0x0c;
7606
7607 codec->patch_ops = alc_patch_ops;
7608 if (board_config == ALC882_AUTO)
7609 spec->init_hook = alc882_auto_init;
7610#ifdef CONFIG_SND_HDA_POWER_SAVE
7611 if (!spec->loopback.amplist)
7612 spec->loopback.amplist = alc882_loopbacks;
7613#endif
7614 codec->proc_widget_hook = print_realtek_coef;
7615
7616 return 0;
7617}
7618
7619/*
7620 * ALC883 support
7621 *
7622 * ALC883 is almost identical with ALC880 but has cleaner and more flexible
7623 * configuration. Each pin widget can choose any input DACs and a mixer.
7624 * Each ADC is connected from a mixer of all inputs. This makes possible
7625 * 6-channel independent captures.
7626 *
7627 * In addition, an independent DAC for the multi-playback (not used in this
7628 * driver yet).
7629 */
7630#define ALC883_DIGOUT_NID 0x06
7631#define ALC883_DIGIN_NID 0x0a
7632
7633#define ALC1200_DIGOUT_NID 0x10
7634
7635static hda_nid_t alc883_dac_nids[4] = {
7636 /* front, rear, clfe, rear_surr */
7637 0x02, 0x03, 0x04, 0x05
7638};
7639
7640static hda_nid_t alc883_adc_nids[2] = {
7641 /* ADC1-2 */
7642 0x08, 0x09,
7643};
7644
7645static hda_nid_t alc883_adc_nids_alt[1] = {
7646 /* ADC1 */
7647 0x08,
7648};
7649
7650static hda_nid_t alc883_adc_nids_rev[2] = {
7651 /* ADC2-1 */
7652 0x09, 0x08
7653};
7654
7655#define alc889_adc_nids alc880_adc_nids
7656
7657static hda_nid_t alc883_capsrc_nids[2] = { 0x23, 0x22 };
7658
7659static hda_nid_t alc883_capsrc_nids_rev[2] = { 0x22, 0x23 };
7660
7661#define alc889_capsrc_nids alc882_capsrc_nids
7662
7663/* input MUX */
7664/* FIXME: should be a matrix-type input source selection */
7665
7666static struct hda_input_mux alc883_capture_source = {
7667 .num_items = 4,
7668 .items = {
7669 { "Mic", 0x0 },
7670 { "Front Mic", 0x1 },
7671 { "Line", 0x2 },
7672 { "CD", 0x4 },
7673 },
7674};
7675
7676static struct hda_input_mux alc883_3stack_6ch_intel = {
7677 .num_items = 4,
7678 .items = {
7679 { "Mic", 0x1 },
7680 { "Front Mic", 0x0 },
7681 { "Line", 0x2 },
7682 { "CD", 0x4 },
7683 },
7684};
7685
7686static struct hda_input_mux alc883_lenovo_101e_capture_source = {
7687 .num_items = 2,
7688 .items = {
7689 { "Mic", 0x1 },
7690 { "Line", 0x2 },
7691 },
7692};
7693
7694static struct hda_input_mux alc883_lenovo_nb0763_capture_source = {
7695 .num_items = 4,
7696 .items = {
7697 { "Mic", 0x0 },
7698 { "iMic", 0x1 },
7699 { "Line", 0x2 },
7700 { "CD", 0x4 },
7701 },
7702};
7703
7704static struct hda_input_mux alc883_fujitsu_pi2515_capture_source = {
7705 .num_items = 2,
7706 .items = {
7707 { "Mic", 0x0 },
7708 { "Int Mic", 0x1 },
7709 },
7710};
7711
7712static struct hda_input_mux alc883_lenovo_sky_capture_source = {
7713 .num_items = 3,
7714 .items = {
7715 { "Mic", 0x0 },
7716 { "Front Mic", 0x1 },
7717 { "Line", 0x4 },
7718 },
7719};
7720
7721static struct hda_input_mux alc883_asus_eee1601_capture_source = {
7722 .num_items = 2,
7723 .items = {
7724 { "Mic", 0x0 },
7725 { "Line", 0x2 },
7726 },
7727};
7728
7729static struct hda_input_mux alc889A_mb31_capture_source = {
7730 .num_items = 2,
7731 .items = {
7732 { "Mic", 0x0 },
7733 /* Front Mic (0x01) unused */
7734 { "Line", 0x2 },
7735 /* Line 2 (0x03) unused */
7736 /* CD (0x04) unsused? */
7737 },
7738};
7739
7740/*
7741 * 2ch mode
7742 */
7743static struct hda_channel_mode alc883_3ST_2ch_modes[1] = {
7744 { 2, NULL }
7745};
7746
7747/*
7748 * 2ch mode
7749 */
7750static struct hda_verb alc883_3ST_ch2_init[] = {
7751 { 0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80 },
7752 { 0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
7753 { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN },
7754 { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
7755 { } /* end */
7756};
7757
7758/*
7759 * 4ch mode
7760 */
7761static struct hda_verb alc883_3ST_ch4_init[] = {
7762 { 0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80 },
7763 { 0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
7764 { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
7765 { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
7766 { 0x1a, AC_VERB_SET_CONNECT_SEL, 0x01 },
7767 { } /* end */
7768};
7769
7770/*
7771 * 6ch mode
7772 */
7773static struct hda_verb alc883_3ST_ch6_init[] = {
7774 { 0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
7775 { 0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
7776 { 0x18, AC_VERB_SET_CONNECT_SEL, 0x02 },
7777 { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
7778 { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
7779 { 0x1a, AC_VERB_SET_CONNECT_SEL, 0x01 },
7780 { } /* end */
7781};
7782
7783static struct hda_channel_mode alc883_3ST_6ch_modes[3] = {
7784 { 2, alc883_3ST_ch2_init },
7785 { 4, alc883_3ST_ch4_init },
7786 { 6, alc883_3ST_ch6_init },
7787};
7788
7789
7790/*
7791 * 2ch mode
7792 */
7793static struct hda_verb alc883_4ST_ch2_init[] = {
7794 { 0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
7795 { 0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
7796 { 0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80 },
7797 { 0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
7798 { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN },
7799 { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
7800 { } /* end */
7801};
7802
7803/*
7804 * 4ch mode
7805 */
7806static struct hda_verb alc883_4ST_ch4_init[] = {
7807 { 0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
7808 { 0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
7809 { 0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80 },
7810 { 0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
7811 { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
7812 { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
7813 { 0x1a, AC_VERB_SET_CONNECT_SEL, 0x01 },
7814 { } /* end */
7815};
7816
7817/*
7818 * 6ch mode
7819 */
7820static struct hda_verb alc883_4ST_ch6_init[] = {
7821 { 0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
7822 { 0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
7823 { 0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
7824 { 0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
7825 { 0x18, AC_VERB_SET_CONNECT_SEL, 0x02 },
7826 { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
7827 { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
7828 { 0x1a, AC_VERB_SET_CONNECT_SEL, 0x01 },
7829 { } /* end */
7830};
7831
7832/*
7833 * 8ch mode
7834 */
7835static struct hda_verb alc883_4ST_ch8_init[] = {
7836 { 0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
7837 { 0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
7838 { 0x17, AC_VERB_SET_CONNECT_SEL, 0x03 },
7839 { 0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
7840 { 0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
7841 { 0x18, AC_VERB_SET_CONNECT_SEL, 0x02 },
7842 { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
7843 { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
7844 { 0x1a, AC_VERB_SET_CONNECT_SEL, 0x01 },
7845 { } /* end */
7846};
7847
7848static struct hda_channel_mode alc883_4ST_8ch_modes[4] = {
7849 { 2, alc883_4ST_ch2_init },
7850 { 4, alc883_4ST_ch4_init },
7851 { 6, alc883_4ST_ch6_init },
7852 { 8, alc883_4ST_ch8_init },
7853};
7854
7855
7856/*
7857 * 2ch mode
7858 */
7859static struct hda_verb alc883_3ST_ch2_intel_init[] = {
7860 { 0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80 },
7861 { 0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
7862 { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN },
7863 { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
7864 { } /* end */
7865};
7866
7867/*
7868 * 4ch mode
7869 */
7870static struct hda_verb alc883_3ST_ch4_intel_init[] = {
7871 { 0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80 },
7872 { 0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
7873 { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
7874 { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
7875 { 0x1a, AC_VERB_SET_CONNECT_SEL, 0x01 },
7876 { } /* end */
7877};
7878
7879/*
7880 * 6ch mode
7881 */
7882static struct hda_verb alc883_3ST_ch6_intel_init[] = {
7883 { 0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
7884 { 0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
7885 { 0x19, AC_VERB_SET_CONNECT_SEL, 0x02 },
7886 { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
7887 { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
7888 { 0x1a, AC_VERB_SET_CONNECT_SEL, 0x01 },
7889 { } /* end */
7890};
7891
7892static struct hda_channel_mode alc883_3ST_6ch_intel_modes[3] = {
7893 { 2, alc883_3ST_ch2_intel_init },
7894 { 4, alc883_3ST_ch4_intel_init },
7895 { 6, alc883_3ST_ch6_intel_init },
7896};
7897
7898/*
7899 * 6ch mode
7900 */
7901static struct hda_verb alc883_sixstack_ch6_init[] = {
7902 { 0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x00 },
7903 { 0x16, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
7904 { 0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
7905 { 0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
7906 { } /* end */
7907};
7908
7909/*
7910 * 8ch mode
7911 */
7912static struct hda_verb alc883_sixstack_ch8_init[] = {
7913 { 0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
7914 { 0x16, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
7915 { 0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
7916 { 0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
7917 { } /* end */
7918};
7919
7920static struct hda_channel_mode alc883_sixstack_modes[2] = {
7921 { 6, alc883_sixstack_ch6_init },
7922 { 8, alc883_sixstack_ch8_init },
7923};
7924
 /* 2ch mode (Speaker:front, Subwoofer:CLFE, Line:input, Headphones:front) */
 static struct hda_verb alc889A_mb31_ch2_init[] = {
 	{0x15, AC_VERB_SET_CONNECT_SEL, 0x00}, /* HP as front */
@@ -7972,34 +7767,7 @@ static struct hda_verb alc883_medion_eapd_verbs[] = {
 	{ }
 };
 
-/* Pin assignment: Front=0x14, Rear=0x15, CLFE=0x16, Side=0x17
+#define alc883_base_mixer	alc882_base_mixer
7976 * Mic=0x18, Front Mic=0x19, Line-In=0x1a, HP=0x1b
7977 */
7978
7979static struct snd_kcontrol_new alc883_base_mixer[] = {
7980 HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT),
7981 HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT),
7982 HDA_CODEC_VOLUME("Surround Playback Volume", 0x0d, 0x0, HDA_OUTPUT),
7983 HDA_BIND_MUTE("Surround Playback Switch", 0x0d, 2, HDA_INPUT),
7984 HDA_CODEC_VOLUME_MONO("Center Playback Volume", 0x0e, 1, 0x0, HDA_OUTPUT),
7985 HDA_CODEC_VOLUME_MONO("LFE Playback Volume", 0x0e, 2, 0x0, HDA_OUTPUT),
7986 HDA_BIND_MUTE_MONO("Center Playback Switch", 0x0e, 1, 2, HDA_INPUT),
7987 HDA_BIND_MUTE_MONO("LFE Playback Switch", 0x0e, 2, 2, HDA_INPUT),
7988 HDA_CODEC_VOLUME("Side Playback Volume", 0x0f, 0x0, HDA_OUTPUT),
7989 HDA_BIND_MUTE("Side Playback Switch", 0x0f, 2, HDA_INPUT),
7990 HDA_CODEC_MUTE("Headphone Playback Switch", 0x1b, 0x0, HDA_OUTPUT),
7991 HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT),
7992 HDA_CODEC_MUTE("CD Playback Switch", 0x0b, 0x04, HDA_INPUT),
7993 HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
7994 HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
7995 HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
7996 HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
7997 HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
7998 HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
7999 HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
8000 HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
8001 { } /* end */
8002};
 
 static struct snd_kcontrol_new alc883_mitac_mixer[] = {
 	HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT),
@@ -8110,6 +7878,30 @@ static struct snd_kcontrol_new alc883_3ST_6ch_intel_mixer[] = {
 	{ } /* end */
 };
 
7881static struct snd_kcontrol_new alc885_8ch_intel_mixer[] = {
7882 HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT),
7883 HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT),
7884 HDA_CODEC_VOLUME("Surround Playback Volume", 0x0d, 0x0, HDA_OUTPUT),
7885 HDA_BIND_MUTE("Surround Playback Switch", 0x0d, 2, HDA_INPUT),
7886 HDA_CODEC_VOLUME_MONO("Center Playback Volume", 0x0e, 1, 0x0,
7887 HDA_OUTPUT),
7888 HDA_CODEC_VOLUME_MONO("LFE Playback Volume", 0x0e, 2, 0x0, HDA_OUTPUT),
7889 HDA_BIND_MUTE_MONO("Center Playback Switch", 0x0e, 1, 2, HDA_INPUT),
7890 HDA_BIND_MUTE_MONO("LFE Playback Switch", 0x0e, 2, 2, HDA_INPUT),
7891 HDA_CODEC_VOLUME("Speaker Playback Volume", 0x0f, 0x0, HDA_OUTPUT),
7892 HDA_BIND_MUTE("Speaker Playback Switch", 0x0f, 2, HDA_INPUT),
7893 HDA_CODEC_MUTE("Headphone Playback Switch", 0x15, 0x0, HDA_OUTPUT),
7894 HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
7895 HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
7896 HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x3, HDA_INPUT),
7897 HDA_CODEC_VOLUME("Mic Boost", 0x1b, 0, HDA_INPUT),
7898 HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x3, HDA_INPUT),
7899 HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
7900 HDA_CODEC_VOLUME("Front Mic Boost", 0x18, 0, HDA_INPUT),
7901 HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
7902 { } /* end */
7903};
7904
 static struct snd_kcontrol_new alc883_fivestack_mixer[] = {
 	HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT),
 	HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT),
@@ -8350,93 +8142,14 @@ static struct snd_kcontrol_new alc883_chmode_mixer[] = {
 	{ } /* end */
 };
 
8353static struct hda_verb alc883_init_verbs[] = {
8354 /* ADC1: mute amp left and right */
8355 {0x08, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
8356 {0x08, AC_VERB_SET_CONNECT_SEL, 0x00},
8357 /* ADC2: mute amp left and right */
8358 {0x09, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
8359 {0x09, AC_VERB_SET_CONNECT_SEL, 0x00},
8360 /* Front mixer: unmute input/output amp left and right (volume = 0) */
8361 {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
8362 {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
8363 {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
8364 /* Rear mixer */
8365 {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
8366 {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
8367 {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
8368 /* CLFE mixer */
8369 {0x0e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
8370 {0x0e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
8371 {0x0e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
8372 /* Side mixer */
8373 {0x0f, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
8374 {0x0f, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
8375 {0x0f, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
8376
8377 /* mute analog input loopbacks */
8378 {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
8379 {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
8380 {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
8381 {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
8382 {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
8383
8384 /* Front Pin: output 0 (0x0c) */
8385 {0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
8386 {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
8387 {0x14, AC_VERB_SET_CONNECT_SEL, 0x00},
8388 /* Rear Pin: output 1 (0x0d) */
8389 {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
8390 {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
8391 {0x15, AC_VERB_SET_CONNECT_SEL, 0x01},
8392 /* CLFE Pin: output 2 (0x0e) */
8393 {0x16, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
8394 {0x16, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
8395 {0x16, AC_VERB_SET_CONNECT_SEL, 0x02},
8396 /* Side Pin: output 3 (0x0f) */
8397 {0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
8398 {0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
8399 {0x17, AC_VERB_SET_CONNECT_SEL, 0x03},
8400 /* Mic (rear) pin: input vref at 80% */
8401 {0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
8402 {0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
8403 /* Front Mic pin: input vref at 80% */
8404 {0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
8405 {0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
8406 /* Line In pin: input */
8407 {0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
8408 {0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
8409 /* Line-2 In: Headphone output (output 0 - 0x0c) */
8410 {0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
8411 {0x1b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
8412 {0x1b, AC_VERB_SET_CONNECT_SEL, 0x00},
8413 /* CD pin widget for input */
8414 {0x1c, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
8415
8416 /* FIXME: use matrix-type input source selection */
8417 /* Mixer elements: 0x18, 19, 1a, 1b, 1c, 1d, 14, 15, 16, 17, 0b */
8418 /* Input mixer2 */
8419 {0x23, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
8420 {0x23, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
8421 {0x23, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
8422 {0x23, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
8423 /* Input mixer3 */
8424 {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
8425 {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
8426 {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
8427 {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
8428 { }
8429};
8430
 /* toggle speaker-output according to the hp-jack state */
-static void alc883_mitac_init_hook(struct hda_codec *codec)
+static void alc883_mitac_setup(struct hda_codec *codec)
 {
 	struct alc_spec *spec = codec->spec;
 
 	spec->autocfg.hp_pins[0] = 0x15;
 	spec->autocfg.speaker_pins[0] = 0x14;
 	spec->autocfg.speaker_pins[1] = 0x17;
-	alc_automute_amp(codec);
 }
 
 /* auto-toggle front mic */
@@ -8468,6 +8181,22 @@ static struct hda_verb alc883_mitac_verbs[] = {
 	{ } /* end */
 };
 
8184static struct hda_verb alc883_clevo_m540r_verbs[] = {
8185 /* HP */
8186 {0x15, AC_VERB_SET_CONNECT_SEL, 0x00},
8187 {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
8188 /* Int speaker */
8189 /*{0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},*/
8190
8191 /* enable unsolicited event */
8192 /*
8193 {0x15, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_HP_EVENT | AC_USRSP_EN},
8194 {0x18, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_MIC_EVENT | AC_USRSP_EN},
8195 */
8196
8197 { } /* end */
8198};
8199
 static struct hda_verb alc883_clevo_m720_verbs[] = {
 	/* HP */
 	{0x15, AC_VERB_SET_CONNECT_SEL, 0x00},
@@ -8591,7 +8320,7 @@ static struct hda_verb alc883_vaiott_verbs[] = {
 	{ } /* end */
 };
 
-static void alc888_3st_hp_init_hook(struct hda_codec *codec)
+static void alc888_3st_hp_setup(struct hda_codec *codec)
 {
 	struct alc_spec *spec = codec->spec;
 
@@ -8599,7 +8328,6 @@ static void alc888_3st_hp_init_hook(struct hda_codec *codec)
 	spec->autocfg.speaker_pins[0] = 0x14;
 	spec->autocfg.speaker_pins[1] = 0x16;
 	spec->autocfg.speaker_pins[2] = 0x18;
-	alc_automute_amp(codec);
 }
 
 static struct hda_verb alc888_3st_hp_verbs[] = {
@@ -8696,13 +8424,12 @@ static struct hda_verb alc883_medion_md2_verbs[] = {
 };
 
 /* toggle speaker-output according to the hp-jack state */
-static void alc883_medion_md2_init_hook(struct hda_codec *codec)
+static void alc883_medion_md2_setup(struct hda_codec *codec)
 {
 	struct alc_spec *spec = codec->spec;
 
 	spec->autocfg.hp_pins[0] = 0x14;
 	spec->autocfg.speaker_pins[0] = 0x15;
-	alc_automute_amp(codec);
 }
 
 /* toggle speaker-output according to the hp-jack state */
@@ -8719,12 +8446,16 @@ static void alc883_clevo_m720_mic_automute(struct hda_codec *codec)
 				 HDA_AMP_MUTE, present ? HDA_AMP_MUTE : 0);
 }
 
-static void alc883_clevo_m720_init_hook(struct hda_codec *codec)
+static void alc883_clevo_m720_setup(struct hda_codec *codec)
 {
 	struct alc_spec *spec = codec->spec;
 
 	spec->autocfg.hp_pins[0] = 0x15;
 	spec->autocfg.speaker_pins[0] = 0x14;
+}
+
+static void alc883_clevo_m720_init_hook(struct hda_codec *codec)
+{
 	alc_automute_amp(codec);
 	alc883_clevo_m720_mic_automute(codec);
 }
@@ -8743,22 +8474,20 @@ static void alc883_clevo_m720_unsol_event(struct hda_codec *codec,
 }
 
 /* toggle speaker-output according to the hp-jack state */
-static void alc883_2ch_fujitsu_pi2515_init_hook(struct hda_codec *codec)
+static void alc883_2ch_fujitsu_pi2515_setup(struct hda_codec *codec)
 {
 	struct alc_spec *spec = codec->spec;
 
 	spec->autocfg.hp_pins[0] = 0x14;
 	spec->autocfg.speaker_pins[0] = 0x15;
-	alc_automute_amp(codec);
 }
 
-static void alc883_haier_w66_init_hook(struct hda_codec *codec)
+static void alc883_haier_w66_setup(struct hda_codec *codec)
 {
 	struct alc_spec *spec = codec->spec;
 
 	spec->autocfg.hp_pins[0] = 0x1b;
 	spec->autocfg.speaker_pins[0] = 0x14;
-	alc_automute_amp(codec);
 }
 
 static void alc883_lenovo_101e_ispeaker_automute(struct hda_codec *codec)
@@ -8797,14 +8526,13 @@ static void alc883_lenovo_101e_unsol_event(struct hda_codec *codec,
 }
 
 /* toggle speaker-output according to the hp-jack state */
-static void alc883_acer_aspire_init_hook(struct hda_codec *codec)
+static void alc883_acer_aspire_setup(struct hda_codec *codec)
 {
 	struct alc_spec *spec = codec->spec;
 
 	spec->autocfg.hp_pins[0] = 0x14;
 	spec->autocfg.speaker_pins[0] = 0x15;
 	spec->autocfg.speaker_pins[1] = 0x16;
-	alc_automute_amp(codec);
 }
 
 static struct hda_verb alc883_acer_eapd_verbs[] = {
@@ -8825,7 +8553,14 @@ static struct hda_verb alc883_acer_eapd_verbs[] = {
 	{ }
 };
 
-static void alc888_6st_dell_init_hook(struct hda_codec *codec)
+static struct hda_verb alc888_acer_aspire_7730G_verbs[] = {
+	{0x15, AC_VERB_SET_CONNECT_SEL, 0x00},
+	{0x17, AC_VERB_SET_CONNECT_SEL, 0x02},
+	{0x15, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_HP_EVENT | AC_USRSP_EN},
+	{ } /* end */
+};
+
+static void alc888_6st_dell_setup(struct hda_codec *codec)
 {
 	struct alc_spec *spec = codec->spec;
 
@@ -8834,10 +8569,9 @@ static void alc888_6st_dell_init_hook(struct hda_codec *codec)
 	spec->autocfg.speaker_pins[1] = 0x15;
 	spec->autocfg.speaker_pins[2] = 0x16;
 	spec->autocfg.speaker_pins[3] = 0x17;
-	alc_automute_amp(codec);
 }
 
-static void alc888_lenovo_sky_init_hook(struct hda_codec *codec)
+static void alc888_lenovo_sky_setup(struct hda_codec *codec)
 {
 	struct alc_spec *spec = codec->spec;
 
@@ -8847,82 +8581,17 @@ static void alc888_lenovo_sky_init_hook(struct hda_codec *codec)
 	spec->autocfg.speaker_pins[2] = 0x16;
 	spec->autocfg.speaker_pins[3] = 0x17;
 	spec->autocfg.speaker_pins[4] = 0x1a;
-	alc_automute_amp(codec);
 }
 
-static void alc883_vaiott_init_hook(struct hda_codec *codec)
+static void alc883_vaiott_setup(struct hda_codec *codec)
 {
 	struct alc_spec *spec = codec->spec;
 
 	spec->autocfg.hp_pins[0] = 0x15;
 	spec->autocfg.speaker_pins[0] = 0x14;
 	spec->autocfg.speaker_pins[1] = 0x17;
-	alc_automute_amp(codec);
 }
 
8863/*
8864 * generic initialization of ADC, input mixers and output mixers
8865 */
8866static struct hda_verb alc883_auto_init_verbs[] = {
8867 /*
8868 * Unmute ADC0-2 and set the default input to mic-in
8869 */
8870 {0x08, AC_VERB_SET_CONNECT_SEL, 0x00},
8871 {0x08, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
8872 {0x09, AC_VERB_SET_CONNECT_SEL, 0x00},
8873 {0x09, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
8874
8875 /* Mute input amps (CD, Line In, Mic 1 & Mic 2) of the analog-loopback
8876 * mixer widget
8877 * Note: PASD motherboards uses the Line In 2 as the input for
8878 * front panel mic (mic 2)
8879 */
8880 /* Amp Indices: Mic1 = 0, Mic2 = 1, Line1 = 2, Line2 = 3, CD = 4 */
8881 {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
8882 {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
8883 {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
8884 {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
8885 {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
8886
8887 /*
8888 * Set up output mixers (0x0c - 0x0f)
8889 */
8890 /* set vol=0 to output mixers */
8891 {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
8892 {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
8893 {0x0e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
8894 {0x0f, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
8895 /* set up input amps for analog loopback */
8896 /* Amp Indices: DAC = 0, mixer = 1 */
8897 {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
8898 {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
8899 {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
8900 {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
8901 {0x0e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
8902 {0x0e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
8903 {0x0f, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
8904 {0x0f, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
8905 {0x26, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
8906 {0x26, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
8907
8908 /* FIXME: use matrix-type input source selection */
8909 /* Mixer elements: 0x18, 19, 1a, 1b, 1c, 1d, 14, 15, 16, 17, 0b */
8910 /* Input mixer1 */
8911 {0x23, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
8912 {0x23, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
8913 {0x23, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(2)},
8914 /* {0x23, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(3)}, */
8915 {0x23, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(4)},
8916 /* Input mixer2 */
8917 {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
8918 {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
8919 {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(2)},
8920 /* {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(3)}, */
8921 {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(4)},
8922
8923 { }
8924};
8925
 static struct hda_verb alc888_asus_m90v_verbs[] = {
 	{0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
 	{0x23, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
@@ -8933,19 +8602,7 @@ static struct hda_verb alc888_asus_m90v_verbs[] = {
 	{ } /* end */
 };
 
-static void alc883_nb_mic_automute(struct hda_codec *codec)
-{
-	unsigned int present;
-
-	present = snd_hda_codec_read(codec, 0x18, 0,
-				     AC_VERB_GET_PIN_SENSE, 0) & 0x80000000;
-	snd_hda_codec_write(codec, 0x23, 0, AC_VERB_SET_AMP_GAIN_MUTE,
-			    0x7000 | (0x00 << 8) | (present ? 0 : 0x80));
-	snd_hda_codec_write(codec, 0x23, 0, AC_VERB_SET_AMP_GAIN_MUTE,
-			    0x7000 | (0x01 << 8) | (present ? 0x80 : 0));
-}
-
-static void alc883_M90V_init_hook(struct hda_codec *codec)
+static void alc883_mode2_setup(struct hda_codec *codec)
 {
 	struct alc_spec *spec = codec->spec;
 
@@ -8953,26 +8610,11 @@ static void alc883_M90V_init_hook(struct hda_codec *codec)
 	spec->autocfg.speaker_pins[0] = 0x14;
 	spec->autocfg.speaker_pins[1] = 0x15;
 	spec->autocfg.speaker_pins[2] = 0x16;
-	alc_automute_pin(codec);
-}
-
-static void alc883_mode2_unsol_event(struct hda_codec *codec,
-				     unsigned int res)
-{
-	switch (res >> 26) {
-	case ALC880_MIC_EVENT:
-		alc883_nb_mic_automute(codec);
-		break;
-	default:
-		alc_sku_unsol_event(codec, res);
-		break;
-	}
-}
-
-static void alc883_mode2_inithook(struct hda_codec *codec)
-{
-	alc883_M90V_init_hook(codec);
-	alc883_nb_mic_automute(codec);
+	spec->ext_mic.pin = 0x18;
+	spec->int_mic.pin = 0x19;
+	spec->ext_mic.mux_idx = 0;
+	spec->int_mic.mux_idx = 1;
+	spec->auto_mic = 1;
 }
 
 static struct hda_verb alc888_asus_eee1601_verbs[] = {
@@ -9033,25 +8675,44 @@ static void alc889A_mb31_unsol_event(struct hda_codec *codec, unsigned int res)
 	alc889A_mb31_automute(codec);
 }
 
+
 #ifdef CONFIG_SND_HDA_POWER_SAVE
-#define alc883_loopbacks	alc880_loopbacks
+#define alc882_loopbacks	alc880_loopbacks
 #endif
 
 /* pcm configuration: identical with ALC880 */
-#define alc883_pcm_analog_playback	alc880_pcm_analog_playback
-#define alc883_pcm_analog_capture	alc880_pcm_analog_capture
-#define alc883_pcm_analog_alt_capture	alc880_pcm_analog_alt_capture
-#define alc883_pcm_digital_playback	alc880_pcm_digital_playback
-#define alc883_pcm_digital_capture	alc880_pcm_digital_capture
+#define alc882_pcm_analog_playback	alc880_pcm_analog_playback
+#define alc882_pcm_analog_capture	alc880_pcm_analog_capture
+#define alc882_pcm_digital_playback	alc880_pcm_digital_playback
+#define alc882_pcm_digital_capture	alc880_pcm_digital_capture
+
+static hda_nid_t alc883_slave_dig_outs[] = {
+	ALC1200_DIGOUT_NID, 0,
+};
+
+static hda_nid_t alc1200_slave_dig_outs[] = {
+	ALC883_DIGOUT_NID, 0,
+};
 
 /*
  * configuration and preset
 */
-static const char *alc883_models[ALC883_MODEL_LAST] = {
-	[ALC883_3ST_2ch_DIG]	= "3stack-dig",
+static const char *alc882_models[ALC882_MODEL_LAST] = {
+	[ALC882_3ST_DIG]	= "3stack-dig",
+	[ALC882_6ST_DIG]	= "6stack-dig",
+	[ALC882_ARIMA]		= "arima",
+	[ALC882_W2JC]		= "w2jc",
+	[ALC882_TARGA]		= "targa",
+	[ALC882_ASUS_A7J]	= "asus-a7j",
+	[ALC882_ASUS_A7M]	= "asus-a7m",
+	[ALC885_MACPRO]		= "macpro",
+	[ALC885_MB5]		= "mb5",
+	[ALC885_MBP3]		= "mbp3",
+	[ALC885_IMAC24]		= "imac24",
+	[ALC883_3ST_2ch_DIG]	= "3stack-2ch-dig",
 	[ALC883_3ST_6ch_DIG]	= "3stack-6ch-dig",
 	[ALC883_3ST_6ch]	= "3stack-6ch",
-	[ALC883_6ST_DIG]	= "6stack-dig",
+	[ALC883_6ST_DIG]	= "alc883-6stack-dig",
 	[ALC883_TARGA_DIG]	= "targa-dig",
 	[ALC883_TARGA_2ch_DIG]	= "targa-2ch-dig",
 	[ALC883_TARGA_8ch_DIG]	= "targa-8ch-dig",
@@ -9060,6 +8721,7 @@ static const char *alc883_models[ALC883_MODEL_LAST] = {
 	[ALC888_ACER_ASPIRE_4930G]	= "acer-aspire-4930g",
 	[ALC888_ACER_ASPIRE_6530G]	= "acer-aspire-6530g",
 	[ALC888_ACER_ASPIRE_8930G]	= "acer-aspire-8930g",
+	[ALC888_ACER_ASPIRE_7730G]	= "acer-aspire-7730g",
 	[ALC883_MEDION]		= "medion",
 	[ALC883_MEDION_MD2]	= "medion-md2",
 	[ALC883_LAPTOP_EAPD]	= "laptop-eapd",
@@ -9071,18 +8733,22 @@ static const char *alc883_models[ALC883_MODEL_LAST] = {
 	[ALC888_3ST_HP]		= "3stack-hp",
 	[ALC888_6ST_DELL]	= "6stack-dell",
 	[ALC883_MITAC]		= "mitac",
+	[ALC883_CLEVO_M540R]	= "clevo-m540r",
 	[ALC883_CLEVO_M720]	= "clevo-m720",
 	[ALC883_FUJITSU_PI2515]	= "fujitsu-pi2515",
 	[ALC888_FUJITSU_XA3530]	= "fujitsu-xa3530",
 	[ALC883_3ST_6ch_INTEL]	= "3stack-6ch-intel",
+	[ALC889A_INTEL]		= "intel-alc889a",
+	[ALC889_INTEL]		= "intel-x58",
 	[ALC1200_ASUS_P5Q]	= "asus-p5q",
 	[ALC889A_MB31]		= "mb31",
 	[ALC883_SONY_VAIO_TT]	= "sony-vaio-tt",
-	[ALC883_AUTO]		= "auto",
+	[ALC882_AUTO]		= "auto",
 };
 
-static struct snd_pci_quirk alc883_cfg_tbl[] = {
-	SND_PCI_QUIRK(0x1019, 0x6668, "ECS", ALC883_3ST_6ch_DIG),
+static struct snd_pci_quirk alc882_cfg_tbl[] = {
+	SND_PCI_QUIRK(0x1019, 0x6668, "ECS", ALC882_6ST_DIG),
+
 	SND_PCI_QUIRK(0x1025, 0x006c, "Acer Aspire 9810", ALC883_ACER_ASPIRE),
 	SND_PCI_QUIRK(0x1025, 0x0090, "Acer Aspire", ALC883_ACER_ASPIRE),
 	SND_PCI_QUIRK(0x1025, 0x010a, "Acer Ferrari 5000", ALC883_ACER_ASPIRE),
@@ -9097,40 +8763,56 @@ static struct snd_pci_quirk alc883_cfg_tbl[] = {
 					ALC888_ACER_ASPIRE_8930G),
 	SND_PCI_QUIRK(0x1025, 0x0146, "Acer Aspire 6935G",
 					ALC888_ACER_ASPIRE_8930G),
-	SND_PCI_QUIRK(0x1025, 0x0157, "Acer X3200", ALC883_AUTO),
-	SND_PCI_QUIRK(0x1025, 0x0158, "Acer AX1700-U3700A", ALC883_AUTO),
+	SND_PCI_QUIRK(0x1025, 0x0157, "Acer X3200", ALC882_AUTO),
+	SND_PCI_QUIRK(0x1025, 0x0158, "Acer AX1700-U3700A", ALC882_AUTO),
 	SND_PCI_QUIRK(0x1025, 0x015e, "Acer Aspire 6930G",
 					ALC888_ACER_ASPIRE_6530G),
 	SND_PCI_QUIRK(0x1025, 0x0166, "Acer Aspire 6530G",
 					ALC888_ACER_ASPIRE_6530G),
+	SND_PCI_QUIRK(0x1025, 0x0142, "Acer Aspire 7730G",
+					ALC888_ACER_ASPIRE_7730G),
 	/* default Acer -- disabled as it causes more problems.
 	 *    model=auto should work fine now
 	 */
 	/* SND_PCI_QUIRK_VENDOR(0x1025, "Acer laptop", ALC883_ACER), */
+
 	SND_PCI_QUIRK(0x1028, 0x020d, "Dell Inspiron 530", ALC888_6ST_DELL),
+
 	SND_PCI_QUIRK(0x103c, 0x2a3d, "HP Pavillion", ALC883_6ST_DIG),
 	SND_PCI_QUIRK(0x103c, 0x2a4f, "HP Samba", ALC888_3ST_HP),
 	SND_PCI_QUIRK(0x103c, 0x2a60, "HP Lucknow", ALC888_3ST_HP),
 	SND_PCI_QUIRK(0x103c, 0x2a61, "HP Nettle", ALC883_6ST_DIG),
 	SND_PCI_QUIRK(0x103c, 0x2a66, "HP Acacia", ALC888_3ST_HP),
 	SND_PCI_QUIRK(0x103c, 0x2a72, "HP Educ.ar", ALC888_3ST_HP),
+
+	SND_PCI_QUIRK(0x1043, 0x060d, "Asus A7J", ALC882_ASUS_A7J),
+	SND_PCI_QUIRK(0x1043, 0x1243, "Asus A7J", ALC882_ASUS_A7J),
+	SND_PCI_QUIRK(0x1043, 0x13c2, "Asus A7M", ALC882_ASUS_A7M),
 	SND_PCI_QUIRK(0x1043, 0x1873, "Asus M90V", ALC888_ASUS_M90V),
+	SND_PCI_QUIRK(0x1043, 0x1971, "Asus W2JC", ALC882_W2JC),
+	SND_PCI_QUIRK(0x1043, 0x817f, "Asus P5LD2", ALC882_6ST_DIG),
+	SND_PCI_QUIRK(0x1043, 0x81d8, "Asus P5WD", ALC882_6ST_DIG),
 	SND_PCI_QUIRK(0x1043, 0x8249, "Asus M2A-VM HDMI", ALC883_3ST_6ch_DIG),
 	SND_PCI_QUIRK(0x1043, 0x8284, "Asus Z37E", ALC883_6ST_DIG),
 	SND_PCI_QUIRK(0x1043, 0x82fe, "Asus P5Q-EM HDMI", ALC1200_ASUS_P5Q),
 	SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_ASUS_EEE1601),
+
+	SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC883_SONY_VAIO_TT),
 	SND_PCI_QUIRK(0x105b, 0x0ce8, "Foxconn P35AX-S", ALC883_6ST_DIG),
-	SND_PCI_QUIRK(0x105b, 0x6668, "Foxconn", ALC883_6ST_DIG),
+	SND_PCI_QUIRK(0x105b, 0x6668, "Foxconn", ALC882_6ST_DIG),
 	SND_PCI_QUIRK(0x1071, 0x8227, "Mitac 82801H", ALC883_MITAC),
 	SND_PCI_QUIRK(0x1071, 0x8253, "Mitac 8252d", ALC883_MITAC),
 	SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC883_LAPTOP_EAPD),
 	SND_PCI_QUIRK(0x10f1, 0x2350, "TYAN-S2350", ALC888_6ST_DELL),
 	SND_PCI_QUIRK(0x108e, 0x534d, NULL, ALC883_3ST_6ch),
-	SND_PCI_QUIRK(0x1458, 0xa002, "MSI", ALC883_6ST_DIG),
+	SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte P35 DS3R", ALC882_6ST_DIG),
+
 	SND_PCI_QUIRK(0x1462, 0x0349, "MSI", ALC883_TARGA_2ch_DIG),
 	SND_PCI_QUIRK(0x1462, 0x040d, "MSI", ALC883_TARGA_2ch_DIG),
 	SND_PCI_QUIRK(0x1462, 0x0579, "MSI", ALC883_TARGA_2ch_DIG),
+	SND_PCI_QUIRK(0x1462, 0x28fb, "Targa T8", ALC882_TARGA), /* MSI-1049 T8 */
 	SND_PCI_QUIRK(0x1462, 0x2fb3, "MSI", ALC883_TARGA_2ch_DIG),
+	SND_PCI_QUIRK(0x1462, 0x6668, "MSI", ALC882_6ST_DIG),
 	SND_PCI_QUIRK(0x1462, 0x3729, "MSI S420", ALC883_TARGA_DIG),
 	SND_PCI_QUIRK(0x1462, 0x3783, "NEC S970", ALC883_TARGA_DIG),
 	SND_PCI_QUIRK(0x1462, 0x3b7f, "MSI", ALC883_TARGA_2ch_DIG),
@@ -9139,6 +8821,7 @@ static struct snd_pci_quirk alc883_cfg_tbl[] = {
9139 SND_PCI_QUIRK(0x1462, 0x3fc3, "MSI", ALC883_TARGA_DIG), 8821 SND_PCI_QUIRK(0x1462, 0x3fc3, "MSI", ALC883_TARGA_DIG),
9140 SND_PCI_QUIRK(0x1462, 0x3fcc, "MSI", ALC883_TARGA_DIG), 8822 SND_PCI_QUIRK(0x1462, 0x3fcc, "MSI", ALC883_TARGA_DIG),
9141 SND_PCI_QUIRK(0x1462, 0x3fdf, "MSI", ALC883_TARGA_DIG), 8823 SND_PCI_QUIRK(0x1462, 0x3fdf, "MSI", ALC883_TARGA_DIG),
8824 SND_PCI_QUIRK(0x1462, 0x42cd, "MSI", ALC883_TARGA_DIG),
9142 SND_PCI_QUIRK(0x1462, 0x4314, "MSI", ALC883_TARGA_DIG), 8825 SND_PCI_QUIRK(0x1462, 0x4314, "MSI", ALC883_TARGA_DIG),
9143 SND_PCI_QUIRK(0x1462, 0x4319, "MSI", ALC883_TARGA_DIG), 8826 SND_PCI_QUIRK(0x1462, 0x4319, "MSI", ALC883_TARGA_DIG),
9144 SND_PCI_QUIRK(0x1462, 0x4324, "MSI", ALC883_TARGA_DIG), 8827 SND_PCI_QUIRK(0x1462, 0x4324, "MSI", ALC883_TARGA_DIG),
@@ -9152,11 +8835,15 @@ static struct snd_pci_quirk alc883_cfg_tbl[] = {
9152 SND_PCI_QUIRK(0x1462, 0x7327, "MSI", ALC883_6ST_DIG), 8835 SND_PCI_QUIRK(0x1462, 0x7327, "MSI", ALC883_6ST_DIG),
9153 SND_PCI_QUIRK(0x1462, 0x7350, "MSI", ALC883_6ST_DIG), 8836 SND_PCI_QUIRK(0x1462, 0x7350, "MSI", ALC883_6ST_DIG),
9154 SND_PCI_QUIRK(0x1462, 0xa422, "MSI", ALC883_TARGA_2ch_DIG), 8837 SND_PCI_QUIRK(0x1462, 0xa422, "MSI", ALC883_TARGA_2ch_DIG),
8838 SND_PCI_QUIRK(0x1462, 0xaa08, "MSI", ALC883_TARGA_2ch_DIG),
8839
9155 SND_PCI_QUIRK(0x147b, 0x1083, "Abit IP35-PRO", ALC883_6ST_DIG), 8840 SND_PCI_QUIRK(0x147b, 0x1083, "Abit IP35-PRO", ALC883_6ST_DIG),
9156 SND_PCI_QUIRK(0x1558, 0x0721, "Clevo laptop M720R", ALC883_CLEVO_M720), 8841 SND_PCI_QUIRK(0x1558, 0x0721, "Clevo laptop M720R", ALC883_CLEVO_M720),
9157 SND_PCI_QUIRK(0x1558, 0x0722, "Clevo laptop M720SR", ALC883_CLEVO_M720), 8842 SND_PCI_QUIRK(0x1558, 0x0722, "Clevo laptop M720SR", ALC883_CLEVO_M720),
8843 SND_PCI_QUIRK(0x1558, 0x5409, "Clevo laptop M540R", ALC883_CLEVO_M540R),
9158 SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC883_LAPTOP_EAPD), 8844 SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC883_LAPTOP_EAPD),
9159 SND_PCI_QUIRK(0x15d9, 0x8780, "Supermicro PDSBA", ALC883_3ST_6ch), 8845 SND_PCI_QUIRK(0x15d9, 0x8780, "Supermicro PDSBA", ALC883_3ST_6ch),
8846 /* SND_PCI_QUIRK(0x161f, 0x2054, "Arima W820", ALC882_ARIMA), */
9160 SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_MEDION), 8847 SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_MEDION),
9161 SND_PCI_QUIRK_MASK(0x1734, 0xfff0, 0x1100, "FSC AMILO Xi/Pi25xx", 8848 SND_PCI_QUIRK_MASK(0x1734, 0xfff0, 0x1100, "FSC AMILO Xi/Pi25xx",
9162 ALC883_FUJITSU_PI2515), 8849 ALC883_FUJITSU_PI2515),
@@ -9171,24 +8858,186 @@ static struct snd_pci_quirk alc883_cfg_tbl[] = {
9171 SND_PCI_QUIRK(0x17c0, 0x4085, "MEDION MD96630", ALC888_LENOVO_MS7195_DIG), 8858 SND_PCI_QUIRK(0x17c0, 0x4085, "MEDION MD96630", ALC888_LENOVO_MS7195_DIG),
9172 SND_PCI_QUIRK(0x17f2, 0x5000, "Albatron KI690-AM2", ALC883_6ST_DIG), 8859 SND_PCI_QUIRK(0x17f2, 0x5000, "Albatron KI690-AM2", ALC883_6ST_DIG),
9173 SND_PCI_QUIRK(0x1991, 0x5625, "Haier W66", ALC883_HAIER_W66), 8860 SND_PCI_QUIRK(0x1991, 0x5625, "Haier W66", ALC883_HAIER_W66),
8861
9174 SND_PCI_QUIRK(0x8086, 0x0001, "DG33BUC", ALC883_3ST_6ch_INTEL), 8862 SND_PCI_QUIRK(0x8086, 0x0001, "DG33BUC", ALC883_3ST_6ch_INTEL),
9175 SND_PCI_QUIRK(0x8086, 0x0002, "DG33FBC", ALC883_3ST_6ch_INTEL), 8863 SND_PCI_QUIRK(0x8086, 0x0002, "DG33FBC", ALC883_3ST_6ch_INTEL),
9176 SND_PCI_QUIRK(0x8086, 0x2503, "82801H", ALC883_MITAC), 8864 SND_PCI_QUIRK(0x8086, 0x2503, "82801H", ALC883_MITAC),
9177 SND_PCI_QUIRK(0x8086, 0x0022, "DX58SO", ALC883_3ST_6ch_INTEL), 8865 SND_PCI_QUIRK(0x8086, 0x0022, "DX58SO", ALC889_INTEL),
8866 SND_PCI_QUIRK(0x8086, 0x0021, "Intel IbexPeak", ALC889A_INTEL),
8867 SND_PCI_QUIRK(0x8086, 0x3b56, "Intel IbexPeak", ALC889A_INTEL),
9178 SND_PCI_QUIRK(0x8086, 0xd601, "D102GGC", ALC883_3ST_6ch), 8868 SND_PCI_QUIRK(0x8086, 0xd601, "D102GGC", ALC883_3ST_6ch),
9179 SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC883_SONY_VAIO_TT),
9180 {}
9181};
9182 8869
9183static hda_nid_t alc883_slave_dig_outs[] = { 8870 {}
9184 ALC1200_DIGOUT_NID, 0,
9185}; 8871};
9186 8872
9187static hda_nid_t alc1200_slave_dig_outs[] = { 8873/* codec SSID table for Intel Mac */
9188 ALC883_DIGOUT_NID, 0, 8874static struct snd_pci_quirk alc882_ssid_cfg_tbl[] = {
8875 SND_PCI_QUIRK(0x106b, 0x00a0, "MacBookPro 3,1", ALC885_MBP3),
8876 SND_PCI_QUIRK(0x106b, 0x00a1, "Macbook", ALC885_MBP3),
8877 SND_PCI_QUIRK(0x106b, 0x00a4, "MacbookPro 4,1", ALC885_MBP3),
8878 SND_PCI_QUIRK(0x106b, 0x0c00, "Mac Pro", ALC885_MACPRO),
8879 SND_PCI_QUIRK(0x106b, 0x1000, "iMac 24", ALC885_IMAC24),
8880 SND_PCI_QUIRK(0x106b, 0x2800, "AppleTV", ALC885_IMAC24),
8881 SND_PCI_QUIRK(0x106b, 0x2c00, "MacbookPro rev3", ALC885_MBP3),
8882 SND_PCI_QUIRK(0x106b, 0x3600, "Macbook 3,1", ALC889A_MB31),
8883 SND_PCI_QUIRK(0x106b, 0x3800, "MacbookPro 4,1", ALC885_MBP3),
8884 SND_PCI_QUIRK(0x106b, 0x3e00, "iMac 24 Aluminum", ALC885_IMAC24),
8885 SND_PCI_QUIRK(0x106b, 0x3f00, "Macbook 5,1", ALC885_MB5),
8886 /* FIXME: HP jack sensing does not seem to work on the MBP 5,1, so
8887 * there is no perfect solution yet
8888 */
8889 SND_PCI_QUIRK(0x106b, 0x4000, "MacbookPro 5,1", ALC885_MB5),
8890 {} /* terminator */
9189}; 8891};
9190 8892
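For reference, each SND_PCI_QUIRK() entry in the tables above pairs a PCI (or, for the SSID table, codec) subsystem vendor/device ID with a model preset, and the driver takes the first matching entry, falling back to BIOS auto-probing when nothing matches. Below is a minimal standalone sketch of that lookup; the struct, macro and model names are invented for illustration and are not the real ALSA types.

#include <stdio.h>

/* illustrative stand-ins for snd_pci_quirk / SND_PCI_QUIRK(); not the ALSA API */
struct quirk { unsigned short subvendor, subdevice; const char *name; int model; };
#define QUIRK(vend, dev, name, model) { (vend), (dev), (name), (model) }

enum { MODEL_AUTO, MODEL_6ST_DIG, MODEL_ACER_ASPIRE };

static const struct quirk cfg_tbl[] = {
	QUIRK(0x1019, 0x6668, "ECS",              MODEL_6ST_DIG),
	QUIRK(0x1025, 0x006c, "Acer Aspire 9810", MODEL_ACER_ASPIRE),
	{ 0 }	/* terminator */
};

static int lookup_model(unsigned short vend, unsigned short dev)
{
	const struct quirk *q;

	for (q = cfg_tbl; q->subvendor; q++)
		if (q->subvendor == vend && q->subdevice == dev)
			return q->model;
	return MODEL_AUTO;	/* fall back to BIOS auto-probing */
}

int main(void)
{
	printf("model = %d\n", lookup_model(0x1025, 0x006c));
	return 0;
}
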
9191static struct alc_config_preset alc883_presets[] = { 8893static struct alc_config_preset alc882_presets[] = {
8894 [ALC882_3ST_DIG] = {
8895 .mixers = { alc882_base_mixer },
8896 .init_verbs = { alc882_base_init_verbs,
8897 alc882_adc1_init_verbs },
8898 .num_dacs = ARRAY_SIZE(alc882_dac_nids),
8899 .dac_nids = alc882_dac_nids,
8900 .dig_out_nid = ALC882_DIGOUT_NID,
8901 .dig_in_nid = ALC882_DIGIN_NID,
8902 .num_channel_mode = ARRAY_SIZE(alc882_ch_modes),
8903 .channel_mode = alc882_ch_modes,
8904 .need_dac_fix = 1,
8905 .input_mux = &alc882_capture_source,
8906 },
8907 [ALC882_6ST_DIG] = {
8908 .mixers = { alc882_base_mixer, alc882_chmode_mixer },
8909 .init_verbs = { alc882_base_init_verbs,
8910 alc882_adc1_init_verbs },
8911 .num_dacs = ARRAY_SIZE(alc882_dac_nids),
8912 .dac_nids = alc882_dac_nids,
8913 .dig_out_nid = ALC882_DIGOUT_NID,
8914 .dig_in_nid = ALC882_DIGIN_NID,
8915 .num_channel_mode = ARRAY_SIZE(alc882_sixstack_modes),
8916 .channel_mode = alc882_sixstack_modes,
8917 .input_mux = &alc882_capture_source,
8918 },
8919 [ALC882_ARIMA] = {
8920 .mixers = { alc882_base_mixer, alc882_chmode_mixer },
8921 .init_verbs = { alc882_base_init_verbs, alc882_adc1_init_verbs,
8922 alc882_eapd_verbs },
8923 .num_dacs = ARRAY_SIZE(alc882_dac_nids),
8924 .dac_nids = alc882_dac_nids,
8925 .num_channel_mode = ARRAY_SIZE(alc882_sixstack_modes),
8926 .channel_mode = alc882_sixstack_modes,
8927 .input_mux = &alc882_capture_source,
8928 },
8929 [ALC882_W2JC] = {
8930 .mixers = { alc882_w2jc_mixer, alc882_chmode_mixer },
8931 .init_verbs = { alc882_base_init_verbs, alc882_adc1_init_verbs,
8932 alc882_eapd_verbs, alc880_gpio1_init_verbs },
8933 .num_dacs = ARRAY_SIZE(alc882_dac_nids),
8934 .dac_nids = alc882_dac_nids,
8935 .num_channel_mode = ARRAY_SIZE(alc880_threestack_modes),
8936 .channel_mode = alc880_threestack_modes,
8937 .need_dac_fix = 1,
8938 .input_mux = &alc882_capture_source,
8939 .dig_out_nid = ALC882_DIGOUT_NID,
8940 },
8941 [ALC885_MBP3] = {
8942 .mixers = { alc885_mbp3_mixer, alc882_chmode_mixer },
8943 .init_verbs = { alc885_mbp3_init_verbs,
8944 alc880_gpio1_init_verbs },
8945 .num_dacs = 2,
8946 .dac_nids = alc882_dac_nids,
8947 .hp_nid = 0x04,
8948 .channel_mode = alc885_mbp_4ch_modes,
8949 .num_channel_mode = ARRAY_SIZE(alc885_mbp_4ch_modes),
8950 .input_mux = &alc882_capture_source,
8951 .dig_out_nid = ALC882_DIGOUT_NID,
8952 .dig_in_nid = ALC882_DIGIN_NID,
8953 .unsol_event = alc_automute_amp_unsol_event,
8954 .setup = alc885_mbp3_setup,
8955 .init_hook = alc_automute_amp,
8956 },
8957 [ALC885_MB5] = {
8958 .mixers = { alc885_mb5_mixer, alc882_chmode_mixer },
8959 .init_verbs = { alc885_mb5_init_verbs,
8960 alc880_gpio1_init_verbs },
8961 .num_dacs = ARRAY_SIZE(alc882_dac_nids),
8962 .dac_nids = alc882_dac_nids,
8963 .channel_mode = alc885_mb5_6ch_modes,
8964 .num_channel_mode = ARRAY_SIZE(alc885_mb5_6ch_modes),
8965 .input_mux = &mb5_capture_source,
8966 .dig_out_nid = ALC882_DIGOUT_NID,
8967 .dig_in_nid = ALC882_DIGIN_NID,
8968 },
8969 [ALC885_MACPRO] = {
8970 .mixers = { alc882_macpro_mixer },
8971 .init_verbs = { alc882_macpro_init_verbs },
8972 .num_dacs = ARRAY_SIZE(alc882_dac_nids),
8973 .dac_nids = alc882_dac_nids,
8974 .dig_out_nid = ALC882_DIGOUT_NID,
8975 .dig_in_nid = ALC882_DIGIN_NID,
8976 .num_channel_mode = ARRAY_SIZE(alc882_ch_modes),
8977 .channel_mode = alc882_ch_modes,
8978 .input_mux = &alc882_capture_source,
8979 .init_hook = alc885_macpro_init_hook,
8980 },
8981 [ALC885_IMAC24] = {
8982 .mixers = { alc885_imac24_mixer },
8983 .init_verbs = { alc885_imac24_init_verbs },
8984 .num_dacs = ARRAY_SIZE(alc882_dac_nids),
8985 .dac_nids = alc882_dac_nids,
8986 .dig_out_nid = ALC882_DIGOUT_NID,
8987 .dig_in_nid = ALC882_DIGIN_NID,
8988 .num_channel_mode = ARRAY_SIZE(alc882_ch_modes),
8989 .channel_mode = alc882_ch_modes,
8990 .input_mux = &alc882_capture_source,
8991 .unsol_event = alc_automute_amp_unsol_event,
8992 .setup = alc885_imac24_setup,
8993 .init_hook = alc885_imac24_init_hook,
8994 },
8995 [ALC882_TARGA] = {
8996 .mixers = { alc882_targa_mixer, alc882_chmode_mixer },
8997 .init_verbs = { alc882_base_init_verbs, alc882_adc1_init_verbs,
8998 alc880_gpio3_init_verbs, alc882_targa_verbs},
8999 .num_dacs = ARRAY_SIZE(alc882_dac_nids),
9000 .dac_nids = alc882_dac_nids,
9001 .dig_out_nid = ALC882_DIGOUT_NID,
9002 .num_adc_nids = ARRAY_SIZE(alc882_adc_nids),
9003 .adc_nids = alc882_adc_nids,
9004 .capsrc_nids = alc882_capsrc_nids,
9005 .num_channel_mode = ARRAY_SIZE(alc882_3ST_6ch_modes),
9006 .channel_mode = alc882_3ST_6ch_modes,
9007 .need_dac_fix = 1,
9008 .input_mux = &alc882_capture_source,
9009 .unsol_event = alc882_targa_unsol_event,
9010 .setup = alc882_targa_setup,
9011 .init_hook = alc882_targa_automute,
9012 },
9013 [ALC882_ASUS_A7J] = {
9014 .mixers = { alc882_asus_a7j_mixer, alc882_chmode_mixer },
9015 .init_verbs = { alc882_base_init_verbs, alc882_adc1_init_verbs,
9016 alc882_asus_a7j_verbs},
9017 .num_dacs = ARRAY_SIZE(alc882_dac_nids),
9018 .dac_nids = alc882_dac_nids,
9019 .dig_out_nid = ALC882_DIGOUT_NID,
9020 .num_adc_nids = ARRAY_SIZE(alc882_adc_nids),
9021 .adc_nids = alc882_adc_nids,
9022 .capsrc_nids = alc882_capsrc_nids,
9023 .num_channel_mode = ARRAY_SIZE(alc882_3ST_6ch_modes),
9024 .channel_mode = alc882_3ST_6ch_modes,
9025 .need_dac_fix = 1,
9026 .input_mux = &alc882_capture_source,
9027 },
9028 [ALC882_ASUS_A7M] = {
9029 .mixers = { alc882_asus_a7m_mixer, alc882_chmode_mixer },
9030 .init_verbs = { alc882_base_init_verbs, alc882_adc1_init_verbs,
9031 alc882_eapd_verbs, alc880_gpio1_init_verbs,
9032 alc882_asus_a7m_verbs },
9033 .num_dacs = ARRAY_SIZE(alc882_dac_nids),
9034 .dac_nids = alc882_dac_nids,
9035 .dig_out_nid = ALC882_DIGOUT_NID,
9036 .num_channel_mode = ARRAY_SIZE(alc880_threestack_modes),
9037 .channel_mode = alc880_threestack_modes,
9038 .need_dac_fix = 1,
9039 .input_mux = &alc882_capture_source,
9040 },
9192 [ALC883_3ST_2ch_DIG] = { 9041 [ALC883_3ST_2ch_DIG] = {
9193 .mixers = { alc883_3ST_2ch_mixer }, 9042 .mixers = { alc883_3ST_2ch_mixer },
9194 .init_verbs = { alc883_init_verbs }, 9043 .init_verbs = { alc883_init_verbs },
@@ -9235,6 +9084,46 @@ static struct alc_config_preset alc883_presets[] = {
9235 .need_dac_fix = 1, 9084 .need_dac_fix = 1,
9236 .input_mux = &alc883_3stack_6ch_intel, 9085 .input_mux = &alc883_3stack_6ch_intel,
9237 }, 9086 },
9087 [ALC889A_INTEL] = {
9088 .mixers = { alc885_8ch_intel_mixer, alc883_chmode_mixer },
9089 .init_verbs = { alc885_init_verbs, alc885_init_input_verbs,
9090 alc_hp15_unsol_verbs },
9091 .num_dacs = ARRAY_SIZE(alc883_dac_nids),
9092 .dac_nids = alc883_dac_nids,
9093 .num_adc_nids = ARRAY_SIZE(alc889_adc_nids),
9094 .adc_nids = alc889_adc_nids,
9095 .dig_out_nid = ALC883_DIGOUT_NID,
9096 .dig_in_nid = ALC883_DIGIN_NID,
9097 .slave_dig_outs = alc883_slave_dig_outs,
9098 .num_channel_mode = ARRAY_SIZE(alc889_8ch_intel_modes),
9099 .channel_mode = alc889_8ch_intel_modes,
9100 .capsrc_nids = alc889_capsrc_nids,
9101 .input_mux = &alc889_capture_source,
9102 .setup = alc889_automute_setup,
9103 .init_hook = alc_automute_amp,
9104 .unsol_event = alc_automute_amp_unsol_event,
9105 .need_dac_fix = 1,
9106 },
9107 [ALC889_INTEL] = {
9108 .mixers = { alc885_8ch_intel_mixer, alc883_chmode_mixer },
9109 .init_verbs = { alc885_init_verbs, alc889_init_input_verbs,
9110 alc889_eapd_verbs, alc_hp15_unsol_verbs},
9111 .num_dacs = ARRAY_SIZE(alc883_dac_nids),
9112 .dac_nids = alc883_dac_nids,
9113 .num_adc_nids = ARRAY_SIZE(alc889_adc_nids),
9114 .adc_nids = alc889_adc_nids,
9115 .dig_out_nid = ALC883_DIGOUT_NID,
9116 .dig_in_nid = ALC883_DIGIN_NID,
9117 .slave_dig_outs = alc883_slave_dig_outs,
9118 .num_channel_mode = ARRAY_SIZE(alc889_8ch_intel_modes),
9119 .channel_mode = alc889_8ch_intel_modes,
9120 .capsrc_nids = alc889_capsrc_nids,
9121 .input_mux = &alc889_capture_source,
9122 .setup = alc889_automute_setup,
9123 .init_hook = alc889_intel_init_hook,
9124 .unsol_event = alc_automute_amp_unsol_event,
9125 .need_dac_fix = 1,
9126 },
9238 [ALC883_6ST_DIG] = { 9127 [ALC883_6ST_DIG] = {
9239 .mixers = { alc883_base_mixer, alc883_chmode_mixer }, 9128 .mixers = { alc883_base_mixer, alc883_chmode_mixer },
9240 .init_verbs = { alc883_init_verbs }, 9129 .init_verbs = { alc883_init_verbs },
@@ -9258,7 +9147,8 @@ static struct alc_config_preset alc883_presets[] = {
9258 .need_dac_fix = 1, 9147 .need_dac_fix = 1,
9259 .input_mux = &alc883_capture_source, 9148 .input_mux = &alc883_capture_source,
9260 .unsol_event = alc883_targa_unsol_event, 9149 .unsol_event = alc883_targa_unsol_event,
9261 .init_hook = alc883_targa_init_hook, 9150 .setup = alc882_targa_setup,
9151 .init_hook = alc882_targa_automute,
9262 }, 9152 },
9263 [ALC883_TARGA_2ch_DIG] = { 9153 [ALC883_TARGA_2ch_DIG] = {
9264 .mixers = { alc883_targa_2ch_mixer}, 9154 .mixers = { alc883_targa_2ch_mixer},
@@ -9273,7 +9163,8 @@ static struct alc_config_preset alc883_presets[] = {
9273 .channel_mode = alc883_3ST_2ch_modes, 9163 .channel_mode = alc883_3ST_2ch_modes,
9274 .input_mux = &alc883_capture_source, 9164 .input_mux = &alc883_capture_source,
9275 .unsol_event = alc883_targa_unsol_event, 9165 .unsol_event = alc883_targa_unsol_event,
9276 .init_hook = alc883_targa_init_hook, 9166 .setup = alc882_targa_setup,
9167 .init_hook = alc882_targa_automute,
9277 }, 9168 },
9278 [ALC883_TARGA_8ch_DIG] = { 9169 [ALC883_TARGA_8ch_DIG] = {
9279 .mixers = { alc883_base_mixer, alc883_chmode_mixer }, 9170 .mixers = { alc883_base_mixer, alc883_chmode_mixer },
@@ -9291,7 +9182,8 @@ static struct alc_config_preset alc883_presets[] = {
9291 .need_dac_fix = 1, 9182 .need_dac_fix = 1,
9292 .input_mux = &alc883_capture_source, 9183 .input_mux = &alc883_capture_source,
9293 .unsol_event = alc883_targa_unsol_event, 9184 .unsol_event = alc883_targa_unsol_event,
9294 .init_hook = alc883_targa_init_hook, 9185 .setup = alc882_targa_setup,
9186 .init_hook = alc882_targa_automute,
9295 }, 9187 },
9296 [ALC883_ACER] = { 9188 [ALC883_ACER] = {
9297 .mixers = { alc883_base_mixer }, 9189 .mixers = { alc883_base_mixer },
@@ -9317,7 +9209,8 @@ static struct alc_config_preset alc883_presets[] = {
9317 .channel_mode = alc883_3ST_2ch_modes, 9209 .channel_mode = alc883_3ST_2ch_modes,
9318 .input_mux = &alc883_capture_source, 9210 .input_mux = &alc883_capture_source,
9319 .unsol_event = alc_automute_amp_unsol_event, 9211 .unsol_event = alc_automute_amp_unsol_event,
9320 .init_hook = alc883_acer_aspire_init_hook, 9212 .setup = alc883_acer_aspire_setup,
9213 .init_hook = alc_automute_amp,
9321 }, 9214 },
9322 [ALC888_ACER_ASPIRE_4930G] = { 9215 [ALC888_ACER_ASPIRE_4930G] = {
9323 .mixers = { alc888_base_mixer, 9216 .mixers = { alc888_base_mixer,
@@ -9337,7 +9230,8 @@ static struct alc_config_preset alc883_presets[] = {
9337 ARRAY_SIZE(alc888_2_capture_sources), 9230 ARRAY_SIZE(alc888_2_capture_sources),
9338 .input_mux = alc888_2_capture_sources, 9231 .input_mux = alc888_2_capture_sources,
9339 .unsol_event = alc_automute_amp_unsol_event, 9232 .unsol_event = alc_automute_amp_unsol_event,
9340 .init_hook = alc888_acer_aspire_4930g_init_hook, 9233 .setup = alc888_acer_aspire_4930g_setup,
9234 .init_hook = alc_automute_amp,
9341 }, 9235 },
9342 [ALC888_ACER_ASPIRE_6530G] = { 9236 [ALC888_ACER_ASPIRE_6530G] = {
9343 .mixers = { alc888_acer_aspire_6530_mixer }, 9237 .mixers = { alc888_acer_aspire_6530_mixer },
@@ -9355,7 +9249,8 @@ static struct alc_config_preset alc883_presets[] = {
9355 ARRAY_SIZE(alc888_2_capture_sources), 9249 ARRAY_SIZE(alc888_2_capture_sources),
9356 .input_mux = alc888_acer_aspire_6530_sources, 9250 .input_mux = alc888_acer_aspire_6530_sources,
9357 .unsol_event = alc_automute_amp_unsol_event, 9251 .unsol_event = alc_automute_amp_unsol_event,
9358 .init_hook = alc888_acer_aspire_6530g_init_hook, 9252 .setup = alc888_acer_aspire_6530g_setup,
9253 .init_hook = alc_automute_amp,
9359 }, 9254 },
9360 [ALC888_ACER_ASPIRE_8930G] = { 9255 [ALC888_ACER_ASPIRE_8930G] = {
9361 .mixers = { alc888_base_mixer, 9256 .mixers = { alc888_base_mixer,
@@ -9376,7 +9271,28 @@ static struct alc_config_preset alc883_presets[] = {
9376 ARRAY_SIZE(alc889_capture_sources), 9271 ARRAY_SIZE(alc889_capture_sources),
9377 .input_mux = alc889_capture_sources, 9272 .input_mux = alc889_capture_sources,
9378 .unsol_event = alc_automute_amp_unsol_event, 9273 .unsol_event = alc_automute_amp_unsol_event,
9379 .init_hook = alc889_acer_aspire_8930g_init_hook, 9274 .setup = alc889_acer_aspire_8930g_setup,
9275 .init_hook = alc_automute_amp,
9276 },
9277 [ALC888_ACER_ASPIRE_7730G] = {
9278 .mixers = { alc883_3ST_6ch_mixer,
9279 alc883_chmode_mixer },
9280 .init_verbs = { alc883_init_verbs, alc880_gpio1_init_verbs,
9281 alc888_acer_aspire_7730G_verbs },
9282 .num_dacs = ARRAY_SIZE(alc883_dac_nids),
9283 .dac_nids = alc883_dac_nids,
9284 .num_adc_nids = ARRAY_SIZE(alc883_adc_nids_rev),
9285 .adc_nids = alc883_adc_nids_rev,
9286 .capsrc_nids = alc883_capsrc_nids_rev,
9287 .dig_out_nid = ALC883_DIGOUT_NID,
9288 .num_channel_mode = ARRAY_SIZE(alc883_3ST_6ch_modes),
9289 .channel_mode = alc883_3ST_6ch_modes,
9290 .need_dac_fix = 1,
9291 .const_channel_count = 6,
9292 .input_mux = &alc883_capture_source,
9293 .unsol_event = alc_automute_amp_unsol_event,
9294 .setup = alc888_acer_aspire_6530g_setup,
9295 .init_hook = alc_automute_amp,
9380 }, 9296 },
9381 [ALC883_MEDION] = { 9297 [ALC883_MEDION] = {
9382 .mixers = { alc883_fivestack_mixer, 9298 .mixers = { alc883_fivestack_mixer,
@@ -9401,7 +9317,8 @@ static struct alc_config_preset alc883_presets[] = {
9401 .channel_mode = alc883_3ST_2ch_modes, 9317 .channel_mode = alc883_3ST_2ch_modes,
9402 .input_mux = &alc883_capture_source, 9318 .input_mux = &alc883_capture_source,
9403 .unsol_event = alc_automute_amp_unsol_event, 9319 .unsol_event = alc_automute_amp_unsol_event,
9404 .init_hook = alc883_medion_md2_init_hook, 9320 .setup = alc883_medion_md2_setup,
9321 .init_hook = alc_automute_amp,
9405 }, 9322 },
9406 [ALC883_LAPTOP_EAPD] = { 9323 [ALC883_LAPTOP_EAPD] = {
9407 .mixers = { alc883_base_mixer }, 9324 .mixers = { alc883_base_mixer },
@@ -9412,6 +9329,21 @@ static struct alc_config_preset alc883_presets[] = {
9412 .channel_mode = alc883_3ST_2ch_modes, 9329 .channel_mode = alc883_3ST_2ch_modes,
9413 .input_mux = &alc883_capture_source, 9330 .input_mux = &alc883_capture_source,
9414 }, 9331 },
9332 [ALC883_CLEVO_M540R] = {
9333 .mixers = { alc883_3ST_6ch_mixer, alc883_chmode_mixer },
9334 .init_verbs = { alc883_init_verbs, alc883_clevo_m540r_verbs },
9335 .num_dacs = ARRAY_SIZE(alc883_dac_nids),
9336 .dac_nids = alc883_dac_nids,
9337 .dig_out_nid = ALC883_DIGOUT_NID,
9338 .dig_in_nid = ALC883_DIGIN_NID,
9339 .num_channel_mode = ARRAY_SIZE(alc883_3ST_6ch_clevo_modes),
9340 .channel_mode = alc883_3ST_6ch_clevo_modes,
9341 .need_dac_fix = 1,
9342 .input_mux = &alc883_capture_source,
9343 /* This machine has hardware HP auto-muting, so no software
9344 * mute via an unsol event is needed
9345 */
9346 },
9415 [ALC883_CLEVO_M720] = { 9347 [ALC883_CLEVO_M720] = {
9416 .mixers = { alc883_clevo_m720_mixer }, 9348 .mixers = { alc883_clevo_m720_mixer },
9417 .init_verbs = { alc883_init_verbs, alc883_clevo_m720_verbs }, 9349 .init_verbs = { alc883_init_verbs, alc883_clevo_m720_verbs },
@@ -9422,6 +9354,7 @@ static struct alc_config_preset alc883_presets[] = {
9422 .channel_mode = alc883_3ST_2ch_modes, 9354 .channel_mode = alc883_3ST_2ch_modes,
9423 .input_mux = &alc883_capture_source, 9355 .input_mux = &alc883_capture_source,
9424 .unsol_event = alc883_clevo_m720_unsol_event, 9356 .unsol_event = alc883_clevo_m720_unsol_event,
9357 .setup = alc883_clevo_m720_setup,
9425 .init_hook = alc883_clevo_m720_init_hook, 9358 .init_hook = alc883_clevo_m720_init_hook,
9426 }, 9359 },
9427 [ALC883_LENOVO_101E_2ch] = { 9360 [ALC883_LENOVO_101E_2ch] = {
@@ -9447,7 +9380,8 @@ static struct alc_config_preset alc883_presets[] = {
9447 .need_dac_fix = 1, 9380 .need_dac_fix = 1,
9448 .input_mux = &alc883_lenovo_nb0763_capture_source, 9381 .input_mux = &alc883_lenovo_nb0763_capture_source,
9449 .unsol_event = alc_automute_amp_unsol_event, 9382 .unsol_event = alc_automute_amp_unsol_event,
9450 .init_hook = alc883_medion_md2_init_hook, 9383 .setup = alc883_medion_md2_setup,
9384 .init_hook = alc_automute_amp,
9451 }, 9385 },
9452 [ALC888_LENOVO_MS7195_DIG] = { 9386 [ALC888_LENOVO_MS7195_DIG] = {
9453 .mixers = { alc883_3ST_6ch_mixer, alc883_chmode_mixer }, 9387 .mixers = { alc883_3ST_6ch_mixer, alc883_chmode_mixer },
@@ -9472,7 +9406,8 @@ static struct alc_config_preset alc883_presets[] = {
9472 .channel_mode = alc883_3ST_2ch_modes, 9406 .channel_mode = alc883_3ST_2ch_modes,
9473 .input_mux = &alc883_capture_source, 9407 .input_mux = &alc883_capture_source,
9474 .unsol_event = alc_automute_amp_unsol_event, 9408 .unsol_event = alc_automute_amp_unsol_event,
9475 .init_hook = alc883_haier_w66_init_hook, 9409 .setup = alc883_haier_w66_setup,
9410 .init_hook = alc_automute_amp,
9476 }, 9411 },
9477 [ALC888_3ST_HP] = { 9412 [ALC888_3ST_HP] = {
9478 .mixers = { alc883_3ST_6ch_mixer, alc883_chmode_mixer }, 9413 .mixers = { alc883_3ST_6ch_mixer, alc883_chmode_mixer },
@@ -9484,7 +9419,8 @@ static struct alc_config_preset alc883_presets[] = {
9484 .need_dac_fix = 1, 9419 .need_dac_fix = 1,
9485 .input_mux = &alc883_capture_source, 9420 .input_mux = &alc883_capture_source,
9486 .unsol_event = alc_automute_amp_unsol_event, 9421 .unsol_event = alc_automute_amp_unsol_event,
9487 .init_hook = alc888_3st_hp_init_hook, 9422 .setup = alc888_3st_hp_setup,
9423 .init_hook = alc_automute_amp,
9488 }, 9424 },
9489 [ALC888_6ST_DELL] = { 9425 [ALC888_6ST_DELL] = {
9490 .mixers = { alc883_base_mixer, alc883_chmode_mixer }, 9426 .mixers = { alc883_base_mixer, alc883_chmode_mixer },
@@ -9497,7 +9433,8 @@ static struct alc_config_preset alc883_presets[] = {
9497 .channel_mode = alc883_sixstack_modes, 9433 .channel_mode = alc883_sixstack_modes,
9498 .input_mux = &alc883_capture_source, 9434 .input_mux = &alc883_capture_source,
9499 .unsol_event = alc_automute_amp_unsol_event, 9435 .unsol_event = alc_automute_amp_unsol_event,
9500 .init_hook = alc888_6st_dell_init_hook, 9436 .setup = alc888_6st_dell_setup,
9437 .init_hook = alc_automute_amp,
9501 }, 9438 },
9502 [ALC883_MITAC] = { 9439 [ALC883_MITAC] = {
9503 .mixers = { alc883_mitac_mixer }, 9440 .mixers = { alc883_mitac_mixer },
@@ -9508,7 +9445,8 @@ static struct alc_config_preset alc883_presets[] = {
9508 .channel_mode = alc883_3ST_2ch_modes, 9445 .channel_mode = alc883_3ST_2ch_modes,
9509 .input_mux = &alc883_capture_source, 9446 .input_mux = &alc883_capture_source,
9510 .unsol_event = alc_automute_amp_unsol_event, 9447 .unsol_event = alc_automute_amp_unsol_event,
9511 .init_hook = alc883_mitac_init_hook, 9448 .setup = alc883_mitac_setup,
9449 .init_hook = alc_automute_amp,
9512 }, 9450 },
9513 [ALC883_FUJITSU_PI2515] = { 9451 [ALC883_FUJITSU_PI2515] = {
9514 .mixers = { alc883_2ch_fujitsu_pi2515_mixer }, 9452 .mixers = { alc883_2ch_fujitsu_pi2515_mixer },
@@ -9521,7 +9459,8 @@ static struct alc_config_preset alc883_presets[] = {
9521 .channel_mode = alc883_3ST_2ch_modes, 9459 .channel_mode = alc883_3ST_2ch_modes,
9522 .input_mux = &alc883_fujitsu_pi2515_capture_source, 9460 .input_mux = &alc883_fujitsu_pi2515_capture_source,
9523 .unsol_event = alc_automute_amp_unsol_event, 9461 .unsol_event = alc_automute_amp_unsol_event,
9524 .init_hook = alc883_2ch_fujitsu_pi2515_init_hook, 9462 .setup = alc883_2ch_fujitsu_pi2515_setup,
9463 .init_hook = alc_automute_amp,
9525 }, 9464 },
9526 [ALC888_FUJITSU_XA3530] = { 9465 [ALC888_FUJITSU_XA3530] = {
9527 .mixers = { alc888_base_mixer, alc883_chmode_mixer }, 9466 .mixers = { alc888_base_mixer, alc883_chmode_mixer },
@@ -9539,7 +9478,8 @@ static struct alc_config_preset alc883_presets[] = {
9539 ARRAY_SIZE(alc888_2_capture_sources), 9478 ARRAY_SIZE(alc888_2_capture_sources),
9540 .input_mux = alc888_2_capture_sources, 9479 .input_mux = alc888_2_capture_sources,
9541 .unsol_event = alc_automute_amp_unsol_event, 9480 .unsol_event = alc_automute_amp_unsol_event,
9542 .init_hook = alc888_fujitsu_xa3530_init_hook, 9481 .setup = alc888_fujitsu_xa3530_setup,
9482 .init_hook = alc_automute_amp,
9543 }, 9483 },
9544 [ALC888_LENOVO_SKY] = { 9484 [ALC888_LENOVO_SKY] = {
9545 .mixers = { alc888_lenovo_sky_mixer, alc883_chmode_mixer }, 9485 .mixers = { alc888_lenovo_sky_mixer, alc883_chmode_mixer },
@@ -9552,7 +9492,8 @@ static struct alc_config_preset alc883_presets[] = {
9552 .need_dac_fix = 1, 9492 .need_dac_fix = 1,
9553 .input_mux = &alc883_lenovo_sky_capture_source, 9493 .input_mux = &alc883_lenovo_sky_capture_source,
9554 .unsol_event = alc_automute_amp_unsol_event, 9494 .unsol_event = alc_automute_amp_unsol_event,
9555 .init_hook = alc888_lenovo_sky_init_hook, 9495 .setup = alc888_lenovo_sky_setup,
9496 .init_hook = alc_automute_amp,
9556 }, 9497 },
9557 [ALC888_ASUS_M90V] = { 9498 [ALC888_ASUS_M90V] = {
9558 .mixers = { alc883_3ST_6ch_mixer, alc883_chmode_mixer }, 9499 .mixers = { alc883_3ST_6ch_mixer, alc883_chmode_mixer },
@@ -9565,8 +9506,9 @@ static struct alc_config_preset alc883_presets[] = {
9565 .channel_mode = alc883_3ST_6ch_modes, 9506 .channel_mode = alc883_3ST_6ch_modes,
9566 .need_dac_fix = 1, 9507 .need_dac_fix = 1,
9567 .input_mux = &alc883_fujitsu_pi2515_capture_source, 9508 .input_mux = &alc883_fujitsu_pi2515_capture_source,
9568 .unsol_event = alc883_mode2_unsol_event, 9509 .unsol_event = alc_sku_unsol_event,
9569 .init_hook = alc883_mode2_inithook, 9510 .setup = alc883_mode2_setup,
9511 .init_hook = alc_inithook,
9570 }, 9512 },
9571 [ALC888_ASUS_EEE1601] = { 9513 [ALC888_ASUS_EEE1601] = {
9572 .mixers = { alc883_asus_eee1601_mixer }, 9514 .mixers = { alc883_asus_eee1601_mixer },
@@ -9619,15 +9561,45 @@ static struct alc_config_preset alc883_presets[] = {
9619 .channel_mode = alc883_3ST_2ch_modes, 9561 .channel_mode = alc883_3ST_2ch_modes,
9620 .input_mux = &alc883_capture_source, 9562 .input_mux = &alc883_capture_source,
9621 .unsol_event = alc_automute_amp_unsol_event, 9563 .unsol_event = alc_automute_amp_unsol_event,
9622 .init_hook = alc883_vaiott_init_hook, 9564 .setup = alc883_vaiott_setup,
9565 .init_hook = alc_automute_amp,
9623 }, 9566 },
9624}; 9567};
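The preset table above is plain table-driven configuration: each entry bundles mixers, init verbs, DAC/ADC lists and a few optional callbacks, and patch_alc882() later copies the selected entry with setup_preset(). Throughout this hunk the old board-specific init_hook is split into a small setup callback (which typically just records the HP/speaker pins) plus the shared alc_automute_amp hook. A rough standalone sketch of that callback split, with made-up names and a simplified spec structure, is shown here.

#include <stdio.h>

/* simplified stand-ins; the real driver keeps this state in struct alc_spec */
struct spec { unsigned hp_pin, speaker_pin; };

struct preset {
	const char *name;
	void (*setup)(struct spec *s);     /* fills board-specific pins only */
	void (*init_hook)(struct spec *s); /* shared behaviour, e.g. auto-mute */
};

static void automute_amp(struct spec *s)
{
	printf("auto-mute: hp=0x%x speaker=0x%x\n", s->hp_pin, s->speaker_pin);
}

static void example_board_setup(struct spec *s)
{
	s->hp_pin = 0x15;
	s->speaker_pin = 0x14;
}

static const struct preset presets[] = {
	{ "example-board", example_board_setup, automute_amp },
};

int main(void)
{
	struct spec s = { 0 };
	const struct preset *p = &presets[0];

	if (p->setup)
		p->setup(&s);
	if (p->init_hook)
		p->init_hook(&s);
	return 0;
}
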
9625 9568
9626 9569
9627/* 9570/*
9571 * Pin config fixes
9572 */
9573enum {
9574 PINFIX_ABIT_AW9D_MAX
9575};
9576
9577static struct alc_pincfg alc882_abit_aw9d_pinfix[] = {
9578 { 0x15, 0x01080104 }, /* side */
9579 { 0x16, 0x01011012 }, /* rear */
9580 { 0x17, 0x01016011 }, /* clfe */
9581 { }
9582};
9583
9584static const struct alc_pincfg *alc882_pin_fixes[] = {
9585 [PINFIX_ABIT_AW9D_MAX] = alc882_abit_aw9d_pinfix,
9586};
9587
9588static struct snd_pci_quirk alc882_pinfix_tbl[] = {
9589 SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", PINFIX_ABIT_AW9D_MAX),
9590 {}
9591};
9592
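The three tables above add a pin-default-configuration fixup path: alc882_pinfix_tbl maps a PCI SSID to an index into alc882_pin_fixes, whose entries are {NID, 32-bit pin default} pairs that override what the board's BIOS reports (applied later by the alc_fix_pincfg() call in patch_alc882(), presumably via the codec's pin-config setter). A simplified, standalone illustration of walking such a list follows; it only prints the overrides instead of programming a codec.

#include <stdio.h>

/* illustrative stand-in for struct alc_pincfg: a widget NID plus the
 * 32-bit "pin default configuration" value to force on it
 */
struct pinfix { unsigned char nid; unsigned int val; };

static const struct pinfix abit_aw9d_fix[] = {
	{ 0x15, 0x01080104 },	/* side */
	{ 0x16, 0x01011012 },	/* rear */
	{ 0x17, 0x01016011 },	/* clfe */
	{ 0 }			/* terminator */
};

/* in the real driver each entry would be written to the codec; here we just
 * report what would be overridden
 */
static void apply_pinfix(const struct pinfix *fix)
{
	for (; fix->nid; fix++)
		printf("pin 0x%02x -> default config 0x%08x\n", fix->nid, fix->val);
}

int main(void)
{
	apply_pinfix(abit_aw9d_fix);
	return 0;
}
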
9593/*
9628 * BIOS auto configuration 9594 * BIOS auto configuration
9629 */ 9595 */
9630static void alc883_auto_set_output_and_unmute(struct hda_codec *codec, 9596static int alc882_auto_create_input_ctls(struct hda_codec *codec,
9597 const struct auto_pin_cfg *cfg)
9598{
9599 return alc_auto_create_input_ctls(codec, cfg, 0x0b, 0x23, 0x22);
9600}
9601
9602static void alc882_auto_set_output_and_unmute(struct hda_codec *codec,
9631 hda_nid_t nid, int pin_type, 9603 hda_nid_t nid, int pin_type,
9632 int dac_idx) 9604 int dac_idx)
9633{ 9605{
@@ -9644,7 +9616,7 @@ static void alc883_auto_set_output_and_unmute(struct hda_codec *codec,
9644 9616
9645} 9617}
9646 9618
9647static void alc883_auto_init_multi_out(struct hda_codec *codec) 9619static void alc882_auto_init_multi_out(struct hda_codec *codec)
9648{ 9620{
9649 struct alc_spec *spec = codec->spec; 9621 struct alc_spec *spec = codec->spec;
9650 int i; 9622 int i;
@@ -9653,12 +9625,12 @@ static void alc883_auto_init_multi_out(struct hda_codec *codec)
9653 hda_nid_t nid = spec->autocfg.line_out_pins[i]; 9625 hda_nid_t nid = spec->autocfg.line_out_pins[i];
9654 int pin_type = get_pin_type(spec->autocfg.line_out_type); 9626 int pin_type = get_pin_type(spec->autocfg.line_out_type);
9655 if (nid) 9627 if (nid)
9656 alc883_auto_set_output_and_unmute(codec, nid, pin_type, 9628 alc882_auto_set_output_and_unmute(codec, nid, pin_type,
9657 i); 9629 i);
9658 } 9630 }
9659} 9631}
9660 9632
9661static void alc883_auto_init_hp_out(struct hda_codec *codec) 9633static void alc882_auto_init_hp_out(struct hda_codec *codec)
9662{ 9634{
9663 struct alc_spec *spec = codec->spec; 9635 struct alc_spec *spec = codec->spec;
9664 hda_nid_t pin; 9636 hda_nid_t pin;
@@ -9666,91 +9638,191 @@ static void alc883_auto_init_hp_out(struct hda_codec *codec)
9666 pin = spec->autocfg.hp_pins[0]; 9638 pin = spec->autocfg.hp_pins[0];
9667 if (pin) /* connect to front */ 9639 if (pin) /* connect to front */
9668 /* use dac 0 */ 9640 /* use dac 0 */
9669 alc883_auto_set_output_and_unmute(codec, pin, PIN_HP, 0); 9641 alc882_auto_set_output_and_unmute(codec, pin, PIN_HP, 0);
9670 pin = spec->autocfg.speaker_pins[0]; 9642 pin = spec->autocfg.speaker_pins[0];
9671 if (pin) 9643 if (pin)
9672 alc883_auto_set_output_and_unmute(codec, pin, PIN_OUT, 0); 9644 alc882_auto_set_output_and_unmute(codec, pin, PIN_OUT, 0);
9673} 9645}
9674 9646
9675#define alc883_is_input_pin(nid) alc880_is_input_pin(nid) 9647static void alc882_auto_init_analog_input(struct hda_codec *codec)
9676#define ALC883_PIN_CD_NID ALC880_PIN_CD_NID
9677
9678static void alc883_auto_init_analog_input(struct hda_codec *codec)
9679{ 9648{
9680 struct alc_spec *spec = codec->spec; 9649 struct alc_spec *spec = codec->spec;
9681 int i; 9650 int i;
9682 9651
9683 for (i = 0; i < AUTO_PIN_LAST; i++) { 9652 for (i = 0; i < AUTO_PIN_LAST; i++) {
9684 hda_nid_t nid = spec->autocfg.input_pins[i]; 9653 hda_nid_t nid = spec->autocfg.input_pins[i];
9685 if (alc883_is_input_pin(nid)) { 9654 if (!nid)
9686 alc_set_input_pin(codec, nid, i); 9655 continue;
9687 if (nid != ALC883_PIN_CD_NID && 9656 alc_set_input_pin(codec, nid, i);
9688 (get_wcaps(codec, nid) & AC_WCAP_OUT_AMP)) 9657 if (get_wcaps(codec, nid) & AC_WCAP_OUT_AMP)
9658 snd_hda_codec_write(codec, nid, 0,
9659 AC_VERB_SET_AMP_GAIN_MUTE,
9660 AMP_OUT_MUTE);
9661 }
9662}
9663
9664static void alc882_auto_init_input_src(struct hda_codec *codec)
9665{
9666 struct alc_spec *spec = codec->spec;
9667 int c;
9668
9669 for (c = 0; c < spec->num_adc_nids; c++) {
9670 hda_nid_t conn_list[HDA_MAX_NUM_INPUTS];
9671 hda_nid_t nid = spec->capsrc_nids[c];
9672 unsigned int mux_idx;
9673 const struct hda_input_mux *imux;
9674 int conns, mute, idx, item;
9675
9676 conns = snd_hda_get_connections(codec, nid, conn_list,
9677 ARRAY_SIZE(conn_list));
9678 if (conns < 0)
9679 continue;
9680 mux_idx = c >= spec->num_mux_defs ? 0 : c;
9681 imux = &spec->input_mux[mux_idx];
9682 for (idx = 0; idx < conns; idx++) {
9683 /* if the current connection is the selected one,
9684 * unmute it as default - otherwise mute it
9685 */
9686 mute = AMP_IN_MUTE(idx);
9687 for (item = 0; item < imux->num_items; item++) {
9688 if (imux->items[item].index == idx) {
9689 if (spec->cur_mux[c] == item)
9690 mute = AMP_IN_UNMUTE(idx);
9691 break;
9692 }
9693 }
9694 /* check whether this is a selector or a mixer widget;
9695 * we could check the widget type instead, but testing for
9696 * Amp-In presence is enough (a mixer without an input amp
9697 * would mean this function is being used on the wrong
9698 * widget, or the capsrc nid is wrong)
9699 */
9700 if (get_wcaps(codec, nid) & AC_WCAP_IN_AMP)
9689 snd_hda_codec_write(codec, nid, 0, 9701 snd_hda_codec_write(codec, nid, 0,
9690 AC_VERB_SET_AMP_GAIN_MUTE, 9702 AC_VERB_SET_AMP_GAIN_MUTE,
9691 AMP_OUT_MUTE); 9703 mute);
9704 else if (mute != AMP_IN_MUTE(idx))
9705 snd_hda_codec_write(codec, nid, 0,
9706 AC_VERB_SET_CONNECT_SEL,
9707 idx);
9692 } 9708 }
9693 } 9709 }
9694} 9710}
9695 9711
9696#define alc883_auto_init_input_src alc882_auto_init_input_src 9712/* add mic boosts if needed */
9713static int alc_auto_add_mic_boost(struct hda_codec *codec)
9714{
9715 struct alc_spec *spec = codec->spec;
9716 int err;
9717 hda_nid_t nid;
9718
9719 nid = spec->autocfg.input_pins[AUTO_PIN_MIC];
9720 if (nid && (get_wcaps(codec, nid) & AC_WCAP_IN_AMP)) {
9721 err = add_control(spec, ALC_CTL_WIDGET_VOL,
9722 "Mic Boost",
9723 HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_INPUT));
9724 if (err < 0)
9725 return err;
9726 }
9727 nid = spec->autocfg.input_pins[AUTO_PIN_FRONT_MIC];
9728 if (nid && (get_wcaps(codec, nid) & AC_WCAP_IN_AMP)) {
9729 err = add_control(spec, ALC_CTL_WIDGET_VOL,
9730 "Front Mic Boost",
9731 HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_INPUT));
9732 if (err < 0)
9733 return err;
9734 }
9735 return 0;
9736}
9697 9737
9698/* almost identical with ALC880 parser... */ 9738/* almost identical with ALC880 parser... */
9699static int alc883_parse_auto_config(struct hda_codec *codec) 9739static int alc882_parse_auto_config(struct hda_codec *codec)
9700{ 9740{
9701 struct alc_spec *spec = codec->spec; 9741 struct alc_spec *spec = codec->spec;
9702 int err = alc880_parse_auto_config(codec); 9742 static hda_nid_t alc882_ignore[] = { 0x1d, 0 };
9703 struct auto_pin_cfg *cfg = &spec->autocfg; 9743 int i, err;
9704 int i;
9705 9744
9745 err = snd_hda_parse_pin_def_config(codec, &spec->autocfg,
9746 alc882_ignore);
9706 if (err < 0) 9747 if (err < 0)
9707 return err; 9748 return err;
9708 else if (!err) 9749 if (!spec->autocfg.line_outs)
9709 return 0; /* no config found */ 9750 return 0; /* can't find valid BIOS pin config */
9710 9751
9711 err = alc_auto_add_mic_boost(codec); 9752 err = alc880_auto_fill_dac_nids(spec, &spec->autocfg);
9753 if (err < 0)
9754 return err;
9755 err = alc880_auto_create_multi_out_ctls(spec, &spec->autocfg);
9756 if (err < 0)
9757 return err;
9758 err = alc880_auto_create_extra_out(spec,
9759 spec->autocfg.speaker_pins[0],
9760 "Speaker");
9761 if (err < 0)
9762 return err;
9763 err = alc880_auto_create_extra_out(spec, spec->autocfg.hp_pins[0],
9764 "Headphone");
9765 if (err < 0)
9766 return err;
9767 err = alc882_auto_create_input_ctls(codec, &spec->autocfg);
9712 if (err < 0) 9768 if (err < 0)
9713 return err; 9769 return err;
9714 9770
9715 /* hack - override the init verbs */ 9771 spec->multiout.max_channels = spec->multiout.num_dacs * 2;
9716 spec->init_verbs[0] = alc883_auto_init_verbs;
9717 9772
9718 /* setup input_mux for ALC889 */ 9773 /* check multiple SPDIF-out (for recent codecs) */
9719 if (codec->vendor_id == 0x10ec0889) { 9774 for (i = 0; i < spec->autocfg.dig_outs; i++) {
9720 /* digital-mic input pin is excluded in alc880_auto_create..() 9775 hda_nid_t dig_nid;
9721 * because it's under 0x18 9776 err = snd_hda_get_connections(codec,
9722 */ 9777 spec->autocfg.dig_out_pins[i],
9723 if (cfg->input_pins[AUTO_PIN_MIC] == 0x12 || 9778 &dig_nid, 1);
9724 cfg->input_pins[AUTO_PIN_FRONT_MIC] == 0x12) { 9779 if (err < 0)
9725 struct hda_input_mux *imux = &spec->private_imux[0]; 9780 continue;
9726 for (i = 1; i < 3; i++) 9781 if (!i)
9727 memcpy(&spec->private_imux[i], 9782 spec->multiout.dig_out_nid = dig_nid;
9728 &spec->private_imux[0], 9783 else {
9729 sizeof(spec->private_imux[0])); 9784 spec->multiout.slave_dig_outs = spec->slave_dig_outs;
9730 imux->items[imux->num_items].label = "Int DMic"; 9785 spec->slave_dig_outs[i - 1] = dig_nid;
9731 imux->items[imux->num_items].index = 0x0b; 9786 if (i == ARRAY_SIZE(spec->slave_dig_outs) - 1)
9732 imux->num_items++; 9787 break;
9733 spec->num_mux_defs = 3;
9734 spec->input_mux = spec->private_imux;
9735 } 9788 }
9736 } 9789 }
9790 if (spec->autocfg.dig_in_pin)
9791 spec->dig_in_nid = ALC880_DIGIN_NID;
9792
9793 if (spec->kctls.list)
9794 add_mixer(spec, spec->kctls.list);
9795
9796 add_verb(spec, alc883_auto_init_verbs);
9797 /* if ADC 0x07 is available, initialize it, too */
9798 if (get_wcaps_type(get_wcaps(codec, 0x07)) == AC_WID_AUD_IN)
9799 add_verb(spec, alc882_adc1_init_verbs);
9800
9801 spec->num_mux_defs = 1;
9802 spec->input_mux = &spec->private_imux[0];
9803
9804 alc_ssid_check(codec, 0x15, 0x1b, 0x14);
9805
9806 err = alc_auto_add_mic_boost(codec);
9807 if (err < 0)
9808 return err;
9737 9809
9738 return 1; /* config found */ 9810 return 1; /* config found */
9739} 9811}
9740 9812
9741/* additional initialization for auto-configuration model */ 9813/* additional initialization for auto-configuration model */
9742static void alc883_auto_init(struct hda_codec *codec) 9814static void alc882_auto_init(struct hda_codec *codec)
9743{ 9815{
9744 struct alc_spec *spec = codec->spec; 9816 struct alc_spec *spec = codec->spec;
9745 alc883_auto_init_multi_out(codec); 9817 alc882_auto_init_multi_out(codec);
9746 alc883_auto_init_hp_out(codec); 9818 alc882_auto_init_hp_out(codec);
9747 alc883_auto_init_analog_input(codec); 9819 alc882_auto_init_analog_input(codec);
9748 alc883_auto_init_input_src(codec); 9820 alc882_auto_init_input_src(codec);
9749 if (spec->unsol_event) 9821 if (spec->unsol_event)
9750 alc_inithook(codec); 9822 alc_inithook(codec);
9751} 9823}
9752 9824
9753static int patch_alc883(struct hda_codec *codec) 9825static int patch_alc882(struct hda_codec *codec)
9754{ 9826{
9755 struct alc_spec *spec; 9827 struct alc_spec *spec;
9756 int err, board_config; 9828 int err, board_config;
@@ -9761,28 +9833,35 @@ static int patch_alc883(struct hda_codec *codec)
9761 9833
9762 codec->spec = spec; 9834 codec->spec = spec;
9763 9835
9764 alc_fix_pll_init(codec, 0x20, 0x0a, 10); 9836 switch (codec->vendor_id) {
9837 case 0x10ec0882:
9838 case 0x10ec0885:
9839 break;
9840 default:
9841 /* ALC883 and variants */
9842 alc_fix_pll_init(codec, 0x20, 0x0a, 10);
9843 break;
9844 }
9765 9845
9766 board_config = snd_hda_check_board_config(codec, ALC883_MODEL_LAST, 9846 board_config = snd_hda_check_board_config(codec, ALC882_MODEL_LAST,
9767 alc883_models, 9847 alc882_models,
9768 alc883_cfg_tbl); 9848 alc882_cfg_tbl);
9769 if (board_config < 0 || board_config >= ALC883_MODEL_LAST) { 9849
9770 /* Pick up systems that don't supply PCI SSID */ 9850 if (board_config < 0 || board_config >= ALC882_MODEL_LAST)
9771 switch (codec->subsystem_id) { 9851 board_config = snd_hda_check_board_codec_sid_config(codec,
9772 case 0x106b3600: /* Macbook 3.1 */ 9852 ALC882_MODEL_LAST, alc882_models, alc882_ssid_cfg_tbl);
9773 board_config = ALC889A_MB31; 9853
9774 break; 9854 if (board_config < 0 || board_config >= ALC882_MODEL_LAST) {
9775 default: 9855 printk(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
9776 printk(KERN_INFO 9856 codec->chip_name);
9777 "hda_codec: Unknown model for %s, trying " 9857 board_config = ALC882_AUTO;
9778 "auto-probe from BIOS...\n", codec->chip_name);
9779 board_config = ALC883_AUTO;
9780 }
9781 } 9858 }
9782 9859
9783 if (board_config == ALC883_AUTO) { 9860 alc_fix_pincfg(codec, alc882_pinfix_tbl, alc882_pin_fixes);
9861
9862 if (board_config == ALC882_AUTO) {
9784 /* automatic parse from the BIOS config */ 9863 /* automatic parse from the BIOS config */
9785 err = alc883_parse_auto_config(codec); 9864 err = alc882_parse_auto_config(codec);
9786 if (err < 0) { 9865 if (err < 0) {
9787 alc_free(codec); 9866 alc_free(codec);
9788 return err; 9867 return err;
@@ -9790,7 +9869,7 @@ static int patch_alc883(struct hda_codec *codec)
9790 printk(KERN_INFO 9869 printk(KERN_INFO
9791 "hda_codec: Cannot set up configuration " 9870 "hda_codec: Cannot set up configuration "
9792 "from BIOS. Using base mode...\n"); 9871 "from BIOS. Using base mode...\n");
9793 board_config = ALC883_3ST_2ch_DIG; 9872 board_config = ALC882_3ST_DIG;
9794 } 9873 }
9795 } 9874 }
9796 9875
@@ -9800,63 +9879,61 @@ static int patch_alc883(struct hda_codec *codec)
9800 return err; 9879 return err;
9801 } 9880 }
9802 9881
9803 if (board_config != ALC883_AUTO) 9882 if (board_config != ALC882_AUTO)
9804 setup_preset(spec, &alc883_presets[board_config]); 9883 setup_preset(codec, &alc882_presets[board_config]);
9805 9884
9806 switch (codec->vendor_id) { 9885 spec->stream_analog_playback = &alc882_pcm_analog_playback;
9807 case 0x10ec0888: 9886 spec->stream_analog_capture = &alc882_pcm_analog_capture;
9808 if (!spec->num_adc_nids) { 9887 /* FIXME: setup DAC5 */
9809 spec->num_adc_nids = ARRAY_SIZE(alc883_adc_nids); 9888 /*spec->stream_analog_alt_playback = &alc880_pcm_analog_alt_playback;*/
9810 spec->adc_nids = alc883_adc_nids; 9889 spec->stream_analog_alt_capture = &alc880_pcm_analog_alt_capture;
9811 } 9890
9812 if (!spec->capsrc_nids) 9891 spec->stream_digital_playback = &alc882_pcm_digital_playback;
9813 spec->capsrc_nids = alc883_capsrc_nids; 9892 spec->stream_digital_capture = &alc882_pcm_digital_capture;
9893
9894 if (codec->vendor_id == 0x10ec0888)
9814 spec->init_amp = ALC_INIT_DEFAULT; /* always initialize */ 9895 spec->init_amp = ALC_INIT_DEFAULT; /* always initialize */
9815 break; 9896
9816 case 0x10ec0889: 9897 if (!spec->adc_nids && spec->input_mux) {
9817 if (!spec->num_adc_nids) { 9898 int i;
9818 spec->num_adc_nids = ARRAY_SIZE(alc889_adc_nids); 9899 spec->num_adc_nids = 0;
9819 spec->adc_nids = alc889_adc_nids; 9900 for (i = 0; i < ARRAY_SIZE(alc882_adc_nids); i++) {
9820 } 9901 hda_nid_t cap;
9821 if (!spec->capsrc_nids) 9902 hda_nid_t nid = alc882_adc_nids[i];
9822 spec->capsrc_nids = alc889_capsrc_nids; 9903 unsigned int wcap = get_wcaps(codec, nid);
9823 break; 9904 /* get type */
9824 default: 9905 wcap = get_wcaps_type(wcap);
9825 if (!spec->num_adc_nids) { 9906 if (wcap != AC_WID_AUD_IN)
9826 spec->num_adc_nids = ARRAY_SIZE(alc883_adc_nids); 9907 continue;
9827 spec->adc_nids = alc883_adc_nids; 9908 spec->private_adc_nids[spec->num_adc_nids] = nid;
9909 err = snd_hda_get_connections(codec, nid, &cap, 1);
9910 if (err < 0)
9911 continue;
9912 spec->private_capsrc_nids[spec->num_adc_nids] = cap;
9913 spec->num_adc_nids++;
9828 } 9914 }
9829 if (!spec->capsrc_nids) 9915 spec->adc_nids = spec->private_adc_nids;
9830 spec->capsrc_nids = alc883_capsrc_nids; 9916 spec->capsrc_nids = spec->private_capsrc_nids;
9831 break;
9832 } 9917 }
9833 9918
9834 spec->stream_analog_playback = &alc883_pcm_analog_playback; 9919 set_capture_mixer(codec);
9835 spec->stream_analog_capture = &alc883_pcm_analog_capture;
9836 spec->stream_analog_alt_capture = &alc883_pcm_analog_alt_capture;
9837
9838 spec->stream_digital_playback = &alc883_pcm_digital_playback;
9839 spec->stream_digital_capture = &alc883_pcm_digital_capture;
9840
9841 if (!spec->cap_mixer)
9842 set_capture_mixer(spec);
9843 set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT); 9920 set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT);
9844 9921
9845 spec->vmaster_nid = 0x0c; 9922 spec->vmaster_nid = 0x0c;
9846 9923
9847 codec->patch_ops = alc_patch_ops; 9924 codec->patch_ops = alc_patch_ops;
9848 if (board_config == ALC883_AUTO) 9925 if (board_config == ALC882_AUTO)
9849 spec->init_hook = alc883_auto_init; 9926 spec->init_hook = alc882_auto_init;
9850
9851#ifdef CONFIG_SND_HDA_POWER_SAVE 9927#ifdef CONFIG_SND_HDA_POWER_SAVE
9852 if (!spec->loopback.amplist) 9928 if (!spec->loopback.amplist)
9853 spec->loopback.amplist = alc883_loopbacks; 9929 spec->loopback.amplist = alc882_loopbacks;
9854#endif 9930#endif
9855 codec->proc_widget_hook = print_realtek_coef; 9931 codec->proc_widget_hook = print_realtek_coef;
9856 9932
9857 return 0; 9933 return 0;
9858} 9934}
9859 9935
9936
9860/* 9937/*
9861 * ALC262 support 9938 * ALC262 support
9862 */ 9939 */
@@ -10032,13 +10109,12 @@ static struct snd_kcontrol_new alc262_HP_BPC_WildWest_option_mixer[] = {
10032}; 10109};
10033 10110
10034/* mute/unmute internal speaker according to the hp jack and mute state */ 10111/* mute/unmute internal speaker according to the hp jack and mute state */
10035static void alc262_hp_t5735_init_hook(struct hda_codec *codec) 10112static void alc262_hp_t5735_setup(struct hda_codec *codec)
10036{ 10113{
10037 struct alc_spec *spec = codec->spec; 10114 struct alc_spec *spec = codec->spec;
10038 10115
10039 spec->autocfg.hp_pins[0] = 0x15; 10116 spec->autocfg.hp_pins[0] = 0x15;
10040 spec->autocfg.speaker_pins[0] = 0x0c; /* HACK: not actually a pin */ 10117 spec->autocfg.speaker_pins[0] = 0x0c; /* HACK: not actually a pin */
10041 alc_automute_amp(codec);
10042} 10118}
10043 10119
10044static struct snd_kcontrol_new alc262_hp_t5735_mixer[] = { 10120static struct snd_kcontrol_new alc262_hp_t5735_mixer[] = {
@@ -10195,22 +10271,20 @@ static void alc262_hippo_unsol_event(struct hda_codec *codec, unsigned int res)
10195 alc262_hippo_automute(codec); 10271 alc262_hippo_automute(codec);
10196} 10272}
10197 10273
10198static void alc262_hippo_init_hook(struct hda_codec *codec) 10274static void alc262_hippo_setup(struct hda_codec *codec)
10199{ 10275{
10200 struct alc_spec *spec = codec->spec; 10276 struct alc_spec *spec = codec->spec;
10201 10277
10202 spec->autocfg.hp_pins[0] = 0x15; 10278 spec->autocfg.hp_pins[0] = 0x15;
10203 spec->autocfg.speaker_pins[0] = 0x14; 10279 spec->autocfg.speaker_pins[0] = 0x14;
10204 alc262_hippo_automute(codec);
10205} 10280}
10206 10281
10207static void alc262_hippo1_init_hook(struct hda_codec *codec) 10282static void alc262_hippo1_setup(struct hda_codec *codec)
10208{ 10283{
10209 struct alc_spec *spec = codec->spec; 10284 struct alc_spec *spec = codec->spec;
10210 10285
10211 spec->autocfg.hp_pins[0] = 0x1b; 10286 spec->autocfg.hp_pins[0] = 0x1b;
10212 spec->autocfg.speaker_pins[0] = 0x14; 10287 spec->autocfg.speaker_pins[0] = 0x14;
10213 alc262_hippo_automute(codec);
10214} 10288}
10215 10289
10216 10290
@@ -10267,13 +10341,12 @@ static struct hda_verb alc262_tyan_verbs[] = {
10267}; 10341};
10268 10342
10269/* unsolicited event for HP jack sensing */ 10343/* unsolicited event for HP jack sensing */
10270static void alc262_tyan_init_hook(struct hda_codec *codec) 10344static void alc262_tyan_setup(struct hda_codec *codec)
10271{ 10345{
10272 struct alc_spec *spec = codec->spec; 10346 struct alc_spec *spec = codec->spec;
10273 10347
10274 spec->autocfg.hp_pins[0] = 0x1b; 10348 spec->autocfg.hp_pins[0] = 0x1b;
10275 spec->autocfg.speaker_pins[0] = 0x15; 10349 spec->autocfg.speaker_pins[0] = 0x15;
10276 alc_automute_amp(codec);
10277} 10350}
10278 10351
10279 10352
@@ -10365,12 +10438,6 @@ static struct hda_verb alc262_eapd_verbs[] = {
10365 { } 10438 { }
10366}; 10439};
10367 10440
10368static struct hda_verb alc262_hippo_unsol_verbs[] = {
10369 {0x15, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | ALC880_HP_EVENT},
10370 {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
10371 {}
10372};
10373
10374static struct hda_verb alc262_hippo1_unsol_verbs[] = { 10441static struct hda_verb alc262_hippo1_unsol_verbs[] = {
10375 {0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, 0xc0}, 10442 {0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, 0xc0},
10376 {0x1b, AC_VERB_SET_CONNECT_SEL, 0x00}, 10443 {0x1b, AC_VERB_SET_CONNECT_SEL, 0x00},
@@ -10391,14 +10458,6 @@ static struct hda_verb alc262_sony_unsol_verbs[] = {
10391 {} 10458 {}
10392}; 10459};
10393 10460
10394static struct hda_input_mux alc262_dmic_capture_source = {
10395 .num_items = 2,
10396 .items = {
10397 { "Int DMic", 0x9 },
10398 { "Mic", 0x0 },
10399 },
10400};
10401
10402static struct snd_kcontrol_new alc262_toshiba_s06_mixer[] = { 10461static struct snd_kcontrol_new alc262_toshiba_s06_mixer[] = {
10403 HDA_CODEC_VOLUME("Speaker Playback Volume", 0x0c, 0x0, HDA_OUTPUT), 10462 HDA_CODEC_VOLUME("Speaker Playback Volume", 0x0c, 0x0, HDA_OUTPUT),
10404 HDA_CODEC_MUTE("Speaker Playback Switch", 0x14, 0x0, HDA_OUTPUT), 10463 HDA_CODEC_MUTE("Speaker Playback Switch", 0x14, 0x0, HDA_OUTPUT),
@@ -10420,35 +10479,17 @@ static struct hda_verb alc262_toshiba_s06_verbs[] = {
10420 {} 10479 {}
10421}; 10480};
10422 10481
10423static void alc262_dmic_automute(struct hda_codec *codec) 10482static void alc262_toshiba_s06_setup(struct hda_codec *codec)
10424{
10425 unsigned int present;
10426
10427 present = snd_hda_codec_read(codec, 0x18, 0,
10428 AC_VERB_GET_PIN_SENSE, 0) & 0x80000000;
10429 snd_hda_codec_write(codec, 0x22, 0,
10430 AC_VERB_SET_CONNECT_SEL, present ? 0x0 : 0x09);
10431}
10432
10433
10434/* unsolicited event for HP jack sensing */
10435static void alc262_toshiba_s06_unsol_event(struct hda_codec *codec,
10436 unsigned int res)
10437{
10438 if ((res >> 26) == ALC880_MIC_EVENT)
10439 alc262_dmic_automute(codec);
10440 else
10441 alc_sku_unsol_event(codec, res);
10442}
10443
10444static void alc262_toshiba_s06_init_hook(struct hda_codec *codec)
10445{ 10483{
10446 struct alc_spec *spec = codec->spec; 10484 struct alc_spec *spec = codec->spec;
10447 10485
10448 spec->autocfg.hp_pins[0] = 0x15; 10486 spec->autocfg.hp_pins[0] = 0x15;
10449 spec->autocfg.speaker_pins[0] = 0x14; 10487 spec->autocfg.speaker_pins[0] = 0x14;
10450 alc_automute_pin(codec); 10488 spec->ext_mic.pin = 0x18;
10451 alc262_dmic_automute(codec); 10489 spec->ext_mic.mux_idx = 0;
10490 spec->int_mic.pin = 0x12;
10491 spec->int_mic.mux_idx = 9;
10492 spec->auto_mic = 1;
10452} 10493}
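This setup replaces the removed alc262_dmic_automute()/unsol handler (which read the jack-presence bit on pin 0x18 and rewired the capture selector 0x22 by hand) with the generic auto-mic mechanism driven by the ext_mic/int_mic descriptors filled in above. A standalone sketch of the idea, using simplified stand-in types and a faked jack-detect:

#include <stdio.h>

/* illustrative stand-ins for the auto-mic data the new setup fills in */
struct mic { int pin; int mux_idx; };
struct spec { struct mic ext_mic, int_mic; };

/* pretend jack-detect: nonzero if something is plugged into the pin */
static int jack_present(int pin)
{
	return pin == 0x18;	/* hard-coded for the example */
}

/* roughly what the removed alc262_dmic_automute() did by hand, and what the
 * generic auto-mic code now derives from spec->ext_mic/int_mic
 */
static void auto_mic_select(const struct spec *s)
{
	const struct mic *m = jack_present(s->ext_mic.pin) ? &s->ext_mic
							   : &s->int_mic;
	printf("select capture source index %d (pin 0x%x)\n", m->mux_idx, m->pin);
}

int main(void)
{
	struct spec s = {
		.ext_mic = { .pin = 0x18, .mux_idx = 0 },
		.int_mic = { .pin = 0x12, .mux_idx = 9 },
	};
	auto_mic_select(&s);
	return 0;
}
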
10453 10494
10454/* 10495/*
@@ -10866,104 +10907,111 @@ static struct snd_kcontrol_new alc262_ultra_capture_mixer[] = {
10866 { } /* end */ 10907 { } /* end */
10867}; 10908};
10868 10909
10910/* We use two mixers depending on the output pin; 0x16 is a mono output
10911 * and thus it's bound with a different mixer.
10912 * This function returns which mixer amp should be used.
10913 */
10914static int alc262_check_volbit(hda_nid_t nid)
10915{
10916 if (!nid)
10917 return 0;
10918 else if (nid == 0x16)
10919 return 2;
10920 else
10921 return 1;
10922}
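A short worked example of the volume-bit logic above (illustrative only): pins other than 0x16 share mixer 0x0c (bit 1), while the mono pin 0x16 uses mixer 0x0e (bit 2), so a single "Master" volume is enough exactly when only one bit ends up set.

#include <stdio.h>

static int check_volbit(int nid)	/* mirrors alc262_check_volbit() above */
{
	if (!nid)
		return 0;
	return nid == 0x16 ? 2 : 1;	/* 0x16 -> amp 0x0e, others -> 0x0c */
}

int main(void)
{
	/* line-out 0x14, no speaker, headphone 0x15: vbits == 1, so one
	 * "Master" volume on mixer 0x0c covers everything
	 */
	int vbits = check_volbit(0x14) | check_volbit(0) | check_volbit(0x15);
	printf("vbits = %d\n", vbits);

	/* with a mono speaker on 0x16 instead: vbits == 3, so separate
	 * "Speaker" (0x0e) and "Front"/"Headphone" (0x0c) volumes are built
	 */
	vbits = check_volbit(0x14) | check_volbit(0x16) | check_volbit(0x15);
	printf("vbits = %d\n", vbits);
	return 0;
}
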
10923
10924static int alc262_add_out_vol_ctl(struct alc_spec *spec, hda_nid_t nid,
10925 const char *pfx, int *vbits)
10926{
10927 char name[32];
10928 unsigned long val;
10929 int vbit;
10930
10931 vbit = alc262_check_volbit(nid);
10932 if (!vbit)
10933 return 0;
10934 if (*vbits & vbit) /* a volume control for this mixer already there */
10935 return 0;
10936 *vbits |= vbit;
10937 snprintf(name, sizeof(name), "%s Playback Volume", pfx);
10938 if (vbit == 2)
10939 val = HDA_COMPOSE_AMP_VAL(0x0e, 2, 0, HDA_OUTPUT);
10940 else
10941 val = HDA_COMPOSE_AMP_VAL(0x0c, 3, 0, HDA_OUTPUT);
10942 return add_control(spec, ALC_CTL_WIDGET_VOL, name, val);
10943}
10944
10945static int alc262_add_out_sw_ctl(struct alc_spec *spec, hda_nid_t nid,
10946 const char *pfx)
10947{
10948 char name[32];
10949 unsigned long val;
10950
10951 if (!nid)
10952 return 0;
10953 snprintf(name, sizeof(name), "%s Playback Switch", pfx);
10954 if (nid == 0x16)
10955 val = HDA_COMPOSE_AMP_VAL(nid, 2, 0, HDA_OUTPUT);
10956 else
10957 val = HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_OUTPUT);
10958 return add_control(spec, ALC_CTL_WIDGET_MUTE, name, val);
10959}
10960
10869/* add playback controls from the parsed DAC table */ 10961/* add playback controls from the parsed DAC table */
10870static int alc262_auto_create_multi_out_ctls(struct alc_spec *spec, 10962static int alc262_auto_create_multi_out_ctls(struct alc_spec *spec,
10871 const struct auto_pin_cfg *cfg) 10963 const struct auto_pin_cfg *cfg)
10872{ 10964{
10873 hda_nid_t nid; 10965 const char *pfx;
10966 int vbits;
10874 int err; 10967 int err;
10875 10968
10876 spec->multiout.num_dacs = 1; /* only use one dac */ 10969 spec->multiout.num_dacs = 1; /* only use one dac */
10877 spec->multiout.dac_nids = spec->private_dac_nids; 10970 spec->multiout.dac_nids = spec->private_dac_nids;
10878 spec->multiout.dac_nids[0] = 2; 10971 spec->multiout.dac_nids[0] = 2;
10879 10972
10880 nid = cfg->line_out_pins[0]; 10973 if (!cfg->speaker_pins[0] && !cfg->hp_pins[0])
10881 if (nid) { 10974 pfx = "Master";
10882 err = add_control(spec, ALC_CTL_WIDGET_VOL, 10975 else if (cfg->line_out_type == AUTO_PIN_SPEAKER_OUT)
10883 "Front Playback Volume", 10976 pfx = "Speaker";
10884 HDA_COMPOSE_AMP_VAL(0x0c, 3, 0, HDA_OUTPUT)); 10977 else
10885 if (err < 0) 10978 pfx = "Front";
10886 return err; 10979 err = alc262_add_out_sw_ctl(spec, cfg->line_out_pins[0], pfx);
10887 err = add_control(spec, ALC_CTL_WIDGET_MUTE, 10980 if (err < 0)
10888 "Front Playback Switch", 10981 return err;
10889 HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_OUTPUT)); 10982 err = alc262_add_out_sw_ctl(spec, cfg->speaker_pins[0], "Speaker");
10890 if (err < 0) 10983 if (err < 0)
10891 return err; 10984 return err;
10892 } 10985 err = alc262_add_out_sw_ctl(spec, cfg->hp_pins[0], "Headphone");
10893 10986 if (err < 0)
10894 nid = cfg->speaker_pins[0]; 10987 return err;
10895 if (nid) {
10896 if (nid == 0x16) {
10897 err = add_control(spec, ALC_CTL_WIDGET_VOL,
10898 "Speaker Playback Volume",
10899 HDA_COMPOSE_AMP_VAL(0x0e, 2, 0,
10900 HDA_OUTPUT));
10901 if (err < 0)
10902 return err;
10903 err = add_control(spec, ALC_CTL_WIDGET_MUTE,
10904 "Speaker Playback Switch",
10905 HDA_COMPOSE_AMP_VAL(nid, 2, 0,
10906 HDA_OUTPUT));
10907 if (err < 0)
10908 return err;
10909 } else {
10910 err = add_control(spec, ALC_CTL_WIDGET_MUTE,
10911 "Speaker Playback Switch",
10912 HDA_COMPOSE_AMP_VAL(nid, 3, 0,
10913 HDA_OUTPUT));
10914 if (err < 0)
10915 return err;
10916 }
10917 }
10918 nid = cfg->hp_pins[0];
10919 if (nid) {
10920 /* spec->multiout.hp_nid = 2; */
10921 if (nid == 0x16) {
10922 err = add_control(spec, ALC_CTL_WIDGET_VOL,
10923 "Headphone Playback Volume",
10924 HDA_COMPOSE_AMP_VAL(0x0e, 2, 0,
10925 HDA_OUTPUT));
10926 if (err < 0)
10927 return err;
10928 err = add_control(spec, ALC_CTL_WIDGET_MUTE,
10929 "Headphone Playback Switch",
10930 HDA_COMPOSE_AMP_VAL(nid, 2, 0,
10931 HDA_OUTPUT));
10932 if (err < 0)
10933 return err;
10934 } else {
10935 err = add_control(spec, ALC_CTL_WIDGET_MUTE,
10936 "Headphone Playback Switch",
10937 HDA_COMPOSE_AMP_VAL(nid, 3, 0,
10938 HDA_OUTPUT));
10939 if (err < 0)
10940 return err;
10941 }
10942 }
10943 return 0;
10944}
10945
10946static int alc262_auto_create_analog_input_ctls(struct alc_spec *spec,
10947 const struct auto_pin_cfg *cfg)
10948{
10949 int err;
10950 10988
10951 err = alc880_auto_create_analog_input_ctls(spec, cfg); 10989 vbits = alc262_check_volbit(cfg->line_out_pins[0]) |
10990 alc262_check_volbit(cfg->speaker_pins[0]) |
10991 alc262_check_volbit(cfg->hp_pins[0]);
10992 if (vbits == 1 || vbits == 2)
10993 pfx = "Master"; /* only one mixer is used */
10994 else if (cfg->line_out_type == AUTO_PIN_SPEAKER_OUT)
10995 pfx = "Speaker";
10996 else
10997 pfx = "Front";
10998 vbits = 0;
10999 err = alc262_add_out_vol_ctl(spec, cfg->line_out_pins[0], pfx, &vbits);
11000 if (err < 0)
11001 return err;
11002 err = alc262_add_out_vol_ctl(spec, cfg->speaker_pins[0], "Speaker",
11003 &vbits);
11004 if (err < 0)
11005 return err;
11006 err = alc262_add_out_vol_ctl(spec, cfg->hp_pins[0], "Headphone",
11007 &vbits);
10952 if (err < 0) 11008 if (err < 0)
10953 return err; 11009 return err;
10954 /* digital-mic input pin is excluded in alc880_auto_create..()
10955 * because it's under 0x18
10956 */
10957 if (cfg->input_pins[AUTO_PIN_MIC] == 0x12 ||
10958 cfg->input_pins[AUTO_PIN_FRONT_MIC] == 0x12) {
10959 struct hda_input_mux *imux = &spec->private_imux[0];
10960 imux->items[imux->num_items].label = "Int Mic";
10961 imux->items[imux->num_items].index = 0x09;
10962 imux->num_items++;
10963 }
10964 return 0; 11010 return 0;
10965} 11011}
10966 11012
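The rewritten alc262_auto_create_multi_out_ctls() adds the mute switches first, then uses alc262_check_volbit() (defined earlier in the file, outside this excerpt) to see whether all active outputs sit behind a single mixer; if so, the lone volume control is named "Master", otherwise per-output names are used and the vbits accumulator keeps alc262_add_out_vol_ctl() from registering a second volume for the same mixer. A sketch of what the helper presumably looks like (an assumption, it is not part of this hunk):

	static int alc262_check_volbit(hda_nid_t nid)
	{
		if (!nid)
			return 0;	/* pin not present */
		if (nid == 0x16)
			return 2;	/* mono pin, behind mixer 0x0e */
		return 1;		/* other pins, behind mixer 0x0c */
	}
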
11013#define alc262_auto_create_input_ctls \
11014 alc880_auto_create_input_ctls
10967 11015
10968/* 11016/*
10969 * generic initialization of ADC, input mixers and output mixers 11017 * generic initialization of ADC, input mixers and output mixers
@@ -11281,7 +11329,7 @@ static int alc262_parse_auto_config(struct hda_codec *codec)
11281 err = alc262_auto_create_multi_out_ctls(spec, &spec->autocfg); 11329 err = alc262_auto_create_multi_out_ctls(spec, &spec->autocfg);
11282 if (err < 0) 11330 if (err < 0)
11283 return err; 11331 return err;
11284 err = alc262_auto_create_analog_input_ctls(spec, &spec->autocfg); 11332 err = alc262_auto_create_input_ctls(codec, &spec->autocfg);
11285 if (err < 0) 11333 if (err < 0)
11286 return err; 11334 return err;
11287 11335
@@ -11412,7 +11460,7 @@ static struct alc_config_preset alc262_presets[] = {
11412 }, 11460 },
11413 [ALC262_HIPPO] = { 11461 [ALC262_HIPPO] = {
11414 .mixers = { alc262_hippo_mixer }, 11462 .mixers = { alc262_hippo_mixer },
11415 .init_verbs = { alc262_init_verbs, alc262_hippo_unsol_verbs}, 11463 .init_verbs = { alc262_init_verbs, alc_hp15_unsol_verbs},
11416 .num_dacs = ARRAY_SIZE(alc262_dac_nids), 11464 .num_dacs = ARRAY_SIZE(alc262_dac_nids),
11417 .dac_nids = alc262_dac_nids, 11465 .dac_nids = alc262_dac_nids,
11418 .hp_nid = 0x03, 11466 .hp_nid = 0x03,
@@ -11421,7 +11469,8 @@ static struct alc_config_preset alc262_presets[] = {
11421 .channel_mode = alc262_modes, 11469 .channel_mode = alc262_modes,
11422 .input_mux = &alc262_capture_source, 11470 .input_mux = &alc262_capture_source,
11423 .unsol_event = alc262_hippo_unsol_event, 11471 .unsol_event = alc262_hippo_unsol_event,
11424 .init_hook = alc262_hippo_init_hook, 11472 .setup = alc262_hippo_setup,
11473 .init_hook = alc262_hippo_automute,
11425 }, 11474 },
11426 [ALC262_HIPPO_1] = { 11475 [ALC262_HIPPO_1] = {
11427 .mixers = { alc262_hippo1_mixer }, 11476 .mixers = { alc262_hippo1_mixer },
@@ -11434,7 +11483,8 @@ static struct alc_config_preset alc262_presets[] = {
11434 .channel_mode = alc262_modes, 11483 .channel_mode = alc262_modes,
11435 .input_mux = &alc262_capture_source, 11484 .input_mux = &alc262_capture_source,
11436 .unsol_event = alc262_hippo_unsol_event, 11485 .unsol_event = alc262_hippo_unsol_event,
11437 .init_hook = alc262_hippo1_init_hook, 11486 .setup = alc262_hippo1_setup,
11487 .init_hook = alc262_hippo_automute,
11438 }, 11488 },
11439 [ALC262_FUJITSU] = { 11489 [ALC262_FUJITSU] = {
11440 .mixers = { alc262_fujitsu_mixer }, 11490 .mixers = { alc262_fujitsu_mixer },
@@ -11497,7 +11547,8 @@ static struct alc_config_preset alc262_presets[] = {
11497 .channel_mode = alc262_modes, 11547 .channel_mode = alc262_modes,
11498 .input_mux = &alc262_capture_source, 11548 .input_mux = &alc262_capture_source,
11499 .unsol_event = alc_automute_amp_unsol_event, 11549 .unsol_event = alc_automute_amp_unsol_event,
11500 .init_hook = alc262_hp_t5735_init_hook, 11550 .setup = alc262_hp_t5735_setup,
11551 .init_hook = alc_automute_amp,
11501 }, 11552 },
11502 [ALC262_HP_RP5700] = { 11553 [ALC262_HP_RP5700] = {
11503 .mixers = { alc262_hp_rp5700_mixer }, 11554 .mixers = { alc262_hp_rp5700_mixer },
@@ -11528,11 +11579,13 @@ static struct alc_config_preset alc262_presets[] = {
11528 .channel_mode = alc262_modes, 11579 .channel_mode = alc262_modes,
11529 .input_mux = &alc262_capture_source, 11580 .input_mux = &alc262_capture_source,
11530 .unsol_event = alc262_hippo_unsol_event, 11581 .unsol_event = alc262_hippo_unsol_event,
11531 .init_hook = alc262_hippo_init_hook, 11582 .setup = alc262_hippo_setup,
11583 .init_hook = alc262_hippo_automute,
11532 }, 11584 },
11533 [ALC262_BENQ_T31] = { 11585 [ALC262_BENQ_T31] = {
11534 .mixers = { alc262_benq_t31_mixer }, 11586 .mixers = { alc262_benq_t31_mixer },
11535 .init_verbs = { alc262_init_verbs, alc262_benq_t31_EAPD_verbs, alc262_hippo_unsol_verbs }, 11587 .init_verbs = { alc262_init_verbs, alc262_benq_t31_EAPD_verbs,
11588 alc_hp15_unsol_verbs },
11536 .num_dacs = ARRAY_SIZE(alc262_dac_nids), 11589 .num_dacs = ARRAY_SIZE(alc262_dac_nids),
11537 .dac_nids = alc262_dac_nids, 11590 .dac_nids = alc262_dac_nids,
11538 .hp_nid = 0x03, 11591 .hp_nid = 0x03,
@@ -11540,7 +11593,8 @@ static struct alc_config_preset alc262_presets[] = {
11540 .channel_mode = alc262_modes, 11593 .channel_mode = alc262_modes,
11541 .input_mux = &alc262_capture_source, 11594 .input_mux = &alc262_capture_source,
11542 .unsol_event = alc262_hippo_unsol_event, 11595 .unsol_event = alc262_hippo_unsol_event,
11543 .init_hook = alc262_hippo_init_hook, 11596 .setup = alc262_hippo_setup,
11597 .init_hook = alc262_hippo_automute,
11544 }, 11598 },
11545 [ALC262_ULTRA] = { 11599 [ALC262_ULTRA] = {
11546 .mixers = { alc262_ultra_mixer }, 11600 .mixers = { alc262_ultra_mixer },
@@ -11592,9 +11646,9 @@ static struct alc_config_preset alc262_presets[] = {
11592 .dig_out_nid = ALC262_DIGOUT_NID, 11646 .dig_out_nid = ALC262_DIGOUT_NID,
11593 .num_channel_mode = ARRAY_SIZE(alc262_modes), 11647 .num_channel_mode = ARRAY_SIZE(alc262_modes),
11594 .channel_mode = alc262_modes, 11648 .channel_mode = alc262_modes,
11595 .input_mux = &alc262_dmic_capture_source, 11649 .unsol_event = alc_sku_unsol_event,
11596 .unsol_event = alc262_toshiba_s06_unsol_event, 11650 .setup = alc262_toshiba_s06_setup,
11597 .init_hook = alc262_toshiba_s06_init_hook, 11651 .init_hook = alc_inithook,
11598 }, 11652 },
11599 [ALC262_TOSHIBA_RX1] = { 11653 [ALC262_TOSHIBA_RX1] = {
11600 .mixers = { alc262_toshiba_rx1_mixer }, 11654 .mixers = { alc262_toshiba_rx1_mixer },
@@ -11606,7 +11660,8 @@ static struct alc_config_preset alc262_presets[] = {
11606 .channel_mode = alc262_modes, 11660 .channel_mode = alc262_modes,
11607 .input_mux = &alc262_capture_source, 11661 .input_mux = &alc262_capture_source,
11608 .unsol_event = alc262_hippo_unsol_event, 11662 .unsol_event = alc262_hippo_unsol_event,
11609 .init_hook = alc262_hippo_init_hook, 11663 .setup = alc262_hippo_setup,
11664 .init_hook = alc262_hippo_automute,
11610 }, 11665 },
11611 [ALC262_TYAN] = { 11666 [ALC262_TYAN] = {
11612 .mixers = { alc262_tyan_mixer }, 11667 .mixers = { alc262_tyan_mixer },
@@ -11619,7 +11674,8 @@ static struct alc_config_preset alc262_presets[] = {
11619 .channel_mode = alc262_modes, 11674 .channel_mode = alc262_modes,
11620 .input_mux = &alc262_capture_source, 11675 .input_mux = &alc262_capture_source,
11621 .unsol_event = alc_automute_amp_unsol_event, 11676 .unsol_event = alc_automute_amp_unsol_event,
11622 .init_hook = alc262_tyan_init_hook, 11677 .setup = alc262_tyan_setup,
11678 .init_hook = alc_automute_amp,
11623 }, 11679 },
11624}; 11680};
11625 11681
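Throughout these preset updates the board-specific init_hook is split into a .setup callback that only fills fields in struct alc_spec (headphone/speaker pins, ext/int mic info) and a shared .init_hook such as alc_automute_amp, alc262_hippo_automute or alc_inithook; setup_preset() now takes the codec rather than the spec so it can invoke that callback. A sketch of the assumed hand-off (not part of this hunk):

	static void setup_preset(struct hda_codec *codec,
				 const struct alc_config_preset *preset)
	{
		struct alc_spec *spec = codec->spec;
		/* ... copy mixers, verbs, DAC/ADC tables into spec ... */
		if (preset->setup)
			preset->setup(codec);	/* fill autocfg pins, mic pins */
		/* preset->init_hook runs later from the codec init callback */
	}
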
@@ -11654,8 +11710,8 @@ static int patch_alc262(struct hda_codec *codec)
11654 alc262_cfg_tbl); 11710 alc262_cfg_tbl);
11655 11711
11656 if (board_config < 0) { 11712 if (board_config < 0) {
11657 printk(KERN_INFO "hda_codec: Unknown model for %s, " 11713 printk(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
11658 "trying auto-probe from BIOS...\n", codec->chip_name); 11714 codec->chip_name);
11659 board_config = ALC262_AUTO; 11715 board_config = ALC262_AUTO;
11660 } 11716 }
11661 11717
@@ -11682,7 +11738,7 @@ static int patch_alc262(struct hda_codec *codec)
11682 } 11738 }
11683 11739
11684 if (board_config != ALC262_AUTO) 11740 if (board_config != ALC262_AUTO)
11685 setup_preset(spec, &alc262_presets[board_config]); 11741 setup_preset(codec, &alc262_presets[board_config]);
11686 11742
11687 spec->stream_analog_playback = &alc262_pcm_analog_playback; 11743 spec->stream_analog_playback = &alc262_pcm_analog_playback;
11688 spec->stream_analog_capture = &alc262_pcm_analog_capture; 11744 spec->stream_analog_capture = &alc262_pcm_analog_capture;
@@ -11708,7 +11764,7 @@ static int patch_alc262(struct hda_codec *codec)
11708 unsigned int wcap = get_wcaps(codec, 0x07); 11764 unsigned int wcap = get_wcaps(codec, 0x07);
11709 11765
11710 /* get type */ 11766 /* get type */
11711 wcap = (wcap & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT; 11767 wcap = get_wcaps_type(wcap);
11712 if (wcap != AC_WID_AUD_IN) { 11768 if (wcap != AC_WID_AUD_IN) {
11713 spec->adc_nids = alc262_adc_nids_alt; 11769 spec->adc_nids = alc262_adc_nids_alt;
11714 spec->num_adc_nids = 11770 spec->num_adc_nids =
@@ -11723,7 +11779,7 @@ static int patch_alc262(struct hda_codec *codec)
11723 } 11779 }
11724 } 11780 }
11725 if (!spec->cap_mixer && !spec->no_analog) 11781 if (!spec->cap_mixer && !spec->no_analog)
11726 set_capture_mixer(spec); 11782 set_capture_mixer(codec);
11727 if (!spec->no_analog) 11783 if (!spec->no_analog)
11728 set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT); 11784 set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT);
11729 11785
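The open-coded "(wcap & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT" is replaced by the get_wcaps_type() helper, presumably just that shift wrapped in an inline:

	/* presumably equivalent (sketch): */
	static inline unsigned int get_wcaps_type(unsigned int wcaps)
	{
		return (wcaps & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT;
	}

	/* typical use, as in the hunk above */
	if (get_wcaps_type(get_wcaps(codec, 0x07)) != AC_WID_AUD_IN)
		;	/* 0x07 is not an audio-in widget: use the alt ADC list */
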
@@ -11815,14 +11871,6 @@ static struct hda_verb alc268_toshiba_verbs[] = {
11815 { } /* end */ 11871 { } /* end */
11816}; 11872};
11817 11873
11818static struct hda_input_mux alc268_acer_lc_capture_source = {
11819 .num_items = 2,
11820 .items = {
11821 { "i-Mic", 0x6 },
11822 { "E-Mic", 0x0 },
11823 },
11824};
11825
11826/* Acer specific */ 11874/* Acer specific */
11827/* bind volumes of both NID 0x02 and 0x03 */ 11875/* bind volumes of both NID 0x02 and 0x03 */
11828static struct hda_bind_ctls alc268_acer_bind_master_vol = { 11876static struct hda_bind_ctls alc268_acer_bind_master_vol = {
@@ -11941,7 +11989,8 @@ static struct hda_verb alc268_acer_verbs[] = {
11941 11989
11942/* unsolicited event for HP jack sensing */ 11990/* unsolicited event for HP jack sensing */
11943#define alc268_toshiba_unsol_event alc262_hippo_unsol_event 11991#define alc268_toshiba_unsol_event alc262_hippo_unsol_event
11944#define alc268_toshiba_init_hook alc262_hippo_init_hook 11992#define alc268_toshiba_setup alc262_hippo_setup
11993#define alc268_toshiba_automute alc262_hippo_automute
11945 11994
11946static void alc268_acer_unsol_event(struct hda_codec *codec, 11995static void alc268_acer_unsol_event(struct hda_codec *codec,
11947 unsigned int res) 11996 unsigned int res)
@@ -11971,30 +12020,33 @@ static void alc268_aspire_one_speaker_automute(struct hda_codec *codec)
11971 AMP_IN_MUTE(0), bits); 12020 AMP_IN_MUTE(0), bits);
11972} 12021}
11973 12022
11974
11975static void alc268_acer_mic_automute(struct hda_codec *codec)
11976{
11977 unsigned int present;
11978
11979 present = snd_hda_codec_read(codec, 0x18, 0,
11980 AC_VERB_GET_PIN_SENSE, 0) & 0x80000000;
11981 snd_hda_codec_write(codec, 0x23, 0, AC_VERB_SET_CONNECT_SEL,
11982 present ? 0x0 : 0x6);
11983}
11984
11985static void alc268_acer_lc_unsol_event(struct hda_codec *codec, 12023static void alc268_acer_lc_unsol_event(struct hda_codec *codec,
11986 unsigned int res) 12024 unsigned int res)
11987{ 12025{
11988 if ((res >> 26) == ALC880_HP_EVENT) 12026 switch (res >> 26) {
12027 case ALC880_HP_EVENT:
11989 alc268_aspire_one_speaker_automute(codec); 12028 alc268_aspire_one_speaker_automute(codec);
11990 if ((res >> 26) == ALC880_MIC_EVENT) 12029 break;
11991 alc268_acer_mic_automute(codec); 12030 case ALC880_MIC_EVENT:
12031 alc_mic_automute(codec);
12032 break;
12033 }
12034}
12035
12036static void alc268_acer_lc_setup(struct hda_codec *codec)
12037{
12038 struct alc_spec *spec = codec->spec;
12039 spec->ext_mic.pin = 0x18;
12040 spec->ext_mic.mux_idx = 0;
12041 spec->int_mic.pin = 0x12;
12042 spec->int_mic.mux_idx = 6;
12043 spec->auto_mic = 1;
11992} 12044}
11993 12045
11994static void alc268_acer_lc_init_hook(struct hda_codec *codec) 12046static void alc268_acer_lc_init_hook(struct hda_codec *codec)
11995{ 12047{
11996 alc268_aspire_one_speaker_automute(codec); 12048 alc268_aspire_one_speaker_automute(codec);
11997 alc268_acer_mic_automute(codec); 12049 alc_mic_automute(codec);
11998} 12050}
11999 12051
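The per-model *_mic_automute() helpers are dropped in favour of the shared auto-mic code: each setup callback records the external/internal mic pin and its mux index in spec->ext_mic / spec->int_mic, sets spec->auto_mic, and alc_mic_automute() does the switching on ALC880_MIC_EVENT. A simplified sketch of the expected behaviour (the real helper also handles amp-style capture sources and caching):

	static void alc_mic_automute_sketch(struct hda_codec *codec)
	{
		struct alc_spec *spec = codec->spec;
		unsigned int present;

		present = snd_hda_codec_read(codec, spec->ext_mic.pin, 0,
					     AC_VERB_GET_PIN_SENSE, 0) & 0x80000000;
		snd_hda_codec_write(codec, spec->capsrc_nids[0], 0,
				    AC_VERB_SET_CONNECT_SEL,
				    present ? spec->ext_mic.mux_idx
					    : spec->int_mic.mux_idx);
	}
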
12000static struct snd_kcontrol_new alc268_dell_mixer[] = { 12052static struct snd_kcontrol_new alc268_dell_mixer[] = {
@@ -12012,17 +12064,22 @@ static struct hda_verb alc268_dell_verbs[] = {
12012 {0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP}, 12064 {0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
12013 {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP}, 12065 {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
12014 {0x15, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_HP_EVENT | AC_USRSP_EN}, 12066 {0x15, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_HP_EVENT | AC_USRSP_EN},
12067 {0x18, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_MIC_EVENT | AC_USRSP_EN},
12015 { } 12068 { }
12016}; 12069};
12017 12070
12018/* mute/unmute internal speaker according to the hp jack and mute state */ 12071/* mute/unmute internal speaker according to the hp jack and mute state */
12019static void alc268_dell_init_hook(struct hda_codec *codec) 12072static void alc268_dell_setup(struct hda_codec *codec)
12020{ 12073{
12021 struct alc_spec *spec = codec->spec; 12074 struct alc_spec *spec = codec->spec;
12022 12075
12023 spec->autocfg.hp_pins[0] = 0x15; 12076 spec->autocfg.hp_pins[0] = 0x15;
12024 spec->autocfg.speaker_pins[0] = 0x14; 12077 spec->autocfg.speaker_pins[0] = 0x14;
12025 alc_automute_pin(codec); 12078 spec->ext_mic.pin = 0x18;
12079 spec->ext_mic.mux_idx = 0;
12080 spec->int_mic.pin = 0x19;
12081 spec->int_mic.mux_idx = 1;
12082 spec->auto_mic = 1;
12026} 12083}
12027 12084
12028static struct snd_kcontrol_new alc267_quanta_il1_mixer[] = { 12085static struct snd_kcontrol_new alc267_quanta_il1_mixer[] = {
@@ -12043,38 +12100,16 @@ static struct hda_verb alc267_quanta_il1_verbs[] = {
12043 { } 12100 { }
12044}; 12101};
12045 12102
12046static void alc267_quanta_il1_mic_automute(struct hda_codec *codec) 12103static void alc267_quanta_il1_setup(struct hda_codec *codec)
12047{
12048 unsigned int present;
12049
12050 present = snd_hda_codec_read(codec, 0x18, 0,
12051 AC_VERB_GET_PIN_SENSE, 0) & 0x80000000;
12052 snd_hda_codec_write(codec, 0x23, 0,
12053 AC_VERB_SET_CONNECT_SEL,
12054 present ? 0x00 : 0x01);
12055}
12056
12057static void alc267_quanta_il1_init_hook(struct hda_codec *codec)
12058{ 12104{
12059 struct alc_spec *spec = codec->spec; 12105 struct alc_spec *spec = codec->spec;
12060
12061 spec->autocfg.hp_pins[0] = 0x15; 12106 spec->autocfg.hp_pins[0] = 0x15;
12062 spec->autocfg.speaker_pins[0] = 0x14; 12107 spec->autocfg.speaker_pins[0] = 0x14;
12063 alc_automute_pin(codec); 12108 spec->ext_mic.pin = 0x18;
12064 alc267_quanta_il1_mic_automute(codec); 12109 spec->ext_mic.mux_idx = 0;
12065} 12110 spec->int_mic.pin = 0x19;
12066 12111 spec->int_mic.mux_idx = 1;
12067static void alc267_quanta_il1_unsol_event(struct hda_codec *codec, 12112 spec->auto_mic = 1;
12068 unsigned int res)
12069{
12070 switch (res >> 26) {
12071 case ALC880_MIC_EVENT:
12072 alc267_quanta_il1_mic_automute(codec);
12073 break;
12074 default:
12075 alc_sku_unsol_event(codec, res);
12076 break;
12077 }
12078} 12113}
12079 12114
12080/* 12115/*
@@ -12154,21 +12189,16 @@ static struct hda_verb alc268_volume_init_verbs[] = {
12154 { } 12189 { }
12155}; 12190};
12156 12191
12192static struct snd_kcontrol_new alc268_capture_nosrc_mixer[] = {
12193 HDA_CODEC_VOLUME("Capture Volume", 0x23, 0x0, HDA_OUTPUT),
12194 HDA_CODEC_MUTE("Capture Switch", 0x23, 0x0, HDA_OUTPUT),
12195 { } /* end */
12196};
12197
12157static struct snd_kcontrol_new alc268_capture_alt_mixer[] = { 12198static struct snd_kcontrol_new alc268_capture_alt_mixer[] = {
12158 HDA_CODEC_VOLUME("Capture Volume", 0x23, 0x0, HDA_OUTPUT), 12199 HDA_CODEC_VOLUME("Capture Volume", 0x23, 0x0, HDA_OUTPUT),
12159 HDA_CODEC_MUTE("Capture Switch", 0x23, 0x0, HDA_OUTPUT), 12200 HDA_CODEC_MUTE("Capture Switch", 0x23, 0x0, HDA_OUTPUT),
12160 { 12201 _DEFINE_CAPSRC(1),
12161 .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
12162 /* The multiple "Capture Source" controls confuse alsamixer
12163 * So call somewhat different..
12164 */
12165 /* .name = "Capture Source", */
12166 .name = "Input Source",
12167 .count = 1,
12168 .info = alc_mux_enum_info,
12169 .get = alc_mux_enum_get,
12170 .put = alc_mux_enum_put,
12171 },
12172 { } /* end */ 12202 { } /* end */
12173}; 12203};
12174 12204
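The open-coded "Input Source" enum control (named "Input Source" rather than "Capture Source" so alsamixer is not confused by multiple controls of the same name) is folded into the _DEFINE_CAPSRC() macro. Judging from the removed lines, the macro presumably expands to roughly:

	#define _DEFINE_CAPSRC(num) \
		{ \
			.iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
			.name = "Input Source", \
			.count = num, \
			.info = alc_mux_enum_info, \
			.get = alc_mux_enum_get, \
			.put = alc_mux_enum_put, \
		}
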
@@ -12177,18 +12207,7 @@ static struct snd_kcontrol_new alc268_capture_mixer[] = {
12177 HDA_CODEC_MUTE("Capture Switch", 0x23, 0x0, HDA_OUTPUT), 12207 HDA_CODEC_MUTE("Capture Switch", 0x23, 0x0, HDA_OUTPUT),
12178 HDA_CODEC_VOLUME_IDX("Capture Volume", 1, 0x24, 0x0, HDA_OUTPUT), 12208 HDA_CODEC_VOLUME_IDX("Capture Volume", 1, 0x24, 0x0, HDA_OUTPUT),
12179 HDA_CODEC_MUTE_IDX("Capture Switch", 1, 0x24, 0x0, HDA_OUTPUT), 12209 HDA_CODEC_MUTE_IDX("Capture Switch", 1, 0x24, 0x0, HDA_OUTPUT),
12180 { 12210 _DEFINE_CAPSRC(2),
12181 .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
12182 /* The multiple "Capture Source" controls confuse alsamixer
12183 * So call somewhat different..
12184 */
12185 /* .name = "Capture Source", */
12186 .name = "Input Source",
12187 .count = 2,
12188 .info = alc_mux_enum_info,
12189 .get = alc_mux_enum_get,
12190 .put = alc_mux_enum_put,
12191 },
12192 { } /* end */ 12211 { } /* end */
12193}; 12212};
12194 12213
@@ -12275,26 +12294,38 @@ static int alc268_new_analog_output(struct alc_spec *spec, hda_nid_t nid,
12275 const char *ctlname, int idx) 12294 const char *ctlname, int idx)
12276{ 12295{
12277 char name[32]; 12296 char name[32];
12297 hda_nid_t dac;
12278 int err; 12298 int err;
12279 12299
12280 sprintf(name, "%s Playback Volume", ctlname); 12300 sprintf(name, "%s Playback Volume", ctlname);
12281 if (nid == 0x14) { 12301 switch (nid) {
12282 err = add_control(spec, ALC_CTL_WIDGET_VOL, name, 12302 case 0x14:
12283 HDA_COMPOSE_AMP_VAL(0x02, 3, idx, 12303 case 0x16:
12284 HDA_OUTPUT)); 12304 dac = 0x02;
12285 if (err < 0) 12305 break;
12286 return err; 12306 case 0x15:
12287 } else if (nid == 0x15) { 12307 dac = 0x03;
12308 break;
12309 default:
12310 return 0;
12311 }
12312 if (spec->multiout.dac_nids[0] != dac &&
12313 spec->multiout.dac_nids[1] != dac) {
12288 err = add_control(spec, ALC_CTL_WIDGET_VOL, name, 12314 err = add_control(spec, ALC_CTL_WIDGET_VOL, name,
12289 HDA_COMPOSE_AMP_VAL(0x03, 3, idx, 12315 HDA_COMPOSE_AMP_VAL(dac, 3, idx,
12290 HDA_OUTPUT)); 12316 HDA_OUTPUT));
12291 if (err < 0) 12317 if (err < 0)
12292 return err; 12318 return err;
12293 } else 12319 spec->multiout.dac_nids[spec->multiout.num_dacs++] = dac;
12294 return -1; 12320 }
12321
12295 sprintf(name, "%s Playback Switch", ctlname); 12322 sprintf(name, "%s Playback Switch", ctlname);
12296 err = add_control(spec, ALC_CTL_WIDGET_MUTE, name, 12323 if (nid != 0x16)
12324 err = add_control(spec, ALC_CTL_WIDGET_MUTE, name,
12297 HDA_COMPOSE_AMP_VAL(nid, 3, idx, HDA_OUTPUT)); 12325 HDA_COMPOSE_AMP_VAL(nid, 3, idx, HDA_OUTPUT));
12326 else /* mono */
12327 err = add_control(spec, ALC_CTL_WIDGET_MUTE, name,
12328 HDA_COMPOSE_AMP_VAL(nid, 2, idx, HDA_OUTPUT));
12298 if (err < 0) 12329 if (err < 0)
12299 return err; 12330 return err;
12300 return 0; 12331 return 0;
@@ -12307,14 +12338,19 @@ static int alc268_auto_create_multi_out_ctls(struct alc_spec *spec,
12307 hda_nid_t nid; 12338 hda_nid_t nid;
12308 int err; 12339 int err;
12309 12340
12310 spec->multiout.num_dacs = 2; /* only use one dac */
12311 spec->multiout.dac_nids = spec->private_dac_nids; 12341 spec->multiout.dac_nids = spec->private_dac_nids;
12312 spec->multiout.dac_nids[0] = 2;
12313 spec->multiout.dac_nids[1] = 3;
12314 12342
12315 nid = cfg->line_out_pins[0]; 12343 nid = cfg->line_out_pins[0];
12316 if (nid) 12344 if (nid) {
12317 alc268_new_analog_output(spec, nid, "Front", 0); 12345 const char *name;
12346 if (cfg->line_out_type == AUTO_PIN_SPEAKER_OUT)
12347 name = "Speaker";
12348 else
12349 name = "Front";
12350 err = alc268_new_analog_output(spec, nid, name, 0);
12351 if (err < 0)
12352 return err;
12353 }
12318 12354
12319 nid = cfg->speaker_pins[0]; 12355 nid = cfg->speaker_pins[0];
12320 if (nid == 0x1d) { 12356 if (nid == 0x1d) {
@@ -12323,16 +12359,23 @@ static int alc268_auto_create_multi_out_ctls(struct alc_spec *spec,
12323 HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_INPUT)); 12359 HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_INPUT));
12324 if (err < 0) 12360 if (err < 0)
12325 return err; 12361 return err;
12362 } else {
12363 err = alc268_new_analog_output(spec, nid, "Speaker", 0);
12364 if (err < 0)
12365 return err;
12326 } 12366 }
12327 nid = cfg->hp_pins[0]; 12367 nid = cfg->hp_pins[0];
12328 if (nid) 12368 if (nid) {
12329 alc268_new_analog_output(spec, nid, "Headphone", 0); 12369 err = alc268_new_analog_output(spec, nid, "Headphone", 0);
12370 if (err < 0)
12371 return err;
12372 }
12330 12373
12331 nid = cfg->line_out_pins[1] | cfg->line_out_pins[2]; 12374 nid = cfg->line_out_pins[1] | cfg->line_out_pins[2];
12332 if (nid == 0x16) { 12375 if (nid == 0x16) {
12333 err = add_control(spec, ALC_CTL_WIDGET_MUTE, 12376 err = add_control(spec, ALC_CTL_WIDGET_MUTE,
12334 "Mono Playback Switch", 12377 "Mono Playback Switch",
12335 HDA_COMPOSE_AMP_VAL(nid, 2, 0, HDA_INPUT)); 12378 HDA_COMPOSE_AMP_VAL(nid, 2, 0, HDA_OUTPUT));
12336 if (err < 0) 12379 if (err < 0)
12337 return err; 12380 return err;
12338 } 12381 }
@@ -12340,38 +12383,46 @@ static int alc268_auto_create_multi_out_ctls(struct alc_spec *spec,
12340} 12383}
12341 12384
12342/* create playback/capture controls for input pins */ 12385/* create playback/capture controls for input pins */
12343static int alc268_auto_create_analog_input_ctls(struct alc_spec *spec, 12386static int alc268_auto_create_input_ctls(struct hda_codec *codec,
12344 const struct auto_pin_cfg *cfg) 12387 const struct auto_pin_cfg *cfg)
12345{ 12388{
12346 struct hda_input_mux *imux = &spec->private_imux[0]; 12389 return alc_auto_create_input_ctls(codec, cfg, 0, 0x23, 0x24);
12347 int i, idx1; 12390}
12348 12391
12349 for (i = 0; i < AUTO_PIN_LAST; i++) { 12392static void alc268_auto_set_output_and_unmute(struct hda_codec *codec,
12350 switch(cfg->input_pins[i]) { 12393 hda_nid_t nid, int pin_type)
12351 case 0x18: 12394{
12352 idx1 = 0; /* Mic 1 */ 12395 int idx;
12353 break; 12396
12354 case 0x19: 12397 alc_set_pin_output(codec, nid, pin_type);
12355 idx1 = 1; /* Mic 2 */ 12398 if (nid == 0x14 || nid == 0x16)
12356 break; 12399 idx = 0;
12357 case 0x1a: 12400 else
12358 idx1 = 2; /* Line In */ 12401 idx = 1;
12359 break; 12402 snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_CONNECT_SEL, idx);
12360 case 0x1c: 12403}
12361 idx1 = 3; /* CD */ 12404
12362 break; 12405static void alc268_auto_init_multi_out(struct hda_codec *codec)
12363 case 0x12: 12406{
12364 case 0x13: 12407 struct alc_spec *spec = codec->spec;
12365 idx1 = 6; /* digital mics */ 12408 hda_nid_t nid = spec->autocfg.line_out_pins[0];
12366 break; 12409 if (nid) {
12367 default: 12410 int pin_type = get_pin_type(spec->autocfg.line_out_type);
12368 continue; 12411 alc268_auto_set_output_and_unmute(codec, nid, pin_type);
12369 }
12370 imux->items[imux->num_items].label = auto_pin_cfg_labels[i];
12371 imux->items[imux->num_items].index = idx1;
12372 imux->num_items++;
12373 } 12412 }
12374 return 0; 12413}
12414
12415static void alc268_auto_init_hp_out(struct hda_codec *codec)
12416{
12417 struct alc_spec *spec = codec->spec;
12418 hda_nid_t pin;
12419
12420 pin = spec->autocfg.hp_pins[0];
12421 if (pin)
12422 alc268_auto_set_output_and_unmute(codec, pin, PIN_HP);
12423 pin = spec->autocfg.speaker_pins[0];
12424 if (pin)
12425 alc268_auto_set_output_and_unmute(codec, pin, PIN_OUT);
12375} 12426}
12376 12427
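alc268_auto_set_output_and_unmute() configures the pin as an output and then selects the connection index matching the DAC that feeds it: 0 for pins 0x14/0x16 (the 0x02 path) and 1 otherwise (the 0x03 path). alc_set_pin_output() is assumed to be little more than:

	/* sketch; the real helper may also unmute the pin's output amp */
	static void alc_set_pin_output(struct hda_codec *codec, hda_nid_t nid,
				       int pin_type)
	{
		snd_hda_codec_write(codec, nid, 0,
				    AC_VERB_SET_PIN_WIDGET_CONTROL, pin_type);
	}
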
12377static void alc268_auto_init_mono_speaker_out(struct hda_codec *codec) 12428static void alc268_auto_init_mono_speaker_out(struct hda_codec *codec)
@@ -12382,9 +12433,10 @@ static void alc268_auto_init_mono_speaker_out(struct hda_codec *codec)
12382 hda_nid_t line_nid = spec->autocfg.line_out_pins[0]; 12433 hda_nid_t line_nid = spec->autocfg.line_out_pins[0];
12383 unsigned int dac_vol1, dac_vol2; 12434 unsigned int dac_vol1, dac_vol2;
12384 12435
12385 if (speaker_nid) { 12436 if (line_nid == 0x1d || speaker_nid == 0x1d) {
12386 snd_hda_codec_write(codec, speaker_nid, 0, 12437 snd_hda_codec_write(codec, speaker_nid, 0,
12387 AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT); 12438 AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
12439 /* mute mixer inputs from 0x1d */
12388 snd_hda_codec_write(codec, 0x0f, 0, 12440 snd_hda_codec_write(codec, 0x0f, 0,
12389 AC_VERB_SET_AMP_GAIN_MUTE, 12441 AC_VERB_SET_AMP_GAIN_MUTE,
12390 AMP_IN_UNMUTE(1)); 12442 AMP_IN_UNMUTE(1));
@@ -12392,6 +12444,7 @@ static void alc268_auto_init_mono_speaker_out(struct hda_codec *codec)
12392 AC_VERB_SET_AMP_GAIN_MUTE, 12444 AC_VERB_SET_AMP_GAIN_MUTE,
12393 AMP_IN_UNMUTE(1)); 12445 AMP_IN_UNMUTE(1));
12394 } else { 12446 } else {
12447 /* unmute mixer inputs from 0x1d */
12395 snd_hda_codec_write(codec, 0x0f, 0, 12448 snd_hda_codec_write(codec, 0x0f, 0,
12396 AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)); 12449 AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1));
12397 snd_hda_codec_write(codec, 0x10, 0, 12450 snd_hda_codec_write(codec, 0x10, 0,
@@ -12448,7 +12501,7 @@ static int alc268_parse_auto_config(struct hda_codec *codec)
12448 err = alc268_auto_create_multi_out_ctls(spec, &spec->autocfg); 12501 err = alc268_auto_create_multi_out_ctls(spec, &spec->autocfg);
12449 if (err < 0) 12502 if (err < 0)
12450 return err; 12503 return err;
12451 err = alc268_auto_create_analog_input_ctls(spec, &spec->autocfg); 12504 err = alc268_auto_create_input_ctls(codec, &spec->autocfg);
12452 if (err < 0) 12505 if (err < 0)
12453 return err; 12506 return err;
12454 12507
@@ -12467,7 +12520,7 @@ static int alc268_parse_auto_config(struct hda_codec *codec)
12467 add_mixer(spec, alc268_beep_mixer); 12520 add_mixer(spec, alc268_beep_mixer);
12468 12521
12469 add_verb(spec, alc268_volume_init_verbs); 12522 add_verb(spec, alc268_volume_init_verbs);
12470 spec->num_mux_defs = 1; 12523 spec->num_mux_defs = 2;
12471 spec->input_mux = &spec->private_imux[0]; 12524 spec->input_mux = &spec->private_imux[0];
12472 12525
12473 err = alc_auto_add_mic_boost(codec); 12526 err = alc_auto_add_mic_boost(codec);
@@ -12479,8 +12532,6 @@ static int alc268_parse_auto_config(struct hda_codec *codec)
12479 return 1; 12532 return 1;
12480} 12533}
12481 12534
12482#define alc268_auto_init_multi_out alc882_auto_init_multi_out
12483#define alc268_auto_init_hp_out alc882_auto_init_hp_out
12484#define alc268_auto_init_analog_input alc882_auto_init_analog_input 12535#define alc268_auto_init_analog_input alc882_auto_init_analog_input
12485 12536
12486/* init callback for auto-configuration model -- overriding the default init */ 12537/* init callback for auto-configuration model -- overriding the default init */
@@ -12523,8 +12574,11 @@ static struct snd_pci_quirk alc268_cfg_tbl[] = {
12523 ALC268_ACER_ASPIRE_ONE), 12574 ALC268_ACER_ASPIRE_ONE),
12524 SND_PCI_QUIRK(0x1028, 0x0253, "Dell OEM", ALC268_DELL), 12575 SND_PCI_QUIRK(0x1028, 0x0253, "Dell OEM", ALC268_DELL),
12525 SND_PCI_QUIRK(0x1028, 0x02b0, "Dell Inspiron Mini9", ALC268_DELL), 12576 SND_PCI_QUIRK(0x1028, 0x02b0, "Dell Inspiron Mini9", ALC268_DELL),
12577 /* almost compatible with toshiba but with optional digital outs;
 12578	 * auto-probing seems to work fine
12579 */
12526 SND_PCI_QUIRK_MASK(0x103c, 0xff00, 0x3000, "HP TX25xx series", 12580 SND_PCI_QUIRK_MASK(0x103c, 0xff00, 0x3000, "HP TX25xx series",
12527 ALC268_TOSHIBA), 12581 ALC268_AUTO),
12528 SND_PCI_QUIRK(0x1043, 0x1205, "ASUS W7J", ALC268_3ST), 12582 SND_PCI_QUIRK(0x1043, 0x1205, "ASUS W7J", ALC268_3ST),
12529 SND_PCI_QUIRK(0x1170, 0x0040, "ZEPTO", ALC268_ZEPTO), 12583 SND_PCI_QUIRK(0x1170, 0x0040, "ZEPTO", ALC268_ZEPTO),
12530 SND_PCI_QUIRK(0x14c0, 0x0025, "COMPAL IFL90/JFL-92", ALC268_TOSHIBA), 12584 SND_PCI_QUIRK(0x14c0, 0x0025, "COMPAL IFL90/JFL-92", ALC268_TOSHIBA),
@@ -12545,7 +12599,8 @@ static struct snd_pci_quirk alc268_ssid_cfg_tbl[] = {
12545 12599
12546static struct alc_config_preset alc268_presets[] = { 12600static struct alc_config_preset alc268_presets[] = {
12547 [ALC267_QUANTA_IL1] = { 12601 [ALC267_QUANTA_IL1] = {
12548 .mixers = { alc267_quanta_il1_mixer, alc268_beep_mixer }, 12602 .mixers = { alc267_quanta_il1_mixer, alc268_beep_mixer,
12603 alc268_capture_nosrc_mixer },
12549 .init_verbs = { alc268_base_init_verbs, alc268_eapd_verbs, 12604 .init_verbs = { alc268_base_init_verbs, alc268_eapd_verbs,
12550 alc267_quanta_il1_verbs }, 12605 alc267_quanta_il1_verbs },
12551 .num_dacs = ARRAY_SIZE(alc268_dac_nids), 12606 .num_dacs = ARRAY_SIZE(alc268_dac_nids),
@@ -12555,9 +12610,9 @@ static struct alc_config_preset alc268_presets[] = {
12555 .hp_nid = 0x03, 12610 .hp_nid = 0x03,
12556 .num_channel_mode = ARRAY_SIZE(alc268_modes), 12611 .num_channel_mode = ARRAY_SIZE(alc268_modes),
12557 .channel_mode = alc268_modes, 12612 .channel_mode = alc268_modes,
12558 .input_mux = &alc268_capture_source, 12613 .unsol_event = alc_sku_unsol_event,
12559 .unsol_event = alc267_quanta_il1_unsol_event, 12614 .setup = alc267_quanta_il1_setup,
12560 .init_hook = alc267_quanta_il1_init_hook, 12615 .init_hook = alc_inithook,
12561 }, 12616 },
12562 [ALC268_3ST] = { 12617 [ALC268_3ST] = {
12563 .mixers = { alc268_base_mixer, alc268_capture_alt_mixer, 12618 .mixers = { alc268_base_mixer, alc268_capture_alt_mixer,
@@ -12589,10 +12644,11 @@ static struct alc_config_preset alc268_presets[] = {
12589 .channel_mode = alc268_modes, 12644 .channel_mode = alc268_modes,
12590 .input_mux = &alc268_capture_source, 12645 .input_mux = &alc268_capture_source,
12591 .unsol_event = alc268_toshiba_unsol_event, 12646 .unsol_event = alc268_toshiba_unsol_event,
12592 .init_hook = alc268_toshiba_init_hook, 12647 .setup = alc268_toshiba_setup,
12648 .init_hook = alc268_toshiba_automute,
12593 }, 12649 },
12594 [ALC268_ACER] = { 12650 [ALC268_ACER] = {
12595 .mixers = { alc268_acer_mixer, alc268_capture_alt_mixer, 12651 .mixers = { alc268_acer_mixer, alc268_capture_nosrc_mixer,
12596 alc268_beep_mixer }, 12652 alc268_beep_mixer },
12597 .init_verbs = { alc268_base_init_verbs, alc268_eapd_verbs, 12653 .init_verbs = { alc268_base_init_verbs, alc268_eapd_verbs,
12598 alc268_acer_verbs }, 12654 alc268_acer_verbs },
@@ -12628,7 +12684,7 @@ static struct alc_config_preset alc268_presets[] = {
12628 [ALC268_ACER_ASPIRE_ONE] = { 12684 [ALC268_ACER_ASPIRE_ONE] = {
12629 .mixers = { alc268_acer_aspire_one_mixer, 12685 .mixers = { alc268_acer_aspire_one_mixer,
12630 alc268_beep_mixer, 12686 alc268_beep_mixer,
12631 alc268_capture_alt_mixer }, 12687 alc268_capture_nosrc_mixer },
12632 .init_verbs = { alc268_base_init_verbs, alc268_eapd_verbs, 12688 .init_verbs = { alc268_base_init_verbs, alc268_eapd_verbs,
12633 alc268_acer_aspire_one_verbs }, 12689 alc268_acer_aspire_one_verbs },
12634 .num_dacs = ARRAY_SIZE(alc268_dac_nids), 12690 .num_dacs = ARRAY_SIZE(alc268_dac_nids),
@@ -12639,22 +12695,26 @@ static struct alc_config_preset alc268_presets[] = {
12639 .hp_nid = 0x03, 12695 .hp_nid = 0x03,
12640 .num_channel_mode = ARRAY_SIZE(alc268_modes), 12696 .num_channel_mode = ARRAY_SIZE(alc268_modes),
12641 .channel_mode = alc268_modes, 12697 .channel_mode = alc268_modes,
12642 .input_mux = &alc268_acer_lc_capture_source,
12643 .unsol_event = alc268_acer_lc_unsol_event, 12698 .unsol_event = alc268_acer_lc_unsol_event,
12699 .setup = alc268_acer_lc_setup,
12644 .init_hook = alc268_acer_lc_init_hook, 12700 .init_hook = alc268_acer_lc_init_hook,
12645 }, 12701 },
12646 [ALC268_DELL] = { 12702 [ALC268_DELL] = {
12647 .mixers = { alc268_dell_mixer, alc268_beep_mixer }, 12703 .mixers = { alc268_dell_mixer, alc268_beep_mixer,
12704 alc268_capture_nosrc_mixer },
12648 .init_verbs = { alc268_base_init_verbs, alc268_eapd_verbs, 12705 .init_verbs = { alc268_base_init_verbs, alc268_eapd_verbs,
12649 alc268_dell_verbs }, 12706 alc268_dell_verbs },
12650 .num_dacs = ARRAY_SIZE(alc268_dac_nids), 12707 .num_dacs = ARRAY_SIZE(alc268_dac_nids),
12651 .dac_nids = alc268_dac_nids, 12708 .dac_nids = alc268_dac_nids,
12709 .num_adc_nids = ARRAY_SIZE(alc268_adc_nids_alt),
12710 .adc_nids = alc268_adc_nids_alt,
12711 .capsrc_nids = alc268_capsrc_nids,
12652 .hp_nid = 0x02, 12712 .hp_nid = 0x02,
12653 .num_channel_mode = ARRAY_SIZE(alc268_modes), 12713 .num_channel_mode = ARRAY_SIZE(alc268_modes),
12654 .channel_mode = alc268_modes, 12714 .channel_mode = alc268_modes,
12655 .unsol_event = alc_sku_unsol_event, 12715 .unsol_event = alc_sku_unsol_event,
12656 .init_hook = alc268_dell_init_hook, 12716 .setup = alc268_dell_setup,
12657 .input_mux = &alc268_capture_source, 12717 .init_hook = alc_inithook,
12658 }, 12718 },
12659 [ALC268_ZEPTO] = { 12719 [ALC268_ZEPTO] = {
12660 .mixers = { alc268_base_mixer, alc268_capture_alt_mixer, 12720 .mixers = { alc268_base_mixer, alc268_capture_alt_mixer,
@@ -12671,8 +12731,8 @@ static struct alc_config_preset alc268_presets[] = {
12671 .num_channel_mode = ARRAY_SIZE(alc268_modes), 12731 .num_channel_mode = ARRAY_SIZE(alc268_modes),
12672 .channel_mode = alc268_modes, 12732 .channel_mode = alc268_modes,
12673 .input_mux = &alc268_capture_source, 12733 .input_mux = &alc268_capture_source,
12674 .unsol_event = alc268_toshiba_unsol_event, 12734 .setup = alc268_toshiba_setup,
12675 .init_hook = alc268_toshiba_init_hook 12735 .init_hook = alc268_toshiba_automute,
12676 }, 12736 },
12677#ifdef CONFIG_SND_DEBUG 12737#ifdef CONFIG_SND_DEBUG
12678 [ALC268_TEST] = { 12738 [ALC268_TEST] = {
@@ -12714,8 +12774,8 @@ static int patch_alc268(struct hda_codec *codec)
12714 ALC882_MODEL_LAST, alc268_models, alc268_ssid_cfg_tbl); 12774 ALC882_MODEL_LAST, alc268_models, alc268_ssid_cfg_tbl);
12715 12775
12716 if (board_config < 0 || board_config >= ALC268_MODEL_LAST) { 12776 if (board_config < 0 || board_config >= ALC268_MODEL_LAST) {
12717 printk(KERN_INFO "hda_codec: Unknown model for %s, " 12777 printk(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
12718 "trying auto-probe from BIOS...\n", codec->chip_name); 12778 codec->chip_name);
12719 board_config = ALC268_AUTO; 12779 board_config = ALC268_AUTO;
12720 } 12780 }
12721 12781
@@ -12734,7 +12794,7 @@ static int patch_alc268(struct hda_codec *codec)
12734 } 12794 }
12735 12795
12736 if (board_config != ALC268_AUTO) 12796 if (board_config != ALC268_AUTO)
12737 setup_preset(spec, &alc268_presets[board_config]); 12797 setup_preset(codec, &alc268_presets[board_config]);
12738 12798
12739 spec->stream_analog_playback = &alc268_pcm_analog_playback; 12799 spec->stream_analog_playback = &alc268_pcm_analog_playback;
12740 spec->stream_analog_capture = &alc268_pcm_analog_capture; 12800 spec->stream_analog_capture = &alc268_pcm_analog_capture;
@@ -12771,11 +12831,15 @@ static int patch_alc268(struct hda_codec *codec)
12771 int i; 12831 int i;
12772 12832
12773 /* get type */ 12833 /* get type */
12774 wcap = (wcap & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT; 12834 wcap = get_wcaps_type(wcap);
12775 if (wcap != AC_WID_AUD_IN || spec->input_mux->num_items == 1) { 12835 if (spec->auto_mic ||
12836 wcap != AC_WID_AUD_IN || spec->input_mux->num_items == 1) {
12776 spec->adc_nids = alc268_adc_nids_alt; 12837 spec->adc_nids = alc268_adc_nids_alt;
12777 spec->num_adc_nids = ARRAY_SIZE(alc268_adc_nids_alt); 12838 spec->num_adc_nids = ARRAY_SIZE(alc268_adc_nids_alt);
12778 add_mixer(spec, alc268_capture_alt_mixer); 12839 if (spec->auto_mic || spec->input_mux->num_items == 1)
12840 add_mixer(spec, alc268_capture_nosrc_mixer);
12841 else
12842 add_mixer(spec, alc268_capture_alt_mixer);
12779 } else { 12843 } else {
12780 spec->adc_nids = alc268_adc_nids; 12844 spec->adc_nids = alc268_adc_nids;
12781 spec->num_adc_nids = ARRAY_SIZE(alc268_adc_nids); 12845 spec->num_adc_nids = ARRAY_SIZE(alc268_adc_nids);
@@ -12786,6 +12850,8 @@ static int patch_alc268(struct hda_codec *codec)
12786 for (i = 0; i < spec->num_adc_nids; i++) 12850 for (i = 0; i < spec->num_adc_nids; i++)
12787 snd_hda_codec_write_cache(codec, alc268_capsrc_nids[i], 12851 snd_hda_codec_write_cache(codec, alc268_capsrc_nids[i],
12788 0, AC_VERB_SET_CONNECT_SEL, 12852 0, AC_VERB_SET_CONNECT_SEL,
12853 i < spec->num_mux_defs ?
12854 spec->input_mux[i].items[0].index :
12789 spec->input_mux->items[0].index); 12855 spec->input_mux->items[0].index);
12790 } 12856 }
12791 12857
@@ -12820,22 +12886,6 @@ static hda_nid_t alc269_capsrc_nids[1] = {
12820 * not a mux! 12886 * not a mux!
12821 */ 12887 */
12822 12888
12823static struct hda_input_mux alc269_eeepc_dmic_capture_source = {
12824 .num_items = 2,
12825 .items = {
12826 { "i-Mic", 0x5 },
12827 { "e-Mic", 0x0 },
12828 },
12829};
12830
12831static struct hda_input_mux alc269_eeepc_amic_capture_source = {
12832 .num_items = 2,
12833 .items = {
12834 { "i-Mic", 0x1 },
12835 { "e-Mic", 0x0 },
12836 },
12837};
12838
12839#define alc269_modes alc260_modes 12889#define alc269_modes alc260_modes
12840#define alc269_capture_source alc880_lg_lw_capture_source 12890#define alc269_capture_source alc880_lg_lw_capture_source
12841 12891
@@ -12997,16 +13047,6 @@ static void alc269_lifebook_speaker_automute(struct hda_codec *codec)
12997 AC_VERB_SET_PROC_COEF, 0x480); 13047 AC_VERB_SET_PROC_COEF, 0x480);
12998} 13048}
12999 13049
13000static void alc269_quanta_fl1_mic_automute(struct hda_codec *codec)
13001{
13002 unsigned int present;
13003
13004 present = snd_hda_codec_read(codec, 0x18, 0,
13005 AC_VERB_GET_PIN_SENSE, 0) & 0x80000000;
13006 snd_hda_codec_write(codec, 0x23, 0,
13007 AC_VERB_SET_CONNECT_SEL, present ? 0x0 : 0x1);
13008}
13009
13010static void alc269_lifebook_mic_autoswitch(struct hda_codec *codec) 13050static void alc269_lifebook_mic_autoswitch(struct hda_codec *codec)
13011{ 13051{
13012 unsigned int present_laptop; 13052 unsigned int present_laptop;
@@ -13033,10 +13073,14 @@ static void alc269_lifebook_mic_autoswitch(struct hda_codec *codec)
13033static void alc269_quanta_fl1_unsol_event(struct hda_codec *codec, 13073static void alc269_quanta_fl1_unsol_event(struct hda_codec *codec,
13034 unsigned int res) 13074 unsigned int res)
13035{ 13075{
13036 if ((res >> 26) == ALC880_HP_EVENT) 13076 switch (res >> 26) {
13077 case ALC880_HP_EVENT:
13037 alc269_quanta_fl1_speaker_automute(codec); 13078 alc269_quanta_fl1_speaker_automute(codec);
13038 if ((res >> 26) == ALC880_MIC_EVENT) 13079 break;
13039 alc269_quanta_fl1_mic_automute(codec); 13080 case ALC880_MIC_EVENT:
13081 alc_mic_automute(codec);
13082 break;
13083 }
13040} 13084}
13041 13085
13042static void alc269_lifebook_unsol_event(struct hda_codec *codec, 13086static void alc269_lifebook_unsol_event(struct hda_codec *codec,
@@ -13048,10 +13092,20 @@ static void alc269_lifebook_unsol_event(struct hda_codec *codec,
13048 alc269_lifebook_mic_autoswitch(codec); 13092 alc269_lifebook_mic_autoswitch(codec);
13049} 13093}
13050 13094
13095static void alc269_quanta_fl1_setup(struct hda_codec *codec)
13096{
13097 struct alc_spec *spec = codec->spec;
13098 spec->ext_mic.pin = 0x18;
13099 spec->ext_mic.mux_idx = 0;
13100 spec->int_mic.pin = 0x19;
13101 spec->int_mic.mux_idx = 1;
13102 spec->auto_mic = 1;
13103}
13104
13051static void alc269_quanta_fl1_init_hook(struct hda_codec *codec) 13105static void alc269_quanta_fl1_init_hook(struct hda_codec *codec)
13052{ 13106{
13053 alc269_quanta_fl1_speaker_automute(codec); 13107 alc269_quanta_fl1_speaker_automute(codec);
13054 alc269_quanta_fl1_mic_automute(codec); 13108 alc_mic_automute(codec);
13055} 13109}
13056 13110
13057static void alc269_lifebook_init_hook(struct hda_codec *codec) 13111static void alc269_lifebook_init_hook(struct hda_codec *codec)
@@ -13096,60 +13150,44 @@ static void alc269_speaker_automute(struct hda_codec *codec)
13096 AMP_IN_MUTE(0), bits); 13150 AMP_IN_MUTE(0), bits);
13097} 13151}
13098 13152
13099static void alc269_eeepc_dmic_automute(struct hda_codec *codec)
13100{
13101 unsigned int present;
13102
13103 present = snd_hda_codec_read(codec, 0x18, 0,
13104 AC_VERB_GET_PIN_SENSE, 0) & 0x80000000;
13105 snd_hda_codec_write(codec, 0x23, 0,
13106 AC_VERB_SET_CONNECT_SEL, (present ? 0 : 5));
13107}
13108
13109static void alc269_eeepc_amic_automute(struct hda_codec *codec)
13110{
13111 unsigned int present;
13112
13113 present = snd_hda_codec_read(codec, 0x18, 0,
13114 AC_VERB_GET_PIN_SENSE, 0) & 0x80000000;
13115 snd_hda_codec_write(codec, 0x24, 0, AC_VERB_SET_AMP_GAIN_MUTE,
13116 0x7000 | (0x00 << 8) | (present ? 0 : 0x80));
13117 snd_hda_codec_write(codec, 0x24, 0, AC_VERB_SET_AMP_GAIN_MUTE,
13118 0x7000 | (0x01 << 8) | (present ? 0x80 : 0));
13119}
13120
13121/* unsolicited event for HP jack sensing */ 13153/* unsolicited event for HP jack sensing */
13122static void alc269_eeepc_dmic_unsol_event(struct hda_codec *codec, 13154static void alc269_eeepc_unsol_event(struct hda_codec *codec,
13123 unsigned int res) 13155 unsigned int res)
13124{ 13156{
13125 if ((res >> 26) == ALC880_HP_EVENT) 13157 switch (res >> 26) {
13158 case ALC880_HP_EVENT:
13126 alc269_speaker_automute(codec); 13159 alc269_speaker_automute(codec);
13127 13160 break;
13128 if ((res >> 26) == ALC880_MIC_EVENT) 13161 case ALC880_MIC_EVENT:
13129 alc269_eeepc_dmic_automute(codec); 13162 alc_mic_automute(codec);
13163 break;
13164 }
13130} 13165}
13131 13166
13132static void alc269_eeepc_dmic_inithook(struct hda_codec *codec) 13167static void alc269_eeepc_dmic_setup(struct hda_codec *codec)
13133{ 13168{
13134 alc269_speaker_automute(codec); 13169 struct alc_spec *spec = codec->spec;
13135 alc269_eeepc_dmic_automute(codec); 13170 spec->ext_mic.pin = 0x18;
13171 spec->ext_mic.mux_idx = 0;
13172 spec->int_mic.pin = 0x12;
13173 spec->int_mic.mux_idx = 5;
13174 spec->auto_mic = 1;
13136} 13175}
13137 13176
13138/* unsolicited event for HP jack sensing */ 13177static void alc269_eeepc_amic_setup(struct hda_codec *codec)
13139static void alc269_eeepc_amic_unsol_event(struct hda_codec *codec,
13140 unsigned int res)
13141{ 13178{
13142 if ((res >> 26) == ALC880_HP_EVENT) 13179 struct alc_spec *spec = codec->spec;
13143 alc269_speaker_automute(codec); 13180 spec->ext_mic.pin = 0x18;
13144 13181 spec->ext_mic.mux_idx = 0;
13145 if ((res >> 26) == ALC880_MIC_EVENT) 13182 spec->int_mic.pin = 0x19;
13146 alc269_eeepc_amic_automute(codec); 13183 spec->int_mic.mux_idx = 1;
13184 spec->auto_mic = 1;
13147} 13185}
13148 13186
13149static void alc269_eeepc_amic_inithook(struct hda_codec *codec) 13187static void alc269_eeepc_inithook(struct hda_codec *codec)
13150{ 13188{
13151 alc269_speaker_automute(codec); 13189 alc269_speaker_automute(codec);
13152 alc269_eeepc_amic_automute(codec); 13190 alc_mic_automute(codec);
13153} 13191}
13154 13192
13155/* 13193/*
@@ -13222,89 +13260,10 @@ static struct hda_verb alc269_init_verbs[] = {
13222 { } 13260 { }
13223}; 13261};
13224 13262
13225/* add playback controls from the parsed DAC table */ 13263#define alc269_auto_create_multi_out_ctls \
13226static int alc269_auto_create_multi_out_ctls(struct alc_spec *spec, 13264 alc268_auto_create_multi_out_ctls
13227 const struct auto_pin_cfg *cfg) 13265#define alc269_auto_create_input_ctls \
13228{ 13266 alc268_auto_create_input_ctls
13229 hda_nid_t nid;
13230 int err;
13231
13232 spec->multiout.num_dacs = 1; /* only use one dac */
13233 spec->multiout.dac_nids = spec->private_dac_nids;
13234 spec->multiout.dac_nids[0] = 2;
13235
13236 nid = cfg->line_out_pins[0];
13237 if (nid) {
13238 err = add_control(spec, ALC_CTL_WIDGET_VOL,
13239 "Front Playback Volume",
13240 HDA_COMPOSE_AMP_VAL(0x02, 3, 0, HDA_OUTPUT));
13241 if (err < 0)
13242 return err;
13243 err = add_control(spec, ALC_CTL_WIDGET_MUTE,
13244 "Front Playback Switch",
13245 HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_OUTPUT));
13246 if (err < 0)
13247 return err;
13248 }
13249
13250 nid = cfg->speaker_pins[0];
13251 if (nid) {
13252 if (!cfg->line_out_pins[0]) {
13253 err = add_control(spec, ALC_CTL_WIDGET_VOL,
13254 "Speaker Playback Volume",
13255 HDA_COMPOSE_AMP_VAL(0x02, 3, 0,
13256 HDA_OUTPUT));
13257 if (err < 0)
13258 return err;
13259 }
13260 if (nid == 0x16) {
13261 err = add_control(spec, ALC_CTL_WIDGET_MUTE,
13262 "Speaker Playback Switch",
13263 HDA_COMPOSE_AMP_VAL(nid, 2, 0,
13264 HDA_OUTPUT));
13265 if (err < 0)
13266 return err;
13267 } else {
13268 err = add_control(spec, ALC_CTL_WIDGET_MUTE,
13269 "Speaker Playback Switch",
13270 HDA_COMPOSE_AMP_VAL(nid, 3, 0,
13271 HDA_OUTPUT));
13272 if (err < 0)
13273 return err;
13274 }
13275 }
13276 nid = cfg->hp_pins[0];
13277 if (nid) {
13278 /* spec->multiout.hp_nid = 2; */
13279 if (!cfg->line_out_pins[0] && !cfg->speaker_pins[0]) {
13280 err = add_control(spec, ALC_CTL_WIDGET_VOL,
13281 "Headphone Playback Volume",
13282 HDA_COMPOSE_AMP_VAL(0x02, 3, 0,
13283 HDA_OUTPUT));
13284 if (err < 0)
13285 return err;
13286 }
13287 if (nid == 0x16) {
13288 err = add_control(spec, ALC_CTL_WIDGET_MUTE,
13289 "Headphone Playback Switch",
13290 HDA_COMPOSE_AMP_VAL(nid, 2, 0,
13291 HDA_OUTPUT));
13292 if (err < 0)
13293 return err;
13294 } else {
13295 err = add_control(spec, ALC_CTL_WIDGET_MUTE,
13296 "Headphone Playback Switch",
13297 HDA_COMPOSE_AMP_VAL(nid, 3, 0,
13298 HDA_OUTPUT));
13299 if (err < 0)
13300 return err;
13301 }
13302 }
13303 return 0;
13304}
13305
13306#define alc269_auto_create_analog_input_ctls \
13307 alc262_auto_create_analog_input_ctls
13308 13267
13309#ifdef CONFIG_SND_HDA_POWER_SAVE 13268#ifdef CONFIG_SND_HDA_POWER_SAVE
13310#define alc269_loopbacks alc880_loopbacks 13269#define alc269_loopbacks alc880_loopbacks
@@ -13354,7 +13313,7 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
13354 err = alc269_auto_create_multi_out_ctls(spec, &spec->autocfg); 13313 err = alc269_auto_create_multi_out_ctls(spec, &spec->autocfg);
13355 if (err < 0) 13314 if (err < 0)
13356 return err; 13315 return err;
13357 err = alc269_auto_create_analog_input_ctls(spec, &spec->autocfg); 13316 err = alc269_auto_create_input_ctls(codec, &spec->autocfg);
13358 if (err < 0) 13317 if (err < 0)
13359 return err; 13318 return err;
13360 13319
@@ -13379,15 +13338,15 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
13379 return err; 13338 return err;
13380 13339
13381 if (!spec->cap_mixer && !spec->no_analog) 13340 if (!spec->cap_mixer && !spec->no_analog)
13382 set_capture_mixer(spec); 13341 set_capture_mixer(codec);
13383 13342
13384 alc_ssid_check(codec, 0x15, 0x1b, 0x14); 13343 alc_ssid_check(codec, 0x15, 0x1b, 0x14);
13385 13344
13386 return 1; 13345 return 1;
13387} 13346}
13388 13347
13389#define alc269_auto_init_multi_out alc882_auto_init_multi_out 13348#define alc269_auto_init_multi_out alc268_auto_init_multi_out
13390#define alc269_auto_init_hp_out alc882_auto_init_hp_out 13349#define alc269_auto_init_hp_out alc268_auto_init_hp_out
13391#define alc269_auto_init_analog_input alc882_auto_init_analog_input 13350#define alc269_auto_init_analog_input alc882_auto_init_analog_input
13392 13351
13393 13352
@@ -13455,6 +13414,7 @@ static struct alc_config_preset alc269_presets[] = {
13455 .channel_mode = alc269_modes, 13414 .channel_mode = alc269_modes,
13456 .input_mux = &alc269_capture_source, 13415 .input_mux = &alc269_capture_source,
13457 .unsol_event = alc269_quanta_fl1_unsol_event, 13416 .unsol_event = alc269_quanta_fl1_unsol_event,
13417 .setup = alc269_quanta_fl1_setup,
13458 .init_hook = alc269_quanta_fl1_init_hook, 13418 .init_hook = alc269_quanta_fl1_init_hook,
13459 }, 13419 },
13460 [ALC269_ASUS_EEEPC_P703] = { 13420 [ALC269_ASUS_EEEPC_P703] = {
@@ -13467,9 +13427,9 @@ static struct alc_config_preset alc269_presets[] = {
13467 .hp_nid = 0x03, 13427 .hp_nid = 0x03,
13468 .num_channel_mode = ARRAY_SIZE(alc269_modes), 13428 .num_channel_mode = ARRAY_SIZE(alc269_modes),
13469 .channel_mode = alc269_modes, 13429 .channel_mode = alc269_modes,
13470 .input_mux = &alc269_eeepc_amic_capture_source, 13430 .unsol_event = alc269_eeepc_unsol_event,
13471 .unsol_event = alc269_eeepc_amic_unsol_event, 13431 .setup = alc269_eeepc_amic_setup,
13472 .init_hook = alc269_eeepc_amic_inithook, 13432 .init_hook = alc269_eeepc_inithook,
13473 }, 13433 },
13474 [ALC269_ASUS_EEEPC_P901] = { 13434 [ALC269_ASUS_EEEPC_P901] = {
13475 .mixers = { alc269_eeepc_mixer }, 13435 .mixers = { alc269_eeepc_mixer },
@@ -13481,9 +13441,9 @@ static struct alc_config_preset alc269_presets[] = {
13481 .hp_nid = 0x03, 13441 .hp_nid = 0x03,
13482 .num_channel_mode = ARRAY_SIZE(alc269_modes), 13442 .num_channel_mode = ARRAY_SIZE(alc269_modes),
13483 .channel_mode = alc269_modes, 13443 .channel_mode = alc269_modes,
13484 .input_mux = &alc269_eeepc_dmic_capture_source, 13444 .unsol_event = alc269_eeepc_unsol_event,
13485 .unsol_event = alc269_eeepc_dmic_unsol_event, 13445 .setup = alc269_eeepc_dmic_setup,
13486 .init_hook = alc269_eeepc_dmic_inithook, 13446 .init_hook = alc269_eeepc_inithook,
13487 }, 13447 },
13488 [ALC269_FUJITSU] = { 13448 [ALC269_FUJITSU] = {
13489 .mixers = { alc269_fujitsu_mixer }, 13449 .mixers = { alc269_fujitsu_mixer },
@@ -13495,9 +13455,9 @@ static struct alc_config_preset alc269_presets[] = {
13495 .hp_nid = 0x03, 13455 .hp_nid = 0x03,
13496 .num_channel_mode = ARRAY_SIZE(alc269_modes), 13456 .num_channel_mode = ARRAY_SIZE(alc269_modes),
13497 .channel_mode = alc269_modes, 13457 .channel_mode = alc269_modes,
13498 .input_mux = &alc269_eeepc_dmic_capture_source, 13458 .unsol_event = alc269_eeepc_unsol_event,
13499 .unsol_event = alc269_eeepc_dmic_unsol_event, 13459 .setup = alc269_eeepc_dmic_setup,
13500 .init_hook = alc269_eeepc_dmic_inithook, 13460 .init_hook = alc269_eeepc_inithook,
13501 }, 13461 },
13502 [ALC269_LIFEBOOK] = { 13462 [ALC269_LIFEBOOK] = {
13503 .mixers = { alc269_lifebook_mixer }, 13463 .mixers = { alc269_lifebook_mixer },
@@ -13532,8 +13492,8 @@ static int patch_alc269(struct hda_codec *codec)
13532 alc269_cfg_tbl); 13492 alc269_cfg_tbl);
13533 13493
13534 if (board_config < 0) { 13494 if (board_config < 0) {
13535 printk(KERN_INFO "hda_codec: Unknown model for %s, " 13495 printk(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
13536 "trying auto-probe from BIOS...\n", codec->chip_name); 13496 codec->chip_name);
13537 board_config = ALC269_AUTO; 13497 board_config = ALC269_AUTO;
13538 } 13498 }
13539 13499
@@ -13558,7 +13518,7 @@ static int patch_alc269(struct hda_codec *codec)
13558 } 13518 }
13559 13519
13560 if (board_config != ALC269_AUTO) 13520 if (board_config != ALC269_AUTO)
13561 setup_preset(spec, &alc269_presets[board_config]); 13521 setup_preset(codec, &alc269_presets[board_config]);
13562 13522
13563 if (codec->subsystem_id == 0x17aa3bf8) { 13523 if (codec->subsystem_id == 0x17aa3bf8) {
13564	/* Due to a hardware problem on Lenovo Ideapad, we need to 13524
@@ -13577,7 +13537,7 @@ static int patch_alc269(struct hda_codec *codec)
13577 spec->num_adc_nids = ARRAY_SIZE(alc269_adc_nids); 13537 spec->num_adc_nids = ARRAY_SIZE(alc269_adc_nids);
13578 spec->capsrc_nids = alc269_capsrc_nids; 13538 spec->capsrc_nids = alc269_capsrc_nids;
13579 if (!spec->cap_mixer) 13539 if (!spec->cap_mixer)
13580 set_capture_mixer(spec); 13540 set_capture_mixer(codec);
13581 set_beep_amp(spec, 0x0b, 0x04, HDA_INPUT); 13541 set_beep_amp(spec, 0x0b, 0x04, HDA_INPUT);
13582 13542
13583 spec->vmaster_nid = 0x02; 13543 spec->vmaster_nid = 0x02;
@@ -14127,23 +14087,23 @@ static struct hda_verb alc861_auto_init_verbs[] = {
14127 {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(2)}, 14087 {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(2)},
14128 {0x15, AC_VERB_SET_AMP_GAIN_MUTE, 0xb00c}, 14088 {0x15, AC_VERB_SET_AMP_GAIN_MUTE, 0xb00c},
14129 14089
14130 {0x16, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)}, 14090 {0x16, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
14131 {0x16, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)}, 14091 {0x16, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
14132 {0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)}, 14092 {0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
14133 {0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)}, 14093 {0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
14134 {0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)}, 14094 {0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
14135 {0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)}, 14095 {0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
14136 {0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)}, 14096 {0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
14137 {0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)}, 14097 {0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
14138 14098
14139 {0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)}, 14099 {0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
14140 {0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)}, 14100 {0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
14141 {0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(2)}, 14101 {0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
14142 {0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(3)}, 14102 {0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
14143 {0x1b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)}, 14103 {0x1b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
14144 {0x1b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)}, 14104 {0x1b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
14145 {0x1b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(2)}, 14105 {0x1b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
14146 {0x1b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(3)}, 14106 {0x1b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
14147 14107
14148 {0x08, AC_VERB_SET_CONNECT_SEL, 0x00}, /* set Mic 1 */ 14108 {0x08, AC_VERB_SET_CONNECT_SEL, 0x00}, /* set Mic 1 */
14149 14109
@@ -14215,64 +14175,96 @@ static struct hda_input_mux alc861_capture_source = {
14215 }, 14175 },
14216}; 14176};
14217 14177
14178static hda_nid_t alc861_look_for_dac(struct hda_codec *codec, hda_nid_t pin)
14179{
14180 struct alc_spec *spec = codec->spec;
14181 hda_nid_t mix, srcs[5];
14182 int i, j, num;
14183
14184 if (snd_hda_get_connections(codec, pin, &mix, 1) != 1)
14185 return 0;
14186 num = snd_hda_get_connections(codec, mix, srcs, ARRAY_SIZE(srcs));
14187 if (num < 0)
14188 return 0;
14189 for (i = 0; i < num; i++) {
14190 unsigned int type;
14191 type = get_wcaps_type(get_wcaps(codec, srcs[i]));
14192 if (type != AC_WID_AUD_OUT)
14193 continue;
14194 for (j = 0; j < spec->multiout.num_dacs; j++)
14195 if (spec->multiout.dac_nids[j] == srcs[i])
14196 break;
14197 if (j >= spec->multiout.num_dacs)
14198 return srcs[i];
14199 }
14200 return 0;
14201}
14202
14218/* fill in the dac_nids table from the parsed pin configuration */ 14203/* fill in the dac_nids table from the parsed pin configuration */
14219static int alc861_auto_fill_dac_nids(struct alc_spec *spec, 14204static int alc861_auto_fill_dac_nids(struct hda_codec *codec,
14220 const struct auto_pin_cfg *cfg) 14205 const struct auto_pin_cfg *cfg)
14221{ 14206{
14207 struct alc_spec *spec = codec->spec;
14222 int i; 14208 int i;
14223 hda_nid_t nid; 14209 hda_nid_t nid, dac;
14224 14210
14225 spec->multiout.dac_nids = spec->private_dac_nids; 14211 spec->multiout.dac_nids = spec->private_dac_nids;
14226 for (i = 0; i < cfg->line_outs; i++) { 14212 for (i = 0; i < cfg->line_outs; i++) {
14227 nid = cfg->line_out_pins[i]; 14213 nid = cfg->line_out_pins[i];
14228 if (nid) { 14214 dac = alc861_look_for_dac(codec, nid);
14229 if (i >= ARRAY_SIZE(alc861_dac_nids)) 14215 if (!dac)
14230 continue; 14216 continue;
14231 spec->multiout.dac_nids[i] = alc861_dac_nids[i]; 14217 spec->multiout.dac_nids[spec->multiout.num_dacs++] = dac;
14232 }
14233 } 14218 }
14234 spec->multiout.num_dacs = cfg->line_outs;
14235 return 0; 14219 return 0;
14236} 14220}
14237 14221
14222static int alc861_create_out_sw(struct hda_codec *codec, const char *pfx,
14223 hda_nid_t nid, unsigned int chs)
14224{
14225 char name[32];
14226 snprintf(name, sizeof(name), "%s Playback Switch", pfx);
14227 return add_control(codec->spec, ALC_CTL_WIDGET_MUTE, name,
14228 HDA_COMPOSE_AMP_VAL(nid, chs, 0, HDA_OUTPUT));
14229}
14230
14238/* add playback controls from the parsed DAC table */ 14231/* add playback controls from the parsed DAC table */
14239static int alc861_auto_create_multi_out_ctls(struct alc_spec *spec, 14232static int alc861_auto_create_multi_out_ctls(struct hda_codec *codec,
14240 const struct auto_pin_cfg *cfg) 14233 const struct auto_pin_cfg *cfg)
14241{ 14234{
14242 char name[32]; 14235 struct alc_spec *spec = codec->spec;
14243 static const char *chname[4] = { 14236 static const char *chname[4] = {
14244 "Front", "Surround", NULL /*CLFE*/, "Side" 14237 "Front", "Surround", NULL /*CLFE*/, "Side"
14245 }; 14238 };
14246 hda_nid_t nid; 14239 hda_nid_t nid;
14247 int i, idx, err; 14240 int i, err;
14241
14242 if (cfg->line_outs == 1) {
14243 const char *pfx = NULL;
14244 if (!cfg->hp_outs)
14245 pfx = "Master";
14246 else if (cfg->line_out_type == AUTO_PIN_SPEAKER_OUT)
14247 pfx = "Speaker";
14248 if (pfx) {
14249 nid = spec->multiout.dac_nids[0];
14250 return alc861_create_out_sw(codec, pfx, nid, 3);
14251 }
14252 }
14248 14253
14249 for (i = 0; i < cfg->line_outs; i++) { 14254 for (i = 0; i < cfg->line_outs; i++) {
14250 nid = spec->multiout.dac_nids[i]; 14255 nid = spec->multiout.dac_nids[i];
14251 if (!nid) 14256 if (!nid)
14252 continue; 14257 continue;
14253 if (nid == 0x05) { 14258 if (i == 2) {
14254 /* Center/LFE */ 14259 /* Center/LFE */
14255 err = add_control(spec, ALC_CTL_BIND_MUTE, 14260 err = alc861_create_out_sw(codec, "Center", nid, 1);
14256 "Center Playback Switch",
14257 HDA_COMPOSE_AMP_VAL(nid, 1, 0,
14258 HDA_OUTPUT));
14259 if (err < 0) 14261 if (err < 0)
14260 return err; 14262 return err;
14261 err = add_control(spec, ALC_CTL_BIND_MUTE, 14263 err = alc861_create_out_sw(codec, "LFE", nid, 2);
14262 "LFE Playback Switch",
14263 HDA_COMPOSE_AMP_VAL(nid, 2, 0,
14264 HDA_OUTPUT));
14265 if (err < 0) 14264 if (err < 0)
14266 return err; 14265 return err;
14267 } else { 14266 } else {
14268 for (idx = 0; idx < ARRAY_SIZE(alc861_dac_nids) - 1; 14267 err = alc861_create_out_sw(codec, chname[i], nid, 3);
14269 idx++)
14270 if (nid == alc861_dac_nids[idx])
14271 break;
14272 sprintf(name, "%s Playback Switch", chname[idx]);
14273 err = add_control(spec, ALC_CTL_BIND_MUTE, name,
14274 HDA_COMPOSE_AMP_VAL(nid, 3, 0,
14275 HDA_OUTPUT));
14276 if (err < 0) 14268 if (err < 0)
14277 return err; 14269 return err;
14278 } 14270 }
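
The hunk above replaces the fixed alc861_dac_nids[] lookup with a per-pin search: each line-out pin's mixer connection list is scanned for an audio-out widget that has not already been claimed in multiout.dac_nids. Below is a minimal standalone model of that search pattern; the stub tables are hypothetical stand-ins for snd_hda_get_connections() and the widget-type query, so this is an illustrative sketch rather than the driver code itself.

/*
 * Standalone model of the "find an unclaimed DAC feeding this pin" search
 * introduced above (not kernel code). mixer_srcs[] and src_is_dac[] are
 * hypothetical stubs for the codec's connection list and widget types.
 */
#include <stdio.h>

#define NUM_SRCS  4
#define NUM_DACS  3

/* sources reachable from one output pin's mixer (stub data) */
static const unsigned int mixer_srcs[NUM_SRCS] = { 0x03, 0x04, 0x05, 0x15 };
/* which of those sources are audio-out (DAC) widgets (stub data) */
static const int src_is_dac[NUM_SRCS] = { 1, 1, 1, 0 };

/* DACs already claimed for other output pins */
static unsigned int claimed_dacs[NUM_DACS];
static int num_claimed;

/* return the first unclaimed DAC feeding the mixer, or 0 if none is free */
static unsigned int look_for_free_dac(void)
{
	int i, j;

	for (i = 0; i < NUM_SRCS; i++) {
		if (!src_is_dac[i])
			continue;
		for (j = 0; j < num_claimed; j++)
			if (claimed_dacs[j] == mixer_srcs[i])
				break;
		if (j >= num_claimed)
			return mixer_srcs[i];
	}
	return 0;
}

int main(void)
{
	/* claim a DAC for two "pins"; each call skips what is already taken */
	for (int pin = 0; pin < 2; pin++) {
		unsigned int dac = look_for_free_dac();
		if (dac)
			claimed_dacs[num_claimed++] = dac;
		printf("pin %d -> DAC 0x%02x\n", pin, dac);
	}
	return 0;
}

With the stub data this assigns 0x03 to the first pin and 0x04 to the second, mirroring how the reworked alc861_auto_fill_dac_nids() fills spec->multiout.dac_nids one free DAC at a time.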
@@ -14280,8 +14272,9 @@ static int alc861_auto_create_multi_out_ctls(struct alc_spec *spec,
14280 return 0; 14272 return 0;
14281} 14273}
14282 14274
14283static int alc861_auto_create_hp_ctls(struct alc_spec *spec, hda_nid_t pin) 14275static int alc861_auto_create_hp_ctls(struct hda_codec *codec, hda_nid_t pin)
14284{ 14276{
14277 struct alc_spec *spec = codec->spec;
14285 int err; 14278 int err;
14286 hda_nid_t nid; 14279 hda_nid_t nid;
14287 14280
@@ -14289,70 +14282,49 @@ static int alc861_auto_create_hp_ctls(struct alc_spec *spec, hda_nid_t pin)
14289 return 0; 14282 return 0;
14290 14283
14291 if ((pin >= 0x0b && pin <= 0x10) || pin == 0x1f || pin == 0x20) { 14284 if ((pin >= 0x0b && pin <= 0x10) || pin == 0x1f || pin == 0x20) {
14292 nid = 0x03; 14285 nid = alc861_look_for_dac(codec, pin);
14293 err = add_control(spec, ALC_CTL_WIDGET_MUTE, 14286 if (nid) {
14294 "Headphone Playback Switch", 14287 err = alc861_create_out_sw(codec, "Headphone", nid, 3);
14295 HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_OUTPUT)); 14288 if (err < 0)
14296 if (err < 0) 14289 return err;
14297 return err; 14290 spec->multiout.hp_nid = nid;
14298 spec->multiout.hp_nid = nid; 14291 }
14299 } 14292 }
14300 return 0; 14293 return 0;
14301} 14294}
14302 14295
14303/* create playback/capture controls for input pins */ 14296/* create playback/capture controls for input pins */
14304static int alc861_auto_create_analog_input_ctls(struct alc_spec *spec, 14297static int alc861_auto_create_input_ctls(struct hda_codec *codec,
14305 const struct auto_pin_cfg *cfg) 14298 const struct auto_pin_cfg *cfg)
14306{ 14299{
14307 struct hda_input_mux *imux = &spec->private_imux[0]; 14300 return alc_auto_create_input_ctls(codec, cfg, 0x15, 0x08, 0);
14308 int i, err, idx, idx1;
14309
14310 for (i = 0; i < AUTO_PIN_LAST; i++) {
14311 switch (cfg->input_pins[i]) {
14312 case 0x0c:
14313 idx1 = 1;
14314 idx = 2; /* Line In */
14315 break;
14316 case 0x0f:
14317 idx1 = 2;
14318 idx = 2; /* Line In */
14319 break;
14320 case 0x0d:
14321 idx1 = 0;
14322 idx = 1; /* Mic In */
14323 break;
14324 case 0x10:
14325 idx1 = 3;
14326 idx = 1; /* Mic In */
14327 break;
14328 case 0x11:
14329 idx1 = 4;
14330 idx = 0; /* CD */
14331 break;
14332 default:
14333 continue;
14334 }
14335
14336 err = new_analog_input(spec, cfg->input_pins[i],
14337 auto_pin_cfg_labels[i], idx, 0x15);
14338 if (err < 0)
14339 return err;
14340
14341 imux->items[imux->num_items].label = auto_pin_cfg_labels[i];
14342 imux->items[imux->num_items].index = idx1;
14343 imux->num_items++;
14344 }
14345 return 0;
14346} 14301}
14347 14302
14348static void alc861_auto_set_output_and_unmute(struct hda_codec *codec, 14303static void alc861_auto_set_output_and_unmute(struct hda_codec *codec,
14349 hda_nid_t nid, 14304 hda_nid_t nid,
14350 int pin_type, int dac_idx) 14305 int pin_type, hda_nid_t dac)
14351{ 14306{
14307 hda_nid_t mix, srcs[5];
14308 int i, num;
14309
14352 snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, 14310 snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_PIN_WIDGET_CONTROL,
14353 pin_type); 14311 pin_type);
14354 snd_hda_codec_write(codec, dac_idx, 0, AC_VERB_SET_AMP_GAIN_MUTE, 14312 snd_hda_codec_write(codec, dac, 0, AC_VERB_SET_AMP_GAIN_MUTE,
14355 AMP_OUT_UNMUTE); 14313 AMP_OUT_UNMUTE);
14314 if (snd_hda_get_connections(codec, nid, &mix, 1) != 1)
14315 return;
14316 num = snd_hda_get_connections(codec, mix, srcs, ARRAY_SIZE(srcs));
14317 if (num < 0)
14318 return;
14319 for (i = 0; i < num; i++) {
14320 unsigned int mute;
14321 if (srcs[i] == dac || srcs[i] == 0x15)
14322 mute = AMP_IN_UNMUTE(i);
14323 else
14324 mute = AMP_IN_MUTE(i);
14325 snd_hda_codec_write(codec, mix, 0, AC_VERB_SET_AMP_GAIN_MUTE,
14326 mute);
14327 }
14356} 14328}
14357 14329
14358static void alc861_auto_init_multi_out(struct hda_codec *codec) 14330static void alc861_auto_init_multi_out(struct hda_codec *codec)
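
The rewritten alc861_auto_set_output_and_unmute() above no longer unmutes the output blindly; it walks the pin's mixer sources and keeps only the selected DAC and the 0x15 loopback audible while muting everything else. A small standalone sketch of that per-input decision follows; srcs[] is a hypothetical stand-in for the connection list queried from the codec, so treat it as a model of the pattern, not driver code.

/*
 * Standalone model of the selective mute/unmute loop above (not kernel
 * code). srcs[] stands in for the mixer's connection list; only the DAC
 * chosen for this pin and the 0x15 analog loopback stay unmuted.
 */
#include <stdio.h>

int main(void)
{
	const unsigned int srcs[] = { 0x03, 0x04, 0x15, 0x0b };
	const unsigned int dac = 0x04;      /* DAC routed to this pin */
	const unsigned int loopback = 0x15; /* analog loopback source */

	for (unsigned int i = 0; i < sizeof(srcs) / sizeof(srcs[0]); i++) {
		int unmute = (srcs[i] == dac || srcs[i] == loopback);
		printf("mixer input %u (0x%02x): %s\n", i, srcs[i],
		       unmute ? "unmute" : "mute");
	}
	return 0;
}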
@@ -14375,12 +14347,13 @@ static void alc861_auto_init_hp_out(struct hda_codec *codec)
14375 hda_nid_t pin; 14347 hda_nid_t pin;
14376 14348
14377 pin = spec->autocfg.hp_pins[0]; 14349 pin = spec->autocfg.hp_pins[0];
14378 if (pin) /* connect to front */ 14350 if (pin)
14379 alc861_auto_set_output_and_unmute(codec, pin, PIN_HP, 14351 alc861_auto_set_output_and_unmute(codec, pin, PIN_HP,
14380 spec->multiout.dac_nids[0]); 14352 spec->multiout.hp_nid);
14381 pin = spec->autocfg.speaker_pins[0]; 14353 pin = spec->autocfg.speaker_pins[0];
14382 if (pin) 14354 if (pin)
14383 alc861_auto_set_output_and_unmute(codec, pin, PIN_OUT, 0); 14355 alc861_auto_set_output_and_unmute(codec, pin, PIN_OUT,
14356 spec->multiout.dac_nids[0]);
14384} 14357}
14385 14358
14386static void alc861_auto_init_analog_input(struct hda_codec *codec) 14359static void alc861_auto_init_analog_input(struct hda_codec *codec)
@@ -14412,16 +14385,16 @@ static int alc861_parse_auto_config(struct hda_codec *codec)
14412 if (!spec->autocfg.line_outs) 14385 if (!spec->autocfg.line_outs)
14413 return 0; /* can't find valid BIOS pin config */ 14386 return 0; /* can't find valid BIOS pin config */
14414 14387
14415 err = alc861_auto_fill_dac_nids(spec, &spec->autocfg); 14388 err = alc861_auto_fill_dac_nids(codec, &spec->autocfg);
14416 if (err < 0) 14389 if (err < 0)
14417 return err; 14390 return err;
14418 err = alc861_auto_create_multi_out_ctls(spec, &spec->autocfg); 14391 err = alc861_auto_create_multi_out_ctls(codec, &spec->autocfg);
14419 if (err < 0) 14392 if (err < 0)
14420 return err; 14393 return err;
14421 err = alc861_auto_create_hp_ctls(spec, spec->autocfg.hp_pins[0]); 14394 err = alc861_auto_create_hp_ctls(codec, spec->autocfg.hp_pins[0]);
14422 if (err < 0) 14395 if (err < 0)
14423 return err; 14396 return err;
14424 err = alc861_auto_create_analog_input_ctls(spec, &spec->autocfg); 14397 err = alc861_auto_create_input_ctls(codec, &spec->autocfg);
14425 if (err < 0) 14398 if (err < 0)
14426 return err; 14399 return err;
14427 14400
@@ -14440,7 +14413,7 @@ static int alc861_parse_auto_config(struct hda_codec *codec)
14440 14413
14441 spec->adc_nids = alc861_adc_nids; 14414 spec->adc_nids = alc861_adc_nids;
14442 spec->num_adc_nids = ARRAY_SIZE(alc861_adc_nids); 14415 spec->num_adc_nids = ARRAY_SIZE(alc861_adc_nids);
14443 set_capture_mixer(spec); 14416 set_capture_mixer(codec);
14444 14417
14445 alc_ssid_check(codec, 0x0e, 0x0f, 0x0b); 14418 alc_ssid_check(codec, 0x0e, 0x0f, 0x0b);
14446 14419
@@ -14633,8 +14606,8 @@ static int patch_alc861(struct hda_codec *codec)
14633 alc861_cfg_tbl); 14606 alc861_cfg_tbl);
14634 14607
14635 if (board_config < 0) { 14608 if (board_config < 0) {
14636 printk(KERN_INFO "hda_codec: Unknown model for %s, " 14609 printk(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
14637 "trying auto-probe from BIOS...\n", codec->chip_name); 14610 codec->chip_name);
14638 board_config = ALC861_AUTO; 14611 board_config = ALC861_AUTO;
14639 } 14612 }
14640 14613
@@ -14659,7 +14632,7 @@ static int patch_alc861(struct hda_codec *codec)
14659 } 14632 }
14660 14633
14661 if (board_config != ALC861_AUTO) 14634 if (board_config != ALC861_AUTO)
14662 setup_preset(spec, &alc861_presets[board_config]); 14635 setup_preset(codec, &alc861_presets[board_config]);
14663 14636
14664 spec->stream_analog_playback = &alc861_pcm_analog_playback; 14637 spec->stream_analog_playback = &alc861_pcm_analog_playback;
14665 spec->stream_analog_capture = &alc861_pcm_analog_capture; 14638 spec->stream_analog_capture = &alc861_pcm_analog_capture;
@@ -15062,12 +15035,15 @@ static void alc861vd_lenovo_mic_automute(struct hda_codec *codec)
15062 HDA_AMP_MUTE, bits); 15035 HDA_AMP_MUTE, bits);
15063} 15036}
15064 15037
15065static void alc861vd_lenovo_init_hook(struct hda_codec *codec) 15038static void alc861vd_lenovo_setup(struct hda_codec *codec)
15066{ 15039{
15067 struct alc_spec *spec = codec->spec; 15040 struct alc_spec *spec = codec->spec;
15068
15069 spec->autocfg.hp_pins[0] = 0x1b; 15041 spec->autocfg.hp_pins[0] = 0x1b;
15070 spec->autocfg.speaker_pins[0] = 0x14; 15042 spec->autocfg.speaker_pins[0] = 0x14;
15043}
15044
15045static void alc861vd_lenovo_init_hook(struct hda_codec *codec)
15046{
15071 alc_automute_amp(codec); 15047 alc_automute_amp(codec);
15072 alc861vd_lenovo_mic_automute(codec); 15048 alc861vd_lenovo_mic_automute(codec);
15073} 15049}
@@ -15131,13 +15107,12 @@ static struct hda_verb alc861vd_dallas_verbs[] = {
15131}; 15107};
15132 15108
15133/* toggle speaker-output according to the hp-jack state */ 15109/* toggle speaker-output according to the hp-jack state */
15134static void alc861vd_dallas_init_hook(struct hda_codec *codec) 15110static void alc861vd_dallas_setup(struct hda_codec *codec)
15135{ 15111{
15136 struct alc_spec *spec = codec->spec; 15112 struct alc_spec *spec = codec->spec;
15137 15113
15138 spec->autocfg.hp_pins[0] = 0x15; 15114 spec->autocfg.hp_pins[0] = 0x15;
15139 spec->autocfg.speaker_pins[0] = 0x14; 15115 spec->autocfg.speaker_pins[0] = 0x14;
15140 alc_automute_amp(codec);
15141} 15116}
15142 15117
15143#ifdef CONFIG_SND_HDA_POWER_SAVE 15118#ifdef CONFIG_SND_HDA_POWER_SAVE
@@ -15251,6 +15226,7 @@ static struct alc_config_preset alc861vd_presets[] = {
15251 .channel_mode = alc861vd_3stack_2ch_modes, 15226 .channel_mode = alc861vd_3stack_2ch_modes,
15252 .input_mux = &alc861vd_capture_source, 15227 .input_mux = &alc861vd_capture_source,
15253 .unsol_event = alc861vd_lenovo_unsol_event, 15228 .unsol_event = alc861vd_lenovo_unsol_event,
15229 .setup = alc861vd_lenovo_setup,
15254 .init_hook = alc861vd_lenovo_init_hook, 15230 .init_hook = alc861vd_lenovo_init_hook,
15255 }, 15231 },
15256 [ALC861VD_DALLAS] = { 15232 [ALC861VD_DALLAS] = {
@@ -15262,7 +15238,8 @@ static struct alc_config_preset alc861vd_presets[] = {
15262 .channel_mode = alc861vd_3stack_2ch_modes, 15238 .channel_mode = alc861vd_3stack_2ch_modes,
15263 .input_mux = &alc861vd_dallas_capture_source, 15239 .input_mux = &alc861vd_dallas_capture_source,
15264 .unsol_event = alc_automute_amp_unsol_event, 15240 .unsol_event = alc_automute_amp_unsol_event,
15265 .init_hook = alc861vd_dallas_init_hook, 15241 .setup = alc861vd_dallas_setup,
15242 .init_hook = alc_automute_amp,
15266 }, 15243 },
15267 [ALC861VD_HP] = { 15244 [ALC861VD_HP] = {
15268 .mixers = { alc861vd_hp_mixer }, 15245 .mixers = { alc861vd_hp_mixer },
@@ -15274,7 +15251,8 @@ static struct alc_config_preset alc861vd_presets[] = {
15274 .channel_mode = alc861vd_3stack_2ch_modes, 15251 .channel_mode = alc861vd_3stack_2ch_modes,
15275 .input_mux = &alc861vd_hp_capture_source, 15252 .input_mux = &alc861vd_hp_capture_source,
15276 .unsol_event = alc_automute_amp_unsol_event, 15253 .unsol_event = alc_automute_amp_unsol_event,
15277 .init_hook = alc861vd_dallas_init_hook, 15254 .setup = alc861vd_dallas_setup,
15255 .init_hook = alc_automute_amp,
15278 }, 15256 },
15279 [ALC660VD_ASUS_V1S] = { 15257 [ALC660VD_ASUS_V1S] = {
15280 .mixers = { alc861vd_lenovo_mixer }, 15258 .mixers = { alc861vd_lenovo_mixer },
@@ -15289,6 +15267,7 @@ static struct alc_config_preset alc861vd_presets[] = {
15289 .channel_mode = alc861vd_3stack_2ch_modes, 15267 .channel_mode = alc861vd_3stack_2ch_modes,
15290 .input_mux = &alc861vd_capture_source, 15268 .input_mux = &alc861vd_capture_source,
15291 .unsol_event = alc861vd_lenovo_unsol_event, 15269 .unsol_event = alc861vd_lenovo_unsol_event,
15270 .setup = alc861vd_lenovo_setup,
15292 .init_hook = alc861vd_lenovo_init_hook, 15271 .init_hook = alc861vd_lenovo_init_hook,
15293 }, 15272 },
15294}; 15273};
@@ -15296,6 +15275,13 @@ static struct alc_config_preset alc861vd_presets[] = {
15296/* 15275/*
15297 * BIOS auto configuration 15276 * BIOS auto configuration
15298 */ 15277 */
15278static int alc861vd_auto_create_input_ctls(struct hda_codec *codec,
15279 const struct auto_pin_cfg *cfg)
15280{
15281 return alc_auto_create_input_ctls(codec, cfg, 0x15, 0x09, 0);
15282}
15283
15284
15299static void alc861vd_auto_set_output_and_unmute(struct hda_codec *codec, 15285static void alc861vd_auto_set_output_and_unmute(struct hda_codec *codec,
15300 hda_nid_t nid, int pin_type, int dac_idx) 15286 hda_nid_t nid, int pin_type, int dac_idx)
15301{ 15287{
@@ -15330,7 +15316,6 @@ static void alc861vd_auto_init_hp_out(struct hda_codec *codec)
15330 alc861vd_auto_set_output_and_unmute(codec, pin, PIN_OUT, 0); 15316 alc861vd_auto_set_output_and_unmute(codec, pin, PIN_OUT, 0);
15331} 15317}
15332 15318
15333#define alc861vd_is_input_pin(nid) alc880_is_input_pin(nid)
15334#define ALC861VD_PIN_CD_NID ALC880_PIN_CD_NID 15319#define ALC861VD_PIN_CD_NID ALC880_PIN_CD_NID
15335 15320
15336static void alc861vd_auto_init_analog_input(struct hda_codec *codec) 15321static void alc861vd_auto_init_analog_input(struct hda_codec *codec)
@@ -15340,7 +15325,7 @@ static void alc861vd_auto_init_analog_input(struct hda_codec *codec)
15340 15325
15341 for (i = 0; i < AUTO_PIN_LAST; i++) { 15326 for (i = 0; i < AUTO_PIN_LAST; i++) {
15342 hda_nid_t nid = spec->autocfg.input_pins[i]; 15327 hda_nid_t nid = spec->autocfg.input_pins[i];
15343 if (alc861vd_is_input_pin(nid)) { 15328 if (alc_is_input_pin(codec, nid)) {
15344 alc_set_input_pin(codec, nid, i); 15329 alc_set_input_pin(codec, nid, i);
15345 if (nid != ALC861VD_PIN_CD_NID && 15330 if (nid != ALC861VD_PIN_CD_NID &&
15346 (get_wcaps(codec, nid) & AC_WCAP_OUT_AMP)) 15331 (get_wcaps(codec, nid) & AC_WCAP_OUT_AMP))
@@ -15404,13 +15389,25 @@ static int alc861vd_auto_create_multi_out_ctls(struct alc_spec *spec,
15404 if (err < 0) 15389 if (err < 0)
15405 return err; 15390 return err;
15406 } else { 15391 } else {
15407 sprintf(name, "%s Playback Volume", chname[i]); 15392 const char *pfx;
15393 if (cfg->line_outs == 1 &&
15394 cfg->line_out_type == AUTO_PIN_SPEAKER_OUT) {
15395 if (!cfg->hp_pins)
15396 pfx = "Speaker";
15397 else
15398 pfx = "PCM";
15399 } else
15400 pfx = chname[i];
15401 sprintf(name, "%s Playback Volume", pfx);
15408 err = add_control(spec, ALC_CTL_WIDGET_VOL, name, 15402 err = add_control(spec, ALC_CTL_WIDGET_VOL, name,
15409 HDA_COMPOSE_AMP_VAL(nid_v, 3, 0, 15403 HDA_COMPOSE_AMP_VAL(nid_v, 3, 0,
15410 HDA_OUTPUT)); 15404 HDA_OUTPUT));
15411 if (err < 0) 15405 if (err < 0)
15412 return err; 15406 return err;
15413 sprintf(name, "%s Playback Switch", chname[i]); 15407 if (cfg->line_outs == 1 &&
15408 cfg->line_out_type == AUTO_PIN_SPEAKER_OUT)
15409 pfx = "Speaker";
15410 sprintf(name, "%s Playback Switch", pfx);
15414 err = add_control(spec, ALC_CTL_BIND_MUTE, name, 15411 err = add_control(spec, ALC_CTL_BIND_MUTE, name,
15415 HDA_COMPOSE_AMP_VAL(nid_s, 3, 2, 15412 HDA_COMPOSE_AMP_VAL(nid_s, 3, 2,
15416 HDA_INPUT)); 15413 HDA_INPUT));
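
The prefix logic added in the hunk above changes the control naming for single-output configurations: a lone speaker-type output gets a "Speaker" volume control, or a "PCM" volume when headphone pins are also configured, while multi-output setups keep the usual Front/Surround/CLFE/Side names (the mute switch is forced to "Speaker" in the single-output case). The sketch below models only the volume-prefix choice, with the cfg fields reduced to plain flags; it is an illustrative assumption-level model, not the driver code.

/*
 * Standalone model of the volume-control prefix rule above (not kernel
 * code). line_outs/speaker_out/has_hp are simplified stand-ins for the
 * auto_pin_cfg fields used in the patch.
 */
#include <stdio.h>

static const char *out_prefix(int line_outs, int speaker_out, int has_hp,
			      const char *chname)
{
	if (line_outs == 1 && speaker_out)
		return has_hp ? "PCM" : "Speaker";
	return chname;
}

int main(void)
{
	printf("%s Playback Volume\n", out_prefix(1, 1, 0, "Front"));
	printf("%s Playback Volume\n", out_prefix(1, 1, 1, "Front"));
	printf("%s Playback Volume\n", out_prefix(2, 0, 1, "Surround"));
	return 0;
}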
@@ -15503,7 +15500,7 @@ static int alc861vd_parse_auto_config(struct hda_codec *codec)
15503 "Headphone"); 15500 "Headphone");
15504 if (err < 0) 15501 if (err < 0)
15505 return err; 15502 return err;
15506 err = alc880_auto_create_analog_input_ctls(spec, &spec->autocfg); 15503 err = alc861vd_auto_create_input_ctls(codec, &spec->autocfg);
15507 if (err < 0) 15504 if (err < 0)
15508 return err; 15505 return err;
15509 15506
@@ -15557,8 +15554,8 @@ static int patch_alc861vd(struct hda_codec *codec)
15557 alc861vd_cfg_tbl); 15554 alc861vd_cfg_tbl);
15558 15555
15559 if (board_config < 0 || board_config >= ALC861VD_MODEL_LAST) { 15556 if (board_config < 0 || board_config >= ALC861VD_MODEL_LAST) {
15560 printk(KERN_INFO "hda_codec: Unknown model for %s, " 15557 printk(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
15561 "trying auto-probe from BIOS...\n", codec->chip_name); 15558 codec->chip_name);
15562 board_config = ALC861VD_AUTO; 15559 board_config = ALC861VD_AUTO;
15563 } 15560 }
15564 15561
@@ -15583,7 +15580,7 @@ static int patch_alc861vd(struct hda_codec *codec)
15583 } 15580 }
15584 15581
15585 if (board_config != ALC861VD_AUTO) 15582 if (board_config != ALC861VD_AUTO)
15586 setup_preset(spec, &alc861vd_presets[board_config]); 15583 setup_preset(codec, &alc861vd_presets[board_config]);
15587 15584
15588 if (codec->vendor_id == 0x10ec0660) { 15585 if (codec->vendor_id == 0x10ec0660) {
15589 /* always turn on EAPD */ 15586 /* always turn on EAPD */
@@ -15603,7 +15600,7 @@ static int patch_alc861vd(struct hda_codec *codec)
15603 if (!spec->capsrc_nids) 15600 if (!spec->capsrc_nids)
15604 spec->capsrc_nids = alc861vd_capsrc_nids; 15601 spec->capsrc_nids = alc861vd_capsrc_nids;
15605 15602
15606 set_capture_mixer(spec); 15603 set_capture_mixer(codec);
15607 set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT); 15604 set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT);
15608 15605
15609 spec->vmaster_nid = 0x02; 15606 spec->vmaster_nid = 0x02;
@@ -15644,9 +15641,9 @@ static hda_nid_t alc272_dac_nids[2] = {
15644 0x02, 0x03 15641 0x02, 0x03
15645}; 15642};
15646 15643
15647static hda_nid_t alc662_adc_nids[1] = { 15644static hda_nid_t alc662_adc_nids[2] = {
15648 /* ADC1-2 */ 15645 /* ADC1-2 */
15649 0x09, 15646 0x09, 0x08
15650}; 15647};
15651 15648
15652static hda_nid_t alc272_adc_nids[1] = { 15649static hda_nid_t alc272_adc_nids[1] = {
@@ -15654,7 +15651,7 @@ static hda_nid_t alc272_adc_nids[1] = {
15654 0x08, 15651 0x08,
15655}; 15652};
15656 15653
15657static hda_nid_t alc662_capsrc_nids[1] = { 0x22 }; 15654static hda_nid_t alc662_capsrc_nids[2] = { 0x22, 0x23 };
15658static hda_nid_t alc272_capsrc_nids[1] = { 0x23 }; 15655static hda_nid_t alc272_capsrc_nids[1] = { 0x23 };
15659 15656
15660 15657
@@ -15678,14 +15675,6 @@ static struct hda_input_mux alc662_lenovo_101e_capture_source = {
15678 }, 15675 },
15679}; 15676};
15680 15677
15681static struct hda_input_mux alc662_eeepc_capture_source = {
15682 .num_items = 2,
15683 .items = {
15684 { "i-Mic", 0x1 },
15685 { "e-Mic", 0x0 },
15686 },
15687};
15688
15689static struct hda_input_mux alc663_capture_source = { 15678static struct hda_input_mux alc663_capture_source = {
15690 .num_items = 3, 15679 .num_items = 3,
15691 .items = { 15680 .items = {
@@ -15695,23 +15684,7 @@ static struct hda_input_mux alc663_capture_source = {
15695 }, 15684 },
15696}; 15685};
15697 15686
15698static struct hda_input_mux alc663_m51va_capture_source = { 15687#if 0 /* set to 1 for testing other input sources below */
15699 .num_items = 2,
15700 .items = {
15701 { "Ext-Mic", 0x0 },
15702 { "D-Mic", 0x9 },
15703 },
15704};
15705
15706#if 1 /* set to 0 for testing other input sources below */
15707static struct hda_input_mux alc272_nc10_capture_source = {
15708 .num_items = 2,
15709 .items = {
15710 { "Autoselect Mic", 0x0 },
15711 { "Internal Mic", 0x1 },
15712 },
15713};
15714#else
15715static struct hda_input_mux alc272_nc10_capture_source = { 15688static struct hda_input_mux alc272_nc10_capture_source = {
15716 .num_items = 16, 15689 .num_items = 16,
15717 .items = { 15690 .items = {
@@ -16380,47 +16353,44 @@ static void alc662_lenovo_101e_unsol_event(struct hda_codec *codec,
16380 alc662_lenovo_101e_ispeaker_automute(codec); 16353 alc662_lenovo_101e_ispeaker_automute(codec);
16381} 16354}
16382 16355
16383static void alc662_eeepc_mic_automute(struct hda_codec *codec)
16384{
16385 unsigned int present;
16386
16387 present = snd_hda_codec_read(codec, 0x18, 0,
16388 AC_VERB_GET_PIN_SENSE, 0) & 0x80000000;
16389 snd_hda_codec_write(codec, 0x22, 0, AC_VERB_SET_AMP_GAIN_MUTE,
16390 0x7000 | (0x00 << 8) | (present ? 0 : 0x80));
16391 snd_hda_codec_write(codec, 0x23, 0, AC_VERB_SET_AMP_GAIN_MUTE,
16392 0x7000 | (0x00 << 8) | (present ? 0 : 0x80));
16393 snd_hda_codec_write(codec, 0x22, 0, AC_VERB_SET_AMP_GAIN_MUTE,
16394 0x7000 | (0x01 << 8) | (present ? 0x80 : 0));
16395 snd_hda_codec_write(codec, 0x23, 0, AC_VERB_SET_AMP_GAIN_MUTE,
16396 0x7000 | (0x01 << 8) | (present ? 0x80 : 0));
16397}
16398
16399/* unsolicited event for HP jack sensing */ 16356/* unsolicited event for HP jack sensing */
16400static void alc662_eeepc_unsol_event(struct hda_codec *codec, 16357static void alc662_eeepc_unsol_event(struct hda_codec *codec,
16401 unsigned int res) 16358 unsigned int res)
16402{ 16359{
16403 if ((res >> 26) == ALC880_MIC_EVENT) 16360 if ((res >> 26) == ALC880_MIC_EVENT)
16404 alc662_eeepc_mic_automute(codec); 16361 alc_mic_automute(codec);
16405 else 16362 else
16406 alc262_hippo_unsol_event(codec, res); 16363 alc262_hippo_unsol_event(codec, res);
16407} 16364}
16408 16365
16366static void alc662_eeepc_setup(struct hda_codec *codec)
16367{
16368 struct alc_spec *spec = codec->spec;
16369
16370 alc262_hippo1_setup(codec);
16371 spec->ext_mic.pin = 0x18;
16372 spec->ext_mic.mux_idx = 0;
16373 spec->int_mic.pin = 0x19;
16374 spec->int_mic.mux_idx = 1;
16375 spec->auto_mic = 1;
16376}
16377
16409static void alc662_eeepc_inithook(struct hda_codec *codec) 16378static void alc662_eeepc_inithook(struct hda_codec *codec)
16410{ 16379{
16411 alc262_hippo1_init_hook(codec); 16380 alc262_hippo_automute(codec);
16412 alc662_eeepc_mic_automute(codec); 16381 alc_mic_automute(codec);
16413} 16382}
16414 16383
16415static void alc662_eeepc_ep20_inithook(struct hda_codec *codec) 16384static void alc662_eeepc_ep20_setup(struct hda_codec *codec)
16416{ 16385{
16417 struct alc_spec *spec = codec->spec; 16386 struct alc_spec *spec = codec->spec;
16418 16387
16419 spec->autocfg.hp_pins[0] = 0x14; 16388 spec->autocfg.hp_pins[0] = 0x14;
16420 spec->autocfg.speaker_pins[0] = 0x1b; 16389 spec->autocfg.speaker_pins[0] = 0x1b;
16421 alc262_hippo_master_update(codec);
16422} 16390}
16423 16391
16392#define alc662_eeepc_ep20_inithook alc262_hippo_master_update
16393
16424static void alc663_m51va_speaker_automute(struct hda_codec *codec) 16394static void alc663_m51va_speaker_automute(struct hda_codec *codec)
16425{ 16395{
16426 unsigned int present; 16396 unsigned int present;
@@ -16531,23 +16501,6 @@ static void alc663_two_hp_m2_speaker_automute(struct hda_codec *codec)
16531 } 16501 }
16532} 16502}
16533 16503
16534static void alc663_m51va_mic_automute(struct hda_codec *codec)
16535{
16536 unsigned int present;
16537
16538 present = snd_hda_codec_read(codec, 0x18, 0,
16539 AC_VERB_GET_PIN_SENSE, 0)
16540 & AC_PINSENSE_PRESENCE;
16541 snd_hda_codec_write_cache(codec, 0x22, 0, AC_VERB_SET_AMP_GAIN_MUTE,
16542 0x7000 | (0x00 << 8) | (present ? 0 : 0x80));
16543 snd_hda_codec_write_cache(codec, 0x23, 0, AC_VERB_SET_AMP_GAIN_MUTE,
16544 0x7000 | (0x00 << 8) | (present ? 0 : 0x80));
16545 snd_hda_codec_write_cache(codec, 0x22, 0, AC_VERB_SET_AMP_GAIN_MUTE,
16546 0x7000 | (0x09 << 8) | (present ? 0x80 : 0));
16547 snd_hda_codec_write_cache(codec, 0x23, 0, AC_VERB_SET_AMP_GAIN_MUTE,
16548 0x7000 | (0x09 << 8) | (present ? 0x80 : 0));
16549}
16550
16551static void alc663_m51va_unsol_event(struct hda_codec *codec, 16504static void alc663_m51va_unsol_event(struct hda_codec *codec,
16552 unsigned int res) 16505 unsigned int res)
16553{ 16506{
@@ -16556,36 +16509,32 @@ static void alc663_m51va_unsol_event(struct hda_codec *codec,
16556 alc663_m51va_speaker_automute(codec); 16509 alc663_m51va_speaker_automute(codec);
16557 break; 16510 break;
16558 case ALC880_MIC_EVENT: 16511 case ALC880_MIC_EVENT:
16559 alc663_m51va_mic_automute(codec); 16512 alc_mic_automute(codec);
16560 break; 16513 break;
16561 } 16514 }
16562} 16515}
16563 16516
16517static void alc663_m51va_setup(struct hda_codec *codec)
16518{
16519 struct alc_spec *spec = codec->spec;
16520 spec->ext_mic.pin = 0x18;
16521 spec->ext_mic.mux_idx = 0;
16522 spec->int_mic.pin = 0x12;
16523 spec->int_mic.mux_idx = 1;
16524 spec->auto_mic = 1;
16525}
16526
16564static void alc663_m51va_inithook(struct hda_codec *codec) 16527static void alc663_m51va_inithook(struct hda_codec *codec)
16565{ 16528{
16566 alc663_m51va_speaker_automute(codec); 16529 alc663_m51va_speaker_automute(codec);
16567 alc663_m51va_mic_automute(codec); 16530 alc_mic_automute(codec);
16568} 16531}
16569 16532
16570/* ***************** Mode1 ******************************/ 16533/* ***************** Mode1 ******************************/
16571static void alc663_mode1_unsol_event(struct hda_codec *codec, 16534#define alc663_mode1_unsol_event alc663_m51va_unsol_event
16572 unsigned int res) 16535#define alc663_mode1_setup alc663_m51va_setup
16573{ 16536#define alc663_mode1_inithook alc663_m51va_inithook
16574 switch (res >> 26) {
16575 case ALC880_HP_EVENT:
16576 alc663_m51va_speaker_automute(codec);
16577 break;
16578 case ALC880_MIC_EVENT:
16579 alc662_eeepc_mic_automute(codec);
16580 break;
16581 }
16582}
16583 16537
16584static void alc663_mode1_inithook(struct hda_codec *codec)
16585{
16586 alc663_m51va_speaker_automute(codec);
16587 alc662_eeepc_mic_automute(codec);
16588}
16589/* ***************** Mode2 ******************************/ 16538/* ***************** Mode2 ******************************/
16590static void alc662_mode2_unsol_event(struct hda_codec *codec, 16539static void alc662_mode2_unsol_event(struct hda_codec *codec,
16591 unsigned int res) 16540 unsigned int res)
@@ -16595,15 +16544,17 @@ static void alc662_mode2_unsol_event(struct hda_codec *codec,
16595 alc662_f5z_speaker_automute(codec); 16544 alc662_f5z_speaker_automute(codec);
16596 break; 16545 break;
16597 case ALC880_MIC_EVENT: 16546 case ALC880_MIC_EVENT:
16598 alc662_eeepc_mic_automute(codec); 16547 alc_mic_automute(codec);
16599 break; 16548 break;
16600 } 16549 }
16601} 16550}
16602 16551
16552#define alc662_mode2_setup alc663_m51va_setup
16553
16603static void alc662_mode2_inithook(struct hda_codec *codec) 16554static void alc662_mode2_inithook(struct hda_codec *codec)
16604{ 16555{
16605 alc662_f5z_speaker_automute(codec); 16556 alc662_f5z_speaker_automute(codec);
16606 alc662_eeepc_mic_automute(codec); 16557 alc_mic_automute(codec);
16607} 16558}
16608/* ***************** Mode3 ******************************/ 16559/* ***************** Mode3 ******************************/
16609static void alc663_mode3_unsol_event(struct hda_codec *codec, 16560static void alc663_mode3_unsol_event(struct hda_codec *codec,
@@ -16614,15 +16565,17 @@ static void alc663_mode3_unsol_event(struct hda_codec *codec,
16614 alc663_two_hp_m1_speaker_automute(codec); 16565 alc663_two_hp_m1_speaker_automute(codec);
16615 break; 16566 break;
16616 case ALC880_MIC_EVENT: 16567 case ALC880_MIC_EVENT:
16617 alc662_eeepc_mic_automute(codec); 16568 alc_mic_automute(codec);
16618 break; 16569 break;
16619 } 16570 }
16620} 16571}
16621 16572
16573#define alc663_mode3_setup alc663_m51va_setup
16574
16622static void alc663_mode3_inithook(struct hda_codec *codec) 16575static void alc663_mode3_inithook(struct hda_codec *codec)
16623{ 16576{
16624 alc663_two_hp_m1_speaker_automute(codec); 16577 alc663_two_hp_m1_speaker_automute(codec);
16625 alc662_eeepc_mic_automute(codec); 16578 alc_mic_automute(codec);
16626} 16579}
16627/* ***************** Mode4 ******************************/ 16580/* ***************** Mode4 ******************************/
16628static void alc663_mode4_unsol_event(struct hda_codec *codec, 16581static void alc663_mode4_unsol_event(struct hda_codec *codec,
@@ -16633,15 +16586,17 @@ static void alc663_mode4_unsol_event(struct hda_codec *codec,
16633 alc663_21jd_two_speaker_automute(codec); 16586 alc663_21jd_two_speaker_automute(codec);
16634 break; 16587 break;
16635 case ALC880_MIC_EVENT: 16588 case ALC880_MIC_EVENT:
16636 alc662_eeepc_mic_automute(codec); 16589 alc_mic_automute(codec);
16637 break; 16590 break;
16638 } 16591 }
16639} 16592}
16640 16593
16594#define alc663_mode4_setup alc663_m51va_setup
16595
16641static void alc663_mode4_inithook(struct hda_codec *codec) 16596static void alc663_mode4_inithook(struct hda_codec *codec)
16642{ 16597{
16643 alc663_21jd_two_speaker_automute(codec); 16598 alc663_21jd_two_speaker_automute(codec);
16644 alc662_eeepc_mic_automute(codec); 16599 alc_mic_automute(codec);
16645} 16600}
16646/* ***************** Mode5 ******************************/ 16601/* ***************** Mode5 ******************************/
16647static void alc663_mode5_unsol_event(struct hda_codec *codec, 16602static void alc663_mode5_unsol_event(struct hda_codec *codec,
@@ -16652,15 +16607,17 @@ static void alc663_mode5_unsol_event(struct hda_codec *codec,
16652 alc663_15jd_two_speaker_automute(codec); 16607 alc663_15jd_two_speaker_automute(codec);
16653 break; 16608 break;
16654 case ALC880_MIC_EVENT: 16609 case ALC880_MIC_EVENT:
16655 alc662_eeepc_mic_automute(codec); 16610 alc_mic_automute(codec);
16656 break; 16611 break;
16657 } 16612 }
16658} 16613}
16659 16614
16615#define alc663_mode5_setup alc663_m51va_setup
16616
16660static void alc663_mode5_inithook(struct hda_codec *codec) 16617static void alc663_mode5_inithook(struct hda_codec *codec)
16661{ 16618{
16662 alc663_15jd_two_speaker_automute(codec); 16619 alc663_15jd_two_speaker_automute(codec);
16663 alc662_eeepc_mic_automute(codec); 16620 alc_mic_automute(codec);
16664} 16621}
16665/* ***************** Mode6 ******************************/ 16622/* ***************** Mode6 ******************************/
16666static void alc663_mode6_unsol_event(struct hda_codec *codec, 16623static void alc663_mode6_unsol_event(struct hda_codec *codec,
@@ -16671,15 +16628,17 @@ static void alc663_mode6_unsol_event(struct hda_codec *codec,
16671 alc663_two_hp_m2_speaker_automute(codec); 16628 alc663_two_hp_m2_speaker_automute(codec);
16672 break; 16629 break;
16673 case ALC880_MIC_EVENT: 16630 case ALC880_MIC_EVENT:
16674 alc662_eeepc_mic_automute(codec); 16631 alc_mic_automute(codec);
16675 break; 16632 break;
16676 } 16633 }
16677} 16634}
16678 16635
16636#define alc663_mode6_setup alc663_m51va_setup
16637
16679static void alc663_mode6_inithook(struct hda_codec *codec) 16638static void alc663_mode6_inithook(struct hda_codec *codec)
16680{ 16639{
16681 alc663_two_hp_m2_speaker_automute(codec); 16640 alc663_two_hp_m2_speaker_automute(codec);
16682 alc662_eeepc_mic_automute(codec); 16641 alc_mic_automute(codec);
16683} 16642}
16684 16643
16685static void alc663_g71v_hp_automute(struct hda_codec *codec) 16644static void alc663_g71v_hp_automute(struct hda_codec *codec)
@@ -16721,16 +16680,18 @@ static void alc663_g71v_unsol_event(struct hda_codec *codec,
16721 alc663_g71v_front_automute(codec); 16680 alc663_g71v_front_automute(codec);
16722 break; 16681 break;
16723 case ALC880_MIC_EVENT: 16682 case ALC880_MIC_EVENT:
16724 alc662_eeepc_mic_automute(codec); 16683 alc_mic_automute(codec);
16725 break; 16684 break;
16726 } 16685 }
16727} 16686}
16728 16687
16688#define alc663_g71v_setup alc663_m51va_setup
16689
16729static void alc663_g71v_inithook(struct hda_codec *codec) 16690static void alc663_g71v_inithook(struct hda_codec *codec)
16730{ 16691{
16731 alc663_g71v_front_automute(codec); 16692 alc663_g71v_front_automute(codec);
16732 alc663_g71v_hp_automute(codec); 16693 alc663_g71v_hp_automute(codec);
16733 alc662_eeepc_mic_automute(codec); 16694 alc_mic_automute(codec);
16734} 16695}
16735 16696
16736static void alc663_g50v_unsol_event(struct hda_codec *codec, 16697static void alc663_g50v_unsol_event(struct hda_codec *codec,
@@ -16741,15 +16702,17 @@ static void alc663_g50v_unsol_event(struct hda_codec *codec,
16741 alc663_m51va_speaker_automute(codec); 16702 alc663_m51va_speaker_automute(codec);
16742 break; 16703 break;
16743 case ALC880_MIC_EVENT: 16704 case ALC880_MIC_EVENT:
16744 alc662_eeepc_mic_automute(codec); 16705 alc_mic_automute(codec);
16745 break; 16706 break;
16746 } 16707 }
16747} 16708}
16748 16709
16710#define alc663_g50v_setup alc663_m51va_setup
16711
16749static void alc663_g50v_inithook(struct hda_codec *codec) 16712static void alc663_g50v_inithook(struct hda_codec *codec)
16750{ 16713{
16751 alc663_m51va_speaker_automute(codec); 16714 alc663_m51va_speaker_automute(codec);
16752 alc662_eeepc_mic_automute(codec); 16715 alc_mic_automute(codec);
16753} 16716}
16754 16717
16755static struct snd_kcontrol_new alc662_ecs_mixer[] = { 16718static struct snd_kcontrol_new alc662_ecs_mixer[] = {
@@ -16953,8 +16916,8 @@ static struct alc_config_preset alc662_presets[] = {
16953 .dac_nids = alc662_dac_nids, 16916 .dac_nids = alc662_dac_nids,
16954 .num_channel_mode = ARRAY_SIZE(alc662_3ST_2ch_modes), 16917 .num_channel_mode = ARRAY_SIZE(alc662_3ST_2ch_modes),
16955 .channel_mode = alc662_3ST_2ch_modes, 16918 .channel_mode = alc662_3ST_2ch_modes,
16956 .input_mux = &alc662_eeepc_capture_source,
16957 .unsol_event = alc662_eeepc_unsol_event, 16919 .unsol_event = alc662_eeepc_unsol_event,
16920 .setup = alc662_eeepc_setup,
16958 .init_hook = alc662_eeepc_inithook, 16921 .init_hook = alc662_eeepc_inithook,
16959 }, 16922 },
16960 [ALC662_ASUS_EEEPC_EP20] = { 16923 [ALC662_ASUS_EEEPC_EP20] = {
@@ -16968,6 +16931,7 @@ static struct alc_config_preset alc662_presets[] = {
16968 .channel_mode = alc662_3ST_6ch_modes, 16931 .channel_mode = alc662_3ST_6ch_modes,
16969 .input_mux = &alc662_lenovo_101e_capture_source, 16932 .input_mux = &alc662_lenovo_101e_capture_source,
16970 .unsol_event = alc662_eeepc_unsol_event, 16933 .unsol_event = alc662_eeepc_unsol_event,
16934 .setup = alc662_eeepc_ep20_setup,
16971 .init_hook = alc662_eeepc_ep20_inithook, 16935 .init_hook = alc662_eeepc_ep20_inithook,
16972 }, 16936 },
16973 [ALC662_ECS] = { 16937 [ALC662_ECS] = {
@@ -16978,8 +16942,8 @@ static struct alc_config_preset alc662_presets[] = {
16978 .dac_nids = alc662_dac_nids, 16942 .dac_nids = alc662_dac_nids,
16979 .num_channel_mode = ARRAY_SIZE(alc662_3ST_2ch_modes), 16943 .num_channel_mode = ARRAY_SIZE(alc662_3ST_2ch_modes),
16980 .channel_mode = alc662_3ST_2ch_modes, 16944 .channel_mode = alc662_3ST_2ch_modes,
16981 .input_mux = &alc662_eeepc_capture_source,
16982 .unsol_event = alc662_eeepc_unsol_event, 16945 .unsol_event = alc662_eeepc_unsol_event,
16946 .setup = alc662_eeepc_setup,
16983 .init_hook = alc662_eeepc_inithook, 16947 .init_hook = alc662_eeepc_inithook,
16984 }, 16948 },
16985 [ALC663_ASUS_M51VA] = { 16949 [ALC663_ASUS_M51VA] = {
@@ -16990,8 +16954,8 @@ static struct alc_config_preset alc662_presets[] = {
16990 .dig_out_nid = ALC662_DIGOUT_NID, 16954 .dig_out_nid = ALC662_DIGOUT_NID,
16991 .num_channel_mode = ARRAY_SIZE(alc662_3ST_2ch_modes), 16955 .num_channel_mode = ARRAY_SIZE(alc662_3ST_2ch_modes),
16992 .channel_mode = alc662_3ST_2ch_modes, 16956 .channel_mode = alc662_3ST_2ch_modes,
16993 .input_mux = &alc663_m51va_capture_source,
16994 .unsol_event = alc663_m51va_unsol_event, 16957 .unsol_event = alc663_m51va_unsol_event,
16958 .setup = alc663_m51va_setup,
16995 .init_hook = alc663_m51va_inithook, 16959 .init_hook = alc663_m51va_inithook,
16996 }, 16960 },
16997 [ALC663_ASUS_G71V] = { 16961 [ALC663_ASUS_G71V] = {
@@ -17002,8 +16966,8 @@ static struct alc_config_preset alc662_presets[] = {
17002 .dig_out_nid = ALC662_DIGOUT_NID, 16966 .dig_out_nid = ALC662_DIGOUT_NID,
17003 .num_channel_mode = ARRAY_SIZE(alc662_3ST_2ch_modes), 16967 .num_channel_mode = ARRAY_SIZE(alc662_3ST_2ch_modes),
17004 .channel_mode = alc662_3ST_2ch_modes, 16968 .channel_mode = alc662_3ST_2ch_modes,
17005 .input_mux = &alc662_eeepc_capture_source,
17006 .unsol_event = alc663_g71v_unsol_event, 16969 .unsol_event = alc663_g71v_unsol_event,
16970 .setup = alc663_g71v_setup,
17007 .init_hook = alc663_g71v_inithook, 16971 .init_hook = alc663_g71v_inithook,
17008 }, 16972 },
17009 [ALC663_ASUS_H13] = { 16973 [ALC663_ASUS_H13] = {
@@ -17013,7 +16977,6 @@ static struct alc_config_preset alc662_presets[] = {
17013 .dac_nids = alc662_dac_nids, 16977 .dac_nids = alc662_dac_nids,
17014 .num_channel_mode = ARRAY_SIZE(alc662_3ST_2ch_modes), 16978 .num_channel_mode = ARRAY_SIZE(alc662_3ST_2ch_modes),
17015 .channel_mode = alc662_3ST_2ch_modes, 16979 .channel_mode = alc662_3ST_2ch_modes,
17016 .input_mux = &alc663_m51va_capture_source,
17017 .unsol_event = alc663_m51va_unsol_event, 16980 .unsol_event = alc663_m51va_unsol_event,
17018 .init_hook = alc663_m51va_inithook, 16981 .init_hook = alc663_m51va_inithook,
17019 }, 16982 },
@@ -17027,6 +16990,7 @@ static struct alc_config_preset alc662_presets[] = {
17027 .channel_mode = alc662_3ST_6ch_modes, 16990 .channel_mode = alc662_3ST_6ch_modes,
17028 .input_mux = &alc663_capture_source, 16991 .input_mux = &alc663_capture_source,
17029 .unsol_event = alc663_g50v_unsol_event, 16992 .unsol_event = alc663_g50v_unsol_event,
16993 .setup = alc663_g50v_setup,
17030 .init_hook = alc663_g50v_inithook, 16994 .init_hook = alc663_g50v_inithook,
17031 }, 16995 },
17032 [ALC663_ASUS_MODE1] = { 16996 [ALC663_ASUS_MODE1] = {
@@ -17040,8 +17004,8 @@ static struct alc_config_preset alc662_presets[] = {
17040 .dig_out_nid = ALC662_DIGOUT_NID, 17004 .dig_out_nid = ALC662_DIGOUT_NID,
17041 .num_channel_mode = ARRAY_SIZE(alc662_3ST_2ch_modes), 17005 .num_channel_mode = ARRAY_SIZE(alc662_3ST_2ch_modes),
17042 .channel_mode = alc662_3ST_2ch_modes, 17006 .channel_mode = alc662_3ST_2ch_modes,
17043 .input_mux = &alc662_eeepc_capture_source,
17044 .unsol_event = alc663_mode1_unsol_event, 17007 .unsol_event = alc663_mode1_unsol_event,
17008 .setup = alc663_mode1_setup,
17045 .init_hook = alc663_mode1_inithook, 17009 .init_hook = alc663_mode1_inithook,
17046 }, 17010 },
17047 [ALC662_ASUS_MODE2] = { 17011 [ALC662_ASUS_MODE2] = {
@@ -17054,8 +17018,8 @@ static struct alc_config_preset alc662_presets[] = {
17054 .dig_out_nid = ALC662_DIGOUT_NID, 17018 .dig_out_nid = ALC662_DIGOUT_NID,
17055 .num_channel_mode = ARRAY_SIZE(alc662_3ST_2ch_modes), 17019 .num_channel_mode = ARRAY_SIZE(alc662_3ST_2ch_modes),
17056 .channel_mode = alc662_3ST_2ch_modes, 17020 .channel_mode = alc662_3ST_2ch_modes,
17057 .input_mux = &alc662_eeepc_capture_source,
17058 .unsol_event = alc662_mode2_unsol_event, 17021 .unsol_event = alc662_mode2_unsol_event,
17022 .setup = alc662_mode2_setup,
17059 .init_hook = alc662_mode2_inithook, 17023 .init_hook = alc662_mode2_inithook,
17060 }, 17024 },
17061 [ALC663_ASUS_MODE3] = { 17025 [ALC663_ASUS_MODE3] = {
@@ -17069,8 +17033,8 @@ static struct alc_config_preset alc662_presets[] = {
17069 .dig_out_nid = ALC662_DIGOUT_NID, 17033 .dig_out_nid = ALC662_DIGOUT_NID,
17070 .num_channel_mode = ARRAY_SIZE(alc662_3ST_2ch_modes), 17034 .num_channel_mode = ARRAY_SIZE(alc662_3ST_2ch_modes),
17071 .channel_mode = alc662_3ST_2ch_modes, 17035 .channel_mode = alc662_3ST_2ch_modes,
17072 .input_mux = &alc662_eeepc_capture_source,
17073 .unsol_event = alc663_mode3_unsol_event, 17036 .unsol_event = alc663_mode3_unsol_event,
17037 .setup = alc663_mode3_setup,
17074 .init_hook = alc663_mode3_inithook, 17038 .init_hook = alc663_mode3_inithook,
17075 }, 17039 },
17076 [ALC663_ASUS_MODE4] = { 17040 [ALC663_ASUS_MODE4] = {
@@ -17084,8 +17048,8 @@ static struct alc_config_preset alc662_presets[] = {
17084 .dig_out_nid = ALC662_DIGOUT_NID, 17048 .dig_out_nid = ALC662_DIGOUT_NID,
17085 .num_channel_mode = ARRAY_SIZE(alc662_3ST_2ch_modes), 17049 .num_channel_mode = ARRAY_SIZE(alc662_3ST_2ch_modes),
17086 .channel_mode = alc662_3ST_2ch_modes, 17050 .channel_mode = alc662_3ST_2ch_modes,
17087 .input_mux = &alc662_eeepc_capture_source,
17088 .unsol_event = alc663_mode4_unsol_event, 17051 .unsol_event = alc663_mode4_unsol_event,
17052 .setup = alc663_mode4_setup,
17089 .init_hook = alc663_mode4_inithook, 17053 .init_hook = alc663_mode4_inithook,
17090 }, 17054 },
17091 [ALC663_ASUS_MODE5] = { 17055 [ALC663_ASUS_MODE5] = {
@@ -17099,8 +17063,8 @@ static struct alc_config_preset alc662_presets[] = {
17099 .dig_out_nid = ALC662_DIGOUT_NID, 17063 .dig_out_nid = ALC662_DIGOUT_NID,
17100 .num_channel_mode = ARRAY_SIZE(alc662_3ST_2ch_modes), 17064 .num_channel_mode = ARRAY_SIZE(alc662_3ST_2ch_modes),
17101 .channel_mode = alc662_3ST_2ch_modes, 17065 .channel_mode = alc662_3ST_2ch_modes,
17102 .input_mux = &alc662_eeepc_capture_source,
17103 .unsol_event = alc663_mode5_unsol_event, 17066 .unsol_event = alc663_mode5_unsol_event,
17067 .setup = alc663_mode5_setup,
17104 .init_hook = alc663_mode5_inithook, 17068 .init_hook = alc663_mode5_inithook,
17105 }, 17069 },
17106 [ALC663_ASUS_MODE6] = { 17070 [ALC663_ASUS_MODE6] = {
@@ -17114,8 +17078,8 @@ static struct alc_config_preset alc662_presets[] = {
17114 .dig_out_nid = ALC662_DIGOUT_NID, 17078 .dig_out_nid = ALC662_DIGOUT_NID,
17115 .num_channel_mode = ARRAY_SIZE(alc662_3ST_2ch_modes), 17079 .num_channel_mode = ARRAY_SIZE(alc662_3ST_2ch_modes),
17116 .channel_mode = alc662_3ST_2ch_modes, 17080 .channel_mode = alc662_3ST_2ch_modes,
17117 .input_mux = &alc662_eeepc_capture_source,
17118 .unsol_event = alc663_mode6_unsol_event, 17081 .unsol_event = alc663_mode6_unsol_event,
17082 .setup = alc663_mode6_setup,
17119 .init_hook = alc663_mode6_inithook, 17083 .init_hook = alc663_mode6_inithook,
17120 }, 17084 },
17121 [ALC272_DELL] = { 17085 [ALC272_DELL] = {
@@ -17129,8 +17093,8 @@ static struct alc_config_preset alc662_presets[] = {
17129 .num_adc_nids = ARRAY_SIZE(alc272_adc_nids), 17093 .num_adc_nids = ARRAY_SIZE(alc272_adc_nids),
17130 .capsrc_nids = alc272_capsrc_nids, 17094 .capsrc_nids = alc272_capsrc_nids,
17131 .channel_mode = alc662_3ST_2ch_modes, 17095 .channel_mode = alc662_3ST_2ch_modes,
17132 .input_mux = &alc663_m51va_capture_source,
17133 .unsol_event = alc663_m51va_unsol_event, 17096 .unsol_event = alc663_m51va_unsol_event,
17097 .setup = alc663_m51va_setup,
17134 .init_hook = alc663_m51va_inithook, 17098 .init_hook = alc663_m51va_inithook,
17135 }, 17099 },
17136 [ALC272_DELL_ZM1] = { 17100 [ALC272_DELL_ZM1] = {
@@ -17141,11 +17105,11 @@ static struct alc_config_preset alc662_presets[] = {
17141 .dac_nids = alc662_dac_nids, 17105 .dac_nids = alc662_dac_nids,
17142 .num_channel_mode = ARRAY_SIZE(alc662_3ST_2ch_modes), 17106 .num_channel_mode = ARRAY_SIZE(alc662_3ST_2ch_modes),
17143 .adc_nids = alc662_adc_nids, 17107 .adc_nids = alc662_adc_nids,
17144 .num_adc_nids = ARRAY_SIZE(alc662_adc_nids), 17108 .num_adc_nids = 1,
17145 .capsrc_nids = alc662_capsrc_nids, 17109 .capsrc_nids = alc662_capsrc_nids,
17146 .channel_mode = alc662_3ST_2ch_modes, 17110 .channel_mode = alc662_3ST_2ch_modes,
17147 .input_mux = &alc663_m51va_capture_source,
17148 .unsol_event = alc663_m51va_unsol_event, 17111 .unsol_event = alc663_m51va_unsol_event,
17112 .setup = alc663_m51va_setup,
17149 .init_hook = alc663_m51va_inithook, 17113 .init_hook = alc663_m51va_inithook,
17150 }, 17114 },
17151 [ALC272_SAMSUNG_NC10] = { 17115 [ALC272_SAMSUNG_NC10] = {
@@ -17156,8 +17120,9 @@ static struct alc_config_preset alc662_presets[] = {
17156 .dac_nids = alc272_dac_nids, 17120 .dac_nids = alc272_dac_nids,
17157 .num_channel_mode = ARRAY_SIZE(alc662_3ST_2ch_modes), 17121 .num_channel_mode = ARRAY_SIZE(alc662_3ST_2ch_modes),
17158 .channel_mode = alc662_3ST_2ch_modes, 17122 .channel_mode = alc662_3ST_2ch_modes,
17159 .input_mux = &alc272_nc10_capture_source, 17123 /*.input_mux = &alc272_nc10_capture_source,*/
17160 .unsol_event = alc663_mode4_unsol_event, 17124 .unsol_event = alc663_mode4_unsol_event,
17125 .setup = alc663_mode4_setup,
17161 .init_hook = alc663_mode4_inithook, 17126 .init_hook = alc663_mode4_inithook,
17162 }, 17127 },
17163}; 17128};
@@ -17209,13 +17174,25 @@ static int alc662_auto_create_multi_out_ctls(struct alc_spec *spec,
17209 if (err < 0) 17174 if (err < 0)
17210 return err; 17175 return err;
17211 } else { 17176 } else {
17212 sprintf(name, "%s Playback Volume", chname[i]); 17177 const char *pfx;
17178 if (cfg->line_outs == 1 &&
17179 cfg->line_out_type == AUTO_PIN_SPEAKER_OUT) {
17180 if (!cfg->hp_pins)
17181 pfx = "Speaker";
17182 else
17183 pfx = "PCM";
17184 } else
17185 pfx = chname[i];
17186 sprintf(name, "%s Playback Volume", pfx);
17213 err = add_control(spec, ALC_CTL_WIDGET_VOL, name, 17187 err = add_control(spec, ALC_CTL_WIDGET_VOL, name,
17214 HDA_COMPOSE_AMP_VAL(nid, 3, 0, 17188 HDA_COMPOSE_AMP_VAL(nid, 3, 0,
17215 HDA_OUTPUT)); 17189 HDA_OUTPUT));
17216 if (err < 0) 17190 if (err < 0)
17217 return err; 17191 return err;
17218 sprintf(name, "%s Playback Switch", chname[i]); 17192 if (cfg->line_outs == 1 &&
17193 cfg->line_out_type == AUTO_PIN_SPEAKER_OUT)
17194 pfx = "Speaker";
17195 sprintf(name, "%s Playback Switch", pfx);
17219 err = add_control(spec, ALC_CTL_WIDGET_MUTE, name, 17196 err = add_control(spec, ALC_CTL_WIDGET_MUTE, name,
17220 HDA_COMPOSE_AMP_VAL(alc880_idx_to_mixer(i), 17197 HDA_COMPOSE_AMP_VAL(alc880_idx_to_mixer(i),
17221 3, 0, HDA_INPUT)); 17198 3, 0, HDA_INPUT));
@@ -17277,62 +17254,9 @@ static int alc662_auto_create_extra_out(struct alc_spec *spec, hda_nid_t pin,
17277 return 0; 17254 return 0;
17278} 17255}
17279 17256
17280/* return the index of the src widget from the connection list of the nid.
17281 * return -1 if not found
17282 */
17283static int alc662_input_pin_idx(struct hda_codec *codec, hda_nid_t nid,
17284 hda_nid_t src)
17285{
17286 hda_nid_t conn_list[HDA_MAX_CONNECTIONS];
17287 int i, conns;
17288
17289 conns = snd_hda_get_connections(codec, nid, conn_list,
17290 ARRAY_SIZE(conn_list));
17291 if (conns < 0)
17292 return -1;
17293 for (i = 0; i < conns; i++)
17294 if (conn_list[i] == src)
17295 return i;
17296 return -1;
17297}
17298
17299static int alc662_is_input_pin(struct hda_codec *codec, hda_nid_t nid)
17300{
17301 unsigned int pincap = snd_hda_query_pin_caps(codec, nid);
17302 return (pincap & AC_PINCAP_IN) != 0;
17303}
17304
17305/* create playback/capture controls for input pins */ 17257/* create playback/capture controls for input pins */
17306static int alc662_auto_create_analog_input_ctls(struct hda_codec *codec, 17258#define alc662_auto_create_input_ctls \
17307 const struct auto_pin_cfg *cfg) 17259 alc880_auto_create_input_ctls
17308{
17309 struct alc_spec *spec = codec->spec;
17310 struct hda_input_mux *imux = &spec->private_imux[0];
17311 int i, err, idx;
17312
17313 for (i = 0; i < AUTO_PIN_LAST; i++) {
17314 if (alc662_is_input_pin(codec, cfg->input_pins[i])) {
17315 idx = alc662_input_pin_idx(codec, 0x0b,
17316 cfg->input_pins[i]);
17317 if (idx >= 0) {
17318 err = new_analog_input(spec, cfg->input_pins[i],
17319 auto_pin_cfg_labels[i],
17320 idx, 0x0b);
17321 if (err < 0)
17322 return err;
17323 }
17324 idx = alc662_input_pin_idx(codec, 0x22,
17325 cfg->input_pins[i]);
17326 if (idx >= 0) {
17327 imux->items[imux->num_items].label =
17328 auto_pin_cfg_labels[i];
17329 imux->items[imux->num_items].index = idx;
17330 imux->num_items++;
17331 }
17332 }
17333 }
17334 return 0;
17335}
17336 17260
17337static void alc662_auto_set_output_and_unmute(struct hda_codec *codec, 17261static void alc662_auto_set_output_and_unmute(struct hda_codec *codec,
17338 hda_nid_t nid, int pin_type, 17262 hda_nid_t nid, int pin_type,
@@ -17386,7 +17310,7 @@ static void alc662_auto_init_analog_input(struct hda_codec *codec)
17386 17310
17387 for (i = 0; i < AUTO_PIN_LAST; i++) { 17311 for (i = 0; i < AUTO_PIN_LAST; i++) {
17388 hda_nid_t nid = spec->autocfg.input_pins[i]; 17312 hda_nid_t nid = spec->autocfg.input_pins[i];
17389 if (alc662_is_input_pin(codec, nid)) { 17313 if (alc_is_input_pin(codec, nid)) {
17390 alc_set_input_pin(codec, nid, i); 17314 alc_set_input_pin(codec, nid, i);
17391 if (nid != ALC662_PIN_CD_NID && 17315 if (nid != ALC662_PIN_CD_NID &&
17392 (get_wcaps(codec, nid) & AC_WCAP_OUT_AMP)) 17316 (get_wcaps(codec, nid) & AC_WCAP_OUT_AMP))
@@ -17427,7 +17351,7 @@ static int alc662_parse_auto_config(struct hda_codec *codec)
17427 "Headphone"); 17351 "Headphone");
17428 if (err < 0) 17352 if (err < 0)
17429 return err; 17353 return err;
17430 err = alc662_auto_create_analog_input_ctls(codec, &spec->autocfg); 17354 err = alc662_auto_create_input_ctls(codec, &spec->autocfg);
17431 if (err < 0) 17355 if (err < 0)
17432 return err; 17356 return err;
17433 17357
@@ -17484,8 +17408,8 @@ static int patch_alc662(struct hda_codec *codec)
17484 alc662_models, 17408 alc662_models,
17485 alc662_cfg_tbl); 17409 alc662_cfg_tbl);
17486 if (board_config < 0) { 17410 if (board_config < 0) {
17487 printk(KERN_INFO "hda_codec: Unknown model for %s, " 17411 printk(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
17488 "trying auto-probe from BIOS...\n", codec->chip_name); 17412 codec->chip_name);
17489 board_config = ALC662_AUTO; 17413 board_config = ALC662_AUTO;
17490 } 17414 }
17491 17415
@@ -17510,7 +17434,7 @@ static int patch_alc662(struct hda_codec *codec)
17510 } 17434 }
17511 17435
17512 if (board_config != ALC662_AUTO) 17436 if (board_config != ALC662_AUTO)
17513 setup_preset(spec, &alc662_presets[board_config]); 17437 setup_preset(codec, &alc662_presets[board_config]);
17514 17438
17515 spec->stream_analog_playback = &alc662_pcm_analog_playback; 17439 spec->stream_analog_playback = &alc662_pcm_analog_playback;
17516 spec->stream_analog_capture = &alc662_pcm_analog_capture; 17440 spec->stream_analog_capture = &alc662_pcm_analog_capture;
@@ -17526,7 +17450,7 @@ static int patch_alc662(struct hda_codec *codec)
17526 spec->capsrc_nids = alc662_capsrc_nids; 17450 spec->capsrc_nids = alc662_capsrc_nids;
17527 17451
17528 if (!spec->cap_mixer) 17452 if (!spec->cap_mixer)
17529 set_capture_mixer(spec); 17453 set_capture_mixer(codec);
17530 if (codec->vendor_id == 0x10ec0662) 17454 if (codec->vendor_id == 0x10ec0662)
17531 set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT); 17455 set_beep_amp(spec, 0x0b, 0x05, HDA_INPUT);
17532 else 17456 else
@@ -17562,23 +17486,23 @@ static struct hda_codec_preset snd_hda_preset_realtek[] = {
17562 { .id = 0x10ec0861, .name = "ALC861", .patch = patch_alc861 }, 17486 { .id = 0x10ec0861, .name = "ALC861", .patch = patch_alc861 },
17563 { .id = 0x10ec0862, .name = "ALC861-VD", .patch = patch_alc861vd }, 17487 { .id = 0x10ec0862, .name = "ALC861-VD", .patch = patch_alc861vd },
17564 { .id = 0x10ec0662, .rev = 0x100002, .name = "ALC662 rev2", 17488 { .id = 0x10ec0662, .rev = 0x100002, .name = "ALC662 rev2",
17565 .patch = patch_alc883 }, 17489 .patch = patch_alc882 },
17566 { .id = 0x10ec0662, .rev = 0x100101, .name = "ALC662 rev1", 17490 { .id = 0x10ec0662, .rev = 0x100101, .name = "ALC662 rev1",
17567 .patch = patch_alc662 }, 17491 .patch = patch_alc662 },
17568 { .id = 0x10ec0663, .name = "ALC663", .patch = patch_alc662 }, 17492 { .id = 0x10ec0663, .name = "ALC663", .patch = patch_alc662 },
17569 { .id = 0x10ec0880, .name = "ALC880", .patch = patch_alc880 }, 17493 { .id = 0x10ec0880, .name = "ALC880", .patch = patch_alc880 },
17570 { .id = 0x10ec0882, .name = "ALC882", .patch = patch_alc882 }, 17494 { .id = 0x10ec0882, .name = "ALC882", .patch = patch_alc882 },
17571 { .id = 0x10ec0883, .name = "ALC883", .patch = patch_alc883 }, 17495 { .id = 0x10ec0883, .name = "ALC883", .patch = patch_alc882 },
17572 { .id = 0x10ec0885, .rev = 0x100101, .name = "ALC889A", 17496 { .id = 0x10ec0885, .rev = 0x100101, .name = "ALC889A",
17573 .patch = patch_alc882 }, /* should be patch_alc883() in future */ 17497 .patch = patch_alc882 },
17574 { .id = 0x10ec0885, .rev = 0x100103, .name = "ALC889A", 17498 { .id = 0x10ec0885, .rev = 0x100103, .name = "ALC889A",
17575 .patch = patch_alc882 }, /* should be patch_alc883() in future */ 17499 .patch = patch_alc882 },
17576 { .id = 0x10ec0885, .name = "ALC885", .patch = patch_alc882 }, 17500 { .id = 0x10ec0885, .name = "ALC885", .patch = patch_alc882 },
17577 { .id = 0x10ec0887, .name = "ALC887", .patch = patch_alc883 }, 17501 { .id = 0x10ec0887, .name = "ALC887", .patch = patch_alc882 },
17578 { .id = 0x10ec0888, .rev = 0x100101, .name = "ALC1200", 17502 { .id = 0x10ec0888, .rev = 0x100101, .name = "ALC1200",
17579 .patch = patch_alc883 }, 17503 .patch = patch_alc882 },
17580 { .id = 0x10ec0888, .name = "ALC888", .patch = patch_alc883 }, 17504 { .id = 0x10ec0888, .name = "ALC888", .patch = patch_alc882 },
17581 { .id = 0x10ec0889, .name = "ALC889", .patch = patch_alc883 }, 17505 { .id = 0x10ec0889, .name = "ALC889", .patch = patch_alc882 },
17582 {} /* terminator */ 17506 {} /* terminator */
17583}; 17507};
17584 17508
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 6990cfcb6a38..e31e53dc6962 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -40,6 +40,8 @@ enum {
40 STAC_INSERT_EVENT, 40 STAC_INSERT_EVENT,
41 STAC_PWR_EVENT, 41 STAC_PWR_EVENT,
42 STAC_HP_EVENT, 42 STAC_HP_EVENT,
43 STAC_LO_EVENT,
44 STAC_MIC_EVENT,
43}; 45};
44 46
45enum { 47enum {
@@ -81,6 +83,7 @@ enum {
81 STAC_DELL_M6_DMIC, 83 STAC_DELL_M6_DMIC,
82 STAC_DELL_M6_BOTH, 84 STAC_DELL_M6_BOTH,
83 STAC_DELL_EQ, 85 STAC_DELL_EQ,
86 STAC_ALIENWARE_M17X,
84 STAC_92HD73XX_MODELS 87 STAC_92HD73XX_MODELS
85}; 88};
86 89
@@ -177,6 +180,12 @@ struct sigmatel_jack {
177 struct snd_jack *jack; 180 struct snd_jack *jack;
178}; 181};
179 182
183struct sigmatel_mic_route {
184 hda_nid_t pin;
185 unsigned char mux_idx;
186 unsigned char dmux_idx;
187};
188
180struct sigmatel_spec { 189struct sigmatel_spec {
181 struct snd_kcontrol_new *mixers[4]; 190 struct snd_kcontrol_new *mixers[4];
182 unsigned int num_mixers; 191 unsigned int num_mixers;
@@ -188,6 +197,7 @@ struct sigmatel_spec {
188 unsigned int hp_detect: 1; 197 unsigned int hp_detect: 1;
189 unsigned int spdif_mute: 1; 198 unsigned int spdif_mute: 1;
190 unsigned int check_volume_offset:1; 199 unsigned int check_volume_offset:1;
200 unsigned int auto_mic:1;
191 201
192 /* gpio lines */ 202 /* gpio lines */
193 unsigned int eapd_mask; 203 unsigned int eapd_mask;
@@ -219,7 +229,6 @@ struct sigmatel_spec {
219 229
220 /* playback */ 230 /* playback */
221 struct hda_input_mux *mono_mux; 231 struct hda_input_mux *mono_mux;
222 struct hda_input_mux *amp_mux;
223 unsigned int cur_mmux; 232 unsigned int cur_mmux;
224 struct hda_multi_out multiout; 233 struct hda_multi_out multiout;
225 hda_nid_t dac_nids[5]; 234 hda_nid_t dac_nids[5];
@@ -239,6 +248,15 @@ struct sigmatel_spec {
239 unsigned int num_dmuxes; 248 unsigned int num_dmuxes;
240 hda_nid_t *smux_nids; 249 hda_nid_t *smux_nids;
241 unsigned int num_smuxes; 250 unsigned int num_smuxes;
251 unsigned int num_analog_muxes;
252
253 unsigned long *capvols; /* amp-volume attr: HDA_COMPOSE_AMP_VAL() */
254 unsigned long *capsws; /* amp-mute attr: HDA_COMPOSE_AMP_VAL() */
255 unsigned int num_caps; /* number of capture volume/switch elements */
256
257 struct sigmatel_mic_route ext_mic;
258 struct sigmatel_mic_route int_mic;
259
242 const char **spdif_labels; 260 const char **spdif_labels;
243 261
244 hda_nid_t dig_in_nid; 262 hda_nid_t dig_in_nid;
@@ -263,7 +281,6 @@ struct sigmatel_spec {
263 unsigned int cur_smux[2]; 281 unsigned int cur_smux[2];
264 unsigned int cur_amux; 282 unsigned int cur_amux;
265 hda_nid_t *amp_nids; 283 hda_nid_t *amp_nids;
266 unsigned int num_amps;
267 unsigned int powerdown_adcs; 284 unsigned int powerdown_adcs;
268 285
269 /* i/o switches */ 286 /* i/o switches */
@@ -282,7 +299,6 @@ struct sigmatel_spec {
282 struct hda_input_mux private_dimux; 299 struct hda_input_mux private_dimux;
283 struct hda_input_mux private_imux; 300 struct hda_input_mux private_imux;
284 struct hda_input_mux private_smux; 301 struct hda_input_mux private_smux;
285 struct hda_input_mux private_amp_mux;
286 struct hda_input_mux private_mono_mux; 302 struct hda_input_mux private_mono_mux;
287}; 303};
288 304
@@ -311,11 +327,6 @@ static hda_nid_t stac92hd73xx_adc_nids[2] = {
311 0x1a, 0x1b 327 0x1a, 0x1b
312}; 328};
313 329
314#define DELL_M6_AMP 2
315static hda_nid_t stac92hd73xx_amp_nids[3] = {
316 0x0b, 0x0c, 0x0e
317};
318
319#define STAC92HD73XX_NUM_DMICS 2 330#define STAC92HD73XX_NUM_DMICS 2
320static hda_nid_t stac92hd73xx_dmic_nids[STAC92HD73XX_NUM_DMICS + 1] = { 331static hda_nid_t stac92hd73xx_dmic_nids[STAC92HD73XX_NUM_DMICS + 1] = {
321 0x13, 0x14, 0 332 0x13, 0x14, 0
@@ -323,8 +334,8 @@ static hda_nid_t stac92hd73xx_dmic_nids[STAC92HD73XX_NUM_DMICS + 1] = {
323 334
324#define STAC92HD73_DAC_COUNT 5 335#define STAC92HD73_DAC_COUNT 5
325 336
326static hda_nid_t stac92hd73xx_mux_nids[4] = { 337static hda_nid_t stac92hd73xx_mux_nids[2] = {
327 0x28, 0x29, 0x2a, 0x2b, 338 0x20, 0x21,
328}; 339};
329 340
330static hda_nid_t stac92hd73xx_dmux_nids[2] = { 341static hda_nid_t stac92hd73xx_dmux_nids[2] = {
@@ -335,14 +346,16 @@ static hda_nid_t stac92hd73xx_smux_nids[2] = {
335 0x22, 0x23, 346 0x22, 0x23,
336}; 347};
337 348
338#define STAC92HD83XXX_NUM_DMICS 2 349#define STAC92HD73XX_NUM_CAPS 2
339static hda_nid_t stac92hd83xxx_dmic_nids[STAC92HD83XXX_NUM_DMICS + 1] = { 350static unsigned long stac92hd73xx_capvols[] = {
340 0x11, 0x12, 0 351 HDA_COMPOSE_AMP_VAL(0x20, 3, 0, HDA_OUTPUT),
352 HDA_COMPOSE_AMP_VAL(0x21, 3, 0, HDA_OUTPUT),
341}; 353};
354#define stac92hd73xx_capsws stac92hd73xx_capvols
342 355
343#define STAC92HD83_DAC_COUNT 3 356#define STAC92HD83_DAC_COUNT 3
344 357
345static hda_nid_t stac92hd83xxx_dmux_nids[2] = { 358static hda_nid_t stac92hd83xxx_mux_nids[2] = {
346 0x17, 0x18, 359 0x17, 0x18,
347}; 360};
348 361
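
The stac92hd73xx_capvols/capsws pair introduced in this hunk (and the analogous arrays for the other codecs below) hold pre-composed amplifier attributes: each element encodes which widget NID, channel mask, connection index and direction the generated "Capture Volume" and "Capture Switch" controls should act on, which is what lets the hand-written per-codec capture mixer arrays be dropped further down in the patch. The exact bit layout belongs to HDA_COMPOSE_AMP_VAL() in the HDA core headers; the packing below is only an illustrative stand-in with made-up field widths:

	#include <stdio.h>

	/* illustrative packing of (nid, channels, index, direction) into one
	 * word; the real HDA_COMPOSE_AMP_VAL() layout may differ */
	#define AMP_VAL(nid, chs, idx, dir) \
		(((unsigned long)(nid)) | ((unsigned long)(chs) << 16) | \
		 ((unsigned long)(idx) << 19) | ((unsigned long)(dir) << 23))

	#define AMP_NID(v)  ((v) & 0xffff)
	#define AMP_CHS(v)  (((v) >> 16) & 0x7)
	#define AMP_IDX(v)  (((v) >> 19) & 0xf)
	#define AMP_DIR(v)  (((v) >> 23) & 0x1)

	int main(void)
	{
		/* two capture amps on widgets 0x20 and 0x21, stereo, output side */
		unsigned long capvols[] = { AMP_VAL(0x20, 3, 0, 1),
					    AMP_VAL(0x21, 3, 0, 1) };
		unsigned int i;

		for (i = 0; i < 2; i++)
			printf("cap %u: nid=0x%lx chs=%lu idx=%lu dir=%lu\n", i,
			       AMP_NID(capvols[i]), AMP_CHS(capvols[i]),
			       AMP_IDX(capvols[i]), AMP_DIR(capvols[i]));
		return 0;
	}

Keeping volumes and switches in parallel arrays means one index identifies a complete capture element, which is how stac92xx_add_capvol_ctls() later in this patch pairs them.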
@@ -362,9 +375,12 @@ static unsigned int stac92hd83xxx_pwr_mapping[4] = {
362 0x03, 0x0c, 0x20, 0x40, 375 0x03, 0x0c, 0x20, 0x40,
363}; 376};
364 377
365static hda_nid_t stac92hd83xxx_amp_nids[1] = { 378#define STAC92HD83XXX_NUM_CAPS 2
366 0xc, 379static unsigned long stac92hd83xxx_capvols[] = {
380 HDA_COMPOSE_AMP_VAL(0x17, 3, 0, HDA_OUTPUT),
381 HDA_COMPOSE_AMP_VAL(0x18, 3, 0, HDA_OUTPUT),
367}; 382};
383#define stac92hd83xxx_capsws stac92hd83xxx_capvols
368 384
369static hda_nid_t stac92hd71bxx_pwr_nids[3] = { 385static hda_nid_t stac92hd71bxx_pwr_nids[3] = {
370 0x0a, 0x0d, 0x0f 386 0x0a, 0x0d, 0x0f
@@ -395,6 +411,13 @@ static hda_nid_t stac92hd71bxx_slave_dig_outs[2] = {
395 0x22, 0 411 0x22, 0
396}; 412};
397 413
414#define STAC92HD71BXX_NUM_CAPS 2
415static unsigned long stac92hd71bxx_capvols[] = {
416 HDA_COMPOSE_AMP_VAL(0x1c, 3, 0, HDA_OUTPUT),
417 HDA_COMPOSE_AMP_VAL(0x1d, 3, 0, HDA_OUTPUT),
418};
419#define stac92hd71bxx_capsws stac92hd71bxx_capvols
420
398static hda_nid_t stac925x_adc_nids[1] = { 421static hda_nid_t stac925x_adc_nids[1] = {
399 0x03, 422 0x03,
400}; 423};
@@ -416,6 +439,13 @@ static hda_nid_t stac925x_dmux_nids[1] = {
416 0x14, 439 0x14,
417}; 440};
418 441
442static unsigned long stac925x_capvols[] = {
443 HDA_COMPOSE_AMP_VAL(0x09, 3, 0, HDA_OUTPUT),
444};
445static unsigned long stac925x_capsws[] = {
446 HDA_COMPOSE_AMP_VAL(0x14, 3, 0, HDA_OUTPUT),
447};
448
419static hda_nid_t stac922x_adc_nids[2] = { 449static hda_nid_t stac922x_adc_nids[2] = {
420 0x06, 0x07, 450 0x06, 0x07,
421}; 451};
@@ -424,6 +454,13 @@ static hda_nid_t stac922x_mux_nids[2] = {
424 0x12, 0x13, 454 0x12, 0x13,
425}; 455};
426 456
457#define STAC922X_NUM_CAPS 2
458static unsigned long stac922x_capvols[] = {
459 HDA_COMPOSE_AMP_VAL(0x17, 3, 0, HDA_INPUT),
460 HDA_COMPOSE_AMP_VAL(0x18, 3, 0, HDA_INPUT),
461};
462#define stac922x_capsws stac922x_capvols
463
427static hda_nid_t stac927x_slave_dig_outs[2] = { 464static hda_nid_t stac927x_slave_dig_outs[2] = {
428 0x1f, 0, 465 0x1f, 0,
429}; 466};
@@ -453,6 +490,18 @@ static hda_nid_t stac927x_dmic_nids[STAC927X_NUM_DMICS + 1] = {
453 0x13, 0x14, 0 490 0x13, 0x14, 0
454}; 491};
455 492
493#define STAC927X_NUM_CAPS 3
494static unsigned long stac927x_capvols[] = {
495 HDA_COMPOSE_AMP_VAL(0x18, 3, 0, HDA_INPUT),
496 HDA_COMPOSE_AMP_VAL(0x19, 3, 0, HDA_INPUT),
497 HDA_COMPOSE_AMP_VAL(0x1a, 3, 0, HDA_INPUT),
498};
499static unsigned long stac927x_capsws[] = {
500 HDA_COMPOSE_AMP_VAL(0x1b, 3, 0, HDA_OUTPUT),
501 HDA_COMPOSE_AMP_VAL(0x1c, 3, 0, HDA_OUTPUT),
502 HDA_COMPOSE_AMP_VAL(0x1d, 3, 0, HDA_OUTPUT),
503};
504
456static const char *stac927x_spdif_labels[5] = { 505static const char *stac927x_spdif_labels[5] = {
457 "Digital Playback", "ADAT", "Analog Mux 1", 506 "Digital Playback", "ADAT", "Analog Mux 1",
458 "Analog Mux 2", "Analog Mux 3" 507 "Analog Mux 2", "Analog Mux 3"
@@ -479,6 +528,16 @@ static hda_nid_t stac9205_dmic_nids[STAC9205_NUM_DMICS + 1] = {
479 0x17, 0x18, 0 528 0x17, 0x18, 0
480}; 529};
481 530
531#define STAC9205_NUM_CAPS 2
532static unsigned long stac9205_capvols[] = {
533 HDA_COMPOSE_AMP_VAL(0x1b, 3, 0, HDA_INPUT),
534 HDA_COMPOSE_AMP_VAL(0x1c, 3, 0, HDA_INPUT),
535};
536static unsigned long stac9205_capsws[] = {
537 HDA_COMPOSE_AMP_VAL(0x1d, 3, 0, HDA_OUTPUT),
538 HDA_COMPOSE_AMP_VAL(0x1e, 3, 0, HDA_OUTPUT),
539};
540
482static hda_nid_t stac9200_pin_nids[8] = { 541static hda_nid_t stac9200_pin_nids[8] = {
483 0x08, 0x09, 0x0d, 0x0e, 542 0x08, 0x09, 0x0d, 0x0e,
484 0x0f, 0x10, 0x11, 0x12, 543 0x0f, 0x10, 0x11, 0x12,
@@ -529,34 +588,6 @@ static hda_nid_t stac9205_pin_nids[12] = {
529 0x21, 0x22, 588 0x21, 0x22,
530}; 589};
531 590
532#define stac92xx_amp_volume_info snd_hda_mixer_amp_volume_info
533
534static int stac92xx_amp_volume_get(struct snd_kcontrol *kcontrol,
535 struct snd_ctl_elem_value *ucontrol)
536{
537 struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
538 struct sigmatel_spec *spec = codec->spec;
539 hda_nid_t nid = spec->amp_nids[spec->cur_amux];
540
541 kcontrol->private_value ^= get_amp_nid(kcontrol);
542 kcontrol->private_value |= nid;
543
544 return snd_hda_mixer_amp_volume_get(kcontrol, ucontrol);
545}
546
547static int stac92xx_amp_volume_put(struct snd_kcontrol *kcontrol,
548 struct snd_ctl_elem_value *ucontrol)
549{
550 struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
551 struct sigmatel_spec *spec = codec->spec;
552 hda_nid_t nid = spec->amp_nids[spec->cur_amux];
553
554 kcontrol->private_value ^= get_amp_nid(kcontrol);
555 kcontrol->private_value |= nid;
556
557 return snd_hda_mixer_amp_volume_put(kcontrol, ucontrol);
558}
559
560static int stac92xx_dmux_enum_info(struct snd_kcontrol *kcontrol, 591static int stac92xx_dmux_enum_info(struct snd_kcontrol *kcontrol,
561 struct snd_ctl_elem_info *uinfo) 592 struct snd_ctl_elem_info *uinfo)
562{ 593{
@@ -693,9 +724,35 @@ static int stac92xx_mux_enum_put(struct snd_kcontrol *kcontrol, struct snd_ctl_e
693 struct hda_codec *codec = snd_kcontrol_chip(kcontrol); 724 struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
694 struct sigmatel_spec *spec = codec->spec; 725 struct sigmatel_spec *spec = codec->spec;
695 unsigned int adc_idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id); 726 unsigned int adc_idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
696 727 const struct hda_input_mux *imux = spec->input_mux;
697 return snd_hda_input_mux_put(codec, spec->input_mux, ucontrol, 728 unsigned int idx, prev_idx;
698 spec->mux_nids[adc_idx], &spec->cur_mux[adc_idx]); 729
730 idx = ucontrol->value.enumerated.item[0];
731 if (idx >= imux->num_items)
732 idx = imux->num_items - 1;
733 prev_idx = spec->cur_mux[adc_idx];
734 if (prev_idx == idx)
735 return 0;
736 if (idx < spec->num_analog_muxes) {
737 snd_hda_codec_write_cache(codec, spec->mux_nids[adc_idx], 0,
738 AC_VERB_SET_CONNECT_SEL,
739 imux->items[idx].index);
740 if (prev_idx >= spec->num_analog_muxes) {
741 imux = spec->dinput_mux;
742 /* 0 = analog */
743 snd_hda_codec_write_cache(codec,
744 spec->dmux_nids[adc_idx], 0,
745 AC_VERB_SET_CONNECT_SEL,
746 imux->items[0].index);
747 }
748 } else {
749 imux = spec->dinput_mux;
750 snd_hda_codec_write_cache(codec, spec->dmux_nids[adc_idx], 0,
751 AC_VERB_SET_CONNECT_SEL,
752 imux->items[idx - 1].index);
753 }
754 spec->cur_mux[adc_idx] = idx;
755 return 1;
699} 756}
700 757
701static int stac92xx_mono_mux_enum_info(struct snd_kcontrol *kcontrol, 758static int stac92xx_mono_mux_enum_info(struct snd_kcontrol *kcontrol,
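
The rewritten stac92xx_mux_enum_put() above folds analog and digital capture sources into a single "Input Source" list: an index below spec->num_analog_muxes selects an item on the analog input mux (and, when switching back from a digital source, points the digital mux at its analog feed again), while a higher index selects an item on the digital mux. A minimal stand-alone model of that split, with the cached codec writes replaced by prints and all names hypothetical:

	#include <stdio.h>

	#define NUM_ANALOG 3	/* e.g. Mic, Front Mic, Line In */
	#define NUM_TOTAL  5	/* ...plus two digital mic items */

	static int cur;		/* currently selected item */

	static int select_input(int idx)
	{
		if (idx >= NUM_TOTAL)
			idx = NUM_TOTAL - 1;	/* clamp out-of-range values */
		if (idx == cur)
			return 0;		/* nothing changed */
		if (idx < NUM_ANALOG) {
			printf("analog mux <- item %d\n", idx);
			/* coming back from a digital source: point the
			 * digital mux at its analog feed again */
			if (cur >= NUM_ANALOG)
				printf("digital mux <- analog feed\n");
		} else {
			/* the driver looks the item up in its digital-mux
			 * table; here we only report the relative position */
			printf("digital mux <- digital item %d\n", idx - NUM_ANALOG);
		}
		cur = idx;
		return 1;			/* value changed */
	}

	int main(void)
	{
		select_input(1);	/* analog source              */
		select_input(4);	/* digital mic                */
		select_input(0);	/* back to analog, reset dmux */
		return 0;
	}

Returning 1 only when the selection actually changed mirrors the ALSA control convention the driver follows.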
@@ -726,41 +783,6 @@ static int stac92xx_mono_mux_enum_put(struct snd_kcontrol *kcontrol,
726 spec->mono_nid, &spec->cur_mmux); 783 spec->mono_nid, &spec->cur_mmux);
727} 784}
728 785
729static int stac92xx_amp_mux_enum_info(struct snd_kcontrol *kcontrol,
730 struct snd_ctl_elem_info *uinfo)
731{
732 struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
733 struct sigmatel_spec *spec = codec->spec;
734 return snd_hda_input_mux_info(spec->amp_mux, uinfo);
735}
736
737static int stac92xx_amp_mux_enum_get(struct snd_kcontrol *kcontrol,
738 struct snd_ctl_elem_value *ucontrol)
739{
740 struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
741 struct sigmatel_spec *spec = codec->spec;
742
743 ucontrol->value.enumerated.item[0] = spec->cur_amux;
744 return 0;
745}
746
747static int stac92xx_amp_mux_enum_put(struct snd_kcontrol *kcontrol,
748 struct snd_ctl_elem_value *ucontrol)
749{
750 struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
751 struct sigmatel_spec *spec = codec->spec;
752 struct snd_kcontrol *ctl =
753 snd_hda_find_mixer_ctl(codec, "Amp Capture Volume");
754 if (!ctl)
755 return -EINVAL;
756
757 snd_ctl_notify(codec->bus->card, SNDRV_CTL_EVENT_MASK_VALUE |
758 SNDRV_CTL_EVENT_MASK_INFO, &ctl->id);
759
760 return snd_hda_input_mux_put(codec, spec->amp_mux, ucontrol,
761 0, &spec->cur_amux);
762}
763
764#define stac92xx_aloopback_info snd_ctl_boolean_mono_info 786#define stac92xx_aloopback_info snd_ctl_boolean_mono_info
765 787
766static int stac92xx_aloopback_get(struct snd_kcontrol *kcontrol, 788static int stac92xx_aloopback_get(struct snd_kcontrol *kcontrol,
@@ -828,84 +850,16 @@ static struct hda_verb stac9200_eapd_init[] = {
828 {} 850 {}
829}; 851};
830 852
831static struct hda_verb stac92hd73xx_6ch_core_init[] = {
832 /* set master volume and direct control */
833 { 0x1f, AC_VERB_SET_VOLUME_KNOB_CONTROL, 0xff},
834 /* setup adcs to point to mixer */
835 { 0x20, AC_VERB_SET_CONNECT_SEL, 0x0b},
836 { 0x21, AC_VERB_SET_CONNECT_SEL, 0x0b},
837 { 0x0f, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
838 { 0x10, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
839 { 0x11, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
840 /* setup import muxs */
841 { 0x28, AC_VERB_SET_CONNECT_SEL, 0x01},
842 { 0x29, AC_VERB_SET_CONNECT_SEL, 0x01},
843 { 0x2a, AC_VERB_SET_CONNECT_SEL, 0x01},
844 { 0x2b, AC_VERB_SET_CONNECT_SEL, 0x00},
845 {}
846};
847
848static struct hda_verb dell_eq_core_init[] = { 853static struct hda_verb dell_eq_core_init[] = {
849 /* set master volume to max value without distortion 854 /* set master volume to max value without distortion
850 * and direct control */ 855 * and direct control */
851 { 0x1f, AC_VERB_SET_VOLUME_KNOB_CONTROL, 0xec}, 856 { 0x1f, AC_VERB_SET_VOLUME_KNOB_CONTROL, 0xec},
852 /* setup adcs to point to mixer */
853 { 0x20, AC_VERB_SET_CONNECT_SEL, 0x0b},
854 { 0x21, AC_VERB_SET_CONNECT_SEL, 0x0b},
855 /* setup import muxs */
856 { 0x28, AC_VERB_SET_CONNECT_SEL, 0x01},
857 { 0x29, AC_VERB_SET_CONNECT_SEL, 0x01},
858 { 0x2a, AC_VERB_SET_CONNECT_SEL, 0x01},
859 { 0x2b, AC_VERB_SET_CONNECT_SEL, 0x00},
860 {}
861};
862
863static struct hda_verb dell_m6_core_init[] = {
864 { 0x1f, AC_VERB_SET_VOLUME_KNOB_CONTROL, 0xff},
865 /* setup adcs to point to mixer */
866 { 0x20, AC_VERB_SET_CONNECT_SEL, 0x0b},
867 { 0x21, AC_VERB_SET_CONNECT_SEL, 0x0b},
868 /* setup import muxs */
869 { 0x28, AC_VERB_SET_CONNECT_SEL, 0x01},
870 { 0x29, AC_VERB_SET_CONNECT_SEL, 0x01},
871 { 0x2a, AC_VERB_SET_CONNECT_SEL, 0x01},
872 { 0x2b, AC_VERB_SET_CONNECT_SEL, 0x00},
873 {}
874};
875
876static struct hda_verb stac92hd73xx_8ch_core_init[] = {
877 /* set master volume and direct control */
878 { 0x1f, AC_VERB_SET_VOLUME_KNOB_CONTROL, 0xff},
879 /* setup adcs to point to mixer */
880 { 0x20, AC_VERB_SET_CONNECT_SEL, 0x0b},
881 { 0x21, AC_VERB_SET_CONNECT_SEL, 0x0b},
882 { 0x0f, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
883 { 0x10, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
884 { 0x11, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
885 /* setup import muxs */
886 { 0x28, AC_VERB_SET_CONNECT_SEL, 0x01},
887 { 0x29, AC_VERB_SET_CONNECT_SEL, 0x01},
888 { 0x2a, AC_VERB_SET_CONNECT_SEL, 0x01},
889 { 0x2b, AC_VERB_SET_CONNECT_SEL, 0x03},
890 {} 857 {}
891}; 858};
892 859
893static struct hda_verb stac92hd73xx_10ch_core_init[] = { 860static struct hda_verb stac92hd73xx_core_init[] = {
894 /* set master volume and direct control */ 861 /* set master volume and direct control */
895 { 0x1f, AC_VERB_SET_VOLUME_KNOB_CONTROL, 0xff}, 862 { 0x1f, AC_VERB_SET_VOLUME_KNOB_CONTROL, 0xff},
896 /* dac3 is connected to import3 mux */
897 { 0x18, AC_VERB_SET_AMP_GAIN_MUTE, 0xb07f},
898 /* setup adcs to point to mixer */
899 { 0x20, AC_VERB_SET_CONNECT_SEL, 0x0b},
900 { 0x21, AC_VERB_SET_CONNECT_SEL, 0x0b},
901 { 0x0f, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
902 { 0x10, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
903 { 0x11, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
904 /* setup import muxs */
905 { 0x28, AC_VERB_SET_CONNECT_SEL, 0x01},
906 { 0x29, AC_VERB_SET_CONNECT_SEL, 0x01},
907 { 0x2a, AC_VERB_SET_CONNECT_SEL, 0x01},
908 { 0x2b, AC_VERB_SET_CONNECT_SEL, 0x03},
909 {} 863 {}
910}; 864};
911 865
@@ -925,19 +879,6 @@ static struct hda_verb stac92hd71bxx_core_init[] = {
925 {} 879 {}
926}; 880};
927 881
928#define HD_DISABLE_PORTF 1
929static struct hda_verb stac92hd71bxx_analog_core_init[] = {
930 /* start of config #1 */
931
932 /* connect port 0f to audio mixer */
933 { 0x0f, AC_VERB_SET_CONNECT_SEL, 0x2},
934 /* start of config #2 */
935
936 /* set master volume and direct control */
937 { 0x28, AC_VERB_SET_VOLUME_KNOB_CONTROL, 0xff},
938 {}
939};
940
941static struct hda_verb stac92hd71bxx_unmute_core_init[] = { 882static struct hda_verb stac92hd71bxx_unmute_core_init[] = {
942 /* unmute right and left channels for nodes 0x0f, 0xa, 0x0d */ 883 /* unmute right and left channels for nodes 0x0f, 0xa, 0x0d */
943 { 0x0f, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)}, 884 { 0x0f, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
@@ -996,31 +937,6 @@ static struct hda_verb stac9205_core_init[] = {
996 .put = stac92xx_mono_mux_enum_put, \ 937 .put = stac92xx_mono_mux_enum_put, \
997 } 938 }
998 939
999#define STAC_AMP_MUX \
1000 { \
1001 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
1002 .name = "Amp Selector Capture Switch", \
1003 .count = 1, \
1004 .info = stac92xx_amp_mux_enum_info, \
1005 .get = stac92xx_amp_mux_enum_get, \
1006 .put = stac92xx_amp_mux_enum_put, \
1007 }
1008
1009#define STAC_AMP_VOL(xname, nid, chs, idx, dir) \
1010 { \
1011 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
1012 .name = xname, \
1013 .index = 0, \
1014 .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | \
1015 SNDRV_CTL_ELEM_ACCESS_TLV_READ | \
1016 SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK, \
1017 .info = stac92xx_amp_volume_info, \
1018 .get = stac92xx_amp_volume_get, \
1019 .put = stac92xx_amp_volume_put, \
1020 .tlv = { .c = snd_hda_mixer_amp_tlv }, \
1021 .private_value = HDA_COMPOSE_AMP_VAL(nid, chs, idx, dir) \
1022 }
1023
1024#define STAC_ANALOG_LOOPBACK(verb_read, verb_write, cnt) \ 940#define STAC_ANALOG_LOOPBACK(verb_read, verb_write, cnt) \
1025 { \ 941 { \
1026 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \ 942 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
@@ -1051,34 +967,6 @@ static struct snd_kcontrol_new stac9200_mixer[] = {
1051 { } /* end */ 967 { } /* end */
1052}; 968};
1053 969
1054#define DELL_M6_MIXER 6
1055static struct snd_kcontrol_new stac92hd73xx_6ch_mixer[] = {
1056 /* start of config #1 */
1057 HDA_CODEC_VOLUME("Front Mic Mixer Capture Volume", 0x1d, 0, HDA_INPUT),
1058 HDA_CODEC_MUTE("Front Mic Mixer Capture Switch", 0x1d, 0, HDA_INPUT),
1059
1060 HDA_CODEC_VOLUME("Line In Mixer Capture Volume", 0x1d, 0x2, HDA_INPUT),
1061 HDA_CODEC_MUTE("Line In Mixer Capture Switch", 0x1d, 0x2, HDA_INPUT),
1062
1063 HDA_CODEC_VOLUME("CD Mixer Capture Volume", 0x1d, 0x4, HDA_INPUT),
1064 HDA_CODEC_MUTE("CD Mixer Capture Switch", 0x1d, 0x4, HDA_INPUT),
1065
1066 /* start of config #2 */
1067 HDA_CODEC_VOLUME("Mic Mixer Capture Volume", 0x1d, 0x1, HDA_INPUT),
1068 HDA_CODEC_MUTE("Mic Mixer Capture Switch", 0x1d, 0x1, HDA_INPUT),
1069
1070 HDA_CODEC_VOLUME("DAC Mixer Capture Volume", 0x1d, 0x3, HDA_INPUT),
1071 HDA_CODEC_MUTE("DAC Mixer Capture Switch", 0x1d, 0x3, HDA_INPUT),
1072
1073 HDA_CODEC_VOLUME_IDX("Capture Volume", 0x0, 0x20, 0x0, HDA_OUTPUT),
1074 HDA_CODEC_MUTE_IDX("Capture Switch", 0x0, 0x20, 0x0, HDA_OUTPUT),
1075
1076 HDA_CODEC_VOLUME_IDX("Capture Volume", 0x1, 0x21, 0x0, HDA_OUTPUT),
1077 HDA_CODEC_MUTE_IDX("Capture Switch", 0x1, 0x21, 0x0, HDA_OUTPUT),
1078
1079 { } /* end */
1080};
1081
1082static struct snd_kcontrol_new stac92hd73xx_6ch_loopback[] = { 970static struct snd_kcontrol_new stac92hd73xx_6ch_loopback[] = {
1083 STAC_ANALOG_LOOPBACK(0xFA0, 0x7A1, 3), 971 STAC_ANALOG_LOOPBACK(0xFA0, 0x7A1, 3),
1084 {} 972 {}
@@ -1094,134 +982,14 @@ static struct snd_kcontrol_new stac92hd73xx_10ch_loopback[] = {
1094 {} 982 {}
1095}; 983};
1096 984
1097static struct snd_kcontrol_new stac92hd73xx_8ch_mixer[] = {
1098 HDA_CODEC_VOLUME_IDX("Capture Volume", 0x0, 0x20, 0x0, HDA_OUTPUT),
1099 HDA_CODEC_MUTE_IDX("Capture Switch", 0x0, 0x20, 0x0, HDA_OUTPUT),
1100
1101 HDA_CODEC_VOLUME_IDX("Capture Volume", 0x1, 0x21, 0x0, HDA_OUTPUT),
1102 HDA_CODEC_MUTE_IDX("Capture Switch", 0x1, 0x21, 0x0, HDA_OUTPUT),
1103
1104 HDA_CODEC_VOLUME("Front Mic Mixer Capture Volume", 0x1d, 0, HDA_INPUT),
1105 HDA_CODEC_MUTE("Front Mic Mixer Capture Switch", 0x1d, 0, HDA_INPUT),
1106
1107 HDA_CODEC_VOLUME("Mic Mixer Capture Volume", 0x1d, 0x1, HDA_INPUT),
1108 HDA_CODEC_MUTE("Mic Mixer Capture Switch", 0x1d, 0x1, HDA_INPUT),
1109
1110 HDA_CODEC_VOLUME("Line In Mixer Capture Volume", 0x1d, 0x2, HDA_INPUT),
1111 HDA_CODEC_MUTE("Line In Mixer Capture Switch", 0x1d, 0x2, HDA_INPUT),
1112
1113 HDA_CODEC_VOLUME("DAC Mixer Capture Volume", 0x1d, 0x3, HDA_INPUT),
1114 HDA_CODEC_MUTE("DAC Mixer Capture Switch", 0x1d, 0x3, HDA_INPUT),
1115
1116 HDA_CODEC_VOLUME("CD Mixer Capture Volume", 0x1d, 0x4, HDA_INPUT),
1117 HDA_CODEC_MUTE("CD Mixer Capture Switch", 0x1d, 0x4, HDA_INPUT),
1118 { } /* end */
1119};
1120
1121static struct snd_kcontrol_new stac92hd73xx_10ch_mixer[] = {
1122 HDA_CODEC_VOLUME_IDX("Capture Volume", 0x0, 0x20, 0x0, HDA_OUTPUT),
1123 HDA_CODEC_MUTE_IDX("Capture Switch", 0x0, 0x20, 0x0, HDA_OUTPUT),
1124
1125 HDA_CODEC_VOLUME_IDX("Capture Volume", 0x1, 0x21, 0x0, HDA_OUTPUT),
1126 HDA_CODEC_MUTE_IDX("Capture Switch", 0x1, 0x21, 0x0, HDA_OUTPUT),
1127
1128 HDA_CODEC_VOLUME("Front Mic Mixer Capture Volume", 0x1d, 0, HDA_INPUT),
1129 HDA_CODEC_MUTE("Front Mic Mixer Capture Switch", 0x1d, 0, HDA_INPUT),
1130
1131 HDA_CODEC_VOLUME("Mic Mixer Capture Volume", 0x1d, 0x1, HDA_INPUT),
1132 HDA_CODEC_MUTE("Mic Mixer Capture Switch", 0x1d, 0x1, HDA_INPUT),
1133
1134 HDA_CODEC_VOLUME("Line In Mixer Capture Volume", 0x1d, 0x2, HDA_INPUT),
1135 HDA_CODEC_MUTE("Line In Mixer Capture Switch", 0x1d, 0x2, HDA_INPUT),
1136
1137 HDA_CODEC_VOLUME("DAC Mixer Capture Volume", 0x1d, 0x3, HDA_INPUT),
1138 HDA_CODEC_MUTE("DAC Mixer Capture Switch", 0x1d, 0x3, HDA_INPUT),
1139
1140 HDA_CODEC_VOLUME("CD Mixer Capture Volume", 0x1d, 0x4, HDA_INPUT),
1141 HDA_CODEC_MUTE("CD Mixer Capture Switch", 0x1d, 0x4, HDA_INPUT),
1142 { } /* end */
1143};
1144
1145
1146static struct snd_kcontrol_new stac92hd83xxx_mixer[] = {
1147 HDA_CODEC_VOLUME_IDX("Capture Volume", 0x0, 0x17, 0x0, HDA_OUTPUT),
1148 HDA_CODEC_MUTE_IDX("Capture Switch", 0x0, 0x17, 0x0, HDA_OUTPUT),
1149
1150 HDA_CODEC_VOLUME_IDX("Capture Volume", 0x1, 0x18, 0x0, HDA_OUTPUT),
1151 HDA_CODEC_MUTE_IDX("Capture Switch", 0x1, 0x18, 0x0, HDA_OUTPUT),
1152
1153 HDA_CODEC_VOLUME("DAC0 Capture Volume", 0x1b, 0x3, HDA_INPUT),
1154 HDA_CODEC_MUTE("DAC0 Capture Switch", 0x1b, 0x3, HDA_INPUT),
1155
1156 HDA_CODEC_VOLUME("DAC1 Capture Volume", 0x1b, 0x4, HDA_INPUT),
1157 HDA_CODEC_MUTE("DAC1 Capture Switch", 0x1b, 0x4, HDA_INPUT),
1158
1159 HDA_CODEC_VOLUME("Front Mic Capture Volume", 0x1b, 0x0, HDA_INPUT),
1160 HDA_CODEC_MUTE("Front Mic Capture Switch", 0x1b, 0x0, HDA_INPUT),
1161
1162 HDA_CODEC_VOLUME("Line In Capture Volume", 0x1b, 0x2, HDA_INPUT),
1163 HDA_CODEC_MUTE("Line In Capture Switch", 0x1b, 0x2, HDA_INPUT),
1164
1165 /*
1166 HDA_CODEC_VOLUME("Mic Capture Volume", 0x1b, 0x1, HDA_INPUT),
1167 HDA_CODEC_MUTE("Mic Capture Switch", 0x1b 0x1, HDA_INPUT),
1168 */
1169 { } /* end */
1170};
1171
1172static struct snd_kcontrol_new stac92hd71bxx_analog_mixer[] = {
1173 HDA_CODEC_VOLUME_IDX("Capture Volume", 0x0, 0x1c, 0x0, HDA_OUTPUT),
1174 HDA_CODEC_MUTE_IDX("Capture Switch", 0x0, 0x1c, 0x0, HDA_OUTPUT),
1175
1176 HDA_CODEC_VOLUME_IDX("Capture Volume", 0x1, 0x1d, 0x0, HDA_OUTPUT),
1177 HDA_CODEC_MUTE_IDX("Capture Switch", 0x1, 0x1d, 0x0, HDA_OUTPUT),
1178 /* analog pc-beep replaced with digital beep support */
1179 /*
1180 HDA_CODEC_VOLUME("PC Beep Volume", 0x17, 0x2, HDA_INPUT),
1181 HDA_CODEC_MUTE("PC Beep Switch", 0x17, 0x2, HDA_INPUT),
1182 */
1183
1184 HDA_CODEC_MUTE("Import0 Mux Capture Switch", 0x17, 0x0, HDA_INPUT),
1185 HDA_CODEC_VOLUME("Import0 Mux Capture Volume", 0x17, 0x0, HDA_INPUT),
1186
1187 HDA_CODEC_MUTE("Import1 Mux Capture Switch", 0x17, 0x1, HDA_INPUT),
1188 HDA_CODEC_VOLUME("Import1 Mux Capture Volume", 0x17, 0x1, HDA_INPUT),
1189
1190 HDA_CODEC_MUTE("DAC0 Capture Switch", 0x17, 0x3, HDA_INPUT),
1191 HDA_CODEC_VOLUME("DAC0 Capture Volume", 0x17, 0x3, HDA_INPUT),
1192
1193 HDA_CODEC_MUTE("DAC1 Capture Switch", 0x17, 0x4, HDA_INPUT),
1194 HDA_CODEC_VOLUME("DAC1 Capture Volume", 0x17, 0x4, HDA_INPUT),
1195 { } /* end */
1196};
1197 985
1198static struct snd_kcontrol_new stac92hd71bxx_loopback[] = { 986static struct snd_kcontrol_new stac92hd71bxx_loopback[] = {
1199 STAC_ANALOG_LOOPBACK(0xFA0, 0x7A0, 2) 987 STAC_ANALOG_LOOPBACK(0xFA0, 0x7A0, 2)
1200}; 988};
1201 989
1202static struct snd_kcontrol_new stac92hd71bxx_mixer[] = {
1203 HDA_CODEC_VOLUME_IDX("Capture Volume", 0x0, 0x1c, 0x0, HDA_OUTPUT),
1204 HDA_CODEC_MUTE_IDX("Capture Switch", 0x0, 0x1c, 0x0, HDA_OUTPUT),
1205
1206 HDA_CODEC_VOLUME_IDX("Capture Volume", 0x1, 0x1d, 0x0, HDA_OUTPUT),
1207 HDA_CODEC_MUTE_IDX("Capture Switch", 0x1, 0x1d, 0x0, HDA_OUTPUT),
1208 { } /* end */
1209};
1210
1211static struct snd_kcontrol_new stac925x_mixer[] = { 990static struct snd_kcontrol_new stac925x_mixer[] = {
1212 HDA_CODEC_VOLUME("Master Playback Volume", 0x0e, 0, HDA_OUTPUT), 991 HDA_CODEC_VOLUME("Master Playback Volume", 0x0e, 0, HDA_OUTPUT),
1213 HDA_CODEC_MUTE("Master Playback Switch", 0x0e, 0, HDA_OUTPUT), 992 HDA_CODEC_MUTE("Master Playback Switch", 0x0e, 0, HDA_OUTPUT),
1214 HDA_CODEC_VOLUME("Capture Volume", 0x09, 0, HDA_OUTPUT),
1215 HDA_CODEC_MUTE("Capture Switch", 0x14, 0, HDA_OUTPUT),
1216 { } /* end */
1217};
1218
1219static struct snd_kcontrol_new stac9205_mixer[] = {
1220 HDA_CODEC_VOLUME_IDX("Capture Volume", 0x0, 0x1b, 0x0, HDA_INPUT),
1221 HDA_CODEC_MUTE_IDX("Capture Switch", 0x0, 0x1d, 0x0, HDA_OUTPUT),
1222
1223 HDA_CODEC_VOLUME_IDX("Capture Volume", 0x1, 0x1c, 0x0, HDA_INPUT),
1224 HDA_CODEC_MUTE_IDX("Capture Switch", 0x1, 0x1e, 0x0, HDA_OUTPUT),
1225 { } /* end */ 993 { } /* end */
1226}; 994};
1227 995
@@ -1230,29 +998,6 @@ static struct snd_kcontrol_new stac9205_loopback[] = {
1230 {} 998 {}
1231}; 999};
1232 1000
1233/* This needs to be generated dynamically based on sequence */
1234static struct snd_kcontrol_new stac922x_mixer[] = {
1235 HDA_CODEC_VOLUME_IDX("Capture Volume", 0x0, 0x17, 0x0, HDA_INPUT),
1236 HDA_CODEC_MUTE_IDX("Capture Switch", 0x0, 0x17, 0x0, HDA_INPUT),
1237
1238 HDA_CODEC_VOLUME_IDX("Capture Volume", 0x1, 0x18, 0x0, HDA_INPUT),
1239 HDA_CODEC_MUTE_IDX("Capture Switch", 0x1, 0x18, 0x0, HDA_INPUT),
1240 { } /* end */
1241};
1242
1243
1244static struct snd_kcontrol_new stac927x_mixer[] = {
1245 HDA_CODEC_VOLUME_IDX("Capture Volume", 0x0, 0x18, 0x0, HDA_INPUT),
1246 HDA_CODEC_MUTE_IDX("Capture Switch", 0x0, 0x1b, 0x0, HDA_OUTPUT),
1247
1248 HDA_CODEC_VOLUME_IDX("Capture Volume", 0x1, 0x19, 0x0, HDA_INPUT),
1249 HDA_CODEC_MUTE_IDX("Capture Switch", 0x1, 0x1c, 0x0, HDA_OUTPUT),
1250
1251 HDA_CODEC_VOLUME_IDX("Capture Volume", 0x2, 0x1A, 0x0, HDA_INPUT),
1252 HDA_CODEC_MUTE_IDX("Capture Switch", 0x2, 0x1d, 0x0, HDA_OUTPUT),
1253 { } /* end */
1254};
1255
1256static struct snd_kcontrol_new stac927x_loopback[] = { 1001static struct snd_kcontrol_new stac927x_loopback[] = {
1257 STAC_ANALOG_LOOPBACK(0xFEB, 0x7EB, 1), 1002 STAC_ANALOG_LOOPBACK(0xFEB, 0x7EB, 1),
1258 {} 1003 {}
@@ -1310,16 +1055,19 @@ static int stac92xx_build_controls(struct hda_codec *codec)
1310 int err; 1055 int err;
1311 int i; 1056 int i;
1312 1057
1313 err = snd_hda_add_new_ctls(codec, spec->mixer); 1058 if (spec->mixer) {
1314 if (err < 0) 1059 err = snd_hda_add_new_ctls(codec, spec->mixer);
1315 return err; 1060 if (err < 0)
1061 return err;
1062 }
1316 1063
1317 for (i = 0; i < spec->num_mixers; i++) { 1064 for (i = 0; i < spec->num_mixers; i++) {
1318 err = snd_hda_add_new_ctls(codec, spec->mixers[i]); 1065 err = snd_hda_add_new_ctls(codec, spec->mixers[i]);
1319 if (err < 0) 1066 if (err < 0)
1320 return err; 1067 return err;
1321 } 1068 }
1322 if (spec->num_dmuxes > 0) { 1069 if (!spec->auto_mic && spec->num_dmuxes > 0 &&
1070 snd_hda_get_bool_hint(codec, "separate_dmux") == 1) {
1323 stac_dmux_mixer.count = spec->num_dmuxes; 1071 stac_dmux_mixer.count = spec->num_dmuxes;
1324 err = snd_hda_ctl_add(codec, 1072 err = snd_hda_ctl_add(codec,
1325 snd_ctl_new1(&stac_dmux_mixer, codec)); 1073 snd_ctl_new1(&stac_dmux_mixer, codec));
@@ -1766,12 +1514,20 @@ static unsigned int dell_m6_pin_configs[13] = {
1766 0x4f0000f0, 1514 0x4f0000f0,
1767}; 1515};
1768 1516
1517static unsigned int alienware_m17x_pin_configs[13] = {
1518 0x0321101f, 0x0321101f, 0x03a11020, 0x03014020,
1519 0x90170110, 0x4f0000f0, 0x4f0000f0, 0x4f0000f0,
1520 0x4f0000f0, 0x90a60160, 0x4f0000f0, 0x4f0000f0,
1521 0x904601b0,
1522};
1523
1769static unsigned int *stac92hd73xx_brd_tbl[STAC_92HD73XX_MODELS] = { 1524static unsigned int *stac92hd73xx_brd_tbl[STAC_92HD73XX_MODELS] = {
1770 [STAC_92HD73XX_REF] = ref92hd73xx_pin_configs, 1525 [STAC_92HD73XX_REF] = ref92hd73xx_pin_configs,
1771 [STAC_DELL_M6_AMIC] = dell_m6_pin_configs, 1526 [STAC_DELL_M6_AMIC] = dell_m6_pin_configs,
1772 [STAC_DELL_M6_DMIC] = dell_m6_pin_configs, 1527 [STAC_DELL_M6_DMIC] = dell_m6_pin_configs,
1773 [STAC_DELL_M6_BOTH] = dell_m6_pin_configs, 1528 [STAC_DELL_M6_BOTH] = dell_m6_pin_configs,
1774 [STAC_DELL_EQ] = dell_m6_pin_configs, 1529 [STAC_DELL_EQ] = dell_m6_pin_configs,
1530 [STAC_ALIENWARE_M17X] = alienware_m17x_pin_configs,
1775}; 1531};
1776 1532
1777static const char *stac92hd73xx_models[STAC_92HD73XX_MODELS] = { 1533static const char *stac92hd73xx_models[STAC_92HD73XX_MODELS] = {
@@ -1783,6 +1539,7 @@ static const char *stac92hd73xx_models[STAC_92HD73XX_MODELS] = {
1783 [STAC_DELL_M6_DMIC] = "dell-m6-dmic", 1539 [STAC_DELL_M6_DMIC] = "dell-m6-dmic",
1784 [STAC_DELL_M6_BOTH] = "dell-m6", 1540 [STAC_DELL_M6_BOTH] = "dell-m6",
1785 [STAC_DELL_EQ] = "dell-eq", 1541 [STAC_DELL_EQ] = "dell-eq",
1542 [STAC_ALIENWARE_M17X] = "alienware",
1786}; 1543};
1787 1544
1788static struct snd_pci_quirk stac92hd73xx_cfg_tbl[] = { 1545static struct snd_pci_quirk stac92hd73xx_cfg_tbl[] = {
@@ -1820,6 +1577,12 @@ static struct snd_pci_quirk stac92hd73xx_cfg_tbl[] = {
1820 {} /* terminator */ 1577 {} /* terminator */
1821}; 1578};
1822 1579
1580static struct snd_pci_quirk stac92hd73xx_codec_id_cfg_tbl[] = {
1581 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02a1,
1582 "Alienware M17x", STAC_ALIENWARE_M17X),
1583 {} /* terminator */
1584};
1585
1823static unsigned int ref92hd83xxx_pin_configs[10] = { 1586static unsigned int ref92hd83xxx_pin_configs[10] = {
1824 0x02214030, 0x02211010, 0x02a19020, 0x02170130, 1587 0x02214030, 0x02211010, 0x02a19020, 0x02170130,
1825 0x01014050, 0x01819040, 0x01014020, 0x90a3014e, 1588 0x01014050, 0x01819040, 0x01014020, 0x90a3014e,
@@ -1927,6 +1690,8 @@ static struct snd_pci_quirk stac92hd71bxx_cfg_tbl[] = {
1927 "HP mini 1000", STAC_HP_M4), 1690 "HP mini 1000", STAC_HP_M4),
1928 SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x361b, 1691 SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x361b,
1929 "HP HDX", STAC_HP_HDX), /* HDX16 */ 1692 "HP HDX", STAC_HP_HDX), /* HDX16 */
1693 SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xfff0, 0x7010,
1694 "HP", STAC_HP_DV5),
1930 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0233, 1695 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0233,
1931 "unknown Dell", STAC_DELL_M4_1), 1696 "unknown Dell", STAC_DELL_M4_1),
1932 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0234, 1697 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0234,
@@ -2642,8 +2407,7 @@ static int stac92xx_hp_switch_get(struct snd_kcontrol *kcontrol,
2642 return 0; 2407 return 0;
2643} 2408}
2644 2409
2645static void stac_issue_unsol_event(struct hda_codec *codec, hda_nid_t nid, 2410static void stac_issue_unsol_event(struct hda_codec *codec, hda_nid_t nid);
2646 unsigned char type);
2647 2411
2648static int stac92xx_hp_switch_put(struct snd_kcontrol *kcontrol, 2412static int stac92xx_hp_switch_put(struct snd_kcontrol *kcontrol,
2649 struct snd_ctl_elem_value *ucontrol) 2413 struct snd_ctl_elem_value *ucontrol)
@@ -2657,7 +2421,7 @@ static int stac92xx_hp_switch_put(struct snd_kcontrol *kcontrol,
2657 /* check to be sure that the ports are upto date with 2421 /* check to be sure that the ports are upto date with
2658 * switch changes 2422 * switch changes
2659 */ 2423 */
2660 stac_issue_unsol_event(codec, nid, STAC_HP_EVENT); 2424 stac_issue_unsol_event(codec, nid);
2661 2425
2662 return 1; 2426 return 1;
2663} 2427}
@@ -2790,7 +2554,7 @@ static int stac92xx_io_switch_put(struct snd_kcontrol *kcontrol, struct snd_ctl_
2790 * appropriately according to the pin direction 2554 * appropriately according to the pin direction
2791 */ 2555 */
2792 if (spec->hp_detect) 2556 if (spec->hp_detect)
2793 stac_issue_unsol_event(codec, nid, STAC_HP_EVENT); 2557 stac_issue_unsol_event(codec, nid);
2794 2558
2795 return 1; 2559 return 1;
2796} 2560}
@@ -2859,8 +2623,6 @@ enum {
2859 STAC_CTL_WIDGET_VOL, 2623 STAC_CTL_WIDGET_VOL,
2860 STAC_CTL_WIDGET_MUTE, 2624 STAC_CTL_WIDGET_MUTE,
2861 STAC_CTL_WIDGET_MONO_MUX, 2625 STAC_CTL_WIDGET_MONO_MUX,
2862 STAC_CTL_WIDGET_AMP_MUX,
2863 STAC_CTL_WIDGET_AMP_VOL,
2864 STAC_CTL_WIDGET_HP_SWITCH, 2626 STAC_CTL_WIDGET_HP_SWITCH,
2865 STAC_CTL_WIDGET_IO_SWITCH, 2627 STAC_CTL_WIDGET_IO_SWITCH,
2866 STAC_CTL_WIDGET_CLFE_SWITCH, 2628 STAC_CTL_WIDGET_CLFE_SWITCH,
@@ -2871,8 +2633,6 @@ static struct snd_kcontrol_new stac92xx_control_templates[] = {
2871 HDA_CODEC_VOLUME(NULL, 0, 0, 0), 2633 HDA_CODEC_VOLUME(NULL, 0, 0, 0),
2872 HDA_CODEC_MUTE(NULL, 0, 0, 0), 2634 HDA_CODEC_MUTE(NULL, 0, 0, 0),
2873 STAC_MONO_MUX, 2635 STAC_MONO_MUX,
2874 STAC_AMP_MUX,
2875 STAC_AMP_VOL(NULL, 0, 0, 0, 0),
2876 STAC_CODEC_HP_SWITCH(NULL), 2636 STAC_CODEC_HP_SWITCH(NULL),
2877 STAC_CODEC_IO_SWITCH(NULL, 0), 2637 STAC_CODEC_IO_SWITCH(NULL, 0),
2878 STAC_CODEC_CLFE_SWITCH(NULL, 0), 2638 STAC_CODEC_CLFE_SWITCH(NULL, 0),
@@ -2973,6 +2733,8 @@ static int stac92xx_add_input_source(struct sigmatel_spec *spec)
2973 struct snd_kcontrol_new *knew; 2733 struct snd_kcontrol_new *knew;
2974 struct hda_input_mux *imux = &spec->private_imux; 2734 struct hda_input_mux *imux = &spec->private_imux;
2975 2735
2736 if (spec->auto_mic)
2737 return 0; /* no need for input source */
2976 if (!spec->num_adcs || imux->num_items <= 1) 2738 if (!spec->num_adcs || imux->num_items <= 1)
2977 return 0; /* no need for input source control */ 2739 return 0; /* no need for input source control */
2978 knew = stac_control_new(spec, &stac_input_src_temp, 2740 knew = stac_control_new(spec, &stac_input_src_temp,
@@ -3066,7 +2828,7 @@ static hda_nid_t get_unassigned_dac(struct hda_codec *codec, hda_nid_t nid)
3066 HDA_MAX_CONNECTIONS); 2828 HDA_MAX_CONNECTIONS);
3067 for (j = 0; j < conn_len; j++) { 2829 for (j = 0; j < conn_len; j++) {
3068 wcaps = get_wcaps(codec, conn[j]); 2830 wcaps = get_wcaps(codec, conn[j]);
3069 wtype = (wcaps & AC_WCAP_TYPE) >> AC_WCAP_TYPE_SHIFT; 2831 wtype = get_wcaps_type(wcaps);
3070 /* we check only analog outputs */ 2832 /* we check only analog outputs */
3071 if (wtype != AC_WID_AUD_OUT || (wcaps & AC_WCAP_DIGITAL)) 2833 if (wtype != AC_WID_AUD_OUT || (wcaps & AC_WCAP_DIGITAL))
3072 continue; 2834 continue;
@@ -3325,6 +3087,21 @@ static int create_multi_out_ctls(struct hda_codec *codec, int num_outs,
3325 return 0; 3087 return 0;
3326} 3088}
3327 3089
3090static int stac92xx_add_capvol_ctls(struct hda_codec *codec, unsigned long vol,
3091 unsigned long sw, int idx)
3092{
3093 int err;
3094 err = stac92xx_add_control_idx(codec->spec, STAC_CTL_WIDGET_VOL, idx,
3095 "Capture Volume", vol);
3096 if (err < 0)
3097 return err;
3098 err = stac92xx_add_control_idx(codec->spec, STAC_CTL_WIDGET_MUTE, idx,
3099 "Capture Switch", sw);
3100 if (err < 0)
3101 return err;
3102 return 0;
3103}
3104
3328/* add playback controls from the parsed DAC table */ 3105/* add playback controls from the parsed DAC table */
3329static int stac92xx_auto_create_multi_out_ctls(struct hda_codec *codec, 3106static int stac92xx_auto_create_multi_out_ctls(struct hda_codec *codec,
3330 const struct auto_pin_cfg *cfg) 3107 const struct auto_pin_cfg *cfg)
@@ -3398,7 +3175,7 @@ static int stac92xx_auto_create_mono_output_ctls(struct hda_codec *codec)
3398 spec->mono_nid, 3175 spec->mono_nid,
3399 con_lst, 3176 con_lst,
3400 HDA_MAX_NUM_INPUTS); 3177 HDA_MAX_NUM_INPUTS);
3401 if (!num_cons || num_cons > ARRAY_SIZE(stac92xx_mono_labels)) 3178 if (num_cons <= 0 || num_cons > ARRAY_SIZE(stac92xx_mono_labels))
3402 return -EINVAL; 3179 return -EINVAL;
3403 3180
3404 for (i = 0; i < num_cons; i++) { 3181 for (i = 0; i < num_cons; i++) {
@@ -3412,37 +3189,6 @@ static int stac92xx_auto_create_mono_output_ctls(struct hda_codec *codec)
3412 "Mono Mux", spec->mono_nid); 3189 "Mono Mux", spec->mono_nid);
3413} 3190}
3414 3191
3415/* labels for amp mux outputs */
3416static const char *stac92xx_amp_labels[3] = {
3417 "Front Microphone", "Microphone", "Line In",
3418};
3419
3420/* create amp out controls mux on capable codecs */
3421static int stac92xx_auto_create_amp_output_ctls(struct hda_codec *codec)
3422{
3423 struct sigmatel_spec *spec = codec->spec;
3424 struct hda_input_mux *amp_mux = &spec->private_amp_mux;
3425 int i, err;
3426
3427 for (i = 0; i < spec->num_amps; i++) {
3428 amp_mux->items[amp_mux->num_items].label =
3429 stac92xx_amp_labels[i];
3430 amp_mux->items[amp_mux->num_items].index = i;
3431 amp_mux->num_items++;
3432 }
3433
3434 if (spec->num_amps > 1) {
3435 err = stac92xx_add_control(spec, STAC_CTL_WIDGET_AMP_MUX,
3436 "Amp Selector Capture Switch", 0);
3437 if (err < 0)
3438 return err;
3439 }
3440 return stac92xx_add_control(spec, STAC_CTL_WIDGET_AMP_VOL,
3441 "Amp Capture Volume",
3442 HDA_COMPOSE_AMP_VAL(spec->amp_nids[0], 3, 0, HDA_INPUT));
3443}
3444
3445
3446/* create PC beep volume controls */ 3192/* create PC beep volume controls */
3447static int stac92xx_auto_create_beep_ctls(struct hda_codec *codec, 3193static int stac92xx_auto_create_beep_ctls(struct hda_codec *codec,
3448 hda_nid_t nid) 3194 hda_nid_t nid)
@@ -3511,19 +3257,33 @@ static int stac92xx_beep_switch_ctl(struct hda_codec *codec)
3511static int stac92xx_auto_create_mux_input_ctls(struct hda_codec *codec) 3257static int stac92xx_auto_create_mux_input_ctls(struct hda_codec *codec)
3512{ 3258{
3513 struct sigmatel_spec *spec = codec->spec; 3259 struct sigmatel_spec *spec = codec->spec;
3514 int wcaps, nid, i, err = 0; 3260 int i, j, err = 0;
3515 3261
3516 for (i = 0; i < spec->num_muxes; i++) { 3262 for (i = 0; i < spec->num_muxes; i++) {
3263 hda_nid_t nid;
3264 unsigned int wcaps;
3265 unsigned long val;
3266
3517 nid = spec->mux_nids[i]; 3267 nid = spec->mux_nids[i];
3518 wcaps = get_wcaps(codec, nid); 3268 wcaps = get_wcaps(codec, nid);
3269 if (!(wcaps & AC_WCAP_OUT_AMP))
3270 continue;
3519 3271
3520 if (wcaps & AC_WCAP_OUT_AMP) { 3272 /* check whether already the same control was created as
3521 err = stac92xx_add_control_idx(spec, 3273 * normal Capture Volume.
3522 STAC_CTL_WIDGET_VOL, i, "Mux Capture Volume", 3274 */
3523 HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_OUTPUT)); 3275 val = HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_OUTPUT);
3524 if (err < 0) 3276 for (j = 0; j < spec->num_caps; j++) {
3525 return err; 3277 if (spec->capvols[j] == val)
3278 break;
3526 } 3279 }
3280 if (j < spec->num_caps)
3281 continue;
3282
3283 err = stac92xx_add_control_idx(spec, STAC_CTL_WIDGET_VOL, i,
3284 "Mux Capture Volume", val);
3285 if (err < 0)
3286 return err;
3527 } 3287 }
3528 return 0; 3288 return 0;
3529}; 3289};
@@ -3544,7 +3304,7 @@ static int stac92xx_auto_create_spdif_mux_ctls(struct hda_codec *codec)
3544 spec->smux_nids[0], 3304 spec->smux_nids[0],
3545 con_lst, 3305 con_lst,
3546 HDA_MAX_NUM_INPUTS); 3306 HDA_MAX_NUM_INPUTS);
3547 if (!num_cons) 3307 if (num_cons <= 0)
3548 return -EINVAL; 3308 return -EINVAL;
3549 3309
3550 if (!labels) 3310 if (!labels)
@@ -3565,101 +3325,231 @@ static const char *stac92xx_dmic_labels[5] = {
3565 "Digital Mic 3", "Digital Mic 4" 3325 "Digital Mic 3", "Digital Mic 4"
3566}; 3326};
3567 3327
3328static int get_connection_index(struct hda_codec *codec, hda_nid_t mux,
3329 hda_nid_t nid)
3330{
3331 hda_nid_t conn[HDA_MAX_NUM_INPUTS];
3332 int i, nums;
3333
3334 nums = snd_hda_get_connections(codec, mux, conn, ARRAY_SIZE(conn));
3335 for (i = 0; i < nums; i++)
3336 if (conn[i] == nid)
3337 return i;
3338 return -1;
3339}
3340
3341/* create a volume assigned to the given pin (only if supported) */
3342/* return 1 if the volume control is created */
3343static int create_elem_capture_vol(struct hda_codec *codec, hda_nid_t nid,
3344 const char *label, int direction)
3345{
3346 unsigned int caps, nums;
3347 char name[32];
3348 int err;
3349
3350 if (direction == HDA_OUTPUT)
3351 caps = AC_WCAP_OUT_AMP;
3352 else
3353 caps = AC_WCAP_IN_AMP;
3354 if (!(get_wcaps(codec, nid) & caps))
3355 return 0;
3356 caps = query_amp_caps(codec, nid, direction);
3357 nums = (caps & AC_AMPCAP_NUM_STEPS) >> AC_AMPCAP_NUM_STEPS_SHIFT;
3358 if (!nums)
3359 return 0;
3360 snprintf(name, sizeof(name), "%s Capture Volume", label);
3361 err = stac92xx_add_control(codec->spec, STAC_CTL_WIDGET_VOL, name,
3362 HDA_COMPOSE_AMP_VAL(nid, 3, 0, direction));
3363 if (err < 0)
3364 return err;
3365 return 1;
3366}
3367
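
Two helpers are added above: get_connection_index() finds the position of a pin in a mux widget's connection list (the value later written with AC_VERB_SET_CONNECT_SEL or stored as an input-mux index), and create_elem_capture_vol() creates a per-pin "<label> Capture Volume" control only if the widget really has an amplifier with a non-zero number of steps. The lookup is a plain linear search; a self-contained model with a fixed array standing in for snd_hda_get_connections() looks like:

	#include <stdio.h>

	typedef unsigned short nid_t;

	/* stand-in for snd_hda_get_connections(): a fixed connection list */
	static const nid_t dmux_conns[] = { 0x1e, 0x13, 0x14 };

	static int get_connection_index(const nid_t *conn, int nums, nid_t nid)
	{
		int i;

		for (i = 0; i < nums; i++)
			if (conn[i] == nid)
				return i;	/* value used for CONNECT_SEL */
		return -1;			/* pin not reachable from this mux */
	}

	int main(void)
	{
		nid_t dmic = 0x14;
		int idx = get_connection_index(dmux_conns, 3, dmic);

		if (idx < 0)
			printf("dmic 0x%x not connected to this mux\n", dmic);
		else
			printf("dmic 0x%x is connection %d of the mux\n", dmic, idx);
		return 0;
	}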
3568/* create playback/capture controls for input pins on dmic capable codecs */ 3368/* create playback/capture controls for input pins on dmic capable codecs */
3569static int stac92xx_auto_create_dmic_input_ctls(struct hda_codec *codec, 3369static int stac92xx_auto_create_dmic_input_ctls(struct hda_codec *codec,
3570 const struct auto_pin_cfg *cfg) 3370 const struct auto_pin_cfg *cfg)
3571{ 3371{
3572 struct sigmatel_spec *spec = codec->spec; 3372 struct sigmatel_spec *spec = codec->spec;
3373 struct hda_input_mux *imux = &spec->private_imux;
3573 struct hda_input_mux *dimux = &spec->private_dimux; 3374 struct hda_input_mux *dimux = &spec->private_dimux;
3574 hda_nid_t con_lst[HDA_MAX_NUM_INPUTS]; 3375 int err, i, active_mics;
3575 int err, i, j; 3376 unsigned int def_conf;
3576 char name[32];
3577 3377
3578 dimux->items[dimux->num_items].label = stac92xx_dmic_labels[0]; 3378 dimux->items[dimux->num_items].label = stac92xx_dmic_labels[0];
3579 dimux->items[dimux->num_items].index = 0; 3379 dimux->items[dimux->num_items].index = 0;
3580 dimux->num_items++; 3380 dimux->num_items++;
3581 3381
3382 active_mics = 0;
3383 for (i = 0; i < spec->num_dmics; i++) {
3384 /* check the validity: sometimes it's a dead vendor-spec node */
3385 if (get_wcaps_type(get_wcaps(codec, spec->dmic_nids[i]))
3386 != AC_WID_PIN)
3387 continue;
3388 def_conf = snd_hda_codec_get_pincfg(codec, spec->dmic_nids[i]);
3389 if (get_defcfg_connect(def_conf) != AC_JACK_PORT_NONE)
3390 active_mics++;
3391 }
3392
3582 for (i = 0; i < spec->num_dmics; i++) { 3393 for (i = 0; i < spec->num_dmics; i++) {
3583 hda_nid_t nid; 3394 hda_nid_t nid;
3584 int index; 3395 int index;
3585 int num_cons; 3396 const char *label;
3586 unsigned int wcaps;
3587 unsigned int def_conf;
3588 3397
3589 def_conf = snd_hda_codec_get_pincfg(codec, spec->dmic_nids[i]); 3398 nid = spec->dmic_nids[i];
3399 if (get_wcaps_type(get_wcaps(codec, nid)) != AC_WID_PIN)
3400 continue;
3401 def_conf = snd_hda_codec_get_pincfg(codec, nid);
3590 if (get_defcfg_connect(def_conf) == AC_JACK_PORT_NONE) 3402 if (get_defcfg_connect(def_conf) == AC_JACK_PORT_NONE)
3591 continue; 3403 continue;
3592 3404
3593 nid = spec->dmic_nids[i]; 3405 index = get_connection_index(codec, spec->dmux_nids[0], nid);
3594 num_cons = snd_hda_get_connections(codec, 3406 if (index < 0)
3595 spec->dmux_nids[0], 3407 continue;
3596 con_lst,
3597 HDA_MAX_NUM_INPUTS);
3598 for (j = 0; j < num_cons; j++)
3599 if (con_lst[j] == nid) {
3600 index = j;
3601 goto found;
3602 }
3603 continue;
3604found:
3605 wcaps = get_wcaps(codec, nid) &
3606 (AC_WCAP_OUT_AMP | AC_WCAP_IN_AMP);
3607 3408
3608 if (wcaps) { 3409 if (active_mics == 1)
3609 sprintf(name, "%s Capture Volume", 3410 label = "Digital Mic";
3610 stac92xx_dmic_labels[dimux->num_items]); 3411 else
3412 label = stac92xx_dmic_labels[dimux->num_items];
3611 3413
3612 err = stac92xx_add_control(spec, 3414 err = create_elem_capture_vol(codec, nid, label, HDA_INPUT);
3613 STAC_CTL_WIDGET_VOL, 3415 if (err < 0)
3614 name, 3416 return err;
3615 HDA_COMPOSE_AMP_VAL(nid, 3, 0, 3417 if (!err) {
3616 (wcaps & AC_WCAP_OUT_AMP) ? 3418 err = create_elem_capture_vol(codec, nid, label,
3617 HDA_OUTPUT : HDA_INPUT)); 3419 HDA_OUTPUT);
3618 if (err < 0) 3420 if (err < 0)
3619 return err; 3421 return err;
3620 } 3422 }
3621 3423
3622 dimux->items[dimux->num_items].label = 3424 dimux->items[dimux->num_items].label = label;
3623 stac92xx_dmic_labels[dimux->num_items];
3624 dimux->items[dimux->num_items].index = index; 3425 dimux->items[dimux->num_items].index = index;
3625 dimux->num_items++; 3426 dimux->num_items++;
3427 if (snd_hda_get_bool_hint(codec, "separate_dmux") != 1) {
3428 imux->items[imux->num_items].label = label;
3429 imux->items[imux->num_items].index = index;
3430 imux->num_items++;
3431 }
3626 } 3432 }
3627 3433
3628 return 0; 3434 return 0;
3629} 3435}
3630 3436
3437static int check_mic_pin(struct hda_codec *codec, hda_nid_t nid,
3438 hda_nid_t *fixed, hda_nid_t *ext)
3439{
3440 unsigned int cfg;
3441
3442 if (!nid)
3443 return 0;
3444 cfg = snd_hda_codec_get_pincfg(codec, nid);
3445 switch (get_defcfg_connect(cfg)) {
3446 case AC_JACK_PORT_FIXED:
3447 if (*fixed)
3448 return 1; /* already occupied */
3449 *fixed = nid;
3450 break;
3451 case AC_JACK_PORT_COMPLEX:
3452 if (*ext)
3453 return 1; /* already occupied */
3454 *ext = nid;
3455 break;
3456 }
3457 return 0;
3458}
3459
3460static int set_mic_route(struct hda_codec *codec,
3461 struct sigmatel_mic_route *mic,
3462 hda_nid_t pin)
3463{
3464 struct sigmatel_spec *spec = codec->spec;
3465 struct auto_pin_cfg *cfg = &spec->autocfg;
3466 int i;
3467
3468 mic->pin = pin;
3469 for (i = AUTO_PIN_MIC; i <= AUTO_PIN_FRONT_MIC; i++)
3470 if (pin == cfg->input_pins[i])
3471 break;
3472 if (i <= AUTO_PIN_FRONT_MIC) {
3473 /* analog pin */
3474 mic->dmux_idx = 0;
3475 i = get_connection_index(codec, spec->mux_nids[0], pin);
3476 if (i < 0)
3477 return -1;
3478 mic->mux_idx = i;
3479 } else if (spec->dmux_nids) {
3480 /* digital pin */
3481 mic->mux_idx = 0;
3482 i = get_connection_index(codec, spec->dmux_nids[0], pin);
3483 if (i < 0)
3484 return -1;
3485 mic->dmux_idx = i;
3486 }
3487 return 0;
3488}
3489
3490/* return non-zero if the device is for automatic mic switch */
3491static int stac_check_auto_mic(struct hda_codec *codec)
3492{
3493 struct sigmatel_spec *spec = codec->spec;
3494 struct auto_pin_cfg *cfg = &spec->autocfg;
3495 hda_nid_t fixed, ext;
3496 int i;
3497
3498 for (i = AUTO_PIN_LINE; i < AUTO_PIN_LAST; i++) {
3499 if (cfg->input_pins[i])
3500 return 0; /* must be exclusively mics */
3501 }
3502 fixed = ext = 0;
3503 for (i = AUTO_PIN_MIC; i <= AUTO_PIN_FRONT_MIC; i++)
3504 if (check_mic_pin(codec, cfg->input_pins[i], &fixed, &ext))
3505 return 0;
3506 for (i = 0; i < spec->num_dmics; i++)
3507 if (check_mic_pin(codec, spec->dmic_nids[i], &fixed, &ext))
3508 return 0;
3509 if (!fixed || !ext)
3510 return 0;
3511 if (!(get_wcaps(codec, ext) & AC_WCAP_UNSOL_CAP))
3512 return 0; /* no unsol support */
3513 if (set_mic_route(codec, &spec->ext_mic, ext) ||
3514 set_mic_route(codec, &spec->int_mic, fixed))
3515 return 0; /* something is wrong */
3516 return 1;
3517}
3518
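
stac_check_auto_mic() above decides whether jack-driven automatic mic switching is possible: no non-mic inputs may be configured, there must be exactly one fixed (built-in) mic and exactly one external mic jack among the analog and digital mic pins, the external pin must support unsolicited events, and both routes must resolve via set_mic_route(); only then does the parser set spec->auto_mic. A compact stand-alone restatement of that rule, with pin classification reduced to a small struct (all names hypothetical):

	#include <stdio.h>

	enum port { PORT_NONE, PORT_FIXED, PORT_JACK }; /* simplified defcfg connectivity */

	struct pin { enum port port; int is_mic; int has_unsol; };

	/* returns 1 if exactly one fixed mic and one external mic jack with
	 * jack detection exist and nothing else is configured as an input */
	static int check_auto_mic(const struct pin *pins, int n)
	{
		int fixed = -1, ext = -1, i;

		for (i = 0; i < n; i++) {
			if (pins[i].port == PORT_NONE)
				continue;
			if (!pins[i].is_mic)
				return 0;	/* inputs must be mics only */
			if (pins[i].port == PORT_FIXED) {
				if (fixed >= 0)
					return 0; /* more than one built-in mic */
				fixed = i;
			} else {
				if (ext >= 0)
					return 0; /* more than one mic jack */
				ext = i;
			}
		}
		if (fixed < 0 || ext < 0)
			return 0;		/* need one of each */
		return pins[ext].has_unsol;	/* jack must report plug events */
	}

	int main(void)
	{
		struct pin pins[] = {
			{ PORT_FIXED, 1, 0 },	/* internal digital mic */
			{ PORT_JACK,  1, 1 },	/* external mic jack    */
		};
		printf("auto-mic possible: %d\n", check_auto_mic(pins, 2));
		return 0;
	}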
3631/* create playback/capture controls for input pins */ 3519/* create playback/capture controls for input pins */
3632static int stac92xx_auto_create_analog_input_ctls(struct hda_codec *codec, const struct auto_pin_cfg *cfg) 3520static int stac92xx_auto_create_analog_input_ctls(struct hda_codec *codec, const struct auto_pin_cfg *cfg)
3633{ 3521{
3634 struct sigmatel_spec *spec = codec->spec; 3522 struct sigmatel_spec *spec = codec->spec;
3635 struct hda_input_mux *imux = &spec->private_imux; 3523 struct hda_input_mux *imux = &spec->private_imux;
3636 hda_nid_t con_lst[HDA_MAX_NUM_INPUTS]; 3524 int i, j;
3637 int i, j, k;
3638 3525
3639 for (i = 0; i < AUTO_PIN_LAST; i++) { 3526 for (i = 0; i < AUTO_PIN_LAST; i++) {
3640 int index; 3527 hda_nid_t nid = cfg->input_pins[i];
3528 int index, err;
3641 3529
3642 if (!cfg->input_pins[i]) 3530 if (!nid)
3643 continue; 3531 continue;
3644 index = -1; 3532 index = -1;
3645 for (j = 0; j < spec->num_muxes; j++) { 3533 for (j = 0; j < spec->num_muxes; j++) {
3646 int num_cons; 3534 index = get_connection_index(codec, spec->mux_nids[j],
3647 num_cons = snd_hda_get_connections(codec, 3535 nid);
3648 spec->mux_nids[j], 3536 if (index >= 0)
3649 con_lst, 3537 break;
3650 HDA_MAX_NUM_INPUTS);
3651 for (k = 0; k < num_cons; k++)
3652 if (con_lst[k] == cfg->input_pins[i]) {
3653 index = k;
3654 goto found;
3655 }
3656 } 3538 }
3657 continue; 3539 if (index < 0)
3658 found: 3540 continue;
3541
3542 err = create_elem_capture_vol(codec, nid,
3543 auto_pin_cfg_labels[i],
3544 HDA_INPUT);
3545 if (err < 0)
3546 return err;
3547
3659 imux->items[imux->num_items].label = auto_pin_cfg_labels[i]; 3548 imux->items[imux->num_items].label = auto_pin_cfg_labels[i];
3660 imux->items[imux->num_items].index = index; 3549 imux->items[imux->num_items].index = index;
3661 imux->num_items++; 3550 imux->num_items++;
3662 } 3551 }
3552 spec->num_analog_muxes = imux->num_items;
3663 3553
3664 if (imux->num_items) { 3554 if (imux->num_items) {
3665 /* 3555 /*
@@ -3711,7 +3601,7 @@ static int stac92xx_parse_auto_config(struct hda_codec *codec, hda_nid_t dig_out
3711{ 3601{
3712 struct sigmatel_spec *spec = codec->spec; 3602 struct sigmatel_spec *spec = codec->spec;
3713 int hp_swap = 0; 3603 int hp_swap = 0;
3714 int err; 3604 int i, err;
3715 3605
3716 if ((err = snd_hda_parse_pin_def_config(codec, 3606 if ((err = snd_hda_parse_pin_def_config(codec,
3717 &spec->autocfg, 3607 &spec->autocfg,
@@ -3751,11 +3641,10 @@ static int stac92xx_parse_auto_config(struct hda_codec *codec, hda_nid_t dig_out
3751 if (snd_hda_get_connections(codec, 3641 if (snd_hda_get_connections(codec,
3752 spec->autocfg.mono_out_pin, conn_list, 1) && 3642 spec->autocfg.mono_out_pin, conn_list, 1) &&
3753 snd_hda_get_connections(codec, conn_list[0], 3643 snd_hda_get_connections(codec, conn_list[0],
3754 conn_list, 1)) { 3644 conn_list, 1) > 0) {
3755 3645
3756 int wcaps = get_wcaps(codec, conn_list[0]); 3646 int wcaps = get_wcaps(codec, conn_list[0]);
3757 int wid_type = (wcaps & AC_WCAP_TYPE) 3647 int wid_type = get_wcaps_type(wcaps);
3758 >> AC_WCAP_TYPE_SHIFT;
3759 /* LR swap check, some stac925x have a mux that 3648 /* LR swap check, some stac925x have a mux that
3760 * changes the DACs output path instead of the 3649 * changes the DACs output path instead of the
3761 * mono-mux path. 3650 * mono-mux path.
@@ -3846,6 +3735,21 @@ static int stac92xx_parse_auto_config(struct hda_codec *codec, hda_nid_t dig_out
3846 spec->autocfg.line_outs = 0; 3735 spec->autocfg.line_outs = 0;
3847 } 3736 }
3848 3737
3738 if (stac_check_auto_mic(codec)) {
3739 spec->auto_mic = 1;
3740 /* only one capture for auto-mic */
3741 spec->num_adcs = 1;
3742 spec->num_caps = 1;
3743 spec->num_muxes = 1;
3744 }
3745
3746 for (i = 0; i < spec->num_caps; i++) {
3747 err = stac92xx_add_capvol_ctls(codec, spec->capvols[i],
3748 spec->capsws[i], i);
3749 if (err < 0)
3750 return err;
3751 }
3752
3849 err = stac92xx_auto_create_analog_input_ctls(codec, &spec->autocfg); 3753 err = stac92xx_auto_create_analog_input_ctls(codec, &spec->autocfg);
3850 if (err < 0) 3754 if (err < 0)
3851 return err; 3755 return err;
@@ -3855,11 +3759,6 @@ static int stac92xx_parse_auto_config(struct hda_codec *codec, hda_nid_t dig_out
3855 if (err < 0) 3759 if (err < 0)
3856 return err; 3760 return err;
3857 } 3761 }
3858 if (spec->num_amps > 0) {
3859 err = stac92xx_auto_create_amp_output_ctls(codec);
3860 if (err < 0)
3861 return err;
3862 }
3863 if (spec->num_dmics > 0 && !spec->dinput_mux) 3762 if (spec->num_dmics > 0 && !spec->dinput_mux)
3864 if ((err = stac92xx_auto_create_dmic_input_ctls(codec, 3763 if ((err = stac92xx_auto_create_dmic_input_ctls(codec,
3865 &spec->autocfg)) < 0) 3764 &spec->autocfg)) < 0)
@@ -3896,7 +3795,6 @@ static int stac92xx_parse_auto_config(struct hda_codec *codec, hda_nid_t dig_out
3896 spec->dinput_mux = &spec->private_dimux; 3795 spec->dinput_mux = &spec->private_dimux;
3897 spec->sinput_mux = &spec->private_smux; 3796 spec->sinput_mux = &spec->private_smux;
3898 spec->mono_mux = &spec->private_mono_mux; 3797 spec->mono_mux = &spec->private_mono_mux;
3899 spec->amp_mux = &spec->private_amp_mux;
3900 return 1; 3798 return 1;
3901} 3799}
3902 3800
@@ -4108,14 +4006,14 @@ static int stac_add_event(struct sigmatel_spec *spec, hda_nid_t nid,
4108} 4006}
4109 4007
4110static struct sigmatel_event *stac_get_event(struct hda_codec *codec, 4008static struct sigmatel_event *stac_get_event(struct hda_codec *codec,
4111 hda_nid_t nid, unsigned char type) 4009 hda_nid_t nid)
4112{ 4010{
4113 struct sigmatel_spec *spec = codec->spec; 4011 struct sigmatel_spec *spec = codec->spec;
4114 struct sigmatel_event *event = spec->events.list; 4012 struct sigmatel_event *event = spec->events.list;
4115 int i; 4013 int i;
4116 4014
4117 for (i = 0; i < spec->events.used; i++, event++) { 4015 for (i = 0; i < spec->events.used; i++, event++) {
4118 if (event->nid == nid && event->type == type) 4016 if (event->nid == nid)
4119 return event; 4017 return event;
4120 } 4018 }
4121 return NULL; 4019 return NULL;
@@ -4135,24 +4033,32 @@ static struct sigmatel_event *stac_get_event_from_tag(struct hda_codec *codec,
4135 return NULL; 4033 return NULL;
4136} 4034}
4137 4035
4138static void enable_pin_detect(struct hda_codec *codec, hda_nid_t nid, 4036/* check if given nid is a valid pin and no other events are assigned
4139 unsigned int type) 4037 * to it. If OK, assign the event, set the unsol flag, and returns 1.
4038 * Otherwise, returns zero.
4039 */
4040static int enable_pin_detect(struct hda_codec *codec, hda_nid_t nid,
4041 unsigned int type)
4140{ 4042{
4141 struct sigmatel_event *event; 4043 struct sigmatel_event *event;
4142 int tag; 4044 int tag;
4143 4045
4144 if (!(get_wcaps(codec, nid) & AC_WCAP_UNSOL_CAP)) 4046 if (!(get_wcaps(codec, nid) & AC_WCAP_UNSOL_CAP))
4145 return; 4047 return 0;
4146 event = stac_get_event(codec, nid, type); 4048 event = stac_get_event(codec, nid);
4147 if (event) 4049 if (event) {
4050 if (event->type != type)
4051 return 0;
4148 tag = event->tag; 4052 tag = event->tag;
4149 else 4053 } else {
4150 tag = stac_add_event(codec->spec, nid, type, 0); 4054 tag = stac_add_event(codec->spec, nid, type, 0);
4151 if (tag < 0) 4055 if (tag < 0)
4152 return; 4056 return 0;
4057 }
4153 snd_hda_codec_write_cache(codec, nid, 0, 4058 snd_hda_codec_write_cache(codec, nid, 0,
4154 AC_VERB_SET_UNSOLICITED_ENABLE, 4059 AC_VERB_SET_UNSOLICITED_ENABLE,
4155 AC_USRSP_EN | tag); 4060 AC_USRSP_EN | tag);
4061 return 1;
4156} 4062}
4157 4063
4158static int is_nid_hp_pin(struct auto_pin_cfg *cfg, hda_nid_t nid) 4064static int is_nid_hp_pin(struct auto_pin_cfg *cfg, hda_nid_t nid)
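
enable_pin_detect() changes from void to int here: it still arms AC_VERB_SET_UNSOLICITED_ENABLE with the event tag, but it now refuses a pin that already carries a different event type and reports whether detection was actually armed, so callers such as the mic, insert and power-map paths fire the initial fake unsolicited event only in that case. The registration pattern, modeled without the codec verbs (names and the fixed-size table are hypothetical):

	#include <stdio.h>

	struct event { int nid; int type; };

	static struct event events[8];
	static int nr_events;

	/* arm detection for (nid, type); returns 1 if armed, 0 if the pin
	 * cannot take jack detection or already serves another event type */
	static int enable_pin_detect(int nid, int type, int has_unsol_cap)
	{
		int i;

		if (!has_unsol_cap)
			return 0;
		for (i = 0; i < nr_events; i++)
			if (events[i].nid == nid)
				return events[i].type == type; /* reuse, or refuse */
		events[nr_events].nid = nid;
		events[nr_events].type = type;
		nr_events++;
		return 1;
	}

	int main(void)
	{
		printf("%d\n", enable_pin_detect(0x0a, 1, 1)); /* 1: newly armed   */
		printf("%d\n", enable_pin_detect(0x0a, 1, 1)); /* 1: same event    */
		printf("%d\n", enable_pin_detect(0x0a, 2, 1)); /* 0: type conflict */
		printf("%d\n", enable_pin_detect(0x0b, 1, 0)); /* 0: no unsol cap  */
		return 0;
	}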
@@ -4245,20 +4151,36 @@ static int stac92xx_init(struct hda_codec *codec)
4245 hda_nid_t nid = cfg->hp_pins[i]; 4151 hda_nid_t nid = cfg->hp_pins[i];
4246 enable_pin_detect(codec, nid, STAC_HP_EVENT); 4152 enable_pin_detect(codec, nid, STAC_HP_EVENT);
4247 } 4153 }
4154 if (cfg->line_out_type == AUTO_PIN_LINE_OUT &&
4155 cfg->speaker_outs > 0) {
4156 /* enable pin-detect for line-outs as well */
4157 for (i = 0; i < cfg->line_outs; i++) {
4158 hda_nid_t nid = cfg->line_out_pins[i];
4159 enable_pin_detect(codec, nid, STAC_LO_EVENT);
4160 }
4161 }
4162
4248 /* force to enable the first line-out; the others are set up 4163 /* force to enable the first line-out; the others are set up
4249 * in unsol_event 4164 * in unsol_event
4250 */ 4165 */
4251 stac92xx_auto_set_pinctl(codec, spec->autocfg.line_out_pins[0], 4166 stac92xx_auto_set_pinctl(codec, spec->autocfg.line_out_pins[0],
4252 AC_PINCTL_OUT_EN); 4167 AC_PINCTL_OUT_EN);
4253 /* fake event to set up pins */ 4168 /* fake event to set up pins */
4254 stac_issue_unsol_event(codec, spec->autocfg.hp_pins[0], 4169 stac_issue_unsol_event(codec, spec->autocfg.hp_pins[0]);
4255 STAC_HP_EVENT);
4256 } else { 4170 } else {
4257 stac92xx_auto_init_multi_out(codec); 4171 stac92xx_auto_init_multi_out(codec);
4258 stac92xx_auto_init_hp_out(codec); 4172 stac92xx_auto_init_hp_out(codec);
4259 for (i = 0; i < cfg->hp_outs; i++) 4173 for (i = 0; i < cfg->hp_outs; i++)
4260 stac_toggle_power_map(codec, cfg->hp_pins[i], 1); 4174 stac_toggle_power_map(codec, cfg->hp_pins[i], 1);
4261 } 4175 }
4176 if (spec->auto_mic) {
4177 /* initialize connection to analog input */
4178 if (spec->dmux_nids)
4179 snd_hda_codec_write_cache(codec, spec->dmux_nids[0], 0,
4180 AC_VERB_SET_CONNECT_SEL, 0);
4181 if (enable_pin_detect(codec, spec->ext_mic.pin, STAC_MIC_EVENT))
4182 stac_issue_unsol_event(codec, spec->ext_mic.pin);
4183 }
4262 for (i = 0; i < AUTO_PIN_LAST; i++) { 4184 for (i = 0; i < AUTO_PIN_LAST; i++) {
4263 hda_nid_t nid = cfg->input_pins[i]; 4185 hda_nid_t nid = cfg->input_pins[i];
4264 if (nid) { 4186 if (nid) {
@@ -4285,10 +4207,9 @@ static int stac92xx_init(struct hda_codec *codec)
4285 } 4207 }
4286 conf = snd_hda_codec_get_pincfg(codec, nid); 4208 conf = snd_hda_codec_get_pincfg(codec, nid);
4287 if (get_defcfg_connect(conf) != AC_JACK_PORT_FIXED) { 4209 if (get_defcfg_connect(conf) != AC_JACK_PORT_FIXED) {
4288 enable_pin_detect(codec, nid, 4210 if (enable_pin_detect(codec, nid,
4289 STAC_INSERT_EVENT); 4211 STAC_INSERT_EVENT))
4290 stac_issue_unsol_event(codec, nid, 4212 stac_issue_unsol_event(codec, nid);
4291 STAC_INSERT_EVENT);
4292 } 4213 }
4293 } 4214 }
4294 } 4215 }
@@ -4333,10 +4254,8 @@ static int stac92xx_init(struct hda_codec *codec)
4333 stac_toggle_power_map(codec, nid, 1); 4254 stac_toggle_power_map(codec, nid, 1);
4334 continue; 4255 continue;
4335 } 4256 }
4336 if (!stac_get_event(codec, nid, STAC_INSERT_EVENT)) { 4257 if (enable_pin_detect(codec, nid, STAC_PWR_EVENT))
4337 enable_pin_detect(codec, nid, STAC_PWR_EVENT); 4258 stac_issue_unsol_event(codec, nid);
4338 stac_issue_unsol_event(codec, nid, STAC_PWR_EVENT);
4339 }
4340 } 4259 }
4341 if (spec->dac_list) 4260 if (spec->dac_list)
4342 stac92xx_power_down(codec); 4261 stac92xx_power_down(codec);
@@ -4440,6 +4359,48 @@ static int get_pin_presence(struct hda_codec *codec, hda_nid_t nid)
4440 return 0; 4359 return 0;
4441} 4360}
4442 4361
4362static void stac92xx_line_out_detect(struct hda_codec *codec,
4363 int presence)
4364{
4365 struct sigmatel_spec *spec = codec->spec;
4366 struct auto_pin_cfg *cfg = &spec->autocfg;
4367 int i;
4368
4369 for (i = 0; i < cfg->line_outs; i++) {
4370 if (presence)
4371 break;
4372 presence = get_pin_presence(codec, cfg->line_out_pins[i]);
4373 if (presence) {
4374 unsigned int pinctl;
4375 pinctl = snd_hda_codec_read(codec,
4376 cfg->line_out_pins[i], 0,
4377 AC_VERB_GET_PIN_WIDGET_CONTROL, 0);
4378 if (pinctl & AC_PINCTL_IN_EN)
4379 presence = 0; /* mic- or line-input */
4380 }
4381 }
4382
4383 if (presence) {
4384 /* disable speakers */
4385 for (i = 0; i < cfg->speaker_outs; i++)
4386 stac92xx_reset_pinctl(codec, cfg->speaker_pins[i],
4387 AC_PINCTL_OUT_EN);
4388 if (spec->eapd_mask && spec->eapd_switch)
4389 stac_gpio_set(codec, spec->gpio_mask,
4390 spec->gpio_dir, spec->gpio_data &
4391 ~spec->eapd_mask);
4392 } else {
4393 /* enable speakers */
4394 for (i = 0; i < cfg->speaker_outs; i++)
4395 stac92xx_set_pinctl(codec, cfg->speaker_pins[i],
4396 AC_PINCTL_OUT_EN);
4397 if (spec->eapd_mask && spec->eapd_switch)
4398 stac_gpio_set(codec, spec->gpio_mask,
4399 spec->gpio_dir, spec->gpio_data |
4400 spec->eapd_mask);
4401 }
4402}
4403
4443/* return non-zero if the hp-pin of the given array index isn't 4404/* return non-zero if the hp-pin of the given array index isn't
4444 * a jack-detection target 4405 * a jack-detection target
4445 */ 4406 */
@@ -4492,13 +4453,6 @@ static void stac92xx_hp_detect(struct hda_codec *codec)
4492 for (i = 0; i < cfg->line_outs; i++) 4453 for (i = 0; i < cfg->line_outs; i++)
4493 stac92xx_reset_pinctl(codec, cfg->line_out_pins[i], 4454 stac92xx_reset_pinctl(codec, cfg->line_out_pins[i],
4494 AC_PINCTL_OUT_EN); 4455 AC_PINCTL_OUT_EN);
4495 for (i = 0; i < cfg->speaker_outs; i++)
4496 stac92xx_reset_pinctl(codec, cfg->speaker_pins[i],
4497 AC_PINCTL_OUT_EN);
4498 if (spec->eapd_mask && spec->eapd_switch)
4499 stac_gpio_set(codec, spec->gpio_mask,
4500 spec->gpio_dir, spec->gpio_data &
4501 ~spec->eapd_mask);
4502 } else { 4456 } else {
4503 /* enable lineouts */ 4457 /* enable lineouts */
4504 if (spec->hp_switch) 4458 if (spec->hp_switch)
@@ -4507,14 +4461,8 @@ static void stac92xx_hp_detect(struct hda_codec *codec)
4507 for (i = 0; i < cfg->line_outs; i++) 4461 for (i = 0; i < cfg->line_outs; i++)
4508 stac92xx_set_pinctl(codec, cfg->line_out_pins[i], 4462 stac92xx_set_pinctl(codec, cfg->line_out_pins[i],
4509 AC_PINCTL_OUT_EN); 4463 AC_PINCTL_OUT_EN);
4510 for (i = 0; i < cfg->speaker_outs; i++)
4511 stac92xx_set_pinctl(codec, cfg->speaker_pins[i],
4512 AC_PINCTL_OUT_EN);
4513 if (spec->eapd_mask && spec->eapd_switch)
4514 stac_gpio_set(codec, spec->gpio_mask,
4515 spec->gpio_dir, spec->gpio_data |
4516 spec->eapd_mask);
4517 } 4464 }
4465 stac92xx_line_out_detect(codec, presence);
4518 /* toggle hp outs */ 4466 /* toggle hp outs */
4519 for (i = 0; i < cfg->hp_outs; i++) { 4467 for (i = 0; i < cfg->hp_outs; i++) {
4520 unsigned int val = AC_PINCTL_OUT_EN | AC_PINCTL_HP_EN; 4468 unsigned int val = AC_PINCTL_OUT_EN | AC_PINCTL_HP_EN;
@@ -4599,10 +4547,28 @@ static void stac92xx_report_jack(struct hda_codec *codec, hda_nid_t nid)
4599 } 4547 }
4600} 4548}
4601 4549
4602static void stac_issue_unsol_event(struct hda_codec *codec, hda_nid_t nid, 4550static void stac92xx_mic_detect(struct hda_codec *codec)
4603 unsigned char type)
4604{ 4551{
4605 struct sigmatel_event *event = stac_get_event(codec, nid, type); 4552 struct sigmatel_spec *spec = codec->spec;
4553 struct sigmatel_mic_route *mic;
4554
4555 if (get_pin_presence(codec, spec->ext_mic.pin))
4556 mic = &spec->ext_mic;
4557 else
4558 mic = &spec->int_mic;
4559 if (mic->dmux_idx)
4560 snd_hda_codec_write_cache(codec, spec->dmux_nids[0], 0,
4561 AC_VERB_SET_CONNECT_SEL,
4562 mic->dmux_idx);
4563 else
4564 snd_hda_codec_write_cache(codec, spec->mux_nids[0], 0,
4565 AC_VERB_SET_CONNECT_SEL,
4566 mic->mux_idx);
4567}
4568
4569static void stac_issue_unsol_event(struct hda_codec *codec, hda_nid_t nid)
4570{
4571 struct sigmatel_event *event = stac_get_event(codec, nid);
4606 if (!event) 4572 if (!event)
4607 return; 4573 return;
4608 codec->patch_ops.unsol_event(codec, (unsigned)event->tag << 26); 4574 codec->patch_ops.unsol_event(codec, (unsigned)event->tag << 26);
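Note on the slimmed-down stac_issue_unsol_event() above: the per-pin event tag is carried in the top bits of an HD-audio unsolicited response word, so synthesizing a response as (tag << 26) lets the single remaining argument (the nid) look up the event while the existing unsol_event handler services fake and real jack interrupts alike. A minimal sketch of the round trip; the shift is taken from the hunk above, the decode side is assumed to mirror it:

	unsigned int res = (unsigned int)event->tag << 26;  /* what stac_issue_unsol_event() sends */
	unsigned int tag = res >> 26;                       /* what the unsol handler is assumed to recover */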
@@ -4621,8 +4587,18 @@ static void stac92xx_unsol_event(struct hda_codec *codec, unsigned int res)
4621 4587
4622 switch (event->type) { 4588 switch (event->type) {
4623 case STAC_HP_EVENT: 4589 case STAC_HP_EVENT:
4590 case STAC_LO_EVENT:
4624 stac92xx_hp_detect(codec); 4591 stac92xx_hp_detect(codec);
4625 /* fallthru */ 4592 break;
4593 case STAC_MIC_EVENT:
4594 stac92xx_mic_detect(codec);
4595 break;
4596 }
4597
4598 switch (event->type) {
4599 case STAC_HP_EVENT:
4600 case STAC_LO_EVENT:
4601 case STAC_MIC_EVENT:
4626 case STAC_INSERT_EVENT: 4602 case STAC_INSERT_EVENT:
4627 case STAC_PWR_EVENT: 4603 case STAC_PWR_EVENT:
4628 if (spec->num_pwrs > 0) 4604 if (spec->num_pwrs > 0)
@@ -4713,8 +4689,7 @@ static int stac92xx_resume(struct hda_codec *codec)
4713 snd_hda_codec_resume_cache(codec); 4689 snd_hda_codec_resume_cache(codec);
4714 /* fake event to set up pins again to override cached values */ 4690 /* fake event to set up pins again to override cached values */
4715 if (spec->hp_detect) 4691 if (spec->hp_detect)
4716 stac_issue_unsol_event(codec, spec->autocfg.hp_pins[0], 4692 stac_issue_unsol_event(codec, spec->autocfg.hp_pins[0]);
4717 STAC_HP_EVENT);
4718 return 0; 4693 return 0;
4719} 4694}
4720 4695
@@ -4754,6 +4729,19 @@ static int stac92xx_hp_check_power_status(struct hda_codec *codec,
4754static int stac92xx_suspend(struct hda_codec *codec, pm_message_t state) 4729static int stac92xx_suspend(struct hda_codec *codec, pm_message_t state)
4755{ 4730{
4756 struct sigmatel_spec *spec = codec->spec; 4731 struct sigmatel_spec *spec = codec->spec;
4732 int i;
4733 hda_nid_t nid;
4734
4735 /* reset each pin before powering down DAC/ADC to avoid click noise */
4736 nid = codec->start_nid;
4737 for (i = 0; i < codec->num_nodes; i++, nid++) {
4738 unsigned int wcaps = get_wcaps(codec, nid);
4739 unsigned int wid_type = get_wcaps_type(wcaps);
4740 if (wid_type == AC_WID_PIN)
4741 snd_hda_codec_read(codec, nid, 0,
4742 AC_VERB_SET_PIN_WIDGET_CONTROL, 0);
4743 }
4744
4757 if (spec->eapd_mask) 4745 if (spec->eapd_mask)
4758 stac_gpio_set(codec, spec->gpio_mask, 4746 stac_gpio_set(codec, spec->gpio_mask,
4759 spec->gpio_dir, spec->gpio_data & 4747 spec->gpio_dir, spec->gpio_data &
@@ -4790,7 +4778,8 @@ static int patch_stac9200(struct hda_codec *codec)
4790 stac9200_models, 4778 stac9200_models,
4791 stac9200_cfg_tbl); 4779 stac9200_cfg_tbl);
4792 if (spec->board_config < 0) 4780 if (spec->board_config < 0)
4793 snd_printdd(KERN_INFO "hda_codec: Unknown model for STAC9200, using BIOS defaults\n"); 4781 snd_printdd(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
4782 codec->chip_name);
4794 else 4783 else
4795 stac92xx_set_config_regs(codec, 4784 stac92xx_set_config_regs(codec,
4796 stac9200_brd_tbl[spec->board_config]); 4785 stac9200_brd_tbl[spec->board_config]);
@@ -4862,8 +4851,8 @@ static int patch_stac925x(struct hda_codec *codec)
4862 stac925x_cfg_tbl); 4851 stac925x_cfg_tbl);
4863 again: 4852 again:
4864 if (spec->board_config < 0) 4853 if (spec->board_config < 0)
4865 snd_printdd(KERN_INFO "hda_codec: Unknown model for STAC925x," 4854 snd_printdd(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
4866 "using BIOS defaults\n"); 4855 codec->chip_name);
4867 else 4856 else
4868 stac92xx_set_config_regs(codec, 4857 stac92xx_set_config_regs(codec,
4869 stac925x_brd_tbl[spec->board_config]); 4858 stac925x_brd_tbl[spec->board_config]);
@@ -4893,6 +4882,9 @@ static int patch_stac925x(struct hda_codec *codec)
4893 4882
4894 spec->init = stac925x_core_init; 4883 spec->init = stac925x_core_init;
4895 spec->mixer = stac925x_mixer; 4884 spec->mixer = stac925x_mixer;
4885 spec->num_caps = 1;
4886 spec->capvols = stac925x_capvols;
4887 spec->capsws = stac925x_capsws;
4896 4888
4897 err = stac92xx_parse_auto_config(codec, 0x8, 0x7); 4889 err = stac92xx_parse_auto_config(codec, 0x8, 0x7);
4898 if (!err) { 4890 if (!err) {
@@ -4914,16 +4906,6 @@ static int patch_stac925x(struct hda_codec *codec)
4914 return 0; 4906 return 0;
4915} 4907}
4916 4908
4917static struct hda_input_mux stac92hd73xx_dmux = {
4918 .num_items = 4,
4919 .items = {
4920 { "Analog Inputs", 0x0b },
4921 { "Digital Mic 1", 0x09 },
4922 { "Digital Mic 2", 0x0a },
4923 { "CD", 0x08 },
4924 }
4925};
4926
4927static int patch_stac92hd73xx(struct hda_codec *codec) 4909static int patch_stac92hd73xx(struct hda_codec *codec)
4928{ 4910{
4929 struct sigmatel_spec *spec; 4911 struct sigmatel_spec *spec;
@@ -4943,10 +4925,16 @@ static int patch_stac92hd73xx(struct hda_codec *codec)
4943 STAC_92HD73XX_MODELS, 4925 STAC_92HD73XX_MODELS,
4944 stac92hd73xx_models, 4926 stac92hd73xx_models,
4945 stac92hd73xx_cfg_tbl); 4927 stac92hd73xx_cfg_tbl);
4928 /* check codec subsystem id if not found */
4929 if (spec->board_config < 0)
4930 spec->board_config =
4931 snd_hda_check_board_codec_sid_config(codec,
4932 STAC_92HD73XX_MODELS, stac92hd73xx_models,
4933 stac92hd73xx_codec_id_cfg_tbl);
4946again: 4934again:
4947 if (spec->board_config < 0) 4935 if (spec->board_config < 0)
4948 snd_printdd(KERN_INFO "hda_codec: Unknown model for" 4936 snd_printdd(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
4949 " STAC92HD73XX, using BIOS defaults\n"); 4937 codec->chip_name);
4950 else 4938 else
4951 stac92xx_set_config_regs(codec, 4939 stac92xx_set_config_regs(codec,
4952 stac92hd73xx_brd_tbl[spec->board_config]); 4940 stac92hd73xx_brd_tbl[spec->board_config]);
@@ -4959,20 +4947,15 @@ again:
4959 "number of channels defaulting to DAC count\n"); 4947 "number of channels defaulting to DAC count\n");
4960 num_dacs = STAC92HD73_DAC_COUNT; 4948 num_dacs = STAC92HD73_DAC_COUNT;
4961 } 4949 }
4950 spec->init = stac92hd73xx_core_init;
4962 switch (num_dacs) { 4951 switch (num_dacs) {
4963 case 0x3: /* 6 Channel */ 4952 case 0x3: /* 6 Channel */
4964 spec->mixer = stac92hd73xx_6ch_mixer;
4965 spec->init = stac92hd73xx_6ch_core_init;
4966 spec->aloopback_ctl = stac92hd73xx_6ch_loopback; 4953 spec->aloopback_ctl = stac92hd73xx_6ch_loopback;
4967 break; 4954 break;
4968 case 0x4: /* 8 Channel */ 4955 case 0x4: /* 8 Channel */
4969 spec->mixer = stac92hd73xx_8ch_mixer;
4970 spec->init = stac92hd73xx_8ch_core_init;
4971 spec->aloopback_ctl = stac92hd73xx_8ch_loopback; 4956 spec->aloopback_ctl = stac92hd73xx_8ch_loopback;
4972 break; 4957 break;
4973 case 0x5: /* 10 Channel */ 4958 case 0x5: /* 10 Channel */
4974 spec->mixer = stac92hd73xx_10ch_mixer;
4975 spec->init = stac92hd73xx_10ch_core_init;
4976 spec->aloopback_ctl = stac92hd73xx_10ch_loopback; 4959 spec->aloopback_ctl = stac92hd73xx_10ch_loopback;
4977 break; 4960 break;
4978 } 4961 }
@@ -4987,14 +4970,14 @@ again:
4987 spec->dmic_nids = stac92hd73xx_dmic_nids; 4970 spec->dmic_nids = stac92hd73xx_dmic_nids;
4988 spec->dmux_nids = stac92hd73xx_dmux_nids; 4971 spec->dmux_nids = stac92hd73xx_dmux_nids;
4989 spec->smux_nids = stac92hd73xx_smux_nids; 4972 spec->smux_nids = stac92hd73xx_smux_nids;
4990 spec->amp_nids = stac92hd73xx_amp_nids;
4991 spec->num_amps = ARRAY_SIZE(stac92hd73xx_amp_nids);
4992 4973
4993 spec->num_muxes = ARRAY_SIZE(stac92hd73xx_mux_nids); 4974 spec->num_muxes = ARRAY_SIZE(stac92hd73xx_mux_nids);
4994 spec->num_adcs = ARRAY_SIZE(stac92hd73xx_adc_nids); 4975 spec->num_adcs = ARRAY_SIZE(stac92hd73xx_adc_nids);
4995 spec->num_dmuxes = ARRAY_SIZE(stac92hd73xx_dmux_nids); 4976 spec->num_dmuxes = ARRAY_SIZE(stac92hd73xx_dmux_nids);
4996 memcpy(&spec->private_dimux, &stac92hd73xx_dmux, 4977
4997 sizeof(stac92hd73xx_dmux)); 4978 spec->num_caps = STAC92HD73XX_NUM_CAPS;
4979 spec->capvols = stac92hd73xx_capvols;
4980 spec->capsws = stac92hd73xx_capsws;
4998 4981
4999 switch (spec->board_config) { 4982 switch (spec->board_config) {
5000 case STAC_DELL_EQ: 4983 case STAC_DELL_EQ:
@@ -5004,43 +4987,40 @@ again:
5004 case STAC_DELL_M6_DMIC: 4987 case STAC_DELL_M6_DMIC:
5005 case STAC_DELL_M6_BOTH: 4988 case STAC_DELL_M6_BOTH:
5006 spec->num_smuxes = 0; 4989 spec->num_smuxes = 0;
5007 spec->mixer = &stac92hd73xx_6ch_mixer[DELL_M6_MIXER];
5008 spec->amp_nids = &stac92hd73xx_amp_nids[DELL_M6_AMP];
5009 spec->eapd_switch = 0; 4990 spec->eapd_switch = 0;
5010 spec->num_amps = 1;
5011 4991
5012 if (spec->board_config != STAC_DELL_EQ)
5013 spec->init = dell_m6_core_init;
5014 switch (spec->board_config) { 4992 switch (spec->board_config) {
5015 case STAC_DELL_M6_AMIC: /* Analog Mics */ 4993 case STAC_DELL_M6_AMIC: /* Analog Mics */
5016 snd_hda_codec_set_pincfg(codec, 0x0b, 0x90A70170); 4994 snd_hda_codec_set_pincfg(codec, 0x0b, 0x90A70170);
5017 spec->num_dmics = 0; 4995 spec->num_dmics = 0;
5018 spec->private_dimux.num_items = 1;
5019 break; 4996 break;
5020 case STAC_DELL_M6_DMIC: /* Digital Mics */ 4997 case STAC_DELL_M6_DMIC: /* Digital Mics */
5021 snd_hda_codec_set_pincfg(codec, 0x13, 0x90A60160); 4998 snd_hda_codec_set_pincfg(codec, 0x13, 0x90A60160);
5022 spec->num_dmics = 1; 4999 spec->num_dmics = 1;
5023 spec->private_dimux.num_items = 2;
5024 break; 5000 break;
5025 case STAC_DELL_M6_BOTH: /* Both */ 5001 case STAC_DELL_M6_BOTH: /* Both */
5026 snd_hda_codec_set_pincfg(codec, 0x0b, 0x90A70170); 5002 snd_hda_codec_set_pincfg(codec, 0x0b, 0x90A70170);
5027 snd_hda_codec_set_pincfg(codec, 0x13, 0x90A60160); 5003 snd_hda_codec_set_pincfg(codec, 0x13, 0x90A60160);
5028 spec->num_dmics = 1; 5004 spec->num_dmics = 1;
5029 spec->private_dimux.num_items = 2;
5030 break; 5005 break;
5031 } 5006 }
5032 break; 5007 break;
5008 case STAC_ALIENWARE_M17X:
5009 spec->num_dmics = STAC92HD73XX_NUM_DMICS;
5010 spec->num_smuxes = ARRAY_SIZE(stac92hd73xx_smux_nids);
5011 spec->eapd_switch = 0;
5012 break;
5033 default: 5013 default:
5034 spec->num_dmics = STAC92HD73XX_NUM_DMICS; 5014 spec->num_dmics = STAC92HD73XX_NUM_DMICS;
5035 spec->num_smuxes = ARRAY_SIZE(stac92hd73xx_smux_nids); 5015 spec->num_smuxes = ARRAY_SIZE(stac92hd73xx_smux_nids);
5036 spec->eapd_switch = 1; 5016 spec->eapd_switch = 1;
5017 break;
5037 } 5018 }
5038 if (spec->board_config > STAC_92HD73XX_REF) { 5019 if (spec->board_config > STAC_92HD73XX_REF) {
5039 /* GPIO0 High = Enable EAPD */ 5020 /* GPIO0 High = Enable EAPD */
5040 spec->eapd_mask = spec->gpio_mask = spec->gpio_dir = 0x1; 5021 spec->eapd_mask = spec->gpio_mask = spec->gpio_dir = 0x1;
5041 spec->gpio_data = 0x01; 5022 spec->gpio_data = 0x01;
5042 } 5023 }
5043 spec->dinput_mux = &spec->private_dimux;
5044 5024
5045 spec->num_pwrs = ARRAY_SIZE(stac92hd73xx_pwr_nids); 5025 spec->num_pwrs = ARRAY_SIZE(stac92hd73xx_pwr_nids);
5046 spec->pwr_nids = stac92hd73xx_pwr_nids; 5026 spec->pwr_nids = stac92hd73xx_pwr_nids;
@@ -5072,15 +5052,6 @@ again:
5072 return 0; 5052 return 0;
5073} 5053}
5074 5054
5075static struct hda_input_mux stac92hd83xxx_dmux = {
5076 .num_items = 3,
5077 .items = {
5078 { "Analog Inputs", 0x03 },
5079 { "Digital Mic 1", 0x04 },
5080 { "Digital Mic 2", 0x05 },
5081 }
5082};
5083
5084static int patch_stac92hd83xxx(struct hda_codec *codec) 5055static int patch_stac92hd83xxx(struct hda_codec *codec)
5085{ 5056{
5086 struct sigmatel_spec *spec; 5057 struct sigmatel_spec *spec;
@@ -5097,32 +5068,30 @@ static int patch_stac92hd83xxx(struct hda_codec *codec)
5097 codec->slave_dig_outs = stac92hd83xxx_slave_dig_outs; 5068 codec->slave_dig_outs = stac92hd83xxx_slave_dig_outs;
5098 spec->mono_nid = 0x19; 5069 spec->mono_nid = 0x19;
5099 spec->digbeep_nid = 0x21; 5070 spec->digbeep_nid = 0x21;
5100 spec->dmic_nids = stac92hd83xxx_dmic_nids; 5071 spec->mux_nids = stac92hd83xxx_mux_nids;
5101 spec->dmux_nids = stac92hd83xxx_dmux_nids; 5072 spec->num_muxes = ARRAY_SIZE(stac92hd83xxx_mux_nids);
5102 spec->adc_nids = stac92hd83xxx_adc_nids; 5073 spec->adc_nids = stac92hd83xxx_adc_nids;
5074 spec->num_adcs = ARRAY_SIZE(stac92hd83xxx_adc_nids);
5103 spec->pwr_nids = stac92hd83xxx_pwr_nids; 5075 spec->pwr_nids = stac92hd83xxx_pwr_nids;
5104 spec->amp_nids = stac92hd83xxx_amp_nids;
5105 spec->pwr_mapping = stac92hd83xxx_pwr_mapping; 5076 spec->pwr_mapping = stac92hd83xxx_pwr_mapping;
5106 spec->num_pwrs = ARRAY_SIZE(stac92hd83xxx_pwr_nids); 5077 spec->num_pwrs = ARRAY_SIZE(stac92hd83xxx_pwr_nids);
5107 spec->multiout.dac_nids = spec->dac_nids; 5078 spec->multiout.dac_nids = spec->dac_nids;
5108 5079
5109 spec->init = stac92hd83xxx_core_init; 5080 spec->init = stac92hd83xxx_core_init;
5110 spec->mixer = stac92hd83xxx_mixer;
5111 spec->num_pins = ARRAY_SIZE(stac92hd83xxx_pin_nids); 5081 spec->num_pins = ARRAY_SIZE(stac92hd83xxx_pin_nids);
5112 spec->num_dmuxes = ARRAY_SIZE(stac92hd83xxx_dmux_nids);
5113 spec->num_adcs = ARRAY_SIZE(stac92hd83xxx_adc_nids);
5114 spec->num_amps = ARRAY_SIZE(stac92hd83xxx_amp_nids);
5115 spec->num_dmics = STAC92HD83XXX_NUM_DMICS;
5116 spec->dinput_mux = &stac92hd83xxx_dmux;
5117 spec->pin_nids = stac92hd83xxx_pin_nids; 5082 spec->pin_nids = stac92hd83xxx_pin_nids;
5083 spec->num_caps = STAC92HD83XXX_NUM_CAPS;
5084 spec->capvols = stac92hd83xxx_capvols;
5085 spec->capsws = stac92hd83xxx_capsws;
5086
5118 spec->board_config = snd_hda_check_board_config(codec, 5087 spec->board_config = snd_hda_check_board_config(codec,
5119 STAC_92HD83XXX_MODELS, 5088 STAC_92HD83XXX_MODELS,
5120 stac92hd83xxx_models, 5089 stac92hd83xxx_models,
5121 stac92hd83xxx_cfg_tbl); 5090 stac92hd83xxx_cfg_tbl);
5122again: 5091again:
5123 if (spec->board_config < 0) 5092 if (spec->board_config < 0)
5124 snd_printdd(KERN_INFO "hda_codec: Unknown model for" 5093 snd_printdd(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
5125 " STAC92HD83XXX, using BIOS defaults\n"); 5094 codec->chip_name);
5126 else 5095 else
5127 stac92xx_set_config_regs(codec, 5096 stac92xx_set_config_regs(codec,
5128 stac92hd83xxx_brd_tbl[spec->board_config]); 5097 stac92hd83xxx_brd_tbl[spec->board_config]);
@@ -5164,6 +5133,8 @@ again:
5164 5133
5165 num_dacs = snd_hda_get_connections(codec, nid, 5134 num_dacs = snd_hda_get_connections(codec, nid,
5166 conn, STAC92HD83_DAC_COUNT + 1) - 1; 5135 conn, STAC92HD83_DAC_COUNT + 1) - 1;
5136 if (num_dacs < 0)
5137 num_dacs = STAC92HD83_DAC_COUNT;
5167 5138
5168 /* set port X to select the last DAC 5139 /* set port X to select the last DAC
5169 */ 5140 */
@@ -5177,25 +5148,6 @@ again:
5177 return 0; 5148 return 0;
5178} 5149}
5179 5150
5180static struct hda_input_mux stac92hd71bxx_dmux_nomixer = {
5181 .num_items = 3,
5182 .items = {
5183 { "Analog Inputs", 0x00 },
5184 { "Digital Mic 1", 0x02 },
5185 { "Digital Mic 2", 0x03 },
5186 }
5187};
5188
5189static struct hda_input_mux stac92hd71bxx_dmux_amixer = {
5190 .num_items = 4,
5191 .items = {
5192 { "Analog Inputs", 0x00 },
5193 { "Mixer", 0x01 },
5194 { "Digital Mic 1", 0x02 },
5195 { "Digital Mic 2", 0x03 },
5196 }
5197};
5198
5199/* get the pin connection (fixed, none, etc) */ 5151/* get the pin connection (fixed, none, etc) */
5200static unsigned int stac_get_defcfg_connect(struct hda_codec *codec, int idx) 5152static unsigned int stac_get_defcfg_connect(struct hda_codec *codec, int idx)
5201{ 5153{
@@ -5256,7 +5208,6 @@ static int patch_stac92hd71bxx(struct hda_codec *codec)
5256 struct sigmatel_spec *spec; 5208 struct sigmatel_spec *spec;
5257 struct hda_verb *unmute_init = stac92hd71bxx_unmute_core_init; 5209 struct hda_verb *unmute_init = stac92hd71bxx_unmute_core_init;
5258 int err = 0; 5210 int err = 0;
5259 unsigned int ndmic_nids = 0;
5260 5211
5261 spec = kzalloc(sizeof(*spec), GFP_KERNEL); 5212 spec = kzalloc(sizeof(*spec), GFP_KERNEL);
5262 if (spec == NULL) 5213 if (spec == NULL)
@@ -5285,8 +5236,8 @@ static int patch_stac92hd71bxx(struct hda_codec *codec)
5285 stac92hd71bxx_cfg_tbl); 5236 stac92hd71bxx_cfg_tbl);
5286again: 5237again:
5287 if (spec->board_config < 0) 5238 if (spec->board_config < 0)
5288 snd_printdd(KERN_INFO "hda_codec: Unknown model for" 5239 snd_printdd(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
5289 " STAC92HD71BXX, using BIOS defaults\n"); 5240 codec->chip_name);
5290 else 5241 else
5291 stac92xx_set_config_regs(codec, 5242 stac92xx_set_config_regs(codec,
5292 stac92hd71bxx_brd_tbl[spec->board_config]); 5243 stac92hd71bxx_brd_tbl[spec->board_config]);
@@ -5301,6 +5252,10 @@ again:
5301 spec->dmic_nids = stac92hd71bxx_dmic_nids; 5252 spec->dmic_nids = stac92hd71bxx_dmic_nids;
5302 spec->dmux_nids = stac92hd71bxx_dmux_nids; 5253 spec->dmux_nids = stac92hd71bxx_dmux_nids;
5303 5254
5255 spec->num_caps = STAC92HD71BXX_NUM_CAPS;
5256 spec->capvols = stac92hd71bxx_capvols;
5257 spec->capsws = stac92hd71bxx_capsws;
5258
5304 switch (codec->vendor_id) { 5259 switch (codec->vendor_id) {
5305 case 0x111d76b6: /* 4 Port without Analog Mixer */ 5260 case 0x111d76b6: /* 4 Port without Analog Mixer */
5306 case 0x111d76b7: 5261 case 0x111d76b7:
@@ -5308,24 +5263,13 @@ again:
5308 /* fallthru */ 5263 /* fallthru */
5309 case 0x111d76b4: /* 6 Port without Analog Mixer */ 5264 case 0x111d76b4: /* 6 Port without Analog Mixer */
5310 case 0x111d76b5: 5265 case 0x111d76b5:
5311 memcpy(&spec->private_dimux, &stac92hd71bxx_dmux_nomixer,
5312 sizeof(stac92hd71bxx_dmux_nomixer));
5313 spec->mixer = stac92hd71bxx_mixer;
5314 spec->init = stac92hd71bxx_core_init; 5266 spec->init = stac92hd71bxx_core_init;
5315 codec->slave_dig_outs = stac92hd71bxx_slave_dig_outs; 5267 codec->slave_dig_outs = stac92hd71bxx_slave_dig_outs;
5316 spec->num_dmics = stac92hd71bxx_connected_ports(codec, 5268 spec->num_dmics = stac92hd71bxx_connected_ports(codec,
5317 stac92hd71bxx_dmic_nids, 5269 stac92hd71bxx_dmic_nids,
5318 STAC92HD71BXX_NUM_DMICS); 5270 STAC92HD71BXX_NUM_DMICS);
5319 if (spec->num_dmics) {
5320 spec->num_dmuxes = ARRAY_SIZE(stac92hd71bxx_dmux_nids);
5321 spec->dinput_mux = &spec->private_dimux;
5322 ndmic_nids = ARRAY_SIZE(stac92hd71bxx_dmic_nids) - 1;
5323 }
5324 break; 5271 break;
5325 case 0x111d7608: /* 5 Port with Analog Mixer */ 5272 case 0x111d7608: /* 5 Port with Analog Mixer */
5326 memcpy(&spec->private_dimux, &stac92hd71bxx_dmux_amixer,
5327 sizeof(stac92hd71bxx_dmux_amixer));
5328 spec->private_dimux.num_items--;
5329 switch (spec->board_config) { 5273 switch (spec->board_config) {
5330 case STAC_HP_M4: 5274 case STAC_HP_M4:
5331 /* Enable VREF power saving on GPIO1 detect */ 5275 /* Enable VREF power saving on GPIO1 detect */
@@ -5347,11 +5291,8 @@ again:
5347 5291
5348 /* no output amps */ 5292 /* no output amps */
5349 spec->num_pwrs = 0; 5293 spec->num_pwrs = 0;
5350 spec->mixer = stac92hd71bxx_analog_mixer;
5351 spec->dinput_mux = &spec->private_dimux;
5352
5353 /* disable VSW */ 5294 /* disable VSW */
5354 spec->init = &stac92hd71bxx_analog_core_init[HD_DISABLE_PORTF]; 5295 spec->init = stac92hd71bxx_core_init;
5355 unmute_init++; 5296 unmute_init++;
5356 snd_hda_codec_set_pincfg(codec, 0x0f, 0x40f000f0); 5297 snd_hda_codec_set_pincfg(codec, 0x0f, 0x40f000f0);
5357 snd_hda_codec_set_pincfg(codec, 0x19, 0x40f000f3); 5298 snd_hda_codec_set_pincfg(codec, 0x19, 0x40f000f3);
@@ -5359,8 +5300,6 @@ again:
5359 spec->num_dmics = stac92hd71bxx_connected_ports(codec, 5300 spec->num_dmics = stac92hd71bxx_connected_ports(codec,
5360 stac92hd71bxx_dmic_nids, 5301 stac92hd71bxx_dmic_nids,
5361 STAC92HD71BXX_NUM_DMICS - 1); 5302 STAC92HD71BXX_NUM_DMICS - 1);
5362 spec->num_dmuxes = ARRAY_SIZE(stac92hd71bxx_dmux_nids);
5363 ndmic_nids = ARRAY_SIZE(stac92hd71bxx_dmic_nids) - 2;
5364 break; 5303 break;
5365 case 0x111d7603: /* 6 Port with Analog Mixer */ 5304 case 0x111d7603: /* 6 Port with Analog Mixer */
5366 if ((codec->revision_id & 0xf) == 1) 5305 if ((codec->revision_id & 0xf) == 1)
@@ -5370,17 +5309,12 @@ again:
5370 spec->num_pwrs = 0; 5309 spec->num_pwrs = 0;
5371 /* fallthru */ 5310 /* fallthru */
5372 default: 5311 default:
5373 memcpy(&spec->private_dimux, &stac92hd71bxx_dmux_amixer, 5312 spec->init = stac92hd71bxx_core_init;
5374 sizeof(stac92hd71bxx_dmux_amixer));
5375 spec->dinput_mux = &spec->private_dimux;
5376 spec->mixer = stac92hd71bxx_analog_mixer;
5377 spec->init = stac92hd71bxx_analog_core_init;
5378 codec->slave_dig_outs = stac92hd71bxx_slave_dig_outs; 5313 codec->slave_dig_outs = stac92hd71bxx_slave_dig_outs;
5379 spec->num_dmics = stac92hd71bxx_connected_ports(codec, 5314 spec->num_dmics = stac92hd71bxx_connected_ports(codec,
5380 stac92hd71bxx_dmic_nids, 5315 stac92hd71bxx_dmic_nids,
5381 STAC92HD71BXX_NUM_DMICS); 5316 STAC92HD71BXX_NUM_DMICS);
5382 spec->num_dmuxes = ARRAY_SIZE(stac92hd71bxx_dmux_nids); 5317 break;
5383 ndmic_nids = ARRAY_SIZE(stac92hd71bxx_dmic_nids) - 1;
5384 } 5318 }
5385 5319
5386 if (get_wcaps(codec, 0xa) & AC_WCAP_IN_AMP) 5320 if (get_wcaps(codec, 0xa) & AC_WCAP_IN_AMP)
@@ -5408,6 +5342,7 @@ again:
5408 5342
5409 spec->num_muxes = ARRAY_SIZE(stac92hd71bxx_mux_nids); 5343 spec->num_muxes = ARRAY_SIZE(stac92hd71bxx_mux_nids);
5410 spec->num_adcs = ARRAY_SIZE(stac92hd71bxx_adc_nids); 5344 spec->num_adcs = ARRAY_SIZE(stac92hd71bxx_adc_nids);
5345 spec->num_dmuxes = ARRAY_SIZE(stac92hd71bxx_dmux_nids);
5411 spec->num_smuxes = stac92hd71bxx_connected_smuxes(codec, 0x1e); 5346 spec->num_smuxes = stac92hd71bxx_connected_smuxes(codec, 0x1e);
5412 5347
5413 switch (spec->board_config) { 5348 switch (spec->board_config) {
@@ -5462,8 +5397,6 @@ again:
5462#endif 5397#endif
5463 5398
5464 spec->multiout.dac_nids = spec->dac_nids; 5399 spec->multiout.dac_nids = spec->dac_nids;
5465 if (spec->dinput_mux)
5466 spec->private_dimux.num_items += spec->num_dmics - ndmic_nids;
5467 5400
5468 err = stac92xx_parse_auto_config(codec, 0x21, 0); 5401 err = stac92xx_parse_auto_config(codec, 0x21, 0);
5469 if (!err) { 5402 if (!err) {
@@ -5541,8 +5474,8 @@ static int patch_stac922x(struct hda_codec *codec)
5541 5474
5542 again: 5475 again:
5543 if (spec->board_config < 0) 5476 if (spec->board_config < 0)
5544 snd_printdd(KERN_INFO "hda_codec: Unknown model for STAC922x, " 5477 snd_printdd(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
5545 "using BIOS defaults\n"); 5478 codec->chip_name);
5546 else 5479 else
5547 stac92xx_set_config_regs(codec, 5480 stac92xx_set_config_regs(codec,
5548 stac922x_brd_tbl[spec->board_config]); 5481 stac922x_brd_tbl[spec->board_config]);
@@ -5555,7 +5488,10 @@ static int patch_stac922x(struct hda_codec *codec)
5555 spec->num_pwrs = 0; 5488 spec->num_pwrs = 0;
5556 5489
5557 spec->init = stac922x_core_init; 5490 spec->init = stac922x_core_init;
5558 spec->mixer = stac922x_mixer; 5491
5492 spec->num_caps = STAC922X_NUM_CAPS;
5493 spec->capvols = stac922x_capvols;
5494 spec->capsws = stac922x_capsws;
5559 5495
5560 spec->multiout.dac_nids = spec->dac_nids; 5496 spec->multiout.dac_nids = spec->dac_nids;
5561 5497
@@ -5604,8 +5540,8 @@ static int patch_stac927x(struct hda_codec *codec)
5604 stac927x_cfg_tbl); 5540 stac927x_cfg_tbl);
5605 again: 5541 again:
5606 if (spec->board_config < 0) 5542 if (spec->board_config < 0)
5607 snd_printdd(KERN_INFO "hda_codec: Unknown model for" 5543 snd_printdd(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
5608 "STAC927x, using BIOS defaults\n"); 5544 codec->chip_name);
5609 else 5545 else
5610 stac92xx_set_config_regs(codec, 5546 stac92xx_set_config_regs(codec,
5611 stac927x_brd_tbl[spec->board_config]); 5547 stac927x_brd_tbl[spec->board_config]);
@@ -5630,7 +5566,6 @@ static int patch_stac927x(struct hda_codec *codec)
5630 spec->num_dmics = 0; 5566 spec->num_dmics = 0;
5631 5567
5632 spec->init = d965_core_init; 5568 spec->init = d965_core_init;
5633 spec->mixer = stac927x_mixer;
5634 break; 5569 break;
5635 case STAC_DELL_BIOS: 5570 case STAC_DELL_BIOS:
5636 switch (codec->subsystem_id) { 5571 switch (codec->subsystem_id) {
@@ -5662,7 +5597,6 @@ static int patch_stac927x(struct hda_codec *codec)
5662 spec->num_dmics = STAC927X_NUM_DMICS; 5597 spec->num_dmics = STAC927X_NUM_DMICS;
5663 5598
5664 spec->init = d965_core_init; 5599 spec->init = d965_core_init;
5665 spec->mixer = stac927x_mixer;
5666 spec->dmux_nids = stac927x_dmux_nids; 5600 spec->dmux_nids = stac927x_dmux_nids;
5667 spec->num_dmuxes = ARRAY_SIZE(stac927x_dmux_nids); 5601 spec->num_dmuxes = ARRAY_SIZE(stac927x_dmux_nids);
5668 break; 5602 break;
@@ -5675,9 +5609,12 @@ static int patch_stac927x(struct hda_codec *codec)
5675 spec->num_dmics = 0; 5609 spec->num_dmics = 0;
5676 5610
5677 spec->init = stac927x_core_init; 5611 spec->init = stac927x_core_init;
5678 spec->mixer = stac927x_mixer;
5679 } 5612 }
5680 5613
5614 spec->num_caps = STAC927X_NUM_CAPS;
5615 spec->capvols = stac927x_capvols;
5616 spec->capsws = stac927x_capsws;
5617
5681 spec->num_pwrs = 0; 5618 spec->num_pwrs = 0;
5682 spec->aloopback_ctl = stac927x_loopback; 5619 spec->aloopback_ctl = stac927x_loopback;
5683 spec->aloopback_mask = 0x40; 5620 spec->aloopback_mask = 0x40;
@@ -5739,7 +5676,8 @@ static int patch_stac9205(struct hda_codec *codec)
5739 stac9205_cfg_tbl); 5676 stac9205_cfg_tbl);
5740 again: 5677 again:
5741 if (spec->board_config < 0) 5678 if (spec->board_config < 0)
5742 snd_printdd(KERN_INFO "hda_codec: Unknown model for STAC9205, using BIOS defaults\n"); 5679 snd_printdd(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
5680 codec->chip_name);
5743 else 5681 else
5744 stac92xx_set_config_regs(codec, 5682 stac92xx_set_config_regs(codec,
5745 stac9205_brd_tbl[spec->board_config]); 5683 stac9205_brd_tbl[spec->board_config]);
@@ -5758,9 +5696,12 @@ static int patch_stac9205(struct hda_codec *codec)
5758 spec->num_pwrs = 0; 5696 spec->num_pwrs = 0;
5759 5697
5760 spec->init = stac9205_core_init; 5698 spec->init = stac9205_core_init;
5761 spec->mixer = stac9205_mixer;
5762 spec->aloopback_ctl = stac9205_loopback; 5699 spec->aloopback_ctl = stac9205_loopback;
5763 5700
5701 spec->num_caps = STAC9205_NUM_CAPS;
5702 spec->capvols = stac9205_capvols;
5703 spec->capsws = stac9205_capsws;
5704
5764 spec->aloopback_mask = 0x40; 5705 spec->aloopback_mask = 0x40;
5765 spec->aloopback_shift = 0; 5706 spec->aloopback_shift = 0;
5766 /* Turn on/off EAPD per HP plugging */ 5707 /* Turn on/off EAPD per HP plugging */
@@ -5835,12 +5776,6 @@ static struct hda_verb stac9872_core_init[] = {
5835 {} 5776 {}
5836}; 5777};
5837 5778
5838static struct snd_kcontrol_new stac9872_mixer[] = {
5839 HDA_CODEC_VOLUME("Capture Volume", 0x09, 0, HDA_INPUT),
5840 HDA_CODEC_MUTE("Capture Switch", 0x09, 0, HDA_INPUT),
5841 { } /* end */
5842};
5843
5844static hda_nid_t stac9872_pin_nids[] = { 5779static hda_nid_t stac9872_pin_nids[] = {
5845 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 5780 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
5846 0x11, 0x13, 0x14, 5781 0x11, 0x13, 0x14,
@@ -5854,6 +5789,11 @@ static hda_nid_t stac9872_mux_nids[] = {
5854 0x15 5789 0x15
5855}; 5790};
5856 5791
5792static unsigned long stac9872_capvols[] = {
5793 HDA_COMPOSE_AMP_VAL(0x09, 3, 0, HDA_INPUT),
5794};
5795#define stac9872_capsws stac9872_capvols
5796
5857static unsigned int stac9872_vaio_pin_configs[9] = { 5797static unsigned int stac9872_vaio_pin_configs[9] = {
5858 0x03211020, 0x411111f0, 0x411111f0, 0x03a15030, 5798 0x03211020, 0x411111f0, 0x411111f0, 0x03a15030,
5859 0x411111f0, 0x90170110, 0x411111f0, 0x411111f0, 5799 0x411111f0, 0x90170110, 0x411111f0, 0x411111f0,
@@ -5891,8 +5831,8 @@ static int patch_stac9872(struct hda_codec *codec)
5891 stac9872_models, 5831 stac9872_models,
5892 stac9872_cfg_tbl); 5832 stac9872_cfg_tbl);
5893 if (spec->board_config < 0) 5833 if (spec->board_config < 0)
5894 snd_printdd(KERN_INFO "hda_codec: Unknown model for STAC9872, " 5834 snd_printdd(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
5895 "using BIOS defaults\n"); 5835 codec->chip_name);
5896 else 5836 else
5897 stac92xx_set_config_regs(codec, 5837 stac92xx_set_config_regs(codec,
5898 stac9872_brd_tbl[spec->board_config]); 5838 stac9872_brd_tbl[spec->board_config]);
@@ -5902,8 +5842,10 @@ static int patch_stac9872(struct hda_codec *codec)
5902 spec->adc_nids = stac9872_adc_nids; 5842 spec->adc_nids = stac9872_adc_nids;
5903 spec->num_muxes = ARRAY_SIZE(stac9872_mux_nids); 5843 spec->num_muxes = ARRAY_SIZE(stac9872_mux_nids);
5904 spec->mux_nids = stac9872_mux_nids; 5844 spec->mux_nids = stac9872_mux_nids;
5905 spec->mixer = stac9872_mixer;
5906 spec->init = stac9872_core_init; 5845 spec->init = stac9872_core_init;
5846 spec->num_caps = 1;
5847 spec->capvols = stac9872_capvols;
5848 spec->capsws = stac9872_capsws;
5907 5849
5908 err = stac92xx_parse_auto_config(codec, 0x10, 0x12); 5850 err = stac92xx_parse_auto_config(codec, 0x10, 0x12);
5909 if (err < 0) { 5851 if (err < 0) {
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
index e8f10b10cceb..ee89db90c9b6 100644
--- a/sound/pci/hda/patch_via.c
+++ b/sound/pci/hda/patch_via.c
@@ -1339,8 +1339,7 @@ static int get_mux_nids(struct hda_codec *codec)
1339 for (i = 0; i < spec->num_adc_nids; i++) { 1339 for (i = 0; i < spec->num_adc_nids; i++) {
1340 nid = spec->adc_nids[i]; 1340 nid = spec->adc_nids[i];
1341 while (nid) { 1341 while (nid) {
1342 type = (get_wcaps(codec, nid) & AC_WCAP_TYPE) 1342 type = get_wcaps_type(get_wcaps(codec, nid));
1343 >> AC_WCAP_TYPE_SHIFT;
1344 if (type == AC_WID_PIN) 1343 if (type == AC_WID_PIN)
1345 break; 1344 break;
1346 n = snd_hda_get_connections(codec, nid, conn, 1345 n = snd_hda_get_connections(codec, nid, conn,
diff --git a/sound/pci/ice1712/ice1712.h b/sound/pci/ice1712/ice1712.h
index adc909ec125c..9da2dae64c5b 100644
--- a/sound/pci/ice1712/ice1712.h
+++ b/sound/pci/ice1712/ice1712.h
@@ -379,6 +379,15 @@ struct snd_ice1712 {
379 unsigned char (*set_mclk)(struct snd_ice1712 *ice, unsigned int rate); 379 unsigned char (*set_mclk)(struct snd_ice1712 *ice, unsigned int rate);
380 void (*set_spdif_clock)(struct snd_ice1712 *ice); 380 void (*set_spdif_clock)(struct snd_ice1712 *ice);
381 381
382#ifdef CONFIG_PM
383 int (*pm_suspend)(struct snd_ice1712 *);
384 int (*pm_resume)(struct snd_ice1712 *);
385 int pm_suspend_enabled:1;
386 int pm_saved_is_spdif_master:1;
387 unsigned int pm_saved_spdif_ctrl;
388 unsigned char pm_saved_spdif_cfg;
389 unsigned int pm_saved_route;
390#endif
382}; 391};
383 392
384 393
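The new CONFIG_PM block gives ice1724 card sub-drivers an opt-in power-management hook: the generic PCI suspend/resume added below stays a no-op unless pm_suspend_enabled is set, pm_suspend/pm_resume let the card code save and re-program its own codecs, and the pm_saved_* fields hold the SPDIF and routing state restored by the core. A minimal sketch of a card init opting in, mirroring the prodigy_hifi change later in this diff:

	#ifdef CONFIG_PM
		ice->pm_resume = &prodigy_hd2_resume;	/* re-init the DAC and restore volumes */
		ice->pm_suspend_enabled = 1;		/* otherwise the PCI hooks return early */
	#endif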
diff --git a/sound/pci/ice1712/ice1724.c b/sound/pci/ice1712/ice1724.c
index cc84a831eb21..af6e00148621 100644
--- a/sound/pci/ice1712/ice1724.c
+++ b/sound/pci/ice1712/ice1724.c
@@ -560,6 +560,7 @@ static int snd_vt1724_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
560 560
561 case SNDRV_PCM_TRIGGER_START: 561 case SNDRV_PCM_TRIGGER_START:
562 case SNDRV_PCM_TRIGGER_STOP: 562 case SNDRV_PCM_TRIGGER_STOP:
563 case SNDRV_PCM_TRIGGER_SUSPEND:
563 spin_lock(&ice->reg_lock); 564 spin_lock(&ice->reg_lock);
564 old = inb(ICEMT1724(ice, DMA_CONTROL)); 565 old = inb(ICEMT1724(ice, DMA_CONTROL));
565 if (cmd == SNDRV_PCM_TRIGGER_START) 566 if (cmd == SNDRV_PCM_TRIGGER_START)
@@ -570,6 +571,10 @@ static int snd_vt1724_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
570 spin_unlock(&ice->reg_lock); 571 spin_unlock(&ice->reg_lock);
571 break; 572 break;
572 573
574 case SNDRV_PCM_TRIGGER_RESUME:
575 /* apps will have to restart stream */
576 break;
577
573 default: 578 default:
574 return -EINVAL; 579 return -EINVAL;
575 } 580 }
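Handling SNDRV_PCM_TRIGGER_SUSPEND alongside START/STOP means suspend simply stops the DMA engine, and the empty SNDRV_PCM_TRIGGER_RESUME case means the driver never restarts a stream by itself; as the comment says, applications have to do it. A sketch of the usual user-space recovery idiom (generic alsa-lib code, not part of this driver), which ends up re-preparing the stream here:

	if (snd_pcm_state(pcm) == SND_PCM_STATE_SUSPENDED) {
		int err;
		while ((err = snd_pcm_resume(pcm)) == -EAGAIN)
			poll(NULL, 0, 100);		/* wait until resume can be attempted */
		if (err < 0)
			err = snd_pcm_prepare(pcm);	/* no driver-side resume, so restart */
	}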
@@ -2262,7 +2267,7 @@ static int __devinit snd_vt1724_read_eeprom(struct snd_ice1712 *ice,
2262 2267
2263 2268
2264 2269
2265static void __devinit snd_vt1724_chip_reset(struct snd_ice1712 *ice) 2270static void snd_vt1724_chip_reset(struct snd_ice1712 *ice)
2266{ 2271{
2267 outb(VT1724_RESET , ICEREG1724(ice, CONTROL)); 2272 outb(VT1724_RESET , ICEREG1724(ice, CONTROL));
2268 inb(ICEREG1724(ice, CONTROL)); /* pci posting flush */ 2273 inb(ICEREG1724(ice, CONTROL)); /* pci posting flush */
@@ -2272,7 +2277,7 @@ static void __devinit snd_vt1724_chip_reset(struct snd_ice1712 *ice)
2272 msleep(10); 2277 msleep(10);
2273} 2278}
2274 2279
2275static int __devinit snd_vt1724_chip_init(struct snd_ice1712 *ice) 2280static int snd_vt1724_chip_init(struct snd_ice1712 *ice)
2276{ 2281{
2277 outb(ice->eeprom.data[ICE_EEP2_SYSCONF], ICEREG1724(ice, SYS_CFG)); 2282 outb(ice->eeprom.data[ICE_EEP2_SYSCONF], ICEREG1724(ice, SYS_CFG));
2278 outb(ice->eeprom.data[ICE_EEP2_ACLINK], ICEREG1724(ice, AC97_CFG)); 2283 outb(ice->eeprom.data[ICE_EEP2_ACLINK], ICEREG1724(ice, AC97_CFG));
@@ -2287,6 +2292,14 @@ static int __devinit snd_vt1724_chip_init(struct snd_ice1712 *ice)
2287 2292
2288 outb(0, ICEREG1724(ice, POWERDOWN)); 2293 outb(0, ICEREG1724(ice, POWERDOWN));
2289 2294
2295 /* MPU_RX and TX irq masks are cleared later dynamically */
2296 outb(VT1724_IRQ_MPU_RX | VT1724_IRQ_MPU_TX , ICEREG1724(ice, IRQMASK));
2297
2298 /* don't handle FIFO overrun/underruns (just yet),
2299 * since they cause machine lockups
2300 */
2301 outb(VT1724_MULTI_FIFO_ERR, ICEMT1724(ice, DMA_INT_MASK));
2302
2290 return 0; 2303 return 0;
2291} 2304}
2292 2305
@@ -2431,6 +2444,8 @@ static int __devinit snd_vt1724_create(struct snd_card *card,
2431 snd_vt1724_proc_init(ice); 2444 snd_vt1724_proc_init(ice);
2432 synchronize_irq(pci->irq); 2445 synchronize_irq(pci->irq);
2433 2446
2447 card->private_data = ice;
2448
2434 err = pci_request_regions(pci, "ICE1724"); 2449 err = pci_request_regions(pci, "ICE1724");
2435 if (err < 0) { 2450 if (err < 0) {
2436 kfree(ice); 2451 kfree(ice);
@@ -2459,14 +2474,6 @@ static int __devinit snd_vt1724_create(struct snd_card *card,
2459 return -EIO; 2474 return -EIO;
2460 } 2475 }
2461 2476
2462 /* MPU_RX and TX irq masks are cleared later dynamically */
2463 outb(VT1724_IRQ_MPU_RX | VT1724_IRQ_MPU_TX , ICEREG1724(ice, IRQMASK));
2464
2465 /* don't handle FIFO overrun/underruns (just yet),
2466 * since they cause machine lockups
2467 */
2468 outb(VT1724_MULTI_FIFO_ERR, ICEMT1724(ice, DMA_INT_MASK));
2469
2470 err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, ice, &ops); 2477 err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, ice, &ops);
2471 if (err < 0) { 2478 if (err < 0) {
2472 snd_vt1724_free(ice); 2479 snd_vt1724_free(ice);
@@ -2650,11 +2657,96 @@ static void __devexit snd_vt1724_remove(struct pci_dev *pci)
2650 pci_set_drvdata(pci, NULL); 2657 pci_set_drvdata(pci, NULL);
2651} 2658}
2652 2659
2660#ifdef CONFIG_PM
2661static int snd_vt1724_suspend(struct pci_dev *pci, pm_message_t state)
2662{
2663 struct snd_card *card = pci_get_drvdata(pci);
2664 struct snd_ice1712 *ice = card->private_data;
2665
2666 if (!ice->pm_suspend_enabled)
2667 return 0;
2668
2669 snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
2670
2671 snd_pcm_suspend_all(ice->pcm);
2672 snd_pcm_suspend_all(ice->pcm_pro);
2673 snd_pcm_suspend_all(ice->pcm_ds);
2674 snd_ac97_suspend(ice->ac97);
2675
2676 spin_lock_irq(&ice->reg_lock);
2677 ice->pm_saved_is_spdif_master = ice->is_spdif_master(ice);
2678 ice->pm_saved_spdif_ctrl = inw(ICEMT1724(ice, SPDIF_CTRL));
2679 ice->pm_saved_spdif_cfg = inb(ICEREG1724(ice, SPDIF_CFG));
2680 ice->pm_saved_route = inl(ICEMT1724(ice, ROUTE_PLAYBACK));
2681 spin_unlock_irq(&ice->reg_lock);
2682
2683 if (ice->pm_suspend)
2684 ice->pm_suspend(ice);
2685
2686 pci_disable_device(pci);
2687 pci_save_state(pci);
2688 pci_set_power_state(pci, pci_choose_state(pci, state));
2689 return 0;
2690}
2691
2692static int snd_vt1724_resume(struct pci_dev *pci)
2693{
2694 struct snd_card *card = pci_get_drvdata(pci);
2695 struct snd_ice1712 *ice = card->private_data;
2696
2697 if (!ice->pm_suspend_enabled)
2698 return 0;
2699
2700 pci_set_power_state(pci, PCI_D0);
2701 pci_restore_state(pci);
2702
2703 if (pci_enable_device(pci) < 0) {
2704 snd_card_disconnect(card);
2705 return -EIO;
2706 }
2707
2708 pci_set_master(pci);
2709
2710 snd_vt1724_chip_reset(ice);
2711
2712 if (snd_vt1724_chip_init(ice) < 0) {
2713 snd_card_disconnect(card);
2714 return -EIO;
2715 }
2716
2717 if (ice->pm_resume)
2718 ice->pm_resume(ice);
2719
2720 if (ice->pm_saved_is_spdif_master) {
2721 /* switching to external clock via SPDIF */
2722 ice->set_spdif_clock(ice);
2723 } else {
2724 /* internal on-card clock */
2725 snd_vt1724_set_pro_rate(ice, ice->pro_rate_default, 1);
2726 }
2727
2728 update_spdif_bits(ice, ice->pm_saved_spdif_ctrl);
2729
2730 outb(ice->pm_saved_spdif_cfg, ICEREG1724(ice, SPDIF_CFG));
2731 outl(ice->pm_saved_route, ICEMT1724(ice, ROUTE_PLAYBACK));
2732
2733 if (ice->ac97)
2734 snd_ac97_resume(ice->ac97);
2735
2736 snd_power_change_state(card, SNDRV_CTL_POWER_D0);
2737 return 0;
2738}
2739#endif
2740
2653static struct pci_driver driver = { 2741static struct pci_driver driver = {
2654 .name = "ICE1724", 2742 .name = "ICE1724",
2655 .id_table = snd_vt1724_ids, 2743 .id_table = snd_vt1724_ids,
2656 .probe = snd_vt1724_probe, 2744 .probe = snd_vt1724_probe,
2657 .remove = __devexit_p(snd_vt1724_remove), 2745 .remove = __devexit_p(snd_vt1724_remove),
2746#ifdef CONFIG_PM
2747 .suspend = snd_vt1724_suspend,
2748 .resume = snd_vt1724_resume,
2749#endif
2658}; 2750};
2659 2751
2660static int __init alsa_card_ice1724_init(void) 2752static int __init alsa_card_ice1724_init(void)
diff --git a/sound/pci/ice1712/prodigy_hifi.c b/sound/pci/ice1712/prodigy_hifi.c
index 043a93879bd5..c75515f5be6f 100644
--- a/sound/pci/ice1712/prodigy_hifi.c
+++ b/sound/pci/ice1712/prodigy_hifi.c
@@ -1077,7 +1077,7 @@ static int __devinit prodigy_hifi_init(struct snd_ice1712 *ice)
1077/* 1077/*
1078 * initialize the chip 1078 * initialize the chip
1079 */ 1079 */
1080static int __devinit prodigy_hd2_init(struct snd_ice1712 *ice) 1080static void ak4396_init(struct snd_ice1712 *ice)
1081{ 1081{
1082 static unsigned short ak4396_inits[] = { 1082 static unsigned short ak4396_inits[] = {
1083 AK4396_CTRL1, 0x87, /* I2S Normal Mode, 24 bit */ 1083 AK4396_CTRL1, 0x87, /* I2S Normal Mode, 24 bit */
@@ -1087,9 +1087,37 @@ static int __devinit prodigy_hd2_init(struct snd_ice1712 *ice)
1087 AK4396_RCH_ATT, 0x00, 1087 AK4396_RCH_ATT, 0x00,
1088 }; 1088 };
1089 1089
1090 struct prodigy_hifi_spec *spec;
1091 unsigned int i; 1090 unsigned int i;
1092 1091
1092 /* initialize ak4396 codec */
1093 /* reset codec */
1094 ak4396_write(ice, AK4396_CTRL1, 0x86);
1095 msleep(100);
1096 ak4396_write(ice, AK4396_CTRL1, 0x87);
1097
1098 for (i = 0; i < ARRAY_SIZE(ak4396_inits); i += 2)
1099 ak4396_write(ice, ak4396_inits[i], ak4396_inits[i+1]);
1100}
1101
1102#ifdef CONFIG_PM
1103static int __devinit prodigy_hd2_resume(struct snd_ice1712 *ice)
1104{
1105 /* initialize ak4396 codec and restore previous mixer volumes */
1106 struct prodigy_hifi_spec *spec = ice->spec;
1107 int i;
1108 mutex_lock(&ice->gpio_mutex);
1109 ak4396_init(ice);
1110 for (i = 0; i < 2; i++)
1111 ak4396_write(ice, AK4396_LCH_ATT + i, spec->vol[i] & 0xff);
1112 mutex_unlock(&ice->gpio_mutex);
1113 return 0;
1114}
1115#endif
1116
1117static int __devinit prodigy_hd2_init(struct snd_ice1712 *ice)
1118{
1119 struct prodigy_hifi_spec *spec;
1120
1093 ice->vt1720 = 0; 1121 ice->vt1720 = 0;
1094 ice->vt1724 = 1; 1122 ice->vt1724 = 1;
1095 1123
@@ -1112,14 +1140,12 @@ static int __devinit prodigy_hd2_init(struct snd_ice1712 *ice)
1112 return -ENOMEM; 1140 return -ENOMEM;
1113 ice->spec = spec; 1141 ice->spec = spec;
1114 1142
1115 /* initialize ak4396 codec */ 1143#ifdef CONFIG_PM
1116 /* reset codec */ 1144 ice->pm_resume = &prodigy_hd2_resume;
1117 ak4396_write(ice, AK4396_CTRL1, 0x86); 1145 ice->pm_suspend_enabled = 1;
1118 msleep(100); 1146#endif
1119 ak4396_write(ice, AK4396_CTRL1, 0x87); 1147
1120 1148 ak4396_init(ice);
1121 for (i = 0; i < ARRAY_SIZE(ak4396_inits); i += 2)
1122 ak4396_write(ice, ak4396_inits[i], ak4396_inits[i+1]);
1123 1149
1124 return 0; 1150 return 0;
1125} 1151}
diff --git a/sound/pci/oxygen/oxygen_io.c b/sound/pci/oxygen/oxygen_io.c
index c1eb923f2ac9..09b2b2a36df5 100644
--- a/sound/pci/oxygen/oxygen_io.c
+++ b/sound/pci/oxygen/oxygen_io.c
@@ -215,17 +215,8 @@ EXPORT_SYMBOL(oxygen_write_spi);
215 215
216void oxygen_write_i2c(struct oxygen *chip, u8 device, u8 map, u8 data) 216void oxygen_write_i2c(struct oxygen *chip, u8 device, u8 map, u8 data)
217{ 217{
218 unsigned long timeout;
219
220 /* should not need more than about 300 us */ 218 /* should not need more than about 300 us */
221 timeout = jiffies + msecs_to_jiffies(1); 219 msleep(1);
222 do {
223 if (!(oxygen_read16(chip, OXYGEN_2WIRE_BUS_STATUS)
224 & OXYGEN_2WIRE_BUSY))
225 break;
226 udelay(1);
227 cond_resched();
228 } while (time_after_eq(timeout, jiffies));
229 220
230 oxygen_write8(chip, OXYGEN_2WIRE_MAP, map); 221 oxygen_write8(chip, OXYGEN_2WIRE_MAP, map);
231 oxygen_write8(chip, OXYGEN_2WIRE_DATA, data); 222 oxygen_write8(chip, OXYGEN_2WIRE_DATA, data);
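The dropped loop polled OXYGEN_2WIRE_BUS_STATUS for the busy bit under a one-jiffy deadline. Since a 2-wire transaction completes within roughly 300 us (the comment kept above), an unconditional msleep(1) before touching the map/data registers guarantees at least a millisecond between consecutive writes and lets the caller sleep instead of spinning in udelay()/cond_resched().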
diff --git a/sound/pci/oxygen/oxygen_lib.c b/sound/pci/oxygen/oxygen_lib.c
index 312251d39696..9a8936e20744 100644
--- a/sound/pci/oxygen/oxygen_lib.c
+++ b/sound/pci/oxygen/oxygen_lib.c
@@ -260,6 +260,9 @@ oxygen_search_pci_id(struct oxygen *chip, const struct pci_device_id ids[])
260 * chip didn't if the first EEPROM word was overwritten. 260 * chip didn't if the first EEPROM word was overwritten.
261 */ 261 */
262 subdevice = oxygen_read_eeprom(chip, 2); 262 subdevice = oxygen_read_eeprom(chip, 2);
263 /* use default ID if EEPROM is missing */
264 if (subdevice == 0xffff)
265 subdevice = 0x8788;
263 /* 266 /*
264 * We use only the subsystem device ID for searching because it is 267 * We use only the subsystem device ID for searching because it is
265 * unique even without the subsystem vendor ID, which may have been 268 * unique even without the subsystem vendor ID, which may have been
diff --git a/sound/pci/oxygen/oxygen_pcm.c b/sound/pci/oxygen/oxygen_pcm.c
index 3b5ca70c9d4d..ef2345d82b86 100644
--- a/sound/pci/oxygen/oxygen_pcm.c
+++ b/sound/pci/oxygen/oxygen_pcm.c
@@ -469,9 +469,11 @@ static int oxygen_multich_hw_params(struct snd_pcm_substream *substream,
469 oxygen_write16_masked(chip, OXYGEN_I2S_MULTICH_FORMAT, 469 oxygen_write16_masked(chip, OXYGEN_I2S_MULTICH_FORMAT,
470 oxygen_rate(hw_params) | 470 oxygen_rate(hw_params) |
471 chip->model.dac_i2s_format | 471 chip->model.dac_i2s_format |
472 oxygen_i2s_mclk(hw_params) |
472 oxygen_i2s_bits(hw_params), 473 oxygen_i2s_bits(hw_params),
473 OXYGEN_I2S_RATE_MASK | 474 OXYGEN_I2S_RATE_MASK |
474 OXYGEN_I2S_FORMAT_MASK | 475 OXYGEN_I2S_FORMAT_MASK |
476 OXYGEN_I2S_MCLK_MASK |
475 OXYGEN_I2S_BITS_MASK); 477 OXYGEN_I2S_BITS_MASK);
476 oxygen_update_dac_routing(chip); 478 oxygen_update_dac_routing(chip);
477 oxygen_update_spdif_source(chip); 479 oxygen_update_spdif_source(chip);
diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
index 3da5c029f93b..7bb827c7d806 100644
--- a/sound/pci/rme9652/hdsp.c
+++ b/sound/pci/rme9652/hdsp.c
@@ -3294,15 +3294,33 @@ snd_hdsp_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer)
3294 char *clock_source; 3294 char *clock_source;
3295 int x; 3295 int x;
3296 3296
3297 if (hdsp_check_for_iobox (hdsp)) { 3297 status = hdsp_read(hdsp, HDSP_statusRegister);
3298 snd_iprintf(buffer, "No I/O box connected.\nPlease connect one and upload firmware.\n"); 3298 status2 = hdsp_read(hdsp, HDSP_status2Register);
3299
3300 snd_iprintf(buffer, "%s (Card #%d)\n", hdsp->card_name,
3301 hdsp->card->number + 1);
3302 snd_iprintf(buffer, "Buffers: capture %p playback %p\n",
3303 hdsp->capture_buffer, hdsp->playback_buffer);
3304 snd_iprintf(buffer, "IRQ: %d Registers bus: 0x%lx VM: 0x%lx\n",
3305 hdsp->irq, hdsp->port, (unsigned long)hdsp->iobase);
3306 snd_iprintf(buffer, "Control register: 0x%x\n", hdsp->control_register);
3307 snd_iprintf(buffer, "Control2 register: 0x%x\n",
3308 hdsp->control2_register);
3309 snd_iprintf(buffer, "Status register: 0x%x\n", status);
3310 snd_iprintf(buffer, "Status2 register: 0x%x\n", status2);
3311
3312 if (hdsp_check_for_iobox(hdsp)) {
3313 snd_iprintf(buffer, "No I/O box connected.\n"
3314 "Please connect one and upload firmware.\n");
3299 return; 3315 return;
3300 } 3316 }
3301 3317
3302 if (hdsp_check_for_firmware(hdsp, 0)) { 3318 if (hdsp_check_for_firmware(hdsp, 0)) {
3303 if (hdsp->state & HDSP_FirmwareCached) { 3319 if (hdsp->state & HDSP_FirmwareCached) {
3304 if (snd_hdsp_load_firmware_from_cache(hdsp) != 0) { 3320 if (snd_hdsp_load_firmware_from_cache(hdsp) != 0) {
3305 snd_iprintf(buffer, "Firmware loading from cache failed, please upload manually.\n"); 3321 snd_iprintf(buffer, "Firmware loading from "
3322 "cache failed, "
3323 "please upload manually.\n");
3306 return; 3324 return;
3307 } 3325 }
3308 } else { 3326 } else {
@@ -3319,18 +3337,6 @@ snd_hdsp_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer)
3319 } 3337 }
3320 } 3338 }
3321 3339
3322 status = hdsp_read(hdsp, HDSP_statusRegister);
3323 status2 = hdsp_read(hdsp, HDSP_status2Register);
3324
3325 snd_iprintf(buffer, "%s (Card #%d)\n", hdsp->card_name, hdsp->card->number + 1);
3326 snd_iprintf(buffer, "Buffers: capture %p playback %p\n",
3327 hdsp->capture_buffer, hdsp->playback_buffer);
3328 snd_iprintf(buffer, "IRQ: %d Registers bus: 0x%lx VM: 0x%lx\n",
3329 hdsp->irq, hdsp->port, (unsigned long)hdsp->iobase);
3330 snd_iprintf(buffer, "Control register: 0x%x\n", hdsp->control_register);
3331 snd_iprintf(buffer, "Control2 register: 0x%x\n", hdsp->control2_register);
3332 snd_iprintf(buffer, "Status register: 0x%x\n", status);
3333 snd_iprintf(buffer, "Status2 register: 0x%x\n", status2);
3334 snd_iprintf(buffer, "FIFO status: %d\n", hdsp_read(hdsp, HDSP_fifoStatus) & 0xff); 3340 snd_iprintf(buffer, "FIFO status: %d\n", hdsp_read(hdsp, HDSP_fifoStatus) & 0xff);
3335 snd_iprintf(buffer, "MIDI1 Output status: 0x%x\n", hdsp_read(hdsp, HDSP_midiStatusOut0)); 3341 snd_iprintf(buffer, "MIDI1 Output status: 0x%x\n", hdsp_read(hdsp, HDSP_midiStatusOut0));
3336 snd_iprintf(buffer, "MIDI1 Input status: 0x%x\n", hdsp_read(hdsp, HDSP_midiStatusIn0)); 3342 snd_iprintf(buffer, "MIDI1 Input status: 0x%x\n", hdsp_read(hdsp, HDSP_midiStatusIn0));
@@ -3351,7 +3357,6 @@ snd_hdsp_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer)
3351 3357
3352 snd_iprintf(buffer, "\n"); 3358 snd_iprintf(buffer, "\n");
3353 3359
3354
3355 switch (hdsp_clock_source(hdsp)) { 3360 switch (hdsp_clock_source(hdsp)) {
3356 case HDSP_CLOCK_SOURCE_AUTOSYNC: 3361 case HDSP_CLOCK_SOURCE_AUTOSYNC:
3357 clock_source = "AutoSync"; 3362 clock_source = "AutoSync";
diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
index 2f0925236a1b..5518371db13f 100644
--- a/sound/pci/ymfpci/ymfpci_main.c
+++ b/sound/pci/ymfpci/ymfpci_main.c
@@ -834,7 +834,7 @@ static irqreturn_t snd_ymfpci_interrupt(int irq, void *dev_id)
834 status = snd_ymfpci_readw(chip, YDSXGR_INTFLAG); 834 status = snd_ymfpci_readw(chip, YDSXGR_INTFLAG);
835 if (status & 1) { 835 if (status & 1) {
836 if (chip->timer) 836 if (chip->timer)
837 snd_timer_interrupt(chip->timer, chip->timer->sticks); 837 snd_timer_interrupt(chip->timer, chip->timer_ticks);
838 } 838 }
839 snd_ymfpci_writew(chip, YDSXGR_INTFLAG, status); 839 snd_ymfpci_writew(chip, YDSXGR_INTFLAG, status);
840 840
@@ -1885,8 +1885,18 @@ static int snd_ymfpci_timer_start(struct snd_timer *timer)
1885 unsigned int count; 1885 unsigned int count;
1886 1886
1887 chip = snd_timer_chip(timer); 1887 chip = snd_timer_chip(timer);
1888 count = (timer->sticks << 1) - 1;
1889 spin_lock_irqsave(&chip->reg_lock, flags); 1888 spin_lock_irqsave(&chip->reg_lock, flags);
1889 if (timer->sticks > 1) {
1890 chip->timer_ticks = timer->sticks;
1891 count = timer->sticks - 1;
1892 } else {
1893 /*
1894 * Divisor 1 is not allowed; fake it by using divisor 2 and
1895 * counting two ticks for each interrupt.
1896 */
1897 chip->timer_ticks = 2;
1898 count = 2 - 1;
1899 }
1890 snd_ymfpci_writew(chip, YDSXGR_TIMERCOUNT, count); 1900 snd_ymfpci_writew(chip, YDSXGR_TIMERCOUNT, count);
1891 snd_ymfpci_writeb(chip, YDSXGR_TIMERCTRL, 0x03); 1901 snd_ymfpci_writeb(chip, YDSXGR_TIMERCTRL, 0x03);
1892 spin_unlock_irqrestore(&chip->reg_lock, flags); 1902 spin_unlock_irqrestore(&chip->reg_lock, flags);
@@ -1909,14 +1919,14 @@ static int snd_ymfpci_timer_precise_resolution(struct snd_timer *timer,
1909 unsigned long *num, unsigned long *den) 1919 unsigned long *num, unsigned long *den)
1910{ 1920{
1911 *num = 1; 1921 *num = 1;
1912 *den = 48000; 1922 *den = 96000;
1913 return 0; 1923 return 0;
1914} 1924}
1915 1925
1916static struct snd_timer_hardware snd_ymfpci_timer_hw = { 1926static struct snd_timer_hardware snd_ymfpci_timer_hw = {
1917 .flags = SNDRV_TIMER_HW_AUTO, 1927 .flags = SNDRV_TIMER_HW_AUTO,
1918 .resolution = 20833, /* 1/fs = 20.8333...us */ 1928 .resolution = 10417, /* 1 / 96 kHz = 10.41666...us */
1919 .ticks = 0x8000, 1929 .ticks = 0x10000,
1920 .start = snd_ymfpci_timer_start, 1930 .start = snd_ymfpci_timer_start,
1921 .stop = snd_ymfpci_timer_stop, 1931 .stop = snd_ymfpci_timer_stop,
1922 .precise_resolution = snd_ymfpci_timer_precise_resolution, 1932 .precise_resolution = snd_ymfpci_timer_precise_resolution,
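Worked numbers for the ymfpci timer change: the new constants describe the chip's 96 kHz tick clock, so .resolution is 10^9 / 96000 ≈ 10417 ns (the old code reported 10^9 / 48000 ≈ 20833 ns) and .ticks doubles to 0x10000. The old 'count = (sticks << 1) - 1' programmed twice the requested divisor, effectively halving the rate to 48 kHz; the new start routine programs sticks - 1 directly, and because a divisor of 1 cannot be encoded it falls back to divisor 2 while recording chip->timer_ticks = 2, so snd_timer_interrupt() is still credited with the real number of elapsed ticks.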
diff --git a/sound/soc/Kconfig b/sound/soc/Kconfig
index d3e786a9a0a7..b1749bc67979 100644
--- a/sound/soc/Kconfig
+++ b/sound/soc/Kconfig
@@ -29,6 +29,7 @@ source "sound/soc/au1x/Kconfig"
29source "sound/soc/blackfin/Kconfig" 29source "sound/soc/blackfin/Kconfig"
30source "sound/soc/davinci/Kconfig" 30source "sound/soc/davinci/Kconfig"
31source "sound/soc/fsl/Kconfig" 31source "sound/soc/fsl/Kconfig"
32source "sound/soc/imx/Kconfig"
32source "sound/soc/omap/Kconfig" 33source "sound/soc/omap/Kconfig"
33source "sound/soc/pxa/Kconfig" 34source "sound/soc/pxa/Kconfig"
34source "sound/soc/s3c24xx/Kconfig" 35source "sound/soc/s3c24xx/Kconfig"
diff --git a/sound/soc/Makefile b/sound/soc/Makefile
index 6f1e28de23cf..0c5eac01bf2e 100644
--- a/sound/soc/Makefile
+++ b/sound/soc/Makefile
@@ -1,4 +1,4 @@
1snd-soc-core-objs := soc-core.o soc-dapm.o soc-jack.o 1snd-soc-core-objs := soc-core.o soc-dapm.o soc-jack.o soc-cache.o
2 2
3obj-$(CONFIG_SND_SOC) += snd-soc-core.o 3obj-$(CONFIG_SND_SOC) += snd-soc-core.o
4obj-$(CONFIG_SND_SOC) += codecs/ 4obj-$(CONFIG_SND_SOC) += codecs/
@@ -7,6 +7,7 @@ obj-$(CONFIG_SND_SOC) += au1x/
7obj-$(CONFIG_SND_SOC) += blackfin/ 7obj-$(CONFIG_SND_SOC) += blackfin/
8obj-$(CONFIG_SND_SOC) += davinci/ 8obj-$(CONFIG_SND_SOC) += davinci/
9obj-$(CONFIG_SND_SOC) += fsl/ 9obj-$(CONFIG_SND_SOC) += fsl/
10obj-$(CONFIG_SND_SOC) += imx/
10obj-$(CONFIG_SND_SOC) += omap/ 11obj-$(CONFIG_SND_SOC) += omap/
11obj-$(CONFIG_SND_SOC) += pxa/ 12obj-$(CONFIG_SND_SOC) += pxa/
12obj-$(CONFIG_SND_SOC) += s3c24xx/ 13obj-$(CONFIG_SND_SOC) += s3c24xx/
diff --git a/sound/soc/atmel/sam9g20_wm8731.c b/sound/soc/atmel/sam9g20_wm8731.c
index 173a239a541c..130b12118d4f 100644
--- a/sound/soc/atmel/sam9g20_wm8731.c
+++ b/sound/soc/atmel/sam9g20_wm8731.c
@@ -56,30 +56,14 @@
56 56
57#define MCLK_RATE 12000000 57#define MCLK_RATE 12000000
58 58
59static struct clk *mclk; 59/*
60 60 * As shipped the board does not have inputs. However, it is relatively
61static int at91sam9g20ek_startup(struct snd_pcm_substream *substream) 61 * straightforward to modify the board to hook them up so support is left
62{ 62 * in the driver.
63 struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream); 63 */
64 struct snd_soc_dai *codec_dai = rtd->dai->codec_dai; 64#undef ENABLE_MIC_INPUT
65 int ret;
66
67 ret = snd_soc_dai_set_sysclk(codec_dai, WM8731_SYSCLK,
68 MCLK_RATE, SND_SOC_CLOCK_IN);
69 if (ret < 0) {
70 clk_disable(mclk);
71 return ret;
72 }
73
74 return 0;
75}
76
77static void at91sam9g20ek_shutdown(struct snd_pcm_substream *substream)
78{
79 struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
80 65
81 dev_dbg(rtd->socdev->dev, "shutdown"); 66static struct clk *mclk;
82}
83 67
84static int at91sam9g20ek_hw_params(struct snd_pcm_substream *substream, 68static int at91sam9g20ek_hw_params(struct snd_pcm_substream *substream,
85 struct snd_pcm_hw_params *params) 69 struct snd_pcm_hw_params *params)
@@ -87,102 +71,17 @@ static int at91sam9g20ek_hw_params(struct snd_pcm_substream *substream,
87 struct snd_soc_pcm_runtime *rtd = substream->private_data; 71 struct snd_soc_pcm_runtime *rtd = substream->private_data;
88 struct snd_soc_dai *codec_dai = rtd->dai->codec_dai; 72 struct snd_soc_dai *codec_dai = rtd->dai->codec_dai;
89 struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai; 73 struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
90 struct atmel_ssc_info *ssc_p = cpu_dai->private_data;
91 struct ssc_device *ssc = ssc_p->ssc;
92 int ret; 74 int ret;
93 75
94 unsigned int rate;
95 int cmr_div, period;
96
97 if (ssc == NULL) {
98 printk(KERN_INFO "at91sam9g20ek_hw_params: ssc is NULL!\n");
99 return -EINVAL;
100 }
101
102 /* set codec DAI configuration */ 76 /* set codec DAI configuration */
103 ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S | 77 ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
104 SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS); 78 SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM);
105 if (ret < 0) 79 if (ret < 0)
106 return ret; 80 return ret;
107 81
108 /* set cpu DAI configuration */ 82 /* set cpu DAI configuration */
109 ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S | 83 ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S |
110 SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS); 84 SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM);
111 if (ret < 0)
112 return ret;
113
114 /*
115 * The SSC clock dividers depend on the sample rate. The CMR.DIV
116 * field divides the system master clock MCK to drive the SSC TK
117 * signal which provides the codec BCLK. The TCMR.PERIOD and
118 * RCMR.PERIOD fields further divide the BCLK signal to drive
119 * the SSC TF and RF signals which provide the codec DACLRC and
120 * ADCLRC clocks.
121 *
122 * The dividers were determined through trial and error, where a
123 * CMR.DIV value is chosen such that the resulting BCLK value is
124 * divisible, or almost divisible, by (2 * sample rate), and then
125 * the TCMR.PERIOD or RCMR.PERIOD is BCLK / (2 * sample rate) - 1.
126 */
127 rate = params_rate(params);
128
129 switch (rate) {
130 case 8000:
131 cmr_div = 55; /* BCLK = 133MHz/(2*55) = 1.209MHz */
132 period = 74; /* LRC = BCLK/(2*(74+1)) ~= 8060,6Hz */
133 break;
134 case 11025:
135 cmr_div = 67; /* BCLK = 133MHz/(2*60) = 1.108MHz */
136 period = 45; /* LRC = BCLK/(2*(49+1)) = 11083,3Hz */
137 break;
138 case 16000:
139 cmr_div = 63; /* BCLK = 133MHz/(2*63) = 1.055MHz */
140 period = 32; /* LRC = BCLK/(2*(32+1)) = 15993,2Hz */
141 break;
142 case 22050:
143 cmr_div = 52; /* BCLK = 133MHz/(2*52) = 1.278MHz */
144 period = 28; /* LRC = BCLK/(2*(28+1)) = 22049Hz */
145 break;
146 case 32000:
147 cmr_div = 66; /* BCLK = 133MHz/(2*66) = 1.007MHz */
148 period = 15; /* LRC = BCLK/(2*(15+1)) = 31486,742Hz */
149 break;
150 case 44100:
151 cmr_div = 29; /* BCLK = 133MHz/(2*29) = 2.293MHz */
152 period = 25; /* LRC = BCLK/(2*(25+1)) = 44098Hz */
153 break;
154 case 48000:
155 cmr_div = 33; /* BCLK = 133MHz/(2*33) = 2.015MHz */
156 period = 20; /* LRC = BCLK/(2*(20+1)) = 47979,79Hz */
157 break;
158 case 88200:
159 cmr_div = 29; /* BCLK = 133MHz/(2*29) = 2.293MHz */
160 period = 12; /* LRC = BCLK/(2*(12+1)) = 88196Hz */
161 break;
162 case 96000:
163 cmr_div = 23; /* BCLK = 133MHz/(2*23) = 2.891MHz */
164 period = 14; /* LRC = BCLK/(2*(14+1)) = 96376Hz */
165 break;
166 default:
167 printk(KERN_WARNING "unsupported rate %d"
168 " on at91sam9g20ek board\n", rate);
169 return -EINVAL;
170 }
171
172 /* set the MCK divider for BCLK */
173 ret = snd_soc_dai_set_clkdiv(cpu_dai, ATMEL_SSC_CMR_DIV, cmr_div);
174 if (ret < 0)
175 return ret;
176
177 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
178 /* set the BCLK divider for DACLRC */
179 ret = snd_soc_dai_set_clkdiv(cpu_dai,
180 ATMEL_SSC_TCMR_PERIOD, period);
181 } else {
182 /* set the BCLK divider for ADCLRC */
183 ret = snd_soc_dai_set_clkdiv(cpu_dai,
184 ATMEL_SSC_RCMR_PERIOD, period);
185 }
186 if (ret < 0) 85 if (ret < 0)
187 return ret; 86 return ret;
188 87
@@ -190,9 +89,7 @@ static int at91sam9g20ek_hw_params(struct snd_pcm_substream *substream,
190} 89}
191 90
192static struct snd_soc_ops at91sam9g20ek_ops = { 91static struct snd_soc_ops at91sam9g20ek_ops = {
193 .startup = at91sam9g20ek_startup,
194 .hw_params = at91sam9g20ek_hw_params, 92 .hw_params = at91sam9g20ek_hw_params,
195 .shutdown = at91sam9g20ek_shutdown,
196}; 93};
197 94
198static int at91sam9g20ek_set_bias_level(struct snd_soc_card *card, 95static int at91sam9g20ek_set_bias_level(struct snd_soc_card *card,
@@ -241,10 +138,20 @@ static const struct snd_soc_dapm_route intercon[] = {
241 */ 138 */
242static int at91sam9g20ek_wm8731_init(struct snd_soc_codec *codec) 139static int at91sam9g20ek_wm8731_init(struct snd_soc_codec *codec)
243{ 140{
141 struct snd_soc_dai *codec_dai = &codec->dai[0];
142 int ret;
143
244 printk(KERN_DEBUG 144 printk(KERN_DEBUG
245 "at91sam9g20ek_wm8731 " 145 "at91sam9g20ek_wm8731 "
246 ": at91sam9g20ek_wm8731_init() called\n"); 146 ": at91sam9g20ek_wm8731_init() called\n");
247 147
148 ret = snd_soc_dai_set_sysclk(codec_dai, WM8731_SYSCLK,
149 MCLK_RATE, SND_SOC_CLOCK_IN);
150 if (ret < 0) {
151 printk(KERN_ERR "Failed to set WM8731 SYSCLK: %d\n", ret);
152 return ret;
153 }
154
248 /* Add specific widgets */ 155 /* Add specific widgets */
249 snd_soc_dapm_new_controls(codec, at91sam9g20ek_dapm_widgets, 156 snd_soc_dapm_new_controls(codec, at91sam9g20ek_dapm_widgets,
250 ARRAY_SIZE(at91sam9g20ek_dapm_widgets)); 157 ARRAY_SIZE(at91sam9g20ek_dapm_widgets));
@@ -255,8 +162,13 @@ static int at91sam9g20ek_wm8731_init(struct snd_soc_codec *codec)
255 snd_soc_dapm_nc_pin(codec, "RLINEIN"); 162 snd_soc_dapm_nc_pin(codec, "RLINEIN");
256 snd_soc_dapm_nc_pin(codec, "LLINEIN"); 163 snd_soc_dapm_nc_pin(codec, "LLINEIN");
257 164
258 /* always connected */ 165#ifdef ENABLE_MIC_INPUT
259 snd_soc_dapm_enable_pin(codec, "Int Mic"); 166 snd_soc_dapm_enable_pin(codec, "Int Mic");
167#else
168 snd_soc_dapm_nc_pin(codec, "Int Mic");
169#endif
170
171 /* always connected */
260 snd_soc_dapm_enable_pin(codec, "Ext Spk"); 172 snd_soc_dapm_enable_pin(codec, "Ext Spk");
261 173
262 snd_soc_dapm_sync(codec); 174 snd_soc_dapm_sync(codec);
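
The removed hw_params code above derived the SSC dividers by hand using BCLK = MCK / (2 * CMR.DIV) and LRC = BCLK / (2 * (PERIOD + 1)); the patch can drop that table because the WM8731 now runs as bit/frame clock master (CBM_CFM) and gets its sysclk set once in the init callback. The small user-space program below, not part of the patch, simply replays that arithmetic for a few rows of the old table, assuming the 133 MHz master clock quoted in the removed comments.

#include <stdio.h>

/* assumed SSC master clock, taken from the removed comments */
#define MCK_HZ 133000000.0

/* a few rows of the removed trial-and-error divider table */
struct ssc_div { unsigned rate, cmr_div, period; };

static const struct ssc_div table[] = {
	{  8000, 55, 74 },
	{ 44100, 29, 25 },
	{ 48000, 33, 20 },
};

int main(void)
{
	for (unsigned i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		double bclk = MCK_HZ / (2.0 * table[i].cmr_div);    /* BCLK = MCK/(2*CMR.DIV)      */
		double lrc  = bclk / (2.0 * (table[i].period + 1)); /* LRC  = BCLK/(2*(PERIOD+1))  */

		printf("rate %5u: BCLK = %.0f Hz, LRC = %.1f Hz\n",
		       table[i].rate, bclk, lrc);
	}
	return 0;
}

Running it reproduces the figures from the deleted comments, for example about 2.015 MHz BCLK and 47980 Hz LRC for the 48 kHz row.
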
diff --git a/sound/soc/au1x/psc-ac97.c b/sound/soc/au1x/psc-ac97.c
index 479d7bdf1865..a521aa90ddee 100644
--- a/sound/soc/au1x/psc-ac97.c
+++ b/sound/soc/au1x/psc-ac97.c
@@ -1,8 +1,8 @@
1/* 1/*
2 * Au12x0/Au1550 PSC ALSA ASoC audio support. 2 * Au12x0/Au1550 PSC ALSA ASoC audio support.
3 * 3 *
4 * (c) 2007-2008 MSC Vertriebsges.m.b.H., 4 * (c) 2007-2009 MSC Vertriebsges.m.b.H.,
5 * Manuel Lauss <mano@roarinelk.homelinux.net> 5 * Manuel Lauss <manuel.lauss@gmail.com>
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
@@ -19,6 +19,7 @@
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/device.h> 20#include <linux/device.h>
21#include <linux/delay.h> 21#include <linux/delay.h>
22#include <linux/mutex.h>
22#include <linux/suspend.h> 23#include <linux/suspend.h>
23#include <sound/core.h> 24#include <sound/core.h>
24#include <sound/pcm.h> 25#include <sound/pcm.h>
@@ -29,6 +30,9 @@
29 30
30#include "psc.h" 31#include "psc.h"
31 32
33/* how often to retry failed codec register reads/writes */
34#define AC97_RW_RETRIES 5
35
32#define AC97_DIR \ 36#define AC97_DIR \
33 (SND_SOC_DAIDIR_PLAYBACK | SND_SOC_DAIDIR_CAPTURE) 37 (SND_SOC_DAIDIR_PLAYBACK | SND_SOC_DAIDIR_CAPTURE)
34 38
@@ -45,6 +49,9 @@
45#define AC97PCR_CLRFIFO(stype) \ 49#define AC97PCR_CLRFIFO(stype) \
46 ((stype) == PCM_TX ? PSC_AC97PCR_TC : PSC_AC97PCR_RC) 50 ((stype) == PCM_TX ? PSC_AC97PCR_TC : PSC_AC97PCR_RC)
47 51
52#define AC97STAT_BUSY(stype) \
53 ((stype) == PCM_TX ? PSC_AC97STAT_TB : PSC_AC97STAT_RB)
54
48/* instance data. There can be only one, MacLeod!!!! */ 55/* instance data. There can be only one, MacLeod!!!! */
49static struct au1xpsc_audio_data *au1xpsc_ac97_workdata; 56static struct au1xpsc_audio_data *au1xpsc_ac97_workdata;
50 57
@@ -54,24 +61,33 @@ static unsigned short au1xpsc_ac97_read(struct snd_ac97 *ac97,
54{ 61{
55 /* FIXME */ 62 /* FIXME */
56 struct au1xpsc_audio_data *pscdata = au1xpsc_ac97_workdata; 63 struct au1xpsc_audio_data *pscdata = au1xpsc_ac97_workdata;
57 unsigned short data, tmo; 64 unsigned short data, retry, tmo;
58 65
59 au_writel(PSC_AC97CDC_RD | PSC_AC97CDC_INDX(reg), AC97_CDC(pscdata)); 66 au_writel(PSC_AC97EVNT_CD, AC97_EVNT(pscdata));
60 au_sync(); 67 au_sync();
61 68
62 tmo = 1000; 69 retry = AC97_RW_RETRIES;
63 while ((!(au_readl(AC97_EVNT(pscdata)) & PSC_AC97EVNT_CD)) && --tmo) 70 do {
64 udelay(2); 71 mutex_lock(&pscdata->lock);
72
73 au_writel(PSC_AC97CDC_RD | PSC_AC97CDC_INDX(reg),
74 AC97_CDC(pscdata));
75 au_sync();
76
77 tmo = 2000;
78 while ((!(au_readl(AC97_EVNT(pscdata)) & PSC_AC97EVNT_CD))
79 && --tmo)
80 udelay(2);
65 81
66 if (!tmo)
67 data = 0xffff;
68 else
69 data = au_readl(AC97_CDC(pscdata)) & 0xffff; 82 data = au_readl(AC97_CDC(pscdata)) & 0xffff;
70 83
71 au_writel(PSC_AC97EVNT_CD, AC97_EVNT(pscdata)); 84 au_writel(PSC_AC97EVNT_CD, AC97_EVNT(pscdata));
72 au_sync(); 85 au_sync();
86
87 mutex_unlock(&pscdata->lock);
88 } while (--retry && !tmo);
73 89
74 return data; 90 return retry ? data : 0xffff;
75} 91}
76 92
77/* AC97 controller writes to codec register */ 93/* AC97 controller writes to codec register */
@@ -80,16 +96,29 @@ static void au1xpsc_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
80{ 96{
81 /* FIXME */ 97 /* FIXME */
82 struct au1xpsc_audio_data *pscdata = au1xpsc_ac97_workdata; 98 struct au1xpsc_audio_data *pscdata = au1xpsc_ac97_workdata;
83 unsigned int tmo; 99 unsigned int tmo, retry;
84 100
85 au_writel(PSC_AC97CDC_INDX(reg) | (val & 0xffff), AC97_CDC(pscdata)); 101 au_writel(PSC_AC97EVNT_CD, AC97_EVNT(pscdata));
86 au_sync(); 102 au_sync();
87 tmo = 1000; 103
88 while ((!(au_readl(AC97_EVNT(pscdata)) & PSC_AC97EVNT_CD)) && --tmo) 104 retry = AC97_RW_RETRIES;
105 do {
106 mutex_lock(&pscdata->lock);
107
108 au_writel(PSC_AC97CDC_INDX(reg) | (val & 0xffff),
109 AC97_CDC(pscdata));
89 au_sync(); 110 au_sync();
90 111
91 au_writel(PSC_AC97EVNT_CD, AC97_EVNT(pscdata)); 112 tmo = 2000;
92 au_sync(); 113 while ((!(au_readl(AC97_EVNT(pscdata)) & PSC_AC97EVNT_CD))
114 && --tmo)
115 udelay(2);
116
117 au_writel(PSC_AC97EVNT_CD, AC97_EVNT(pscdata));
118 au_sync();
119
120 mutex_unlock(&pscdata->lock);
121 } while (--retry && !tmo);
93} 122}
94 123
95/* AC97 controller asserts a warm reset */ 124/* AC97 controller asserts a warm reset */
@@ -129,9 +158,9 @@ static void au1xpsc_ac97_cold_reset(struct snd_ac97 *ac97)
129 au_sync(); 158 au_sync();
130 159
131 /* wait for PSC to indicate it's ready */ 160 /* wait for PSC to indicate it's ready */
132 i = 100000; 161 i = 1000;
133 while (!((au_readl(AC97_STAT(pscdata)) & PSC_AC97STAT_SR)) && (--i)) 162 while (!((au_readl(AC97_STAT(pscdata)) & PSC_AC97STAT_SR)) && (--i))
134 au_sync(); 163 msleep(1);
135 164
136 if (i == 0) { 165 if (i == 0) {
137 printk(KERN_ERR "au1xpsc-ac97: PSC not ready!\n"); 166 printk(KERN_ERR "au1xpsc-ac97: PSC not ready!\n");
@@ -143,9 +172,9 @@ static void au1xpsc_ac97_cold_reset(struct snd_ac97 *ac97)
143 au_sync(); 172 au_sync();
144 173
145 /* wait for AC97 core to become ready */ 174 /* wait for AC97 core to become ready */
146 i = 100000; 175 i = 1000;
147 while (!((au_readl(AC97_STAT(pscdata)) & PSC_AC97STAT_DR)) && (--i)) 176 while (!((au_readl(AC97_STAT(pscdata)) & PSC_AC97STAT_DR)) && (--i))
148 au_sync(); 177 msleep(1);
149 if (i == 0) 178 if (i == 0)
150 printk(KERN_ERR "au1xpsc-ac97: AC97 ctrl not ready\n"); 179 printk(KERN_ERR "au1xpsc-ac97: AC97 ctrl not ready\n");
151} 180}
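
The two cold-reset hunks above trade a 100000-iteration au_sync() busy poll for a 1000-iteration loop that sleeps 1 ms per attempt, roughly a one-second bounded wait that yields the CPU instead of spinning. A minimal user-space analogue of that shape, not part of the patch; hw_ready() is a toy stand-in for the PSC status-register test.

#define _POSIX_C_SOURCE 199309L
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* toy "hardware": reports ready after a few milliseconds' worth of polls */
static int warmup = 5;
static bool hw_ready(void) { return warmup-- <= 0; }

static bool wait_for_ready(void)
{
	int tries = 1000;                           /* ~1 s at 1 ms per attempt */

	while (!hw_ready() && --tries) {
		struct timespec ts = { 0, 1000000 };    /* 1 ms, like msleep(1) */
		nanosleep(&ts, NULL);
	}
	return tries != 0;                          /* false: hardware never came up */
}

int main(void)
{
	printf("controller %s\n", wait_for_ready() ? "ready" : "not ready");
	return 0;
}
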
@@ -165,12 +194,12 @@ static int au1xpsc_ac97_hw_params(struct snd_pcm_substream *substream,
165{ 194{
166 /* FIXME */ 195 /* FIXME */
167 struct au1xpsc_audio_data *pscdata = au1xpsc_ac97_workdata; 196 struct au1xpsc_audio_data *pscdata = au1xpsc_ac97_workdata;
168 unsigned long r, stat; 197 unsigned long r, ro, stat;
169 int chans, stype = SUBSTREAM_TYPE(substream); 198 int chans, stype = SUBSTREAM_TYPE(substream);
170 199
171 chans = params_channels(params); 200 chans = params_channels(params);
172 201
173 r = au_readl(AC97_CFG(pscdata)); 202 r = ro = au_readl(AC97_CFG(pscdata));
174 stat = au_readl(AC97_STAT(pscdata)); 203 stat = au_readl(AC97_STAT(pscdata));
175 204
176 /* already active? */ 205 /* already active? */
@@ -180,9 +209,6 @@ static int au1xpsc_ac97_hw_params(struct snd_pcm_substream *substream,
180 (pscdata->rate != params_rate(params))) 209 (pscdata->rate != params_rate(params)))
181 return -EINVAL; 210 return -EINVAL;
182 } else { 211 } else {
183 /* disable AC97 device controller first */
184 au_writel(r & ~PSC_AC97CFG_DE_ENABLE, AC97_CFG(pscdata));
185 au_sync();
186 212
187 /* set sample bitdepth: REG[24:21]=(BITS-2)/2 */ 213 /* set sample bitdepth: REG[24:21]=(BITS-2)/2 */
188 r &= ~PSC_AC97CFG_LEN_MASK; 214 r &= ~PSC_AC97CFG_LEN_MASK;
@@ -199,14 +225,40 @@ static int au1xpsc_ac97_hw_params(struct snd_pcm_substream *substream,
199 r |= PSC_AC97CFG_RXSLOT_ENA(4); 225 r |= PSC_AC97CFG_RXSLOT_ENA(4);
200 } 226 }
201 227
202 /* finally enable the AC97 controller again */ 228 /* do we need to poke the hardware? */
229 if (!(r ^ ro))
230 goto out;
231
232 /* ac97 engine is about to be disabled */
233 mutex_lock(&pscdata->lock);
234
235 /* disable AC97 device controller first... */
236 au_writel(r & ~PSC_AC97CFG_DE_ENABLE, AC97_CFG(pscdata));
237 au_sync();
238
239 /* ...wait for it... */
240 while (au_readl(AC97_STAT(pscdata)) & PSC_AC97STAT_DR)
241 asm volatile ("nop");
242
243 /* ...write config... */
244 au_writel(r, AC97_CFG(pscdata));
245 au_sync();
246
247 /* ...enable the AC97 controller again... */
203 au_writel(r | PSC_AC97CFG_DE_ENABLE, AC97_CFG(pscdata)); 248 au_writel(r | PSC_AC97CFG_DE_ENABLE, AC97_CFG(pscdata));
204 au_sync(); 249 au_sync();
205 250
251 /* ...and wait for ready bit */
252 while (!(au_readl(AC97_STAT(pscdata)) & PSC_AC97STAT_DR))
253 asm volatile ("nop");
254
255 mutex_unlock(&pscdata->lock);
256
206 pscdata->cfg = r; 257 pscdata->cfg = r;
207 pscdata->rate = params_rate(params); 258 pscdata->rate = params_rate(params);
208 } 259 }
209 260
261out:
210 return 0; 262 return 0;
211} 263}
212 264
@@ -222,6 +274,8 @@ static int au1xpsc_ac97_trigger(struct snd_pcm_substream *substream,
222 switch (cmd) { 274 switch (cmd) {
223 case SNDRV_PCM_TRIGGER_START: 275 case SNDRV_PCM_TRIGGER_START:
224 case SNDRV_PCM_TRIGGER_RESUME: 276 case SNDRV_PCM_TRIGGER_RESUME:
277 au_writel(AC97PCR_CLRFIFO(stype), AC97_PCR(pscdata));
278 au_sync();
225 au_writel(AC97PCR_START(stype), AC97_PCR(pscdata)); 279 au_writel(AC97PCR_START(stype), AC97_PCR(pscdata));
226 au_sync(); 280 au_sync();
227 break; 281 break;
@@ -229,6 +283,13 @@ static int au1xpsc_ac97_trigger(struct snd_pcm_substream *substream,
229 case SNDRV_PCM_TRIGGER_SUSPEND: 283 case SNDRV_PCM_TRIGGER_SUSPEND:
230 au_writel(AC97PCR_STOP(stype), AC97_PCR(pscdata)); 284 au_writel(AC97PCR_STOP(stype), AC97_PCR(pscdata));
231 au_sync(); 285 au_sync();
286
287 while (au_readl(AC97_STAT(pscdata)) & AC97STAT_BUSY(stype))
288 asm volatile ("nop");
289
290 au_writel(AC97PCR_CLRFIFO(stype), AC97_PCR(pscdata));
291 au_sync();
292
232 break; 293 break;
233 default: 294 default:
234 ret = -EINVAL; 295 ret = -EINVAL;
@@ -251,6 +312,8 @@ static int au1xpsc_ac97_probe(struct platform_device *pdev,
251 if (!au1xpsc_ac97_workdata) 312 if (!au1xpsc_ac97_workdata)
252 return -ENOMEM; 313 return -ENOMEM;
253 314
315 mutex_init(&au1xpsc_ac97_workdata->lock);
316
254 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 317 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
255 if (!r) { 318 if (!r) {
256 ret = -ENODEV; 319 ret = -ENODEV;
@@ -269,9 +332,9 @@ static int au1xpsc_ac97_probe(struct platform_device *pdev,
269 goto out1; 332 goto out1;
270 333
271 /* configuration: max dma trigger threshold, enable ac97 */ 334 /* configuration: max dma trigger threshold, enable ac97 */
272 au1xpsc_ac97_workdata->cfg = PSC_AC97CFG_RT_FIFO8 | 335 au1xpsc_ac97_workdata->cfg = PSC_AC97CFG_RT_FIFO8 |
273 PSC_AC97CFG_TT_FIFO8 | 336 PSC_AC97CFG_TT_FIFO8 |
274 PSC_AC97CFG_DE_ENABLE; 337 PSC_AC97CFG_DE_ENABLE;
275 338
276 /* preserve PSC clock source set up by platform (dev.platform_data 339 /* preserve PSC clock source set up by platform (dev.platform_data
277 * is already occupied by soc layer) 340 * is already occupied by soc layer)
@@ -386,4 +449,4 @@ module_exit(au1xpsc_ac97_exit);
386 449
387MODULE_LICENSE("GPL"); 450MODULE_LICENSE("GPL");
388MODULE_DESCRIPTION("Au12x0/Au1550 PSC AC97 ALSA ASoC audio driver"); 451MODULE_DESCRIPTION("Au12x0/Au1550 PSC AC97 ALSA ASoC audio driver");
389MODULE_AUTHOR("Manuel Lauss <mano@roarinelk.homelinux.net>"); 452MODULE_AUTHOR("Manuel Lauss <manuel.lauss@gmail.com>");
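
The new read and write paths in this file share one shape: take the mutex added to struct au1xpsc_audio_data, issue the codec-register command, poll the command-done event with a bounded spin, and repeat the whole transaction up to AC97_RW_RETRIES times, returning all-ones if every attempt times out. The sketch below, not part of the patch, mirrors that retry pattern in user space; issue_read(), command_done() and read_result() are toy stand-ins for the AC97_CDC/AC97_EVNT register accesses.

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RW_RETRIES 5           /* like AC97_RW_RETRIES */
#define POLL_LOOPS 2000        /* like the tmo = 2000 completion spin */

static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;

/* toy "hardware": the command completes after a handful of polls */
static int busy_polls;
static void issue_read(unsigned reg)   { (void)reg; busy_polls = 3; }
static bool command_done(void)         { return busy_polls-- <= 0; }
static uint16_t read_result(void)      { return 0x1234; }

static uint16_t codec_read(unsigned reg)
{
	unsigned retry = RW_RETRIES;
	unsigned tmo = 0;
	uint16_t data = 0xffff;

	do {
		pthread_mutex_lock(&reg_lock);      /* one transaction at a time */
		issue_read(reg);                    /* start the register read */

		tmo = POLL_LOOPS;                   /* bounded completion poll */
		while (!command_done() && --tmo)
			;                           /* the driver udelay(2)s here */

		data = read_result();               /* only meaningful if tmo != 0 */
		pthread_mutex_unlock(&reg_lock);
	} while (--retry && !tmo);                  /* a timed-out attempt is retried */

	return tmo ? data : 0xffff;                 /* all-ones marks a failed read */
}

int main(void)
{
	printf("register 0x2c reads back 0x%04x\n", codec_read(0x2c));
	return 0;
}
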
diff --git a/sound/soc/au1x/psc.h b/sound/soc/au1x/psc.h
index 8fdb1a04a07b..3f474e8ed4f6 100644
--- a/sound/soc/au1x/psc.h
+++ b/sound/soc/au1x/psc.h
@@ -29,6 +29,7 @@ struct au1xpsc_audio_data {
29 29
30 unsigned long pm[2]; 30 unsigned long pm[2];
31 struct resource *ioarea; 31 struct resource *ioarea;
32 struct mutex lock;
32}; 33};
33 34
34#define PCM_TX 0 35#define PCM_TX 0
diff --git a/sound/soc/blackfin/Kconfig b/sound/soc/blackfin/Kconfig
index 811596f4c092..ac927ffdc961 100644
--- a/sound/soc/blackfin/Kconfig
+++ b/sound/soc/blackfin/Kconfig
@@ -7,6 +7,15 @@ config SND_BF5XX_I2S
7 mode (supports single stereo In/Out). 7 mode (supports single stereo In/Out).
8 You will also need to select the audio interfaces to support below. 8 You will also need to select the audio interfaces to support below.
9 9
10config SND_BF5XX_TDM
11 tristate "SoC I2S(TDM mode) Audio for the ADI BF5xx chip"
12 depends on (BLACKFIN && SND_SOC)
13 help
14 Say Y or M if you want to add support for codecs attached to
15 the Blackfin SPORT (synchronous serial ports) interface in TDM
16 mode.
17 You will also need to select the audio interfaces to support below.
18
10config SND_BF5XX_SOC_SSM2602 19config SND_BF5XX_SOC_SSM2602
11 tristate "SoC SSM2602 Audio support for BF52x ezkit" 20 tristate "SoC SSM2602 Audio support for BF52x ezkit"
12 depends on SND_BF5XX_I2S 21 depends on SND_BF5XX_I2S
@@ -69,12 +78,24 @@ config SND_BF5XX_SOC_I2S
69 tristate 78 tristate
70 select SND_BF5XX_SOC_SPORT 79 select SND_BF5XX_SOC_SPORT
71 80
81config SND_BF5XX_SOC_TDM
82 tristate
83 select SND_BF5XX_SOC_SPORT
84
72config SND_BF5XX_SOC_AC97 85config SND_BF5XX_SOC_AC97
73 tristate 86 tristate
74 select AC97_BUS 87 select AC97_BUS
75 select SND_SOC_AC97_BUS 88 select SND_SOC_AC97_BUS
76 select SND_BF5XX_SOC_SPORT 89 select SND_BF5XX_SOC_SPORT
77 90
91config SND_BF5XX_SOC_AD1836
92 tristate "SoC AD1836 Audio support for BF5xx"
93 depends on SND_BF5XX_TDM
94 select SND_BF5XX_SOC_TDM
95 select SND_SOC_AD1836
96 help
97 Say Y if you want to add support for SoC audio on BF5xx STAMP/EZKIT.
98
78config SND_BF5XX_SOC_AD1980 99config SND_BF5XX_SOC_AD1980
79 tristate "SoC AD1980/1 Audio support for BF5xx" 100 tristate "SoC AD1980/1 Audio support for BF5xx"
80 depends on SND_BF5XX_AC97 101 depends on SND_BF5XX_AC97
@@ -83,9 +104,17 @@ config SND_BF5XX_SOC_AD1980
83 help 104 help
84 Say Y if you want to add support for SoC audio on BF5xx STAMP/EZKIT. 105 Say Y if you want to add support for SoC audio on BF5xx STAMP/EZKIT.
85 106
107config SND_BF5XX_SOC_AD1938
108 tristate "SoC AD1938 Audio support for Blackfin"
109 depends on SND_BF5XX_TDM
110 select SND_BF5XX_SOC_TDM
111 select SND_SOC_AD1938
112 help
113 Say Y if you want to add support for AD1938 codec on Blackfin.
114
86config SND_BF5XX_SPORT_NUM 115config SND_BF5XX_SPORT_NUM
87 int "Set a SPORT for Sound chip" 116 int "Set a SPORT for Sound chip"
88 depends on (SND_BF5XX_I2S || SND_BF5XX_AC97) 117 depends on (SND_BF5XX_I2S || SND_BF5XX_AC97 || SND_BF5XX_TDM)
89 range 0 3 if BF54x 118 range 0 3 if BF54x
90 range 0 1 if !BF54x 119 range 0 1 if !BF54x
91 default 0 120 default 0
diff --git a/sound/soc/blackfin/Makefile b/sound/soc/blackfin/Makefile
index 97bb37a6359c..87e30423912f 100644
--- a/sound/soc/blackfin/Makefile
+++ b/sound/soc/blackfin/Makefile
@@ -1,21 +1,29 @@
1# Blackfin Platform Support 1# Blackfin Platform Support
2snd-bf5xx-ac97-objs := bf5xx-ac97-pcm.o 2snd-bf5xx-ac97-objs := bf5xx-ac97-pcm.o
3snd-bf5xx-i2s-objs := bf5xx-i2s-pcm.o 3snd-bf5xx-i2s-objs := bf5xx-i2s-pcm.o
4snd-bf5xx-tdm-objs := bf5xx-tdm-pcm.o
4snd-soc-bf5xx-sport-objs := bf5xx-sport.o 5snd-soc-bf5xx-sport-objs := bf5xx-sport.o
5snd-soc-bf5xx-ac97-objs := bf5xx-ac97.o 6snd-soc-bf5xx-ac97-objs := bf5xx-ac97.o
6snd-soc-bf5xx-i2s-objs := bf5xx-i2s.o 7snd-soc-bf5xx-i2s-objs := bf5xx-i2s.o
8snd-soc-bf5xx-tdm-objs := bf5xx-tdm.o
7 9
8obj-$(CONFIG_SND_BF5XX_AC97) += snd-bf5xx-ac97.o 10obj-$(CONFIG_SND_BF5XX_AC97) += snd-bf5xx-ac97.o
9obj-$(CONFIG_SND_BF5XX_I2S) += snd-bf5xx-i2s.o 11obj-$(CONFIG_SND_BF5XX_I2S) += snd-bf5xx-i2s.o
12obj-$(CONFIG_SND_BF5XX_TDM) += snd-bf5xx-tdm.o
10obj-$(CONFIG_SND_BF5XX_SOC_SPORT) += snd-soc-bf5xx-sport.o 13obj-$(CONFIG_SND_BF5XX_SOC_SPORT) += snd-soc-bf5xx-sport.o
11obj-$(CONFIG_SND_BF5XX_SOC_AC97) += snd-soc-bf5xx-ac97.o 14obj-$(CONFIG_SND_BF5XX_SOC_AC97) += snd-soc-bf5xx-ac97.o
12obj-$(CONFIG_SND_BF5XX_SOC_I2S) += snd-soc-bf5xx-i2s.o 15obj-$(CONFIG_SND_BF5XX_SOC_I2S) += snd-soc-bf5xx-i2s.o
16obj-$(CONFIG_SND_BF5XX_SOC_TDM) += snd-soc-bf5xx-tdm.o
13 17
14# Blackfin Machine Support 18# Blackfin Machine Support
19snd-ad1836-objs := bf5xx-ad1836.o
15snd-ad1980-objs := bf5xx-ad1980.o 20snd-ad1980-objs := bf5xx-ad1980.o
16snd-ssm2602-objs := bf5xx-ssm2602.o 21snd-ssm2602-objs := bf5xx-ssm2602.o
17snd-ad73311-objs := bf5xx-ad73311.o 22snd-ad73311-objs := bf5xx-ad73311.o
23snd-ad1938-objs := bf5xx-ad1938.o
18 24
25obj-$(CONFIG_SND_BF5XX_SOC_AD1836) += snd-ad1836.o
19obj-$(CONFIG_SND_BF5XX_SOC_AD1980) += snd-ad1980.o 26obj-$(CONFIG_SND_BF5XX_SOC_AD1980) += snd-ad1980.o
20obj-$(CONFIG_SND_BF5XX_SOC_SSM2602) += snd-ssm2602.o 27obj-$(CONFIG_SND_BF5XX_SOC_SSM2602) += snd-ssm2602.o
21obj-$(CONFIG_SND_BF5XX_SOC_AD73311) += snd-ad73311.o 28obj-$(CONFIG_SND_BF5XX_SOC_AD73311) += snd-ad73311.o
29obj-$(CONFIG_SND_BF5XX_SOC_AD1938) += snd-ad1938.o
diff --git a/sound/soc/blackfin/bf5xx-ac97.c b/sound/soc/blackfin/bf5xx-ac97.c
index b1ed423fabd5..2758b9017a7f 100644
--- a/sound/soc/blackfin/bf5xx-ac97.c
+++ b/sound/soc/blackfin/bf5xx-ac97.c
@@ -277,28 +277,24 @@ static int bf5xx_ac97_resume(struct snd_soc_dai *dai)
277 if (!dai->active) 277 if (!dai->active)
278 return 0; 278 return 0;
279 279
280 ret = sport_set_multichannel(sport_handle, 16, 0x1F, 1); 280 ret = sport_set_multichannel(sport, 16, 0x1F, 1);
281 if (ret) { 281 if (ret) {
282 pr_err("SPORT is busy!\n"); 282 pr_err("SPORT is busy!\n");
283 return -EBUSY; 283 return -EBUSY;
284 } 284 }
285 285
286 ret = sport_config_rx(sport_handle, IRFS, 0xF, 0, (16*16-1)); 286 ret = sport_config_rx(sport, IRFS, 0xF, 0, (16*16-1));
287 if (ret) { 287 if (ret) {
288 pr_err("SPORT is busy!\n"); 288 pr_err("SPORT is busy!\n");
289 return -EBUSY; 289 return -EBUSY;
290 } 290 }
291 291
292 ret = sport_config_tx(sport_handle, ITFS, 0xF, 0, (16*16-1)); 292 ret = sport_config_tx(sport, ITFS, 0xF, 0, (16*16-1));
293 if (ret) { 293 if (ret) {
294 pr_err("SPORT is busy!\n"); 294 pr_err("SPORT is busy!\n");
295 return -EBUSY; 295 return -EBUSY;
296 } 296 }
297 297
298 if (dai->capture.active)
299 sport_rx_start(sport);
300 if (dai->playback.active)
301 sport_tx_start(sport);
302 return 0; 298 return 0;
303} 299}
304 300
diff --git a/sound/soc/blackfin/bf5xx-ad1836.c b/sound/soc/blackfin/bf5xx-ad1836.c
new file mode 100644
index 000000000000..cd361e304b0f
--- /dev/null
+++ b/sound/soc/blackfin/bf5xx-ad1836.c
@@ -0,0 +1,128 @@
1/*
2 * File: sound/soc/blackfin/bf5xx-ad1836.c
3 * Author: Barry Song <Barry.Song@analog.com>
4 *
5 * Created: Aug 4 2009
6 * Description: Board driver for ad1836 sound chip
7 *
8 * Bugs: Enter bugs at http://blackfin.uclinux.org/
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 */
16
17#include <linux/module.h>
18#include <linux/moduleparam.h>
19#include <linux/device.h>
20#include <sound/core.h>
21#include <sound/pcm.h>
22#include <sound/soc.h>
23#include <sound/soc-dapm.h>
24#include <sound/pcm_params.h>
25
26#include <asm/blackfin.h>
27#include <asm/cacheflush.h>
28#include <asm/irq.h>
29#include <asm/dma.h>
30#include <asm/portmux.h>
31
32#include "../codecs/ad1836.h"
33#include "bf5xx-sport.h"
34
35#include "bf5xx-tdm-pcm.h"
36#include "bf5xx-tdm.h"
37
38static struct snd_soc_card bf5xx_ad1836;
39
40static int bf5xx_ad1836_startup(struct snd_pcm_substream *substream)
41{
42 struct snd_soc_pcm_runtime *rtd = substream->private_data;
43 struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
44
45 cpu_dai->private_data = sport_handle;
46 return 0;
47}
48
49static int bf5xx_ad1836_hw_params(struct snd_pcm_substream *substream,
50 struct snd_pcm_hw_params *params)
51{
52 struct snd_soc_pcm_runtime *rtd = substream->private_data;
53 struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
54 struct snd_soc_dai *codec_dai = rtd->dai->codec_dai;
55 int ret = 0;
56 /* set cpu DAI configuration */
57 ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_DSP_A |
58 SND_SOC_DAIFMT_IB_IF | SND_SOC_DAIFMT_CBM_CFM);
59 if (ret < 0)
60 return ret;
61
62 /* set codec DAI configuration */
63 ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_DSP_A |
64 SND_SOC_DAIFMT_IB_IF | SND_SOC_DAIFMT_CBM_CFM);
65 if (ret < 0)
66 return ret;
67
68 return 0;
69}
70
71static struct snd_soc_ops bf5xx_ad1836_ops = {
72 .startup = bf5xx_ad1836_startup,
73 .hw_params = bf5xx_ad1836_hw_params,
74};
75
76static struct snd_soc_dai_link bf5xx_ad1836_dai = {
77 .name = "ad1836",
78 .stream_name = "AD1836",
79 .cpu_dai = &bf5xx_tdm_dai,
80 .codec_dai = &ad1836_dai,
81 .ops = &bf5xx_ad1836_ops,
82};
83
84static struct snd_soc_card bf5xx_ad1836 = {
85 .name = "bf5xx_ad1836",
86 .platform = &bf5xx_tdm_soc_platform,
87 .dai_link = &bf5xx_ad1836_dai,
88 .num_links = 1,
89};
90
91static struct snd_soc_device bf5xx_ad1836_snd_devdata = {
92 .card = &bf5xx_ad1836,
93 .codec_dev = &soc_codec_dev_ad1836,
94};
95
96static struct platform_device *bfxx_ad1836_snd_device;
97
98static int __init bf5xx_ad1836_init(void)
99{
100 int ret;
101
102 bfxx_ad1836_snd_device = platform_device_alloc("soc-audio", -1);
103 if (!bfxx_ad1836_snd_device)
104 return -ENOMEM;
105
106 platform_set_drvdata(bfxx_ad1836_snd_device, &bf5xx_ad1836_snd_devdata);
107 bf5xx_ad1836_snd_devdata.dev = &bfxx_ad1836_snd_device->dev;
108 ret = platform_device_add(bfxx_ad1836_snd_device);
109
110 if (ret)
111 platform_device_put(bfxx_ad1836_snd_device);
112
113 return ret;
114}
115
116static void __exit bf5xx_ad1836_exit(void)
117{
118 platform_device_unregister(bfxx_ad1836_snd_device);
119}
120
121module_init(bf5xx_ad1836_init);
122module_exit(bf5xx_ad1836_exit);
123
124/* Module information */
125MODULE_AUTHOR("Barry Song");
126MODULE_DESCRIPTION("ALSA SoC AD1836 board driver");
127MODULE_LICENSE("GPL");
128
diff --git a/sound/soc/blackfin/bf5xx-ad1938.c b/sound/soc/blackfin/bf5xx-ad1938.c
new file mode 100644
index 000000000000..08269e91810c
--- /dev/null
+++ b/sound/soc/blackfin/bf5xx-ad1938.c
@@ -0,0 +1,142 @@
1/*
2 * File: sound/soc/blackfin/bf5xx-ad1938.c
3 * Author: Barry Song <Barry.Song@analog.com>
4 *
5 * Created: Thur June 4 2009
6 * Description: Board driver for ad1938 sound chip
7 *
8 * Bugs: Enter bugs at http://blackfin.uclinux.org/
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, see the file COPYING, or write
22 * to the Free Software Foundation, Inc.,
23 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 */
25
26#include <linux/module.h>
27#include <linux/moduleparam.h>
28#include <linux/device.h>
29#include <sound/core.h>
30#include <sound/pcm.h>
31#include <sound/soc.h>
32#include <sound/soc-dapm.h>
33#include <sound/pcm_params.h>
34
35#include <asm/blackfin.h>
36#include <asm/cacheflush.h>
37#include <asm/irq.h>
38#include <asm/dma.h>
39#include <asm/portmux.h>
40
41#include "../codecs/ad1938.h"
42#include "bf5xx-sport.h"
43
44#include "bf5xx-tdm-pcm.h"
45#include "bf5xx-tdm.h"
46
47static struct snd_soc_card bf5xx_ad1938;
48
49static int bf5xx_ad1938_startup(struct snd_pcm_substream *substream)
50{
51 struct snd_soc_pcm_runtime *rtd = substream->private_data;
52 struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
53
54 cpu_dai->private_data = sport_handle;
55 return 0;
56}
57
58static int bf5xx_ad1938_hw_params(struct snd_pcm_substream *substream,
59 struct snd_pcm_hw_params *params)
60{
61 struct snd_soc_pcm_runtime *rtd = substream->private_data;
62 struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
63 struct snd_soc_dai *codec_dai = rtd->dai->codec_dai;
64 int ret = 0;
65 /* set cpu DAI configuration */
66 ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_DSP_A |
67 SND_SOC_DAIFMT_IB_IF | SND_SOC_DAIFMT_CBM_CFM);
68 if (ret < 0)
69 return ret;
70
71 /* set codec DAI configuration */
72 ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_DSP_A |
73 SND_SOC_DAIFMT_IB_IF | SND_SOC_DAIFMT_CBM_CFM);
74 if (ret < 0)
75 return ret;
76
77 /* set codec DAI slots, 8 channels, all channels are enabled */
78 ret = snd_soc_dai_set_tdm_slot(codec_dai, 0xFF, 8);
79 if (ret < 0)
80 return ret;
81
82 return 0;
83}
84
85static struct snd_soc_ops bf5xx_ad1938_ops = {
86 .startup = bf5xx_ad1938_startup,
87 .hw_params = bf5xx_ad1938_hw_params,
88};
89
90static struct snd_soc_dai_link bf5xx_ad1938_dai = {
91 .name = "ad1938",
92 .stream_name = "AD1938",
93 .cpu_dai = &bf5xx_tdm_dai,
94 .codec_dai = &ad1938_dai,
95 .ops = &bf5xx_ad1938_ops,
96};
97
98static struct snd_soc_card bf5xx_ad1938 = {
99 .name = "bf5xx_ad1938",
100 .platform = &bf5xx_tdm_soc_platform,
101 .dai_link = &bf5xx_ad1938_dai,
102 .num_links = 1,
103};
104
105static struct snd_soc_device bf5xx_ad1938_snd_devdata = {
106 .card = &bf5xx_ad1938,
107 .codec_dev = &soc_codec_dev_ad1938,
108};
109
110static struct platform_device *bfxx_ad1938_snd_device;
111
112static int __init bf5xx_ad1938_init(void)
113{
114 int ret;
115
116 bfxx_ad1938_snd_device = platform_device_alloc("soc-audio", -1);
117 if (!bfxx_ad1938_snd_device)
118 return -ENOMEM;
119
120 platform_set_drvdata(bfxx_ad1938_snd_device, &bf5xx_ad1938_snd_devdata);
121 bf5xx_ad1938_snd_devdata.dev = &bfxx_ad1938_snd_device->dev;
122 ret = platform_device_add(bfxx_ad1938_snd_device);
123
124 if (ret)
125 platform_device_put(bfxx_ad1938_snd_device);
126
127 return ret;
128}
129
130static void __exit bf5xx_ad1938_exit(void)
131{
132 platform_device_unregister(bfxx_ad1938_snd_device);
133}
134
135module_init(bf5xx_ad1938_init);
136module_exit(bf5xx_ad1938_exit);
137
138/* Module information */
139MODULE_AUTHOR("Barry Song");
140MODULE_DESCRIPTION("ALSA SoC AD1938 board driver");
141MODULE_LICENSE("GPL");
142
diff --git a/sound/soc/blackfin/bf5xx-ad73311.c b/sound/soc/blackfin/bf5xx-ad73311.c
index edfbdc024e66..9825b71d0e28 100644
--- a/sound/soc/blackfin/bf5xx-ad73311.c
+++ b/sound/soc/blackfin/bf5xx-ad73311.c
@@ -203,23 +203,23 @@ static struct snd_soc_device bf5xx_ad73311_snd_devdata = {
203 .codec_dev = &soc_codec_dev_ad73311, 203 .codec_dev = &soc_codec_dev_ad73311,
204}; 204};
205 205
206static struct platform_device *bf52x_ad73311_snd_device; 206static struct platform_device *bf5xx_ad73311_snd_device;
207 207
208static int __init bf5xx_ad73311_init(void) 208static int __init bf5xx_ad73311_init(void)
209{ 209{
210 int ret; 210 int ret;
211 211
212 pr_debug("%s enter\n", __func__); 212 pr_debug("%s enter\n", __func__);
213 bf52x_ad73311_snd_device = platform_device_alloc("soc-audio", -1); 213 bf5xx_ad73311_snd_device = platform_device_alloc("soc-audio", -1);
214 if (!bf52x_ad73311_snd_device) 214 if (!bf5xx_ad73311_snd_device)
215 return -ENOMEM; 215 return -ENOMEM;
216 216
217 platform_set_drvdata(bf52x_ad73311_snd_device, &bf5xx_ad73311_snd_devdata); 217 platform_set_drvdata(bf5xx_ad73311_snd_device, &bf5xx_ad73311_snd_devdata);
218 bf5xx_ad73311_snd_devdata.dev = &bf52x_ad73311_snd_device->dev; 218 bf5xx_ad73311_snd_devdata.dev = &bf5xx_ad73311_snd_device->dev;
219 ret = platform_device_add(bf52x_ad73311_snd_device); 219 ret = platform_device_add(bf5xx_ad73311_snd_device);
220 220
221 if (ret) 221 if (ret)
222 platform_device_put(bf52x_ad73311_snd_device); 222 platform_device_put(bf5xx_ad73311_snd_device);
223 223
224 return ret; 224 return ret;
225} 225}
@@ -227,7 +227,7 @@ static int __init bf5xx_ad73311_init(void)
227static void __exit bf5xx_ad73311_exit(void) 227static void __exit bf5xx_ad73311_exit(void)
228{ 228{
229 pr_debug("%s enter\n", __func__); 229 pr_debug("%s enter\n", __func__);
230 platform_device_unregister(bf52x_ad73311_snd_device); 230 platform_device_unregister(bf5xx_ad73311_snd_device);
231} 231}
232 232
233module_init(bf5xx_ad73311_init); 233module_init(bf5xx_ad73311_init);
diff --git a/sound/soc/blackfin/bf5xx-i2s.c b/sound/soc/blackfin/bf5xx-i2s.c
index af06904bab0f..876abade27e1 100644
--- a/sound/soc/blackfin/bf5xx-i2s.c
+++ b/sound/soc/blackfin/bf5xx-i2s.c
@@ -259,22 +259,18 @@ static int bf5xx_i2s_resume(struct snd_soc_dai *dai)
259 if (!dai->active) 259 if (!dai->active)
260 return 0; 260 return 0;
261 261
262 ret = sport_config_rx(sport_handle, RFSR | RCKFE, RSFSE|0x1f, 0, 0); 262 ret = sport_config_rx(sport, RFSR | RCKFE, RSFSE|0x1f, 0, 0);
263 if (ret) { 263 if (ret) {
264 pr_err("SPORT is busy!\n"); 264 pr_err("SPORT is busy!\n");
265 return -EBUSY; 265 return -EBUSY;
266 } 266 }
267 267
268 ret = sport_config_tx(sport_handle, TFSR | TCKFE, TSFSE|0x1f, 0, 0); 268 ret = sport_config_tx(sport, TFSR | TCKFE, TSFSE|0x1f, 0, 0);
269 if (ret) { 269 if (ret) {
270 pr_err("SPORT is busy!\n"); 270 pr_err("SPORT is busy!\n");
271 return -EBUSY; 271 return -EBUSY;
272 } 272 }
273 273
274 if (dai->capture.active)
275 sport_rx_start(sport);
276 if (dai->playback.active)
277 sport_tx_start(sport);
278 return 0; 274 return 0;
279} 275}
280 276
diff --git a/sound/soc/blackfin/bf5xx-ssm2602.c b/sound/soc/blackfin/bf5xx-ssm2602.c
index bc0cdded7116..3a00fa4dbe6d 100644
--- a/sound/soc/blackfin/bf5xx-ssm2602.c
+++ b/sound/soc/blackfin/bf5xx-ssm2602.c
@@ -148,24 +148,24 @@ static struct snd_soc_device bf5xx_ssm2602_snd_devdata = {
148 .codec_data = &bf5xx_ssm2602_setup, 148 .codec_data = &bf5xx_ssm2602_setup,
149}; 149};
150 150
151static struct platform_device *bf52x_ssm2602_snd_device; 151static struct platform_device *bf5xx_ssm2602_snd_device;
152 152
153static int __init bf5xx_ssm2602_init(void) 153static int __init bf5xx_ssm2602_init(void)
154{ 154{
155 int ret; 155 int ret;
156 156
157 pr_debug("%s enter\n", __func__); 157 pr_debug("%s enter\n", __func__);
158 bf52x_ssm2602_snd_device = platform_device_alloc("soc-audio", -1); 158 bf5xx_ssm2602_snd_device = platform_device_alloc("soc-audio", -1);
159 if (!bf52x_ssm2602_snd_device) 159 if (!bf5xx_ssm2602_snd_device)
160 return -ENOMEM; 160 return -ENOMEM;
161 161
162 platform_set_drvdata(bf52x_ssm2602_snd_device, 162 platform_set_drvdata(bf5xx_ssm2602_snd_device,
163 &bf5xx_ssm2602_snd_devdata); 163 &bf5xx_ssm2602_snd_devdata);
164 bf5xx_ssm2602_snd_devdata.dev = &bf52x_ssm2602_snd_device->dev; 164 bf5xx_ssm2602_snd_devdata.dev = &bf5xx_ssm2602_snd_device->dev;
165 ret = platform_device_add(bf52x_ssm2602_snd_device); 165 ret = platform_device_add(bf5xx_ssm2602_snd_device);
166 166
167 if (ret) 167 if (ret)
168 platform_device_put(bf52x_ssm2602_snd_device); 168 platform_device_put(bf5xx_ssm2602_snd_device);
169 169
170 return ret; 170 return ret;
171} 171}
@@ -173,7 +173,7 @@ static int __init bf5xx_ssm2602_init(void)
173static void __exit bf5xx_ssm2602_exit(void) 173static void __exit bf5xx_ssm2602_exit(void)
174{ 174{
175 pr_debug("%s enter\n", __func__); 175 pr_debug("%s enter\n", __func__);
176 platform_device_unregister(bf52x_ssm2602_snd_device); 176 platform_device_unregister(bf5xx_ssm2602_snd_device);
177} 177}
178 178
179module_init(bf5xx_ssm2602_init); 179module_init(bf5xx_ssm2602_init);
diff --git a/sound/soc/blackfin/bf5xx-tdm-pcm.c b/sound/soc/blackfin/bf5xx-tdm-pcm.c
new file mode 100644
index 000000000000..ccb5e823bd18
--- /dev/null
+++ b/sound/soc/blackfin/bf5xx-tdm-pcm.c
@@ -0,0 +1,330 @@
1/*
2 * File: sound/soc/blackfin/bf5xx-tdm-pcm.c
3 * Author: Barry Song <Barry.Song@analog.com>
4 *
5 * Created: Tue June 06 2009
6 * Description: DMA driver for tdm codec
7 *
8 * Modified:
9 * Copyright 2009 Analog Devices Inc.
10 *
11 * Bugs: Enter bugs at http://blackfin.uclinux.org/
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, see the file COPYING, or write
25 * to the Free Software Foundation, Inc.,
26 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
27 */
28
29#include <linux/module.h>
30#include <linux/init.h>
31#include <linux/platform_device.h>
32#include <linux/slab.h>
33#include <linux/dma-mapping.h>
34
35#include <sound/core.h>
36#include <sound/pcm.h>
37#include <sound/pcm_params.h>
38#include <sound/soc.h>
39
40#include <asm/dma.h>
41
42#include "bf5xx-tdm-pcm.h"
43#include "bf5xx-tdm.h"
44#include "bf5xx-sport.h"
45
46#define PCM_BUFFER_MAX 0x10000
47#define FRAGMENT_SIZE_MIN (4*1024)
48#define FRAGMENTS_MIN 2
49#define FRAGMENTS_MAX 32
50
51static void bf5xx_dma_irq(void *data)
52{
53 struct snd_pcm_substream *pcm = data;
54 snd_pcm_period_elapsed(pcm);
55}
56
57static const struct snd_pcm_hardware bf5xx_pcm_hardware = {
58 .info = (SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER |
59 SNDRV_PCM_INFO_RESUME),
60 .formats = SNDRV_PCM_FMTBIT_S32_LE,
61 .rates = SNDRV_PCM_RATE_48000,
62 .channels_min = 2,
63 .channels_max = 8,
64 .buffer_bytes_max = PCM_BUFFER_MAX,
65 .period_bytes_min = FRAGMENT_SIZE_MIN,
66 .period_bytes_max = PCM_BUFFER_MAX/2,
67 .periods_min = FRAGMENTS_MIN,
68 .periods_max = FRAGMENTS_MAX,
69};
70
71static int bf5xx_pcm_hw_params(struct snd_pcm_substream *substream,
72 struct snd_pcm_hw_params *params)
73{
74 size_t size = bf5xx_pcm_hardware.buffer_bytes_max;
75 snd_pcm_lib_malloc_pages(substream, size * 4);
76
77 return 0;
78}
79
80static int bf5xx_pcm_hw_free(struct snd_pcm_substream *substream)
81{
82 snd_pcm_lib_free_pages(substream);
83
84 return 0;
85}
86
87static int bf5xx_pcm_prepare(struct snd_pcm_substream *substream)
88{
89 struct snd_pcm_runtime *runtime = substream->runtime;
90 struct sport_device *sport = runtime->private_data;
91 int fragsize_bytes = frames_to_bytes(runtime, runtime->period_size);
92
93 fragsize_bytes /= runtime->channels;
94 /* inflate the fragsize to match the dma width of SPORT */
95 fragsize_bytes *= 8;
96
97 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
98 sport_set_tx_callback(sport, bf5xx_dma_irq, substream);
99 sport_config_tx_dma(sport, runtime->dma_area,
100 runtime->periods, fragsize_bytes);
101 } else {
102 sport_set_rx_callback(sport, bf5xx_dma_irq, substream);
103 sport_config_rx_dma(sport, runtime->dma_area,
104 runtime->periods, fragsize_bytes);
105 }
106
107 return 0;
108}
109
110static int bf5xx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
111{
112 struct snd_pcm_runtime *runtime = substream->runtime;
113 struct sport_device *sport = runtime->private_data;
114 int ret = 0;
115
116 switch (cmd) {
117 case SNDRV_PCM_TRIGGER_START:
118 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
119 sport_tx_start(sport);
120 else
121 sport_rx_start(sport);
122 break;
123 case SNDRV_PCM_TRIGGER_STOP:
124 case SNDRV_PCM_TRIGGER_SUSPEND:
125 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
126 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
127 sport_tx_stop(sport);
128 else
129 sport_rx_stop(sport);
130 break;
131 default:
132 ret = -EINVAL;
133 }
134
135 return ret;
136}
137
138static snd_pcm_uframes_t bf5xx_pcm_pointer(struct snd_pcm_substream *substream)
139{
140 struct snd_pcm_runtime *runtime = substream->runtime;
141 struct sport_device *sport = runtime->private_data;
142 unsigned int diff;
143 snd_pcm_uframes_t frames;
144
145 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
146 diff = sport_curr_offset_tx(sport);
147 frames = diff / (8*4); /* 32 bytes per frame */
148 } else {
149 diff = sport_curr_offset_rx(sport);
150 frames = diff / (8*4);
151 }
152 return frames;
153}
154
155static int bf5xx_pcm_open(struct snd_pcm_substream *substream)
156{
157 struct snd_pcm_runtime *runtime = substream->runtime;
158 int ret = 0;
159
160 snd_soc_set_runtime_hwparams(substream, &bf5xx_pcm_hardware);
161
162 ret = snd_pcm_hw_constraint_integer(runtime,
163 SNDRV_PCM_HW_PARAM_PERIODS);
164 if (ret < 0)
165 goto out;
166
167 if (sport_handle != NULL)
168 runtime->private_data = sport_handle;
169 else {
170 pr_err("sport_handle is NULL\n");
171 ret = -ENODEV;
172 }
173out:
174 return ret;
175}
176
177static int bf5xx_pcm_copy(struct snd_pcm_substream *substream, int channel,
178 snd_pcm_uframes_t pos, void *buf, snd_pcm_uframes_t count)
179{
180 unsigned int *src;
181 unsigned int *dst;
182 int i;
183
184 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
185 src = buf;
186 dst = (unsigned int *)substream->runtime->dma_area;
187
188 dst += pos * 8;
189 while (count--) {
190 for (i = 0; i < substream->runtime->channels; i++)
191 *(dst + i) = *src++;
192 dst += 8;
193 }
194 } else {
195 src = (unsigned int *)substream->runtime->dma_area;
196 dst = buf;
197
198 src += pos * 8;
199 while (count--) {
200 for (i = 0; i < substream->runtime->channels; i++)
201 *dst++ = *(src+i);
202 src += 8;
203 }
204 }
205
206 return 0;
207}
208
209static int bf5xx_pcm_silence(struct snd_pcm_substream *substream,
210 int channel, snd_pcm_uframes_t pos, snd_pcm_uframes_t count)
211{
212 unsigned char *buf = substream->runtime->dma_area;
213 buf += pos * 8 * 4;
214 memset(buf, '\0', count * 8 * 4);
215
216 return 0;
217}
218
219
220struct snd_pcm_ops bf5xx_pcm_tdm_ops = {
221 .open = bf5xx_pcm_open,
222 .ioctl = snd_pcm_lib_ioctl,
223 .hw_params = bf5xx_pcm_hw_params,
224 .hw_free = bf5xx_pcm_hw_free,
225 .prepare = bf5xx_pcm_prepare,
226 .trigger = bf5xx_pcm_trigger,
227 .pointer = bf5xx_pcm_pointer,
228 .copy = bf5xx_pcm_copy,
229 .silence = bf5xx_pcm_silence,
230};
231
232static int bf5xx_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream)
233{
234 struct snd_pcm_substream *substream = pcm->streams[stream].substream;
235 struct snd_dma_buffer *buf = &substream->dma_buffer;
236 size_t size = bf5xx_pcm_hardware.buffer_bytes_max;
237
238 buf->dev.type = SNDRV_DMA_TYPE_DEV;
239 buf->dev.dev = pcm->card->dev;
240 buf->private_data = NULL;
241 buf->area = dma_alloc_coherent(pcm->card->dev, size * 4,
242 &buf->addr, GFP_KERNEL);
243 if (!buf->area) {
244 pr_err("Failed to allocate dma memory \
245 Please increase uncached DMA memory region\n");
246 return -ENOMEM;
247 }
248 buf->bytes = size;
249
250 if (stream == SNDRV_PCM_STREAM_PLAYBACK)
251 sport_handle->tx_buf = buf->area;
252 else
253 sport_handle->rx_buf = buf->area;
254
255 return 0;
256}
257
258static void bf5xx_pcm_free_dma_buffers(struct snd_pcm *pcm)
259{
260 struct snd_pcm_substream *substream;
261 struct snd_dma_buffer *buf;
262 int stream;
263
264 for (stream = 0; stream < 2; stream++) {
265 substream = pcm->streams[stream].substream;
266 if (!substream)
267 continue;
268
269 buf = &substream->dma_buffer;
270 if (!buf->area)
271 continue;
272 dma_free_coherent(NULL, buf->bytes, buf->area, 0);
273 buf->area = NULL;
274 }
275 if (sport_handle)
276 sport_done(sport_handle);
277}
278
279static u64 bf5xx_pcm_dmamask = DMA_BIT_MASK(32);
280
281static int bf5xx_pcm_tdm_new(struct snd_card *card, struct snd_soc_dai *dai,
282 struct snd_pcm *pcm)
283{
284 int ret = 0;
285
286 if (!card->dev->dma_mask)
287 card->dev->dma_mask = &bf5xx_pcm_dmamask;
288 if (!card->dev->coherent_dma_mask)
289 card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
290
291 if (dai->playback.channels_min) {
292 ret = bf5xx_pcm_preallocate_dma_buffer(pcm,
293 SNDRV_PCM_STREAM_PLAYBACK);
294 if (ret)
295 goto out;
296 }
297
298 if (dai->capture.channels_min) {
299 ret = bf5xx_pcm_preallocate_dma_buffer(pcm,
300 SNDRV_PCM_STREAM_CAPTURE);
301 if (ret)
302 goto out;
303 }
304out:
305 return ret;
306}
307
308struct snd_soc_platform bf5xx_tdm_soc_platform = {
309 .name = "bf5xx-audio",
310 .pcm_ops = &bf5xx_pcm_tdm_ops,
311 .pcm_new = bf5xx_pcm_tdm_new,
312 .pcm_free = bf5xx_pcm_free_dma_buffers,
313};
314EXPORT_SYMBOL_GPL(bf5xx_tdm_soc_platform);
315
316static int __init bfin_pcm_tdm_init(void)
317{
318 return snd_soc_register_platform(&bf5xx_tdm_soc_platform);
319}
320module_init(bfin_pcm_tdm_init);
321
322static void __exit bfin_pcm_tdm_exit(void)
323{
324 snd_soc_unregister_platform(&bf5xx_tdm_soc_platform);
325}
326module_exit(bfin_pcm_tdm_exit);
327
328MODULE_AUTHOR("Barry Song");
329MODULE_DESCRIPTION("ADI Blackfin TDM PCM DMA module");
330MODULE_LICENSE("GPL");
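
The PCM layer above always lays the DMA buffer out as fixed frames of 8 slots of 32-bit words (32 bytes per hardware frame), whatever the runtime channel count: .copy steps the buffer by 8 words per frame, .silence clears pos * 8 * 4 bytes, and .pointer converts the DMA byte offset to frames by dividing by 8 * 4. A small stand-alone illustration of that packing, not part of the patch; the "first N slots" assignment is simply what the copy loop does, not a statement about the codec's slot map.

#include <stdio.h>
#include <stdint.h>

#define SLOTS 8                                /* hardware slots per TDM frame */

static void pack_frames(const uint32_t *src, uint32_t *dma,
			unsigned channels, unsigned frames)
{
	for (unsigned f = 0; f < frames; f++) {
		for (unsigned c = 0; c < channels; c++)
			dma[f * SLOTS + c] = *src++;   /* used slots */
		for (unsigned c = channels; c < SLOTS; c++)
			dma[f * SLOTS + c] = 0;        /* unused slots stay silent */
	}
}

int main(void)
{
	uint32_t pcm[2 * 3] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 }; /* 3 stereo frames */
	uint32_t dma[SLOTS * 3];

	pack_frames(pcm, dma, 2, 3);

	/* a DMA offset of 64 bytes therefore means 64 / (SLOTS * 4) = 2 frames played */
	printf("64-byte offset = %u frames\n", 64 / (SLOTS * 4));
	printf("frame 1, slot 0 = 0x%x\n", dma[1 * SLOTS + 0]);       /* prints 0x33 */
	return 0;
}
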
diff --git a/sound/soc/blackfin/bf5xx-tdm-pcm.h b/sound/soc/blackfin/bf5xx-tdm-pcm.h
new file mode 100644
index 000000000000..ddc5047df88c
--- /dev/null
+++ b/sound/soc/blackfin/bf5xx-tdm-pcm.h
@@ -0,0 +1,21 @@
1/*
2 * sound/soc/blackfin/bf5xx-tdm-pcm.h -- ALSA PCM interface for the Blackfin
3 *
4 * Copyright 2009 Analog Devices Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#ifndef _BF5XX_TDM_PCM_H
12#define _BF5XX_TDM_PCM_H
13
14struct bf5xx_pcm_dma_params {
15 char *name; /* stream identifier */
16};
17
18/* platform data */
19extern struct snd_soc_platform bf5xx_tdm_soc_platform;
20
21#endif
diff --git a/sound/soc/blackfin/bf5xx-tdm.c b/sound/soc/blackfin/bf5xx-tdm.c
new file mode 100644
index 000000000000..3096badf09a5
--- /dev/null
+++ b/sound/soc/blackfin/bf5xx-tdm.c
@@ -0,0 +1,343 @@
1/*
2 * File: sound/soc/blackfin/bf5xx-tdm.c
3 * Author: Barry Song <Barry.Song@analog.com>
4 *
5 * Created: Thurs June 04 2009
6 * Description: Blackfin I2S(TDM) CPU DAI driver
7 * Even though TDM mode could be handled as part of the I2S DAI,
8 * the configuration and data flow differ so much that integrating
9 * I2S and TDM into one module would be very ugly.
10 *
11 * Modified:
12 * Copyright 2009 Analog Devices Inc.
13 *
14 * Bugs: Enter bugs at http://blackfin.uclinux.org/
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
20 *
21 * This program is distributed in the hope that it will be useful,
22 * but WITHOUT ANY WARRANTY; without even the implied warranty of
23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
24 * GNU General Public License for more details.
25 *
26 * You should have received a copy of the GNU General Public License
27 * along with this program; if not, see the file COPYING, or write
28 * to the Free Software Foundation, Inc.,
29 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
30 */
31
32#include <linux/init.h>
33#include <linux/module.h>
34#include <linux/device.h>
35#include <sound/core.h>
36#include <sound/pcm.h>
37#include <sound/pcm_params.h>
38#include <sound/initval.h>
39#include <sound/soc.h>
40
41#include <asm/irq.h>
42#include <asm/portmux.h>
43#include <linux/mutex.h>
44#include <linux/gpio.h>
45
46#include "bf5xx-sport.h"
47#include "bf5xx-tdm.h"
48
49struct bf5xx_tdm_port {
50 u16 tcr1;
51 u16 rcr1;
52 u16 tcr2;
53 u16 rcr2;
54 int configured;
55};
56
57static struct bf5xx_tdm_port bf5xx_tdm;
58static int sport_num = CONFIG_SND_BF5XX_SPORT_NUM;
59
60static struct sport_param sport_params[2] = {
61 {
62 .dma_rx_chan = CH_SPORT0_RX,
63 .dma_tx_chan = CH_SPORT0_TX,
64 .err_irq = IRQ_SPORT0_ERROR,
65 .regs = (struct sport_register *)SPORT0_TCR1,
66 },
67 {
68 .dma_rx_chan = CH_SPORT1_RX,
69 .dma_tx_chan = CH_SPORT1_TX,
70 .err_irq = IRQ_SPORT1_ERROR,
71 .regs = (struct sport_register *)SPORT1_TCR1,
72 }
73};
74
75/*
76 * Setting the TFS pin selector for SPORT 0 based on whether the selected
77 * port is F or G. If the port is F then no conflict should exist for the
78 * TFS. When Port G is selected and the EMAC is in use, there is a conflict
79 * between the PHY interrupt line and TFS. The current settings prevent the
80 * conflict by ignoring the TFS pin when Port G is selected. This allows the
81 * ssm2602 on Port G and the EMAC to be used concurrently.
82 */
83#ifdef CONFIG_BF527_SPORT0_PORTF
84#define LOCAL_SPORT0_TFS (P_SPORT0_TFS)
85#else
86#define LOCAL_SPORT0_TFS (0)
87#endif
88
89static u16 sport_req[][7] = { {P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
90 P_SPORT0_DRPRI, P_SPORT0_RSCLK, LOCAL_SPORT0_TFS, 0},
91 {P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS, P_SPORT1_DRPRI,
92 P_SPORT1_RSCLK, P_SPORT1_TFS, 0} };
93
94static int bf5xx_tdm_set_dai_fmt(struct snd_soc_dai *cpu_dai,
95 unsigned int fmt)
96{
97 int ret = 0;
98
99 /* interface format: support TDM, slave mode */
100 switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
101 case SND_SOC_DAIFMT_DSP_A:
102 break;
103 default:
104 printk(KERN_ERR "%s: Unknown DAI format type\n", __func__);
105 ret = -EINVAL;
106 break;
107 }
108
109 switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
110 case SND_SOC_DAIFMT_CBM_CFM:
111 break;
112 case SND_SOC_DAIFMT_CBS_CFS:
113 case SND_SOC_DAIFMT_CBM_CFS:
114 case SND_SOC_DAIFMT_CBS_CFM:
115 ret = -EINVAL;
116 break;
117 default:
118 printk(KERN_ERR "%s: Unknown DAI master type\n", __func__);
119 ret = -EINVAL;
120 break;
121 }
122
123 return ret;
124}
125
126static int bf5xx_tdm_hw_params(struct snd_pcm_substream *substream,
127 struct snd_pcm_hw_params *params,
128 struct snd_soc_dai *dai)
129{
130 int ret = 0;
131
132 bf5xx_tdm.tcr2 &= ~0x1f;
133 bf5xx_tdm.rcr2 &= ~0x1f;
134 switch (params_format(params)) {
135 case SNDRV_PCM_FORMAT_S32_LE:
136 bf5xx_tdm.tcr2 |= 31;
137 bf5xx_tdm.rcr2 |= 31;
138 sport_handle->wdsize = 4;
139 break;
140 /* at present, we only support 32bit transfer */
141 default:
142 pr_err("not supported PCM format yet\n");
143 return -EINVAL;
144 break;
145 }
146
147 if (!bf5xx_tdm.configured) {
148 /*
149 * TX and RX are not independent; they are enabled at the
150 * same time, even if only one side is running. So, we
151 * need to configure both of them at the time when the first
152 * stream is opened.
153 *
154 * CPU DAI: slave mode.
155 */
156 ret = sport_config_rx(sport_handle, bf5xx_tdm.rcr1,
157 bf5xx_tdm.rcr2, 0, 0);
158 if (ret) {
159 pr_err("SPORT is busy!\n");
160 return -EBUSY;
161 }
162
163 ret = sport_config_tx(sport_handle, bf5xx_tdm.tcr1,
164 bf5xx_tdm.tcr2, 0, 0);
165 if (ret) {
166 pr_err("SPORT is busy!\n");
167 return -EBUSY;
168 }
169
170 bf5xx_tdm.configured = 1;
171 }
172
173 return 0;
174}
175
176static void bf5xx_tdm_shutdown(struct snd_pcm_substream *substream,
177 struct snd_soc_dai *dai)
178{
179 /* No active stream, SPORT is allowed to be configured again. */
180 if (!dai->active)
181 bf5xx_tdm.configured = 0;
182}
183
184#ifdef CONFIG_PM
185static int bf5xx_tdm_suspend(struct snd_soc_dai *dai)
186{
187 struct sport_device *sport =
188 (struct sport_device *)dai->private_data;
189
190 if (!dai->active)
191 return 0;
192 if (dai->capture.active)
193 sport_rx_stop(sport);
194 if (dai->playback.active)
195 sport_tx_stop(sport);
196 return 0;
197}
198
199static int bf5xx_tdm_resume(struct snd_soc_dai *dai)
200{
201 int ret;
202 struct sport_device *sport =
203 (struct sport_device *)dai->private_data;
204
205 if (!dai->active)
206 return 0;
207
208 ret = sport_set_multichannel(sport, 8, 0xFF, 1);
209 if (ret) {
210 pr_err("SPORT is busy!\n");
211 ret = -EBUSY;
212 }
213
214 ret = sport_config_rx(sport, IRFS, 0x1F, 0, 0);
215 if (ret) {
216 pr_err("SPORT is busy!\n");
217 ret = -EBUSY;
218 }
219
220 ret = sport_config_tx(sport, ITFS, 0x1F, 0, 0);
221 if (ret) {
222 pr_err("SPORT is busy!\n");
223 ret = -EBUSY;
224 }
225
226 return 0;
227}
228
229#else
230#define bf5xx_tdm_suspend NULL
231#define bf5xx_tdm_resume NULL
232#endif
233
234static struct snd_soc_dai_ops bf5xx_tdm_dai_ops = {
235 .hw_params = bf5xx_tdm_hw_params,
236 .set_fmt = bf5xx_tdm_set_dai_fmt,
237 .shutdown = bf5xx_tdm_shutdown,
238};
239
240struct snd_soc_dai bf5xx_tdm_dai = {
241 .name = "bf5xx-tdm",
242 .id = 0,
243 .suspend = bf5xx_tdm_suspend,
244 .resume = bf5xx_tdm_resume,
245 .playback = {
246 .channels_min = 2,
247 .channels_max = 8,
248 .rates = SNDRV_PCM_RATE_48000,
249 .formats = SNDRV_PCM_FMTBIT_S32_LE,},
250 .capture = {
251 .channels_min = 2,
252 .channels_max = 8,
253 .rates = SNDRV_PCM_RATE_48000,
254 .formats = SNDRV_PCM_FMTBIT_S32_LE,},
255 .ops = &bf5xx_tdm_dai_ops,
256};
257EXPORT_SYMBOL_GPL(bf5xx_tdm_dai);
258
259static int __devinit bfin_tdm_probe(struct platform_device *pdev)
260{
261 int ret = 0;
262
263 if (peripheral_request_list(&sport_req[sport_num][0], "soc-audio")) {
264 pr_err("Requesting Peripherals failed\n");
265 return -EFAULT;
266 }
267
268 /* request DMA for SPORT */
269 sport_handle = sport_init(&sport_params[sport_num], 4, \
270 8 * sizeof(u32), NULL);
271 if (!sport_handle) {
272 peripheral_free_list(&sport_req[sport_num][0]);
273 return -ENODEV;
274 }
275
276 /* SPORT works in TDM mode */
277 ret = sport_set_multichannel(sport_handle, 8, 0xFF, 1);
278 if (ret) {
279 pr_err("SPORT is busy!\n");
280 ret = -EBUSY;
281 goto sport_config_err;
282 }
283
284 ret = sport_config_rx(sport_handle, IRFS, 0x1F, 0, 0);
285 if (ret) {
286 pr_err("SPORT is busy!\n");
287 ret = -EBUSY;
288 goto sport_config_err;
289 }
290
291 ret = sport_config_tx(sport_handle, ITFS, 0x1F, 0, 0);
292 if (ret) {
293 pr_err("SPORT is busy!\n");
294 ret = -EBUSY;
295 goto sport_config_err;
296 }
297
298 ret = snd_soc_register_dai(&bf5xx_tdm_dai);
299 if (ret) {
300 pr_err("Failed to register DAI: %d\n", ret);
301 goto sport_config_err;
302 }
303 return 0;
304
305sport_config_err:
306 peripheral_free_list(&sport_req[sport_num][0]);
307 return ret;
308}
309
310static int __devexit bfin_tdm_remove(struct platform_device *pdev)
311{
312 peripheral_free_list(&sport_req[sport_num][0]);
313 snd_soc_unregister_dai(&bf5xx_tdm_dai);
314
315 return 0;
316}
317
318static struct platform_driver bfin_tdm_driver = {
319 .probe = bfin_tdm_probe,
320 .remove = __devexit_p(bfin_tdm_remove),
321 .driver = {
322 .name = "bfin-tdm",
323 .owner = THIS_MODULE,
324 },
325};
326
327static int __init bfin_tdm_init(void)
328{
329 return platform_driver_register(&bfin_tdm_driver);
330}
331module_init(bfin_tdm_init);
332
333static void __exit bfin_tdm_exit(void)
334{
335 platform_driver_unregister(&bfin_tdm_driver);
336}
337module_exit(bfin_tdm_exit);
338
339/* Module information */
340MODULE_AUTHOR("Barry Song");
341MODULE_DESCRIPTION("TDM driver for ADI Blackfin");
342MODULE_LICENSE("GPL");
343
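
In bf5xx_tdm_hw_params() the driver masks the low five bits of TCR2/RCR2 and ORs in 31 for S32_LE, which is consistent with that field holding the serial word length minus one, and the probe/resume paths enable 8 TDM channels with sport_set_multichannel(..., 8, 0xFF, 1). A tiny illustration of both values, not part of the patch and not a register-map reference:

#include <stdio.h>

static unsigned wordlen_field(unsigned bits)
{
	return (bits - 1) & 0x1f;          /* 32-bit slot -> 31, as in hw_params */
}

static unsigned channel_mask(unsigned channels)
{
	return (1u << channels) - 1;       /* 8 channels -> 0xFF, as in probe/resume */
}

int main(void)
{
	printf("word length field for 32-bit slots: %u\n", wordlen_field(32));
	printf("channel enable mask for 8 slots: 0x%X\n", channel_mask(8));
	return 0;
}
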
diff --git a/sound/soc/blackfin/bf5xx-tdm.h b/sound/soc/blackfin/bf5xx-tdm.h
new file mode 100644
index 000000000000..618ec3d90cd4
--- /dev/null
+++ b/sound/soc/blackfin/bf5xx-tdm.h
@@ -0,0 +1,14 @@
1/*
2 * sound/soc/blackfin/bf5xx-tdm.h
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef _BF5XX_TDM_H
10#define _BF5XX_TDM_H
11
12extern struct snd_soc_dai bf5xx_tdm_dai;
13
14#endif
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index bbc97fd76648..0edca93af3b0 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -12,11 +12,15 @@ config SND_SOC_ALL_CODECS
12 tristate "Build all ASoC CODEC drivers" 12 tristate "Build all ASoC CODEC drivers"
13 select SND_SOC_L3 13 select SND_SOC_L3
14 select SND_SOC_AC97_CODEC if SND_SOC_AC97_BUS 14 select SND_SOC_AC97_CODEC if SND_SOC_AC97_BUS
15 select SND_SOC_AD1836 if SPI_MASTER
16 select SND_SOC_AD1938 if SPI_MASTER
15 select SND_SOC_AD1980 if SND_SOC_AC97_BUS 17 select SND_SOC_AD1980 if SND_SOC_AC97_BUS
16 select SND_SOC_AD73311 if I2C 18 select SND_SOC_AD73311 if I2C
17 select SND_SOC_AK4104 if SPI_MASTER 19 select SND_SOC_AK4104 if SPI_MASTER
18 select SND_SOC_AK4535 if I2C 20 select SND_SOC_AK4535 if I2C
21 select SND_SOC_AK4642 if I2C
19 select SND_SOC_CS4270 if I2C 22 select SND_SOC_CS4270 if I2C
23 select SND_SOC_MAX9877 if I2C
20 select SND_SOC_PCM3008 24 select SND_SOC_PCM3008
21 select SND_SOC_SPDIF 25 select SND_SOC_SPDIF
22 select SND_SOC_SSM2602 if I2C 26 select SND_SOC_SSM2602 if I2C
@@ -30,18 +34,23 @@ config SND_SOC_ALL_CODECS
30 select SND_SOC_WM8350 if MFD_WM8350 34 select SND_SOC_WM8350 if MFD_WM8350
31 select SND_SOC_WM8400 if MFD_WM8400 35 select SND_SOC_WM8400 if MFD_WM8400
32 select SND_SOC_WM8510 if SND_SOC_I2C_AND_SPI 36 select SND_SOC_WM8510 if SND_SOC_I2C_AND_SPI
37 select SND_SOC_WM8523 if I2C
33 select SND_SOC_WM8580 if I2C 38 select SND_SOC_WM8580 if I2C
34 select SND_SOC_WM8728 if SND_SOC_I2C_AND_SPI 39 select SND_SOC_WM8728 if SND_SOC_I2C_AND_SPI
35 select SND_SOC_WM8731 if SND_SOC_I2C_AND_SPI 40 select SND_SOC_WM8731 if SND_SOC_I2C_AND_SPI
36 select SND_SOC_WM8750 if SND_SOC_I2C_AND_SPI 41 select SND_SOC_WM8750 if SND_SOC_I2C_AND_SPI
37 select SND_SOC_WM8753 if SND_SOC_I2C_AND_SPI 42 select SND_SOC_WM8753 if SND_SOC_I2C_AND_SPI
43 select SND_SOC_WM8776 if SND_SOC_I2C_AND_SPI
38 select SND_SOC_WM8900 if I2C 44 select SND_SOC_WM8900 if I2C
39 select SND_SOC_WM8903 if I2C 45 select SND_SOC_WM8903 if I2C
40 select SND_SOC_WM8940 if I2C 46 select SND_SOC_WM8940 if I2C
41 select SND_SOC_WM8960 if I2C 47 select SND_SOC_WM8960 if I2C
48 select SND_SOC_WM8961 if I2C
42 select SND_SOC_WM8971 if I2C 49 select SND_SOC_WM8971 if I2C
50 select SND_SOC_WM8974 if I2C
43 select SND_SOC_WM8988 if SND_SOC_I2C_AND_SPI 51 select SND_SOC_WM8988 if SND_SOC_I2C_AND_SPI
44 select SND_SOC_WM8990 if I2C 52 select SND_SOC_WM8990 if I2C
53 select SND_SOC_WM8993 if I2C
45 select SND_SOC_WM9081 if I2C 54 select SND_SOC_WM9081 if I2C
46 select SND_SOC_WM9705 if SND_SOC_AC97_BUS 55 select SND_SOC_WM9705 if SND_SOC_AC97_BUS
47 select SND_SOC_WM9712 if SND_SOC_AC97_BUS 56 select SND_SOC_WM9712 if SND_SOC_AC97_BUS
@@ -57,11 +66,21 @@ config SND_SOC_ALL_CODECS
57 66
58 If unsure select "N". 67 If unsure select "N".
59 68
69config SND_SOC_WM_HUBS
70 tristate
71 default y if SND_SOC_WM8993=y
72 default m if SND_SOC_WM8993=m
60 73
61config SND_SOC_AC97_CODEC 74config SND_SOC_AC97_CODEC
62 tristate 75 tristate
63 select SND_AC97_CODEC 76 select SND_AC97_CODEC
64 77
78config SND_SOC_AD1836
79 tristate
80
81config SND_SOC_AD1938
82 tristate
83
65config SND_SOC_AD1980 84config SND_SOC_AD1980
66 tristate 85 tristate
67 86
@@ -74,6 +93,9 @@ config SND_SOC_AK4104
74config SND_SOC_AK4535 93config SND_SOC_AK4535
75 tristate 94 tristate
76 95
96config SND_SOC_AK4642
97 tristate
98
77# Cirrus Logic CS4270 Codec 99# Cirrus Logic CS4270 Codec
78config SND_SOC_CS4270 100config SND_SOC_CS4270
79 tristate 101 tristate
@@ -86,6 +108,9 @@ config SND_SOC_CS4270_VD33_ERRATA
86 bool 108 bool
87 depends on SND_SOC_CS4270 109 depends on SND_SOC_CS4270
88 110
111config SND_SOC_CX20442
112 tristate
113
89config SND_SOC_L3 114config SND_SOC_L3
90 tristate 115 tristate
91 116
@@ -129,6 +154,9 @@ config SND_SOC_WM8400
129config SND_SOC_WM8510 154config SND_SOC_WM8510
130 tristate 155 tristate
131 156
157config SND_SOC_WM8523
158 tristate
159
132config SND_SOC_WM8580 160config SND_SOC_WM8580
133 tristate 161 tristate
134 162
@@ -144,6 +172,9 @@ config SND_SOC_WM8750
144config SND_SOC_WM8753 172config SND_SOC_WM8753
145 tristate 173 tristate
146 174
175config SND_SOC_WM8776
176 tristate
177
147config SND_SOC_WM8900 178config SND_SOC_WM8900
148 tristate 179 tristate
149 180
@@ -156,15 +187,24 @@ config SND_SOC_WM8940
156config SND_SOC_WM8960 187config SND_SOC_WM8960
157 tristate 188 tristate
158 189
190config SND_SOC_WM8961
191 tristate
192
159config SND_SOC_WM8971 193config SND_SOC_WM8971
160 tristate 194 tristate
161 195
196config SND_SOC_WM8974
197 tristate
198
162config SND_SOC_WM8988 199config SND_SOC_WM8988
163 tristate 200 tristate
164 201
165config SND_SOC_WM8990 202config SND_SOC_WM8990
166 tristate 203 tristate
167 204
205config SND_SOC_WM8993
206 tristate
207
168config SND_SOC_WM9081 208config SND_SOC_WM9081
169 tristate 209 tristate
170 210
@@ -176,3 +216,7 @@ config SND_SOC_WM9712
176 216
177config SND_SOC_WM9713 217config SND_SOC_WM9713
178 tristate 218 tristate
219
220# Amp
221config SND_SOC_MAX9877
222 tristate
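The two new Analog Devices entries above are only pulled in by SND_SOC_ALL_CODECS when SPI_MASTER is enabled, since both codecs are controlled over SPI. A config fragment that builds them for coverage might look like the sketch below; the exact option values are illustrative and not part of this change:

    CONFIG_SPI=y
    CONFIG_SPI_MASTER=y
    CONFIG_SND_SOC=m
    CONFIG_SND_SOC_ALL_CODECS=m
    # SND_SOC_AD1836 and SND_SOC_AD1938 are then selected automatically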
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index 8b7530546f4d..fb4af28486ba 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -1,9 +1,13 @@
1snd-soc-ac97-objs := ac97.o 1snd-soc-ac97-objs := ac97.o
2snd-soc-ad1836-objs := ad1836.o
3snd-soc-ad1938-objs := ad1938.o
2snd-soc-ad1980-objs := ad1980.o 4snd-soc-ad1980-objs := ad1980.o
3snd-soc-ad73311-objs := ad73311.o 5snd-soc-ad73311-objs := ad73311.o
4snd-soc-ak4104-objs := ak4104.o 6snd-soc-ak4104-objs := ak4104.o
5snd-soc-ak4535-objs := ak4535.o 7snd-soc-ak4535-objs := ak4535.o
8snd-soc-ak4642-objs := ak4642.o
6snd-soc-cs4270-objs := cs4270.o 9snd-soc-cs4270-objs := cs4270.o
10snd-soc-cx20442-objs := cx20442.o
7snd-soc-l3-objs := l3.o 11snd-soc-l3-objs := l3.o
8snd-soc-pcm3008-objs := pcm3008.o 12snd-soc-pcm3008-objs := pcm3008.o
9snd-soc-spdif-objs := spdif_transciever.o 13snd-soc-spdif-objs := spdif_transciever.o
@@ -18,29 +22,42 @@ snd-soc-uda1380-objs := uda1380.o
18snd-soc-wm8350-objs := wm8350.o 22snd-soc-wm8350-objs := wm8350.o
19snd-soc-wm8400-objs := wm8400.o 23snd-soc-wm8400-objs := wm8400.o
20snd-soc-wm8510-objs := wm8510.o 24snd-soc-wm8510-objs := wm8510.o
25snd-soc-wm8523-objs := wm8523.o
21snd-soc-wm8580-objs := wm8580.o 26snd-soc-wm8580-objs := wm8580.o
22snd-soc-wm8728-objs := wm8728.o 27snd-soc-wm8728-objs := wm8728.o
23snd-soc-wm8731-objs := wm8731.o 28snd-soc-wm8731-objs := wm8731.o
24snd-soc-wm8750-objs := wm8750.o 29snd-soc-wm8750-objs := wm8750.o
25snd-soc-wm8753-objs := wm8753.o 30snd-soc-wm8753-objs := wm8753.o
31snd-soc-wm8776-objs := wm8776.o
26snd-soc-wm8900-objs := wm8900.o 32snd-soc-wm8900-objs := wm8900.o
27snd-soc-wm8903-objs := wm8903.o 33snd-soc-wm8903-objs := wm8903.o
28snd-soc-wm8940-objs := wm8940.o 34snd-soc-wm8940-objs := wm8940.o
29snd-soc-wm8960-objs := wm8960.o 35snd-soc-wm8960-objs := wm8960.o
36snd-soc-wm8961-objs := wm8961.o
30snd-soc-wm8971-objs := wm8971.o 37snd-soc-wm8971-objs := wm8971.o
38snd-soc-wm8974-objs := wm8974.o
31snd-soc-wm8988-objs := wm8988.o 39snd-soc-wm8988-objs := wm8988.o
32snd-soc-wm8990-objs := wm8990.o 40snd-soc-wm8990-objs := wm8990.o
41snd-soc-wm8993-objs := wm8993.o
33snd-soc-wm9081-objs := wm9081.o 42snd-soc-wm9081-objs := wm9081.o
34snd-soc-wm9705-objs := wm9705.o 43snd-soc-wm9705-objs := wm9705.o
35snd-soc-wm9712-objs := wm9712.o 44snd-soc-wm9712-objs := wm9712.o
36snd-soc-wm9713-objs := wm9713.o 45snd-soc-wm9713-objs := wm9713.o
46snd-soc-wm-hubs-objs := wm_hubs.o
47
48# Amp
49snd-soc-max9877-objs := max9877.o
37 50
38obj-$(CONFIG_SND_SOC_AC97_CODEC) += snd-soc-ac97.o 51obj-$(CONFIG_SND_SOC_AC97_CODEC) += snd-soc-ac97.o
52obj-$(CONFIG_SND_SOC_AD1836) += snd-soc-ad1836.o
53obj-$(CONFIG_SND_SOC_AD1938) += snd-soc-ad1938.o
39obj-$(CONFIG_SND_SOC_AD1980) += snd-soc-ad1980.o 54obj-$(CONFIG_SND_SOC_AD1980) += snd-soc-ad1980.o
40obj-$(CONFIG_SND_SOC_AD73311) += snd-soc-ad73311.o 55obj-$(CONFIG_SND_SOC_AD73311) += snd-soc-ad73311.o
41obj-$(CONFIG_SND_SOC_AK4104) += snd-soc-ak4104.o 56obj-$(CONFIG_SND_SOC_AK4104) += snd-soc-ak4104.o
42obj-$(CONFIG_SND_SOC_AK4535) += snd-soc-ak4535.o 57obj-$(CONFIG_SND_SOC_AK4535) += snd-soc-ak4535.o
58obj-$(CONFIG_SND_SOC_AK4642) += snd-soc-ak4642.o
43obj-$(CONFIG_SND_SOC_CS4270) += snd-soc-cs4270.o 59obj-$(CONFIG_SND_SOC_CS4270) += snd-soc-cs4270.o
60obj-$(CONFIG_SND_SOC_CX20442) += snd-soc-cx20442.o
44obj-$(CONFIG_SND_SOC_L3) += snd-soc-l3.o 61obj-$(CONFIG_SND_SOC_L3) += snd-soc-l3.o
45obj-$(CONFIG_SND_SOC_PCM3008) += snd-soc-pcm3008.o 62obj-$(CONFIG_SND_SOC_PCM3008) += snd-soc-pcm3008.o
46obj-$(CONFIG_SND_SOC_SPDIF) += snd-soc-spdif.o 63obj-$(CONFIG_SND_SOC_SPDIF) += snd-soc-spdif.o
@@ -55,19 +72,28 @@ obj-$(CONFIG_SND_SOC_UDA1380) += snd-soc-uda1380.o
55obj-$(CONFIG_SND_SOC_WM8350) += snd-soc-wm8350.o 72obj-$(CONFIG_SND_SOC_WM8350) += snd-soc-wm8350.o
56obj-$(CONFIG_SND_SOC_WM8400) += snd-soc-wm8400.o 73obj-$(CONFIG_SND_SOC_WM8400) += snd-soc-wm8400.o
57obj-$(CONFIG_SND_SOC_WM8510) += snd-soc-wm8510.o 74obj-$(CONFIG_SND_SOC_WM8510) += snd-soc-wm8510.o
75obj-$(CONFIG_SND_SOC_WM8523) += snd-soc-wm8523.o
58obj-$(CONFIG_SND_SOC_WM8580) += snd-soc-wm8580.o 76obj-$(CONFIG_SND_SOC_WM8580) += snd-soc-wm8580.o
59obj-$(CONFIG_SND_SOC_WM8728) += snd-soc-wm8728.o 77obj-$(CONFIG_SND_SOC_WM8728) += snd-soc-wm8728.o
60obj-$(CONFIG_SND_SOC_WM8731) += snd-soc-wm8731.o 78obj-$(CONFIG_SND_SOC_WM8731) += snd-soc-wm8731.o
61obj-$(CONFIG_SND_SOC_WM8750) += snd-soc-wm8750.o 79obj-$(CONFIG_SND_SOC_WM8750) += snd-soc-wm8750.o
62obj-$(CONFIG_SND_SOC_WM8753) += snd-soc-wm8753.o 80obj-$(CONFIG_SND_SOC_WM8753) += snd-soc-wm8753.o
81obj-$(CONFIG_SND_SOC_WM8776) += snd-soc-wm8776.o
63obj-$(CONFIG_SND_SOC_WM8900) += snd-soc-wm8900.o 82obj-$(CONFIG_SND_SOC_WM8900) += snd-soc-wm8900.o
64obj-$(CONFIG_SND_SOC_WM8903) += snd-soc-wm8903.o 83obj-$(CONFIG_SND_SOC_WM8903) += snd-soc-wm8903.o
65obj-$(CONFIG_SND_SOC_WM8971) += snd-soc-wm8971.o 84obj-$(CONFIG_SND_SOC_WM8971) += snd-soc-wm8971.o
85obj-$(CONFIG_SND_SOC_WM8974) += snd-soc-wm8974.o
66obj-$(CONFIG_SND_SOC_WM8940) += snd-soc-wm8940.o 86obj-$(CONFIG_SND_SOC_WM8940) += snd-soc-wm8940.o
67obj-$(CONFIG_SND_SOC_WM8960) += snd-soc-wm8960.o 87obj-$(CONFIG_SND_SOC_WM8960) += snd-soc-wm8960.o
88obj-$(CONFIG_SND_SOC_WM8961) += snd-soc-wm8961.o
68obj-$(CONFIG_SND_SOC_WM8988) += snd-soc-wm8988.o 89obj-$(CONFIG_SND_SOC_WM8988) += snd-soc-wm8988.o
69obj-$(CONFIG_SND_SOC_WM8990) += snd-soc-wm8990.o 90obj-$(CONFIG_SND_SOC_WM8990) += snd-soc-wm8990.o
91obj-$(CONFIG_SND_SOC_WM8993) += snd-soc-wm8993.o
70obj-$(CONFIG_SND_SOC_WM9081) += snd-soc-wm9081.o 92obj-$(CONFIG_SND_SOC_WM9081) += snd-soc-wm9081.o
71obj-$(CONFIG_SND_SOC_WM9705) += snd-soc-wm9705.o 93obj-$(CONFIG_SND_SOC_WM9705) += snd-soc-wm9705.o
72obj-$(CONFIG_SND_SOC_WM9712) += snd-soc-wm9712.o 94obj-$(CONFIG_SND_SOC_WM9712) += snd-soc-wm9712.o
73obj-$(CONFIG_SND_SOC_WM9713) += snd-soc-wm9713.o 95obj-$(CONFIG_SND_SOC_WM9713) += snd-soc-wm9713.o
96obj-$(CONFIG_SND_SOC_WM_HUBS) += snd-soc-wm-hubs.o
97
98# Amp
99obj-$(CONFIG_SND_SOC_MAX9877) += snd-soc-max9877.o
diff --git a/sound/soc/codecs/ad1836.c b/sound/soc/codecs/ad1836.c
new file mode 100644
index 000000000000..3612bb92df90
--- /dev/null
+++ b/sound/soc/codecs/ad1836.c
@@ -0,0 +1,446 @@
1/*
2 * File: sound/soc/codecs/ad1836.c
3 * Author: Barry Song <Barry.Song@analog.com>
4 *
5 * Created: Aug 04 2009
6 * Description: Driver for AD1836 sound chip
7 *
8 * Modified:
9 * Copyright 2009 Analog Devices Inc.
10 *
11 * Bugs: Enter bugs at http://blackfin.uclinux.org/
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 */
18
19#include <linux/init.h>
20#include <linux/module.h>
21#include <linux/version.h>
22#include <linux/kernel.h>
23#include <linux/device.h>
24#include <sound/core.h>
25#include <sound/pcm.h>
26#include <sound/pcm_params.h>
27#include <sound/initval.h>
28#include <sound/soc.h>
29#include <sound/tlv.h>
30#include <sound/soc-dapm.h>
31#include <linux/spi/spi.h>
32#include "ad1836.h"
33
34/* codec private data */
35struct ad1836_priv {
36 struct snd_soc_codec codec;
37 u16 reg_cache[AD1836_NUM_REGS];
38};
39
40static struct snd_soc_codec *ad1836_codec;
41struct snd_soc_codec_device soc_codec_dev_ad1836;
42static int ad1836_register(struct ad1836_priv *ad1836);
43static void ad1836_unregister(struct ad1836_priv *ad1836);
44
45/*
46 * AD1836 volume/mute/de-emphasis etc. controls
47 */
48static const char *ad1836_deemp[] = {"None", "44.1kHz", "32kHz", "48kHz"};
49
50static const struct soc_enum ad1836_deemp_enum =
51 SOC_ENUM_SINGLE(AD1836_DAC_CTRL1, 8, 4, ad1836_deemp);
52
53static const struct snd_kcontrol_new ad1836_snd_controls[] = {
54 /* DAC volume control */
55 SOC_DOUBLE_R("DAC1 Volume", AD1836_DAC_L1_VOL,
56 AD1836_DAC_R1_VOL, 0, 0x3FF, 0),
57 SOC_DOUBLE_R("DAC2 Volume", AD1836_DAC_L2_VOL,
58 AD1836_DAC_R2_VOL, 0, 0x3FF, 0),
59 SOC_DOUBLE_R("DAC3 Volume", AD1836_DAC_L3_VOL,
60 AD1836_DAC_R3_VOL, 0, 0x3FF, 0),
61
62 /* ADC switch control */
63 SOC_DOUBLE("ADC1 Switch", AD1836_ADC_CTRL2, AD1836_ADCL1_MUTE,
64 AD1836_ADCR1_MUTE, 1, 1),
65 SOC_DOUBLE("ADC2 Switch", AD1836_ADC_CTRL2, AD1836_ADCL2_MUTE,
66 AD1836_ADCR2_MUTE, 1, 1),
67
68 /* DAC switch control */
69 SOC_DOUBLE("DAC1 Switch", AD1836_DAC_CTRL2, AD1836_DACL1_MUTE,
70 AD1836_DACR1_MUTE, 1, 1),
71 SOC_DOUBLE("DAC2 Switch", AD1836_DAC_CTRL2, AD1836_DACL2_MUTE,
72 AD1836_DACR2_MUTE, 1, 1),
73 SOC_DOUBLE("DAC3 Switch", AD1836_DAC_CTRL2, AD1836_DACL3_MUTE,
74 AD1836_DACR3_MUTE, 1, 1),
75
76 /* ADC high-pass filter */
77 SOC_SINGLE("ADC High Pass Filter Switch", AD1836_ADC_CTRL1,
78 AD1836_ADC_HIGHPASS_FILTER, 1, 0),
79
80 /* DAC de-emphasis */
81 SOC_ENUM("Playback Deemphasis", ad1836_deemp_enum),
82};
83
84static const struct snd_soc_dapm_widget ad1836_dapm_widgets[] = {
85 SND_SOC_DAPM_DAC("DAC", "Playback", AD1836_DAC_CTRL1,
86 AD1836_DAC_POWERDOWN, 1),
87 SND_SOC_DAPM_ADC("ADC", "Capture", SND_SOC_NOPM, 0, 0),
88 SND_SOC_DAPM_SUPPLY("ADC_PWR", AD1836_ADC_CTRL1,
89 AD1836_ADC_POWERDOWN, 1, NULL, 0),
90 SND_SOC_DAPM_OUTPUT("DAC1OUT"),
91 SND_SOC_DAPM_OUTPUT("DAC2OUT"),
92 SND_SOC_DAPM_OUTPUT("DAC3OUT"),
93 SND_SOC_DAPM_INPUT("ADC1IN"),
94 SND_SOC_DAPM_INPUT("ADC2IN"),
95};
96
97static const struct snd_soc_dapm_route audio_paths[] = {
98 { "DAC", NULL, "ADC_PWR" },
99 { "ADC", NULL, "ADC_PWR" },
100 { "DAC1OUT", "DAC1 Switch", "DAC" },
101 { "DAC2OUT", "DAC2 Switch", "DAC" },
102 { "DAC3OUT", "DAC3 Switch", "DAC" },
103 { "ADC", "ADC1 Switch", "ADC1IN" },
104 { "ADC", "ADC2 Switch", "ADC2IN" },
105};
106
107/*
108 * DAI ops entries
109 */
110
111static int ad1836_set_dai_fmt(struct snd_soc_dai *codec_dai,
112 unsigned int fmt)
113{
114 switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
115 /* at present, we support adc aux mode to interface with
116 * blackfin sport tdm mode
117 */
118 case SND_SOC_DAIFMT_DSP_A:
119 break;
120 default:
121 return -EINVAL;
122 }
123
124 switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
125 case SND_SOC_DAIFMT_IB_IF:
126 break;
127 default:
128 return -EINVAL;
129 }
130
131 switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
132 /* ALCLK,ABCLK are both output, AD1836 can only be master */
133 case SND_SOC_DAIFMT_CBM_CFM:
134 break;
135 default:
136 return -EINVAL;
137 }
138
139 return 0;
140}
141
142static int ad1836_hw_params(struct snd_pcm_substream *substream,
143 struct snd_pcm_hw_params *params,
144 struct snd_soc_dai *dai)
145{
146 int word_len = 0;
147
148 struct snd_soc_pcm_runtime *rtd = substream->private_data;
149 struct snd_soc_device *socdev = rtd->socdev;
150 struct snd_soc_codec *codec = socdev->card->codec;
151
152 /* bit size */
153 switch (params_format(params)) {
154 case SNDRV_PCM_FORMAT_S16_LE:
155 word_len = 3;
156 break;
157 case SNDRV_PCM_FORMAT_S20_3LE:
158 word_len = 1;
159 break;
160 case SNDRV_PCM_FORMAT_S24_LE:
161 case SNDRV_PCM_FORMAT_S32_LE:
162 word_len = 0;
163 break;
164 }
165
166 snd_soc_update_bits(codec, AD1836_DAC_CTRL1,
167 AD1836_DAC_WORD_LEN_MASK, word_len);
168
169 snd_soc_update_bits(codec, AD1836_ADC_CTRL2,
170 AD1836_ADC_WORD_LEN_MASK, word_len);
171
172 return 0;
173}
174
175
176/*
177 * interface to read/write ad1836 register
178 */
179#define AD1836_SPI_REG_SHFT 12
180#define AD1836_SPI_READ (1 << 11)
181#define AD1836_SPI_VAL_MSK 0x3FF
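/*
 * Each AD1836 control transfer is a single 16-bit word: bits [15:12]
 * carry the register address, bit 11 is the read flag (AD1836_SPI_READ),
 * and bits [9:0] hold the register value (AD1836_SPI_VAL_MSK).
 */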
182
183/*
184 * write to the ad1836 register space
185 */
186
187static int ad1836_write_reg(struct snd_soc_codec *codec, unsigned int reg,
188 unsigned int value)
189{
190 u16 *reg_cache = codec->reg_cache;
191 int ret = 0;
192
193 if (value != reg_cache[reg]) {
194 unsigned short buf;
195 struct spi_transfer t = {
196 .tx_buf = &buf,
197 .len = 2,
198 };
199 struct spi_message m;
200
201 buf = (reg << AD1836_SPI_REG_SHFT) |
202 (value & AD1836_SPI_VAL_MSK);
203 spi_message_init(&m);
204 spi_message_add_tail(&t, &m);
205 ret = spi_sync(codec->control_data, &m);
206 if (ret == 0)
207 reg_cache[reg] = value;
208 }
209
210 return ret;
211}
212
213/*
214 * read from the ad1836 register space cache
215 */
216static unsigned int ad1836_read_reg_cache(struct snd_soc_codec *codec,
217 unsigned int reg)
218{
219 u16 *reg_cache = codec->reg_cache;
220
221 if (reg >= codec->reg_cache_size)
222 return -EINVAL;
223
224 return reg_cache[reg];
225}
226
227static int __devinit ad1836_spi_probe(struct spi_device *spi)
228{
229 struct snd_soc_codec *codec;
230 struct ad1836_priv *ad1836;
231
232 ad1836 = kzalloc(sizeof(struct ad1836_priv), GFP_KERNEL);
233 if (ad1836 == NULL)
234 return -ENOMEM;
235
236 codec = &ad1836->codec;
237 codec->control_data = spi;
238 codec->dev = &spi->dev;
239
240 dev_set_drvdata(&spi->dev, ad1836);
241
242 return ad1836_register(ad1836);
243}
244
245static int __devexit ad1836_spi_remove(struct spi_device *spi)
246{
247 struct ad1836_priv *ad1836 = dev_get_drvdata(&spi->dev);
248
249 ad1836_unregister(ad1836);
250 return 0;
251}
252
253static struct spi_driver ad1836_spi_driver = {
254 .driver = {
255 .name = "ad1836-spi",
256 .bus = &spi_bus_type,
257 .owner = THIS_MODULE,
258 },
259 .probe = ad1836_spi_probe,
260 .remove = __devexit_p(ad1836_spi_remove),
261};
262
263static struct snd_soc_dai_ops ad1836_dai_ops = {
264 .hw_params = ad1836_hw_params,
265 .set_fmt = ad1836_set_dai_fmt,
266};
267
268/* codec DAI instance */
269struct snd_soc_dai ad1836_dai = {
270 .name = "AD1836",
271 .playback = {
272 .stream_name = "Playback",
273 .channels_min = 2,
274 .channels_max = 6,
275 .rates = SNDRV_PCM_RATE_48000,
276 .formats = SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S16_LE |
277 SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S24_LE,
278 },
279 .capture = {
280 .stream_name = "Capture",
281 .channels_min = 2,
282 .channels_max = 4,
283 .rates = SNDRV_PCM_RATE_48000,
284 .formats = SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S16_LE |
285 SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S24_LE,
286 },
287 .ops = &ad1836_dai_ops,
288};
289EXPORT_SYMBOL_GPL(ad1836_dai);
290
291static int ad1836_register(struct ad1836_priv *ad1836)
292{
293 int ret;
294 struct snd_soc_codec *codec = &ad1836->codec;
295
296 if (ad1836_codec) {
297 dev_err(codec->dev, "Another ad1836 is registered\n");
298 return -EINVAL;
299 }
300
301 mutex_init(&codec->mutex);
302 INIT_LIST_HEAD(&codec->dapm_widgets);
303 INIT_LIST_HEAD(&codec->dapm_paths);
304 codec->private_data = ad1836;
305 codec->reg_cache = ad1836->reg_cache;
306 codec->reg_cache_size = AD1836_NUM_REGS;
307 codec->name = "AD1836";
308 codec->owner = THIS_MODULE;
309 codec->dai = &ad1836_dai;
310 codec->num_dai = 1;
311 codec->write = ad1836_write_reg;
312 codec->read = ad1836_read_reg_cache;
313 INIT_LIST_HEAD(&codec->dapm_widgets);
314 INIT_LIST_HEAD(&codec->dapm_paths);
315
316 ad1836_dai.dev = codec->dev;
317 ad1836_codec = codec;
318
319 /* default setting for ad1836 */
320 /* de-emphasis: 48kHz, power-on dac */
321 codec->write(codec, AD1836_DAC_CTRL1, 0x300);
322 /* unmute dac channels */
323 codec->write(codec, AD1836_DAC_CTRL2, 0x0);
324 /* high-pass filter enable, power-on adc */
325 codec->write(codec, AD1836_ADC_CTRL1, 0x100);
 326	/* unmute adc channels, adc aux mode */
327 codec->write(codec, AD1836_ADC_CTRL2, 0x180);
328 /* left/right diff:PGA/MUX */
329 codec->write(codec, AD1836_ADC_CTRL3, 0x3A);
330 /* volume */
331 codec->write(codec, AD1836_DAC_L1_VOL, 0x3FF);
332 codec->write(codec, AD1836_DAC_R1_VOL, 0x3FF);
333 codec->write(codec, AD1836_DAC_L2_VOL, 0x3FF);
334 codec->write(codec, AD1836_DAC_R2_VOL, 0x3FF);
335 codec->write(codec, AD1836_DAC_L3_VOL, 0x3FF);
336 codec->write(codec, AD1836_DAC_R3_VOL, 0x3FF);
337
338 ret = snd_soc_register_codec(codec);
339 if (ret != 0) {
340 dev_err(codec->dev, "Failed to register codec: %d\n", ret);
341 kfree(ad1836);
342 return ret;
343 }
344
345 ret = snd_soc_register_dai(&ad1836_dai);
346 if (ret != 0) {
347 dev_err(codec->dev, "Failed to register DAI: %d\n", ret);
348 snd_soc_unregister_codec(codec);
349 kfree(ad1836);
350 return ret;
351 }
352
353 return 0;
354}
355
356static void ad1836_unregister(struct ad1836_priv *ad1836)
357{
358 snd_soc_unregister_dai(&ad1836_dai);
359 snd_soc_unregister_codec(&ad1836->codec);
360 kfree(ad1836);
361 ad1836_codec = NULL;
362}
363
364static int ad1836_probe(struct platform_device *pdev)
365{
366 struct snd_soc_device *socdev = platform_get_drvdata(pdev);
367 struct snd_soc_codec *codec;
368 int ret = 0;
369
370 if (ad1836_codec == NULL) {
371 dev_err(&pdev->dev, "Codec device not registered\n");
372 return -ENODEV;
373 }
374
375 socdev->card->codec = ad1836_codec;
376 codec = ad1836_codec;
377
378 /* register pcms */
379 ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
380 if (ret < 0) {
381 dev_err(codec->dev, "failed to create pcms: %d\n", ret);
382 goto pcm_err;
383 }
384
385 snd_soc_add_controls(codec, ad1836_snd_controls,
386 ARRAY_SIZE(ad1836_snd_controls));
387 snd_soc_dapm_new_controls(codec, ad1836_dapm_widgets,
388 ARRAY_SIZE(ad1836_dapm_widgets));
389 snd_soc_dapm_add_routes(codec, audio_paths, ARRAY_SIZE(audio_paths));
390 snd_soc_dapm_new_widgets(codec);
391
392 ret = snd_soc_init_card(socdev);
393 if (ret < 0) {
394 dev_err(codec->dev, "failed to register card: %d\n", ret);
395 goto card_err;
396 }
397
398 return ret;
399
400card_err:
401 snd_soc_free_pcms(socdev);
402 snd_soc_dapm_free(socdev);
403pcm_err:
404 return ret;
405}
406
407/* power down chip */
408static int ad1836_remove(struct platform_device *pdev)
409{
410 struct snd_soc_device *socdev = platform_get_drvdata(pdev);
411
412 snd_soc_free_pcms(socdev);
413 snd_soc_dapm_free(socdev);
414
415 return 0;
416}
417
418struct snd_soc_codec_device soc_codec_dev_ad1836 = {
419 .probe = ad1836_probe,
420 .remove = ad1836_remove,
421};
422EXPORT_SYMBOL_GPL(soc_codec_dev_ad1836);
423
424static int __init ad1836_init(void)
425{
426 int ret;
427
428 ret = spi_register_driver(&ad1836_spi_driver);
429 if (ret != 0) {
430 printk(KERN_ERR "Failed to register ad1836 SPI driver: %d\n",
431 ret);
432 }
433
434 return ret;
435}
436module_init(ad1836_init);
437
438static void __exit ad1836_exit(void)
439{
440 spi_unregister_driver(&ad1836_spi_driver);
441}
442module_exit(ad1836_exit);
443
444MODULE_DESCRIPTION("ASoC ad1836 driver");
445MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
446MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/ad1836.h b/sound/soc/codecs/ad1836.h
new file mode 100644
index 000000000000..7660ee6973c0
--- /dev/null
+++ b/sound/soc/codecs/ad1836.h
@@ -0,0 +1,64 @@
1/*
2 * File: sound/soc/codecs/ad1836.h
3 * Based on:
4 * Author: Barry Song <Barry.Song@analog.com>
5 *
6 * Created: Aug 04, 2009
7 * Description: definitions for AD1836 registers
8 *
9 * Modified:
10 *
11 * Bugs: Enter bugs at http://blackfin.uclinux.org/
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 */
18
19#ifndef __AD1836_H__
20#define __AD1836_H__
21
22#define AD1836_DAC_CTRL1 0
23#define AD1836_DAC_POWERDOWN 2
24#define AD1836_DAC_SERFMT_MASK 0xE0
25#define AD1836_DAC_SERFMT_PCK256 (0x4 << 5)
26#define AD1836_DAC_SERFMT_PCK128 (0x5 << 5)
27#define AD1836_DAC_WORD_LEN_MASK 0x18
28
29#define AD1836_DAC_CTRL2 1
30#define AD1836_DACL1_MUTE 0
31#define AD1836_DACR1_MUTE 1
32#define AD1836_DACL2_MUTE 2
33#define AD1836_DACR2_MUTE 3
34#define AD1836_DACL3_MUTE 4
35#define AD1836_DACR3_MUTE 5
36
37#define AD1836_DAC_L1_VOL 2
38#define AD1836_DAC_R1_VOL 3
39#define AD1836_DAC_L2_VOL 4
40#define AD1836_DAC_R2_VOL 5
41#define AD1836_DAC_L3_VOL 6
42#define AD1836_DAC_R3_VOL 7
43
44#define AD1836_ADC_CTRL1 12
45#define AD1836_ADC_POWERDOWN 7
46#define AD1836_ADC_HIGHPASS_FILTER 8
47
48#define AD1836_ADC_CTRL2 13
49#define AD1836_ADCL1_MUTE 0
50#define AD1836_ADCR1_MUTE 1
51#define AD1836_ADCL2_MUTE 2
52#define AD1836_ADCR2_MUTE 3
53#define AD1836_ADC_WORD_LEN_MASK 0x30
54#define AD1836_ADC_SERFMT_MASK (7 << 6)
55#define AD1836_ADC_SERFMT_PCK256 (0x4 << 6)
56#define AD1836_ADC_SERFMT_PCK128 (0x5 << 6)
57
58#define AD1836_ADC_CTRL3 14
59
60#define AD1836_NUM_REGS 16
61
62extern struct snd_soc_dai ad1836_dai;
63extern struct snd_soc_codec_device soc_codec_dev_ad1836;
64#endif
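For context, here is a minimal sketch of how a Blackfin machine driver could tie the new AD1836 codec DAI to the TDM CPU DAI added earlier in this series. It is not part of this patch; the platform symbol bf5xx_tdm_soc_platform, the header that exports it, and the link/card names are assumptions made for illustration:

    #include <sound/core.h>
    #include <sound/soc.h>
    #include "../codecs/ad1836.h"
    #include "bf5xx-tdm-pcm.h"	/* hypothetical header exporting bf5xx_tdm_soc_platform */
    #include "bf5xx-tdm.h"

    static struct snd_soc_dai_link bf5xx_ad1836_dai = {
    	.name = "ad1836",
    	.stream_name = "AD1836",
    	.cpu_dai = &bf5xx_tdm_dai,		/* exported by bf5xx-tdm.h above */
    	.codec_dai = &ad1836_dai,		/* exported by ad1836.h above */
    };

    static struct snd_soc_card bf5xx_ad1836_card = {
    	.name = "bf5xx_ad1836",
    	.platform = &bf5xx_tdm_soc_platform,	/* assumed PCM platform symbol */
    	.dai_link = &bf5xx_ad1836_dai,
    	.num_links = 1,
    };

    static struct snd_soc_device bf5xx_ad1836_snd_devdata = {
    	.card = &bf5xx_ad1836_card,
    	.codec_dev = &soc_codec_dev_ad1836,	/* exported by ad1836.h above */
    };

Registration would then go through the usual "soc-audio" platform device, which is omitted here.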
diff --git a/sound/soc/codecs/ad1938.c b/sound/soc/codecs/ad1938.c
new file mode 100644
index 000000000000..e62b27701a49
--- /dev/null
+++ b/sound/soc/codecs/ad1938.c
@@ -0,0 +1,682 @@
1/*
2 * File: sound/soc/codecs/ad1938.c
3 * Author: Barry Song <Barry.Song@analog.com>
4 *
5 * Created: June 04 2009
6 * Description: Driver for AD1938 sound chip
7 *
8 * Modified:
9 * Copyright 2009 Analog Devices Inc.
10 *
11 * Bugs: Enter bugs at http://blackfin.uclinux.org/
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, see the file COPYING, or write
25 * to the Free Software Foundation, Inc.,
26 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
27 */
28
29#include <linux/init.h>
30#include <linux/module.h>
31#include <linux/version.h>
32#include <linux/kernel.h>
33#include <linux/device.h>
34#include <sound/core.h>
35#include <sound/pcm.h>
36#include <sound/pcm_params.h>
37#include <sound/initval.h>
38#include <sound/soc.h>
39#include <sound/tlv.h>
40#include <sound/soc-dapm.h>
41#include <linux/spi/spi.h>
42#include "ad1938.h"
43
44/* codec private data */
45struct ad1938_priv {
46 struct snd_soc_codec codec;
47 u8 reg_cache[AD1938_NUM_REGS];
48};
49
50static struct snd_soc_codec *ad1938_codec;
51struct snd_soc_codec_device soc_codec_dev_ad1938;
52static int ad1938_register(struct ad1938_priv *ad1938);
53static void ad1938_unregister(struct ad1938_priv *ad1938);
54
55/*
56 * AD1938 volume/mute/de-emphasis etc. controls
57 */
58static const char *ad1938_deemp[] = {"None", "48kHz", "44.1kHz", "32kHz"};
59
60static const struct soc_enum ad1938_deemp_enum =
61 SOC_ENUM_SINGLE(AD1938_DAC_CTRL2, 1, 4, ad1938_deemp);
62
63static const struct snd_kcontrol_new ad1938_snd_controls[] = {
64 /* DAC volume control */
65 SOC_DOUBLE_R("DAC1 Volume", AD1938_DAC_L1_VOL,
66 AD1938_DAC_R1_VOL, 0, 0xFF, 1),
67 SOC_DOUBLE_R("DAC2 Volume", AD1938_DAC_L2_VOL,
68 AD1938_DAC_R2_VOL, 0, 0xFF, 1),
69 SOC_DOUBLE_R("DAC3 Volume", AD1938_DAC_L3_VOL,
70 AD1938_DAC_R3_VOL, 0, 0xFF, 1),
71 SOC_DOUBLE_R("DAC4 Volume", AD1938_DAC_L4_VOL,
72 AD1938_DAC_R4_VOL, 0, 0xFF, 1),
73
74 /* ADC switch control */
75 SOC_DOUBLE("ADC1 Switch", AD1938_ADC_CTRL0, AD1938_ADCL1_MUTE,
76 AD1938_ADCR1_MUTE, 1, 1),
77 SOC_DOUBLE("ADC2 Switch", AD1938_ADC_CTRL0, AD1938_ADCL2_MUTE,
78 AD1938_ADCR2_MUTE, 1, 1),
79
80 /* DAC switch control */
81 SOC_DOUBLE("DAC1 Switch", AD1938_DAC_CHNL_MUTE, AD1938_DACL1_MUTE,
82 AD1938_DACR1_MUTE, 1, 1),
83 SOC_DOUBLE("DAC2 Switch", AD1938_DAC_CHNL_MUTE, AD1938_DACL2_MUTE,
84 AD1938_DACR2_MUTE, 1, 1),
85 SOC_DOUBLE("DAC3 Switch", AD1938_DAC_CHNL_MUTE, AD1938_DACL3_MUTE,
86 AD1938_DACR3_MUTE, 1, 1),
87 SOC_DOUBLE("DAC4 Switch", AD1938_DAC_CHNL_MUTE, AD1938_DACL4_MUTE,
88 AD1938_DACR4_MUTE, 1, 1),
89
90 /* ADC high-pass filter */
91 SOC_SINGLE("ADC High Pass Filter Switch", AD1938_ADC_CTRL0,
92 AD1938_ADC_HIGHPASS_FILTER, 1, 0),
93
94 /* DAC de-emphasis */
95 SOC_ENUM("Playback Deemphasis", ad1938_deemp_enum),
96};
97
98static const struct snd_soc_dapm_widget ad1938_dapm_widgets[] = {
99 SND_SOC_DAPM_DAC("DAC", "Playback", AD1938_DAC_CTRL0, 0, 1),
100 SND_SOC_DAPM_ADC("ADC", "Capture", SND_SOC_NOPM, 0, 0),
101 SND_SOC_DAPM_SUPPLY("ADC_PWR", AD1938_ADC_CTRL0, 0, 1, NULL, 0),
102 SND_SOC_DAPM_OUTPUT("DAC1OUT"),
103 SND_SOC_DAPM_OUTPUT("DAC2OUT"),
104 SND_SOC_DAPM_OUTPUT("DAC3OUT"),
105 SND_SOC_DAPM_OUTPUT("DAC4OUT"),
106 SND_SOC_DAPM_INPUT("ADC1IN"),
107 SND_SOC_DAPM_INPUT("ADC2IN"),
108};
109
110static const struct snd_soc_dapm_route audio_paths[] = {
111 { "DAC", NULL, "ADC_PWR" },
112 { "ADC", NULL, "ADC_PWR" },
113 { "DAC1OUT", "DAC1 Switch", "DAC" },
114 { "DAC2OUT", "DAC2 Switch", "DAC" },
115 { "DAC3OUT", "DAC3 Switch", "DAC" },
116 { "DAC4OUT", "DAC4 Switch", "DAC" },
117 { "ADC", "ADC1 Switch", "ADC1IN" },
118 { "ADC", "ADC2 Switch", "ADC2IN" },
119};
120
121/*
122 * DAI ops entries
123 */
124
125static int ad1938_mute(struct snd_soc_dai *dai, int mute)
126{
127 struct snd_soc_codec *codec = dai->codec;
128 int reg;
129
130 reg = codec->read(codec, AD1938_DAC_CTRL2);
131 reg = (mute > 0) ? reg | AD1938_DAC_MASTER_MUTE : reg &
132 (~AD1938_DAC_MASTER_MUTE);
133 codec->write(codec, AD1938_DAC_CTRL2, reg);
134
135 return 0;
136}
137
138static inline int ad1938_pll_powerctrl(struct snd_soc_codec *codec, int cmd)
139{
140 int reg = codec->read(codec, AD1938_PLL_CLK_CTRL0);
141 reg = (cmd > 0) ? reg & (~AD1938_PLL_POWERDOWN) : reg |
142 AD1938_PLL_POWERDOWN;
143 codec->write(codec, AD1938_PLL_CLK_CTRL0, reg);
144
145 return 0;
146}
147
148static int ad1938_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
149 unsigned int mask, int slots, int width)
150{
151 struct snd_soc_codec *codec = dai->codec;
152 int dac_reg = codec->read(codec, AD1938_DAC_CTRL1);
153 int adc_reg = codec->read(codec, AD1938_ADC_CTRL2);
154
155 dac_reg &= ~AD1938_DAC_CHAN_MASK;
156 adc_reg &= ~AD1938_ADC_CHAN_MASK;
157
158 switch (slots) {
159 case 2:
160 dac_reg |= AD1938_DAC_2_CHANNELS << AD1938_DAC_CHAN_SHFT;
161 adc_reg |= AD1938_ADC_2_CHANNELS << AD1938_ADC_CHAN_SHFT;
162 break;
163 case 4:
164 dac_reg |= AD1938_DAC_4_CHANNELS << AD1938_DAC_CHAN_SHFT;
165 adc_reg |= AD1938_ADC_4_CHANNELS << AD1938_ADC_CHAN_SHFT;
166 break;
167 case 8:
168 dac_reg |= AD1938_DAC_8_CHANNELS << AD1938_DAC_CHAN_SHFT;
169 adc_reg |= AD1938_ADC_8_CHANNELS << AD1938_ADC_CHAN_SHFT;
170 break;
171 case 16:
172 dac_reg |= AD1938_DAC_16_CHANNELS << AD1938_DAC_CHAN_SHFT;
173 adc_reg |= AD1938_ADC_16_CHANNELS << AD1938_ADC_CHAN_SHFT;
174 break;
175 default:
176 return -EINVAL;
177 }
178
179 codec->write(codec, AD1938_DAC_CTRL1, dac_reg);
180 codec->write(codec, AD1938_ADC_CTRL2, adc_reg);
181
182 return 0;
183}
184
185static int ad1938_set_dai_fmt(struct snd_soc_dai *codec_dai,
186 unsigned int fmt)
187{
188 struct snd_soc_codec *codec = codec_dai->codec;
189 int adc_reg, dac_reg;
190
191 adc_reg = codec->read(codec, AD1938_ADC_CTRL2);
192 dac_reg = codec->read(codec, AD1938_DAC_CTRL1);
193
 194	/* At present, the driver only supports AUX ADC mode (SND_SOC_DAIFMT_I2S
 195	 * with TDM) and ADC & DAC TDM mode (SND_SOC_DAIFMT_DSP_A)
196 */
197 switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
198 case SND_SOC_DAIFMT_I2S:
199 adc_reg &= ~AD1938_ADC_SERFMT_MASK;
200 adc_reg |= AD1938_ADC_SERFMT_TDM;
201 break;
202 case SND_SOC_DAIFMT_DSP_A:
203 adc_reg &= ~AD1938_ADC_SERFMT_MASK;
204 adc_reg |= AD1938_ADC_SERFMT_AUX;
205 break;
206 default:
207 return -EINVAL;
208 }
209
210 switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
211 case SND_SOC_DAIFMT_NB_NF: /* normal bit clock + frame */
212 adc_reg &= ~AD1938_ADC_LEFT_HIGH;
213 adc_reg &= ~AD1938_ADC_BCLK_INV;
214 dac_reg &= ~AD1938_DAC_LEFT_HIGH;
215 dac_reg &= ~AD1938_DAC_BCLK_INV;
216 break;
217 case SND_SOC_DAIFMT_NB_IF: /* normal bclk + invert frm */
218 adc_reg |= AD1938_ADC_LEFT_HIGH;
219 adc_reg &= ~AD1938_ADC_BCLK_INV;
220 dac_reg |= AD1938_DAC_LEFT_HIGH;
221 dac_reg &= ~AD1938_DAC_BCLK_INV;
222 break;
223 case SND_SOC_DAIFMT_IB_NF: /* invert bclk + normal frm */
224 adc_reg &= ~AD1938_ADC_LEFT_HIGH;
225 adc_reg |= AD1938_ADC_BCLK_INV;
226 dac_reg &= ~AD1938_DAC_LEFT_HIGH;
227 dac_reg |= AD1938_DAC_BCLK_INV;
228 break;
229
230 case SND_SOC_DAIFMT_IB_IF: /* invert bclk + frm */
231 adc_reg |= AD1938_ADC_LEFT_HIGH;
232 adc_reg |= AD1938_ADC_BCLK_INV;
233 dac_reg |= AD1938_DAC_LEFT_HIGH;
234 dac_reg |= AD1938_DAC_BCLK_INV;
235 break;
236 default:
237 return -EINVAL;
238 }
239
240 switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
241 case SND_SOC_DAIFMT_CBM_CFM: /* codec clk & frm master */
242 adc_reg |= AD1938_ADC_LCR_MASTER;
243 adc_reg |= AD1938_ADC_BCLK_MASTER;
244 dac_reg |= AD1938_DAC_LCR_MASTER;
245 dac_reg |= AD1938_DAC_BCLK_MASTER;
246 break;
247 case SND_SOC_DAIFMT_CBS_CFM: /* codec clk slave & frm master */
248 adc_reg |= AD1938_ADC_LCR_MASTER;
249 adc_reg &= ~AD1938_ADC_BCLK_MASTER;
250 dac_reg |= AD1938_DAC_LCR_MASTER;
251 dac_reg &= ~AD1938_DAC_BCLK_MASTER;
252 break;
253 case SND_SOC_DAIFMT_CBM_CFS: /* codec clk master & frame slave */
254 adc_reg &= ~AD1938_ADC_LCR_MASTER;
255 adc_reg |= AD1938_ADC_BCLK_MASTER;
256 dac_reg &= ~AD1938_DAC_LCR_MASTER;
257 dac_reg |= AD1938_DAC_BCLK_MASTER;
258 break;
259 case SND_SOC_DAIFMT_CBS_CFS: /* codec clk & frm slave */
260 adc_reg &= ~AD1938_ADC_LCR_MASTER;
261 adc_reg &= ~AD1938_ADC_BCLK_MASTER;
262 dac_reg &= ~AD1938_DAC_LCR_MASTER;
263 dac_reg &= ~AD1938_DAC_BCLK_MASTER;
264 break;
265 default:
266 return -EINVAL;
267 }
268
269 codec->write(codec, AD1938_ADC_CTRL2, adc_reg);
270 codec->write(codec, AD1938_DAC_CTRL1, dac_reg);
271
272 return 0;
273}
274
275static int ad1938_hw_params(struct snd_pcm_substream *substream,
276 struct snd_pcm_hw_params *params,
277 struct snd_soc_dai *dai)
278{
279 int word_len = 0, reg = 0;
280
281 struct snd_soc_pcm_runtime *rtd = substream->private_data;
282 struct snd_soc_device *socdev = rtd->socdev;
283 struct snd_soc_codec *codec = socdev->card->codec;
284
285 /* bit size */
286 switch (params_format(params)) {
287 case SNDRV_PCM_FORMAT_S16_LE:
288 word_len = 3;
289 break;
290 case SNDRV_PCM_FORMAT_S20_3LE:
291 word_len = 1;
292 break;
293 case SNDRV_PCM_FORMAT_S24_LE:
294 case SNDRV_PCM_FORMAT_S32_LE:
295 word_len = 0;
296 break;
297 }
298
299 reg = codec->read(codec, AD1938_DAC_CTRL2);
300 reg = (reg & (~AD1938_DAC_WORD_LEN_MASK)) | word_len;
301 codec->write(codec, AD1938_DAC_CTRL2, reg);
302
303 reg = codec->read(codec, AD1938_ADC_CTRL1);
304 reg = (reg & (~AD1938_ADC_WORD_LEN_MASK)) | word_len;
305 codec->write(codec, AD1938_ADC_CTRL1, reg);
306
307 return 0;
308}
309
310static int ad1938_set_bias_level(struct snd_soc_codec *codec,
311 enum snd_soc_bias_level level)
312{
313 switch (level) {
314 case SND_SOC_BIAS_ON:
315 ad1938_pll_powerctrl(codec, 1);
316 break;
317 case SND_SOC_BIAS_PREPARE:
318 break;
319 case SND_SOC_BIAS_STANDBY:
320 case SND_SOC_BIAS_OFF:
321 ad1938_pll_powerctrl(codec, 0);
322 break;
323 }
324 codec->bias_level = level;
325 return 0;
326}
327
328/*
329 * interface to read/write ad1938 register
330 */
331
332#define AD1938_SPI_ADDR 0x4
333#define AD1938_SPI_READ 0x1
334#define AD1938_SPI_BUFLEN 3
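/*
 * Each AD1938 control transfer is three bytes long: byte 0 carries the
 * global chip address (AD1938_SPI_ADDR) shifted left by one with the
 * read/write flag in bit 0, byte 1 the register address, and byte 2 the
 * register value (returned in the third receive byte on reads).
 */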
335
336/*
337 * write to the ad1938 register space
338 */
339
340static int ad1938_write_reg(struct snd_soc_codec *codec, unsigned int reg,
341 unsigned int value)
342{
343 u8 *reg_cache = codec->reg_cache;
344 int ret = 0;
345
346 if (value != reg_cache[reg]) {
347 uint8_t buf[AD1938_SPI_BUFLEN];
348 struct spi_transfer t = {
349 .tx_buf = buf,
350 .len = AD1938_SPI_BUFLEN,
351 };
352 struct spi_message m;
353
354 buf[0] = AD1938_SPI_ADDR << 1;
355 buf[1] = reg;
356 buf[2] = value;
357 spi_message_init(&m);
358 spi_message_add_tail(&t, &m);
359 ret = spi_sync(codec->control_data, &m);
360 if (ret == 0)
361 reg_cache[reg] = value;
362 }
363
364 return ret;
365}
366
367/*
368 * read from the ad1938 register space cache
369 */
370
371static unsigned int ad1938_read_reg_cache(struct snd_soc_codec *codec,
372 unsigned int reg)
373{
374 u8 *reg_cache = codec->reg_cache;
375
376 if (reg >= codec->reg_cache_size)
377 return -EINVAL;
378
379 return reg_cache[reg];
380}
381
382/*
383 * read from the ad1938 register space
384 */
385
386static unsigned int ad1938_read_reg(struct snd_soc_codec *codec,
387 unsigned int reg)
388{
389 char w_buf[AD1938_SPI_BUFLEN];
390 char r_buf[AD1938_SPI_BUFLEN];
391 int ret;
392
393 struct spi_transfer t = {
394 .tx_buf = w_buf,
395 .rx_buf = r_buf,
396 .len = AD1938_SPI_BUFLEN,
397 };
398 struct spi_message m;
399
400 w_buf[0] = (AD1938_SPI_ADDR << 1) | AD1938_SPI_READ;
401 w_buf[1] = reg;
402 w_buf[2] = 0;
403
404 spi_message_init(&m);
405 spi_message_add_tail(&t, &m);
406 ret = spi_sync(codec->control_data, &m);
407 if (ret == 0)
408 return r_buf[2];
409 else
410 return -EIO;
411}
412
413static int ad1938_fill_cache(struct snd_soc_codec *codec)
414{
415 int i;
416 u8 *reg_cache = codec->reg_cache;
417 struct spi_device *spi = codec->control_data;
418
419 for (i = 0; i < codec->reg_cache_size; i++) {
420 int ret = ad1938_read_reg(codec, i);
421 if (ret == -EIO) {
422 dev_err(&spi->dev, "AD1938 SPI read failure\n");
423 return ret;
424 }
425 reg_cache[i] = ret;
426 }
427
428 return 0;
429}
430
431static int __devinit ad1938_spi_probe(struct spi_device *spi)
432{
433 struct snd_soc_codec *codec;
434 struct ad1938_priv *ad1938;
435
436 ad1938 = kzalloc(sizeof(struct ad1938_priv), GFP_KERNEL);
437 if (ad1938 == NULL)
438 return -ENOMEM;
439
440 codec = &ad1938->codec;
441 codec->control_data = spi;
442 codec->dev = &spi->dev;
443
444 dev_set_drvdata(&spi->dev, ad1938);
445
446 return ad1938_register(ad1938);
447}
448
449static int __devexit ad1938_spi_remove(struct spi_device *spi)
450{
451 struct ad1938_priv *ad1938 = dev_get_drvdata(&spi->dev);
452
453 ad1938_unregister(ad1938);
454 return 0;
455}
456
457static struct spi_driver ad1938_spi_driver = {
458 .driver = {
459 .name = "ad1938",
460 .bus = &spi_bus_type,
461 .owner = THIS_MODULE,
462 },
463 .probe = ad1938_spi_probe,
464 .remove = __devexit_p(ad1938_spi_remove),
465};
466
467static struct snd_soc_dai_ops ad1938_dai_ops = {
468 .hw_params = ad1938_hw_params,
469 .digital_mute = ad1938_mute,
470 .set_tdm_slot = ad1938_set_tdm_slot,
471 .set_fmt = ad1938_set_dai_fmt,
472};
473
474/* codec DAI instance */
475struct snd_soc_dai ad1938_dai = {
476 .name = "AD1938",
477 .playback = {
478 .stream_name = "Playback",
479 .channels_min = 2,
480 .channels_max = 8,
481 .rates = SNDRV_PCM_RATE_48000,
482 .formats = SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S16_LE |
483 SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S24_LE,
484 },
485 .capture = {
486 .stream_name = "Capture",
487 .channels_min = 2,
488 .channels_max = 4,
489 .rates = SNDRV_PCM_RATE_48000,
490 .formats = SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S16_LE |
491 SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S24_LE,
492 },
493 .ops = &ad1938_dai_ops,
494};
495EXPORT_SYMBOL_GPL(ad1938_dai);
496
497static int ad1938_register(struct ad1938_priv *ad1938)
498{
499 int ret;
500 struct snd_soc_codec *codec = &ad1938->codec;
501
502 if (ad1938_codec) {
503 dev_err(codec->dev, "Another ad1938 is registered\n");
504 return -EINVAL;
505 }
506
507 mutex_init(&codec->mutex);
508 INIT_LIST_HEAD(&codec->dapm_widgets);
509 INIT_LIST_HEAD(&codec->dapm_paths);
510 codec->private_data = ad1938;
511 codec->reg_cache = ad1938->reg_cache;
512 codec->reg_cache_size = AD1938_NUM_REGS;
513 codec->name = "AD1938";
514 codec->owner = THIS_MODULE;
515 codec->dai = &ad1938_dai;
516 codec->num_dai = 1;
517 codec->write = ad1938_write_reg;
518 codec->read = ad1938_read_reg_cache;
519 INIT_LIST_HEAD(&codec->dapm_widgets);
520 INIT_LIST_HEAD(&codec->dapm_paths);
521
522 ad1938_dai.dev = codec->dev;
523 ad1938_codec = codec;
524
525 /* default setting for ad1938 */
526
527 /* unmute dac channels */
528 codec->write(codec, AD1938_DAC_CHNL_MUTE, 0x0);
 529	/* de-emphasis: 48kHz, powerdown dac */
530 codec->write(codec, AD1938_DAC_CTRL2, 0x1A);
531 /* powerdown dac, dac in tdm mode */
532 codec->write(codec, AD1938_DAC_CTRL0, 0x41);
533 /* high-pass filter enable */
534 codec->write(codec, AD1938_ADC_CTRL0, 0x3);
 535	/* sdata delay=1, adc aux mode */
536 codec->write(codec, AD1938_ADC_CTRL1, 0x43);
537 /* pll input: mclki/xi */
538 codec->write(codec, AD1938_PLL_CLK_CTRL0, 0x9D);
539 codec->write(codec, AD1938_PLL_CLK_CTRL1, 0x04);
540
541 ad1938_fill_cache(codec);
542
543 ret = snd_soc_register_codec(codec);
544 if (ret != 0) {
545 dev_err(codec->dev, "Failed to register codec: %d\n", ret);
546 kfree(ad1938);
547 return ret;
548 }
549
550 ret = snd_soc_register_dai(&ad1938_dai);
551 if (ret != 0) {
552 dev_err(codec->dev, "Failed to register DAI: %d\n", ret);
553 snd_soc_unregister_codec(codec);
554 kfree(ad1938);
555 return ret;
556 }
557
558 return 0;
559}
560
561static void ad1938_unregister(struct ad1938_priv *ad1938)
562{
563 ad1938_set_bias_level(&ad1938->codec, SND_SOC_BIAS_OFF);
564 snd_soc_unregister_dai(&ad1938_dai);
565 snd_soc_unregister_codec(&ad1938->codec);
566 kfree(ad1938);
567 ad1938_codec = NULL;
568}
569
570static int ad1938_probe(struct platform_device *pdev)
571{
572 struct snd_soc_device *socdev = platform_get_drvdata(pdev);
573 struct snd_soc_codec *codec;
574 int ret = 0;
575
576 if (ad1938_codec == NULL) {
577 dev_err(&pdev->dev, "Codec device not registered\n");
578 return -ENODEV;
579 }
580
581 socdev->card->codec = ad1938_codec;
582 codec = ad1938_codec;
583
584 /* register pcms */
585 ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
586 if (ret < 0) {
587 dev_err(codec->dev, "failed to create pcms: %d\n", ret);
588 goto pcm_err;
589 }
590
591 snd_soc_add_controls(codec, ad1938_snd_controls,
592 ARRAY_SIZE(ad1938_snd_controls));
593 snd_soc_dapm_new_controls(codec, ad1938_dapm_widgets,
594 ARRAY_SIZE(ad1938_dapm_widgets));
595 snd_soc_dapm_add_routes(codec, audio_paths, ARRAY_SIZE(audio_paths));
596 snd_soc_dapm_new_widgets(codec);
597
598 ad1938_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
599
600 ret = snd_soc_init_card(socdev);
601 if (ret < 0) {
602 dev_err(codec->dev, "failed to register card: %d\n", ret);
603 goto card_err;
604 }
605
606 return ret;
607
608card_err:
609 snd_soc_free_pcms(socdev);
610 snd_soc_dapm_free(socdev);
611pcm_err:
612 return ret;
613}
614
615/* power down chip */
616static int ad1938_remove(struct platform_device *pdev)
617{
618 struct snd_soc_device *socdev = platform_get_drvdata(pdev);
619
620 snd_soc_free_pcms(socdev);
621 snd_soc_dapm_free(socdev);
622
623 return 0;
624}
625
626#ifdef CONFIG_PM
627static int ad1938_suspend(struct platform_device *pdev,
628 pm_message_t state)
629{
630 struct snd_soc_device *socdev = platform_get_drvdata(pdev);
631 struct snd_soc_codec *codec = socdev->card->codec;
632
633 ad1938_set_bias_level(codec, SND_SOC_BIAS_OFF);
634 return 0;
635}
636
637static int ad1938_resume(struct platform_device *pdev)
638{
639 struct snd_soc_device *socdev = platform_get_drvdata(pdev);
640 struct snd_soc_codec *codec = socdev->card->codec;
641
642 if (codec->suspend_bias_level == SND_SOC_BIAS_ON)
643 ad1938_set_bias_level(codec, SND_SOC_BIAS_ON);
644
645 return 0;
646}
647#else
648#define ad1938_suspend NULL
649#define ad1938_resume NULL
650#endif
651
652struct snd_soc_codec_device soc_codec_dev_ad1938 = {
653 .probe = ad1938_probe,
654 .remove = ad1938_remove,
655 .suspend = ad1938_suspend,
656 .resume = ad1938_resume,
657};
658EXPORT_SYMBOL_GPL(soc_codec_dev_ad1938);
659
660static int __init ad1938_init(void)
661{
662 int ret;
663
664 ret = spi_register_driver(&ad1938_spi_driver);
665 if (ret != 0) {
666 printk(KERN_ERR "Failed to register ad1938 SPI driver: %d\n",
667 ret);
668 }
669
670 return ret;
671}
672module_init(ad1938_init);
673
674static void __exit ad1938_exit(void)
675{
676 spi_unregister_driver(&ad1938_spi_driver);
677}
678module_exit(ad1938_exit);
679
680MODULE_DESCRIPTION("ASoC ad1938 driver");
681MODULE_AUTHOR("Barry Song ");
682MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/ad1938.h b/sound/soc/codecs/ad1938.h
new file mode 100644
index 000000000000..fe3c48cd2d5b
--- /dev/null
+++ b/sound/soc/codecs/ad1938.h
@@ -0,0 +1,100 @@
1/*
 2 * File:         sound/soc/codecs/ad1938.h
3 * Based on:
4 * Author: Barry Song <Barry.Song@analog.com>
5 *
6 * Created: May 25, 2009
7 * Description: definitions for AD1938 registers
8 *
9 * Modified:
10 *
11 * Bugs: Enter bugs at http://blackfin.uclinux.org/
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, see the file COPYING, or write
25 * to the Free Software Foundation, Inc.,
26 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
27 */
28
29#ifndef __AD1938_H__
30#define __AD1938_H__
31
32#define AD1938_PLL_CLK_CTRL0 0
33#define AD1938_PLL_POWERDOWN 0x01
34#define AD1938_PLL_CLK_CTRL1 1
35#define AD1938_DAC_CTRL0 2
36#define AD1938_DAC_POWERDOWN 0x01
37#define AD1938_DAC_SERFMT_MASK 0xC0
38#define AD1938_DAC_SERFMT_STEREO (0 << 6)
39#define AD1938_DAC_SERFMT_TDM (1 << 6)
40#define AD1938_DAC_CTRL1 3
41#define AD1938_DAC_2_CHANNELS 0
42#define AD1938_DAC_4_CHANNELS 1
43#define AD1938_DAC_8_CHANNELS 2
44#define AD1938_DAC_16_CHANNELS 3
45#define AD1938_DAC_CHAN_SHFT 1
46#define AD1938_DAC_CHAN_MASK (3 << AD1938_DAC_CHAN_SHFT)
47#define AD1938_DAC_LCR_MASTER (1 << 4)
48#define AD1938_DAC_BCLK_MASTER (1 << 5)
49#define AD1938_DAC_LEFT_HIGH (1 << 3)
50#define AD1938_DAC_BCLK_INV (1 << 7)
51#define AD1938_DAC_CTRL2 4
52#define AD1938_DAC_WORD_LEN_MASK 0xC
53#define AD1938_DAC_MASTER_MUTE 1
54#define AD1938_DAC_CHNL_MUTE 5
55#define AD1938_DACL1_MUTE 0
56#define AD1938_DACR1_MUTE 1
57#define AD1938_DACL2_MUTE 2
58#define AD1938_DACR2_MUTE 3
59#define AD1938_DACL3_MUTE 4
60#define AD1938_DACR3_MUTE 5
61#define AD1938_DACL4_MUTE 6
62#define AD1938_DACR4_MUTE 7
63#define AD1938_DAC_L1_VOL 6
64#define AD1938_DAC_R1_VOL 7
65#define AD1938_DAC_L2_VOL 8
66#define AD1938_DAC_R2_VOL 9
67#define AD1938_DAC_L3_VOL 10
68#define AD1938_DAC_R3_VOL 11
69#define AD1938_DAC_L4_VOL 12
70#define AD1938_DAC_R4_VOL 13
71#define AD1938_ADC_CTRL0 14
72#define AD1938_ADC_POWERDOWN 0x01
73#define AD1938_ADC_HIGHPASS_FILTER 1
74#define AD1938_ADCL1_MUTE 2
75#define AD1938_ADCR1_MUTE 3
76#define AD1938_ADCL2_MUTE 4
77#define AD1938_ADCR2_MUTE 5
78#define AD1938_ADC_CTRL1 15
79#define AD1938_ADC_SERFMT_MASK 0x60
80#define AD1938_ADC_SERFMT_STEREO (0 << 5)
 81#define AD1938_ADC_SERFMT_TDM         (1 << 5)
82#define AD1938_ADC_SERFMT_AUX (2 << 5)
83#define AD1938_ADC_WORD_LEN_MASK 0x3
84#define AD1938_ADC_CTRL2 16
85#define AD1938_ADC_2_CHANNELS 0
86#define AD1938_ADC_4_CHANNELS 1
87#define AD1938_ADC_8_CHANNELS 2
88#define AD1938_ADC_16_CHANNELS 3
89#define AD1938_ADC_CHAN_SHFT 4
90#define AD1938_ADC_CHAN_MASK (3 << AD1938_ADC_CHAN_SHFT)
91#define AD1938_ADC_LCR_MASTER (1 << 3)
92#define AD1938_ADC_BCLK_MASTER (1 << 6)
93#define AD1938_ADC_LEFT_HIGH (1 << 2)
94#define AD1938_ADC_BCLK_INV (1 << 1)
95
96#define AD1938_NUM_REGS 17
97
98extern struct snd_soc_dai ad1938_dai;
99extern struct snd_soc_codec_device soc_codec_dev_ad1938;
100#endif
diff --git a/sound/soc/codecs/ak4535.c b/sound/soc/codecs/ak4535.c
index dd3380202766..0abec0d29a96 100644
--- a/sound/soc/codecs/ak4535.c
+++ b/sound/soc/codecs/ak4535.c
@@ -59,21 +59,6 @@ static inline unsigned int ak4535_read_reg_cache(struct snd_soc_codec *codec,
59 return cache[reg]; 59 return cache[reg];
60} 60}
61 61
62static inline unsigned int ak4535_read(struct snd_soc_codec *codec,
63 unsigned int reg)
64{
65 u8 data;
66 data = reg;
67
68 if (codec->hw_write(codec->control_data, &data, 1) != 1)
69 return -EIO;
70
71 if (codec->hw_read(codec->control_data, &data, 1) != 1)
72 return -EIO;
73
74 return data;
75};
76
77/* 62/*
78 * write ak4535 register cache 63 * write ak4535 register cache
79 */ 64 */
@@ -635,7 +620,6 @@ static int ak4535_probe(struct platform_device *pdev)
635#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) 620#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
636 if (setup->i2c_address) { 621 if (setup->i2c_address) {
637 codec->hw_write = (hw_write_t)i2c_master_send; 622 codec->hw_write = (hw_write_t)i2c_master_send;
638 codec->hw_read = (hw_read_t)i2c_master_recv;
639 ret = ak4535_add_i2c_device(pdev, setup); 623 ret = ak4535_add_i2c_device(pdev, setup);
640 } 624 }
641#endif 625#endif
diff --git a/sound/soc/codecs/ak4642.c b/sound/soc/codecs/ak4642.c
new file mode 100644
index 000000000000..e057c7b578df
--- /dev/null
+++ b/sound/soc/codecs/ak4642.c
@@ -0,0 +1,502 @@
1/*
2 * ak4642.c -- AK4642/AK4643 ALSA Soc Audio driver
3 *
4 * Copyright (C) 2009 Renesas Solutions Corp.
5 * Kuninori Morimoto <morimoto.kuninori@renesas.com>
6 *
7 * Based on wm8731.c by Richard Purdie
8 * Based on ak4535.c by Richard Purdie
9 * Based on wm8753.c by Liam Girdwood
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 */
15
16/* ** CAUTION **
17 *
 18 * This is a very simple driver.
 19 * It can use headphone output / stereo input only.
20 *
21 * AK4642 is not tested.
22 * AK4643 is tested.
23 */
24
25#include <linux/module.h>
26#include <linux/moduleparam.h>
27#include <linux/init.h>
28#include <linux/delay.h>
29#include <linux/pm.h>
30#include <linux/i2c.h>
31#include <linux/platform_device.h>
32#include <sound/core.h>
33#include <sound/pcm.h>
34#include <sound/pcm_params.h>
35#include <sound/soc.h>
36#include <sound/soc-dapm.h>
37#include <sound/initval.h>
38
39#include "ak4642.h"
40
41#define AK4642_VERSION "0.0.1"
42
43#define PW_MGMT1 0x00
44#define PW_MGMT2 0x01
45#define SG_SL1 0x02
46#define SG_SL2 0x03
47#define MD_CTL1 0x04
48#define MD_CTL2 0x05
49#define TIMER 0x06
50#define ALC_CTL1 0x07
51#define ALC_CTL2 0x08
52#define L_IVC 0x09
53#define L_DVC 0x0a
54#define ALC_CTL3 0x0b
55#define R_IVC 0x0c
56#define R_DVC 0x0d
57#define MD_CTL3 0x0e
58#define MD_CTL4 0x0f
59#define PW_MGMT3 0x10
60#define DF_S 0x11
61#define FIL3_0 0x12
62#define FIL3_1 0x13
63#define FIL3_2 0x14
64#define FIL3_3 0x15
65#define EQ_0 0x16
66#define EQ_1 0x17
67#define EQ_2 0x18
68#define EQ_3 0x19
69#define EQ_4 0x1a
70#define EQ_5 0x1b
71#define FIL1_0 0x1c
72#define FIL1_1 0x1d
73#define FIL1_2 0x1e
74#define FIL1_3 0x1f
75#define PW_MGMT4 0x20
76#define MD_CTL5 0x21
77#define LO_MS 0x22
78#define HP_MS 0x23
79#define SPK_MS 0x24
80
81#define AK4642_CACHEREGNUM 0x25
82
83struct snd_soc_codec_device soc_codec_dev_ak4642;
84
85/* codec private data */
86struct ak4642_priv {
87 struct snd_soc_codec codec;
88 unsigned int sysclk;
89};
90
91static struct snd_soc_codec *ak4642_codec;
92
93/*
94 * ak4642 register cache
95 */
96static const u16 ak4642_reg[AK4642_CACHEREGNUM] = {
97 0x0000, 0x0000, 0x0001, 0x0000,
98 0x0002, 0x0000, 0x0000, 0x0000,
99 0x00e1, 0x00e1, 0x0018, 0x0000,
100 0x00e1, 0x0018, 0x0011, 0x0008,
101 0x0000, 0x0000, 0x0000, 0x0000,
102 0x0000, 0x0000, 0x0000, 0x0000,
103 0x0000, 0x0000, 0x0000, 0x0000,
104 0x0000, 0x0000, 0x0000, 0x0000,
105 0x0000, 0x0000, 0x0000, 0x0000,
106 0x0000,
107};
108
109/*
110 * read ak4642 register cache
111 */
112static inline unsigned int ak4642_read_reg_cache(struct snd_soc_codec *codec,
113 unsigned int reg)
114{
115 u16 *cache = codec->reg_cache;
116 if (reg >= AK4642_CACHEREGNUM)
117 return -1;
118 return cache[reg];
119}
120
121/*
122 * write ak4642 register cache
123 */
124static inline void ak4642_write_reg_cache(struct snd_soc_codec *codec,
125 u16 reg, unsigned int value)
126{
127 u16 *cache = codec->reg_cache;
128 if (reg >= AK4642_CACHEREGNUM)
129 return;
130
131 cache[reg] = value;
132}
133
134/*
135 * write to the AK4642 register space
136 */
137static int ak4642_write(struct snd_soc_codec *codec, unsigned int reg,
138 unsigned int value)
139{
140 u8 data[2];
141
142 /* data is
143 * D15..D8 AK4642 register offset
144 * D7...D0 register data
145 */
146 data[0] = reg & 0xff;
147 data[1] = value & 0xff;
148
149 if (codec->hw_write(codec->control_data, data, 2) == 2) {
150 ak4642_write_reg_cache(codec, reg, value);
151 return 0;
152 } else
153 return -EIO;
154}
155
156static int ak4642_sync(struct snd_soc_codec *codec)
157{
158 u16 *cache = codec->reg_cache;
159 int i, r = 0;
160
161 for (i = 0; i < AK4642_CACHEREGNUM; i++)
162 r |= ak4642_write(codec, i, cache[i]);
163
164 return r;
165};
166
167static int ak4642_dai_startup(struct snd_pcm_substream *substream,
168 struct snd_soc_dai *dai)
169{
170 int is_play = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
171 struct snd_soc_codec *codec = dai->codec;
172
173 if (is_play) {
174 /*
175 * start headphone output
176 *
177 * PLL, Master Mode
178 * Audio I/F Format :MSB justified (ADC & DAC)
179 * Sampling Frequency: 44.1kHz
180 * Digital Volume: −8dB
181 * Bass Boost Level : Middle
182 *
183 * This operation came from example code of
184 * "ASAHI KASEI AK4642" (japanese) manual p97.
185 *
186 * Example code use 0x39, 0x79 value for 0x01 address,
187 * But we need MCKO (0x02) bit now
188 */
189 ak4642_write(codec, 0x05, 0x27);
190 ak4642_write(codec, 0x0f, 0x09);
191 ak4642_write(codec, 0x0e, 0x19);
192 ak4642_write(codec, 0x09, 0x91);
193 ak4642_write(codec, 0x0c, 0x91);
194 ak4642_write(codec, 0x0a, 0x28);
195 ak4642_write(codec, 0x0d, 0x28);
196 ak4642_write(codec, 0x00, 0x64);
197 ak4642_write(codec, 0x01, 0x3b); /* + MCKO bit */
198 ak4642_write(codec, 0x01, 0x7b); /* + MCKO bit */
199 } else {
200 /*
201 * start stereo input
202 *
203 * PLL Master Mode
204 * Audio I/F Format:MSB justified (ADC & DAC)
205 * Sampling Frequency:44.1kHz
206 * Pre MIC AMP:+20dB
207 * MIC Power On
208 * ALC setting:Refer to Table 35
209 * ALC bit=“1”
210 *
 211	 * This operation comes from the example code in the
 212	 * "ASAHI KASEI AK4642" (Japanese) manual, p. 94.
213 */
214 ak4642_write(codec, 0x05, 0x27);
215 ak4642_write(codec, 0x02, 0x05);
216 ak4642_write(codec, 0x06, 0x3c);
217 ak4642_write(codec, 0x08, 0xe1);
218 ak4642_write(codec, 0x0b, 0x00);
219 ak4642_write(codec, 0x07, 0x21);
220 ak4642_write(codec, 0x00, 0x41);
221 ak4642_write(codec, 0x10, 0x01);
222 }
223
224 return 0;
225}
226
227static void ak4642_dai_shutdown(struct snd_pcm_substream *substream,
228 struct snd_soc_dai *dai)
229{
230 int is_play = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
231 struct snd_soc_codec *codec = dai->codec;
232
233 if (is_play) {
234 /* stop headphone output */
235 ak4642_write(codec, 0x01, 0x3b);
236 ak4642_write(codec, 0x01, 0x0b);
237 ak4642_write(codec, 0x00, 0x40);
238 ak4642_write(codec, 0x0e, 0x11);
239 ak4642_write(codec, 0x0f, 0x08);
240 } else {
241 /* stop stereo input */
242 ak4642_write(codec, 0x00, 0x40);
243 ak4642_write(codec, 0x10, 0x00);
244 ak4642_write(codec, 0x07, 0x01);
245 }
246}
247
248static int ak4642_dai_set_sysclk(struct snd_soc_dai *codec_dai,
249 int clk_id, unsigned int freq, int dir)
250{
251 struct snd_soc_codec *codec = codec_dai->codec;
252 struct ak4642_priv *ak4642 = codec->private_data;
253
254 ak4642->sysclk = freq;
255 return 0;
256}
257
258static struct snd_soc_dai_ops ak4642_dai_ops = {
259 .startup = ak4642_dai_startup,
260 .shutdown = ak4642_dai_shutdown,
261 .set_sysclk = ak4642_dai_set_sysclk,
262};
263
264struct snd_soc_dai ak4642_dai = {
265 .name = "AK4642",
266 .playback = {
267 .stream_name = "Playback",
268 .channels_min = 1,
269 .channels_max = 2,
270 .rates = SNDRV_PCM_RATE_8000_48000,
271 .formats = SNDRV_PCM_FMTBIT_S16_LE },
272 .capture = {
273 .stream_name = "Capture",
274 .channels_min = 1,
275 .channels_max = 2,
276 .rates = SNDRV_PCM_RATE_8000_48000,
277 .formats = SNDRV_PCM_FMTBIT_S16_LE },
278 .ops = &ak4642_dai_ops,
279};
280EXPORT_SYMBOL_GPL(ak4642_dai);
281
282static int ak4642_resume(struct platform_device *pdev)
283{
284 struct snd_soc_device *socdev = platform_get_drvdata(pdev);
285 struct snd_soc_codec *codec = socdev->card->codec;
286
287 ak4642_sync(codec);
288 return 0;
289}
290
291/*
292 * initialise the AK4642 driver
293 * register the mixer and dsp interfaces with the kernel
294 */
295static int ak4642_init(struct ak4642_priv *ak4642)
296{
297 struct snd_soc_codec *codec = &ak4642->codec;
298 int ret = 0;
299
300 if (ak4642_codec) {
301 dev_err(codec->dev, "Another ak4642 is registered\n");
302 return -EINVAL;
303 }
304
305 mutex_init(&codec->mutex);
306 INIT_LIST_HEAD(&codec->dapm_widgets);
307 INIT_LIST_HEAD(&codec->dapm_paths);
308
309 codec->private_data = ak4642;
310 codec->name = "AK4642";
311 codec->owner = THIS_MODULE;
312 codec->read = ak4642_read_reg_cache;
313 codec->write = ak4642_write;
314 codec->dai = &ak4642_dai;
315 codec->num_dai = 1;
316 codec->hw_write = (hw_write_t)i2c_master_send;
317 codec->reg_cache_size = ARRAY_SIZE(ak4642_reg);
318 codec->reg_cache = kmemdup(ak4642_reg,
319 sizeof(ak4642_reg), GFP_KERNEL);
320
321 if (!codec->reg_cache)
322 return -ENOMEM;
323
324 ak4642_dai.dev = codec->dev;
325 ak4642_codec = codec;
326
327 ret = snd_soc_register_codec(codec);
328 if (ret) {
329 dev_err(codec->dev, "Failed to register codec: %d\n", ret);
330 goto reg_cache_err;
331 }
332
333 ret = snd_soc_register_dai(&ak4642_dai);
334 if (ret) {
335 dev_err(codec->dev, "Failed to register DAI: %d\n", ret);
336 snd_soc_unregister_codec(codec);
337 goto reg_cache_err;
338 }
339
340 /*
341 * clock setting
342 *
343 * Audio I/F Format: MSB justified (ADC & DAC)
344 * BICK frequency at Master Mode: 64fs
345 * Input Master Clock Select at PLL Mode: 11.2896MHz
346 * MCKO: Enable
347 * Sampling Frequency: 44.1kHz
348 *
 349	 * This operation comes from the example code in the
 350	 * "ASAHI KASEI AK4642" (Japanese) manual, p. 89.
351 *
352 * please fix-me
353 */
354 ak4642_write(codec, 0x01, 0x08);
355 ak4642_write(codec, 0x04, 0x4a);
356 ak4642_write(codec, 0x05, 0x27);
357 ak4642_write(codec, 0x00, 0x40);
358 ak4642_write(codec, 0x01, 0x0b);
359
360 return ret;
361
362reg_cache_err:
363 kfree(codec->reg_cache);
364 codec->reg_cache = NULL;
365
366 return ret;
367}
368
369#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
370static int ak4642_i2c_probe(struct i2c_client *i2c,
371 const struct i2c_device_id *id)
372{
373 struct ak4642_priv *ak4642;
374 struct snd_soc_codec *codec;
375 int ret;
376
377 ak4642 = kzalloc(sizeof(struct ak4642_priv), GFP_KERNEL);
378 if (!ak4642)
379 return -ENOMEM;
380
381 codec = &ak4642->codec;
382 codec->dev = &i2c->dev;
383
384 i2c_set_clientdata(i2c, ak4642);
385 codec->control_data = i2c;
386
387 ret = ak4642_init(ak4642);
388 if (ret < 0)
389 printk(KERN_ERR "failed to initialise AK4642\n");
390
391 return ret;
392}
393
394static int ak4642_i2c_remove(struct i2c_client *client)
395{
396 struct ak4642_priv *ak4642 = i2c_get_clientdata(client);
397
398 snd_soc_unregister_dai(&ak4642_dai);
399 snd_soc_unregister_codec(&ak4642->codec);
400 kfree(ak4642->codec.reg_cache);
401 kfree(ak4642);
402 ak4642_codec = NULL;
403
404 return 0;
405}
406
407static const struct i2c_device_id ak4642_i2c_id[] = {
408 { "ak4642", 0 },
409 { "ak4643", 0 },
410 { }
411};
412MODULE_DEVICE_TABLE(i2c, ak4642_i2c_id);
413
414static struct i2c_driver ak4642_i2c_driver = {
415 .driver = {
416 .name = "AK4642 I2C Codec",
417 .owner = THIS_MODULE,
418 },
419 .probe = ak4642_i2c_probe,
420 .remove = ak4642_i2c_remove,
421 .id_table = ak4642_i2c_id,
422};
423
424#endif
425
426static int ak4642_probe(struct platform_device *pdev)
427{
428 struct snd_soc_device *socdev = platform_get_drvdata(pdev);
429 int ret;
430
431 if (!ak4642_codec) {
432 dev_err(&pdev->dev, "Codec device not registered\n");
433 return -ENODEV;
434 }
435
436 socdev->card->codec = ak4642_codec;
437
438 /* register pcms */
439 ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
440 if (ret < 0) {
441 printk(KERN_ERR "ak4642: failed to create pcms\n");
442 goto pcm_err;
443 }
444
445 ret = snd_soc_init_card(socdev);
446 if (ret < 0) {
447 printk(KERN_ERR "ak4642: failed to register card\n");
448 goto card_err;
449 }
450
     451	dev_info(&pdev->dev, "AK4642 Audio Codec %s\n", AK4642_VERSION);
452 return ret;
453
454card_err:
455 snd_soc_free_pcms(socdev);
456 snd_soc_dapm_free(socdev);
457pcm_err:
458 return ret;
459
460}
461
462/* power down chip */
463static int ak4642_remove(struct platform_device *pdev)
464{
465 struct snd_soc_device *socdev = platform_get_drvdata(pdev);
466
467 snd_soc_free_pcms(socdev);
468 snd_soc_dapm_free(socdev);
469
470 return 0;
471}
472
473struct snd_soc_codec_device soc_codec_dev_ak4642 = {
474 .probe = ak4642_probe,
475 .remove = ak4642_remove,
476 .resume = ak4642_resume,
477};
478EXPORT_SYMBOL_GPL(soc_codec_dev_ak4642);
479
480static int __init ak4642_modinit(void)
481{
     482	int ret = 0;
483#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
484 ret = i2c_add_driver(&ak4642_i2c_driver);
485#endif
486 return ret;
487
488}
489module_init(ak4642_modinit);
490
491static void __exit ak4642_exit(void)
492{
493#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
494 i2c_del_driver(&ak4642_i2c_driver);
495#endif
496
497}
498module_exit(ak4642_exit);
499
     500MODULE_DESCRIPTION("ASoC AK4642 driver");
501MODULE_AUTHOR("Kuninori Morimoto <morimoto.kuninori@renesas.com>");
502MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/ak4642.h b/sound/soc/codecs/ak4642.h
new file mode 100644
index 000000000000..e476833d314e
--- /dev/null
+++ b/sound/soc/codecs/ak4642.h
@@ -0,0 +1,20 @@
1/*
2 * ak4642.h -- AK4642 Soc Audio driver
3 *
4 * Copyright (C) 2009 Renesas Solutions Corp.
5 * Kuninori Morimoto <morimoto.kuninori@renesas.com>
6 *
7 * Based on ak4535.c
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#ifndef _AK4642_H
15#define _AK4642_H
16
17extern struct snd_soc_dai ak4642_dai;
18extern struct snd_soc_codec_device soc_codec_dev_ak4642;
19
20#endif
diff --git a/sound/soc/codecs/cs4270.c b/sound/soc/codecs/cs4270.c
index a32b8226c8a4..ca1e24a8f12a 100644
--- a/sound/soc/codecs/cs4270.c
+++ b/sound/soc/codecs/cs4270.c
@@ -806,15 +806,30 @@ static int cs4270_i2c_suspend(struct i2c_client *client, pm_message_t mesg)
806{ 806{
807 struct cs4270_private *cs4270 = i2c_get_clientdata(client); 807 struct cs4270_private *cs4270 = i2c_get_clientdata(client);
808 struct snd_soc_codec *codec = &cs4270->codec; 808 struct snd_soc_codec *codec = &cs4270->codec;
809 int reg = snd_soc_read(codec, CS4270_PWRCTL) | CS4270_PWRCTL_PDN_ALL;
810 809
811 return snd_soc_write(codec, CS4270_PWRCTL, reg); 810 return snd_soc_suspend_device(codec->dev);
812} 811}
813 812
814static int cs4270_i2c_resume(struct i2c_client *client) 813static int cs4270_i2c_resume(struct i2c_client *client)
815{ 814{
816 struct cs4270_private *cs4270 = i2c_get_clientdata(client); 815 struct cs4270_private *cs4270 = i2c_get_clientdata(client);
817 struct snd_soc_codec *codec = &cs4270->codec; 816 struct snd_soc_codec *codec = &cs4270->codec;
817
818 return snd_soc_resume_device(codec->dev);
819}
820
821static int cs4270_soc_suspend(struct platform_device *pdev, pm_message_t mesg)
822{
823 struct snd_soc_codec *codec = cs4270_codec;
824 int reg = snd_soc_read(codec, CS4270_PWRCTL) | CS4270_PWRCTL_PDN_ALL;
825
826 return snd_soc_write(codec, CS4270_PWRCTL, reg);
827}
828
829static int cs4270_soc_resume(struct platform_device *pdev)
830{
831 struct snd_soc_codec *codec = cs4270_codec;
832 struct i2c_client *i2c_client = codec->control_data;
818 int reg; 833 int reg;
819 834
820 /* In case the device was put to hard reset during sleep, we need to 835 /* In case the device was put to hard reset during sleep, we need to
@@ -825,7 +840,7 @@ static int cs4270_i2c_resume(struct i2c_client *client)
825 for (reg = CS4270_FIRSTREG; reg <= CS4270_LASTREG; reg++) { 840 for (reg = CS4270_FIRSTREG; reg <= CS4270_LASTREG; reg++) {
826 u8 val = snd_soc_read(codec, reg); 841 u8 val = snd_soc_read(codec, reg);
827 842
828 if (i2c_smbus_write_byte_data(client, reg, val)) { 843 if (i2c_smbus_write_byte_data(i2c_client, reg, val)) {
829 dev_err(codec->dev, "i2c write failed\n"); 844 dev_err(codec->dev, "i2c write failed\n");
830 return -EIO; 845 return -EIO;
831 } 846 }
@@ -840,6 +855,8 @@ static int cs4270_i2c_resume(struct i2c_client *client)
840#else 855#else
841#define cs4270_i2c_suspend NULL 856#define cs4270_i2c_suspend NULL
842#define cs4270_i2c_resume NULL 857#define cs4270_i2c_resume NULL
858#define cs4270_soc_suspend NULL
859#define cs4270_soc_resume NULL
843#endif /* CONFIG_PM */ 860#endif /* CONFIG_PM */
844 861
845/* 862/*
@@ -868,7 +885,9 @@ static struct i2c_driver cs4270_i2c_driver = {
868 */ 885 */
869struct snd_soc_codec_device soc_codec_device_cs4270 = { 886struct snd_soc_codec_device soc_codec_device_cs4270 = {
870 .probe = cs4270_probe, 887 .probe = cs4270_probe,
871 .remove = cs4270_remove 888 .remove = cs4270_remove,
889 .suspend = cs4270_soc_suspend,
890 .resume = cs4270_soc_resume,
872}; 891};
873EXPORT_SYMBOL_GPL(soc_codec_device_cs4270); 892EXPORT_SYMBOL_GPL(soc_codec_device_cs4270);
874 893
diff --git a/sound/soc/codecs/cx20442.c b/sound/soc/codecs/cx20442.c
new file mode 100644
index 000000000000..38eac9c866e1
--- /dev/null
+++ b/sound/soc/codecs/cx20442.c
@@ -0,0 +1,501 @@
1/*
2 * cx20442.c -- CX20442 ALSA Soc Audio driver
3 *
4 * Copyright 2009 Janusz Krzysztofik <jkrzyszt@tis.icnet.pl>
5 *
6 * Initially based on sound/soc/codecs/wm8400.c
7 * Copyright 2008, 2009 Wolfson Microelectronics PLC.
8 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 */
15
16#include <linux/tty.h>
17
18#include <sound/core.h>
19#include <sound/initval.h>
20#include <sound/soc-dapm.h>
21
22#include "cx20442.h"
23
24
25struct cx20442_priv {
26 struct snd_soc_codec codec;
27 u8 reg_cache[1];
28};
29
30#define CX20442_PM 0x0
31
32#define CX20442_TELIN 0
33#define CX20442_TELOUT 1
34#define CX20442_MIC 2
35#define CX20442_SPKOUT 3
36#define CX20442_AGC 4
37
38static const struct snd_soc_dapm_widget cx20442_dapm_widgets[] = {
39 SND_SOC_DAPM_OUTPUT("TELOUT"),
40 SND_SOC_DAPM_OUTPUT("SPKOUT"),
41 SND_SOC_DAPM_OUTPUT("AGCOUT"),
42
43 SND_SOC_DAPM_MIXER("SPKOUT Mixer", SND_SOC_NOPM, 0, 0, NULL, 0),
44
45 SND_SOC_DAPM_PGA("TELOUT Amp", CX20442_PM, CX20442_TELOUT, 0, NULL, 0),
46 SND_SOC_DAPM_PGA("SPKOUT Amp", CX20442_PM, CX20442_SPKOUT, 0, NULL, 0),
47 SND_SOC_DAPM_PGA("SPKOUT AGC", CX20442_PM, CX20442_AGC, 0, NULL, 0),
48
49 SND_SOC_DAPM_DAC("DAC", "Playback", SND_SOC_NOPM, 0, 0),
50 SND_SOC_DAPM_ADC("ADC", "Capture", SND_SOC_NOPM, 0, 0),
51
52 SND_SOC_DAPM_MIXER("Input Mixer", SND_SOC_NOPM, 0, 0, NULL, 0),
53
54 SND_SOC_DAPM_MICBIAS("TELIN Bias", CX20442_PM, CX20442_TELIN, 0),
55 SND_SOC_DAPM_MICBIAS("MIC Bias", CX20442_PM, CX20442_MIC, 0),
56
57 SND_SOC_DAPM_PGA("MIC AGC", CX20442_PM, CX20442_AGC, 0, NULL, 0),
58
59 SND_SOC_DAPM_INPUT("TELIN"),
60 SND_SOC_DAPM_INPUT("MIC"),
61 SND_SOC_DAPM_INPUT("AGCIN"),
62};
63
64static const struct snd_soc_dapm_route cx20442_audio_map[] = {
65 {"TELOUT", NULL, "TELOUT Amp"},
66
67 {"SPKOUT", NULL, "SPKOUT Mixer"},
68 {"SPKOUT Mixer", NULL, "SPKOUT Amp"},
69
70 {"TELOUT Amp", NULL, "DAC"},
71 {"SPKOUT Amp", NULL, "DAC"},
72
73 {"SPKOUT Mixer", NULL, "SPKOUT AGC"},
74 {"SPKOUT AGC", NULL, "AGCIN"},
75
76 {"AGCOUT", NULL, "MIC AGC"},
77 {"MIC AGC", NULL, "MIC"},
78
79 {"MIC Bias", NULL, "MIC"},
80 {"Input Mixer", NULL, "MIC Bias"},
81
82 {"TELIN Bias", NULL, "TELIN"},
83 {"Input Mixer", NULL, "TELIN Bias"},
84
85 {"ADC", NULL, "Input Mixer"},
86};
87
88static int cx20442_add_widgets(struct snd_soc_codec *codec)
89{
90 snd_soc_dapm_new_controls(codec, cx20442_dapm_widgets,
91 ARRAY_SIZE(cx20442_dapm_widgets));
92
93 snd_soc_dapm_add_routes(codec, cx20442_audio_map,
94 ARRAY_SIZE(cx20442_audio_map));
95
96 snd_soc_dapm_new_widgets(codec);
97 return 0;
98}
99
100static unsigned int cx20442_read_reg_cache(struct snd_soc_codec *codec,
101 unsigned int reg)
102{
103 u8 *reg_cache = codec->reg_cache;
104
105 if (reg >= codec->reg_cache_size)
106 return -EINVAL;
107
108 return reg_cache[reg];
109}
110
111enum v253_vls {
112 V253_VLS_NONE = 0,
113 V253_VLS_T,
114 V253_VLS_L,
115 V253_VLS_LT,
116 V253_VLS_S,
117 V253_VLS_ST,
118 V253_VLS_M,
119 V253_VLS_MST,
120 V253_VLS_S1,
121 V253_VLS_S1T,
122 V253_VLS_MS1T,
123 V253_VLS_M1,
124 V253_VLS_M1ST,
125 V253_VLS_M1S1T,
126 V253_VLS_H,
127 V253_VLS_HT,
128 V253_VLS_MS,
129 V253_VLS_MS1,
130 V253_VLS_M1S,
131 V253_VLS_M1S1,
132 V253_VLS_TEST,
133};
134
135static int cx20442_pm_to_v253_vls(u8 value)
136{
137 switch (value & ~(1 << CX20442_AGC)) {
138 case 0:
139 return V253_VLS_T;
140 case (1 << CX20442_SPKOUT):
141 case (1 << CX20442_MIC):
142 case (1 << CX20442_SPKOUT) | (1 << CX20442_MIC):
143 return V253_VLS_M1S1;
144 case (1 << CX20442_TELOUT):
145 case (1 << CX20442_TELIN):
146 case (1 << CX20442_TELOUT) | (1 << CX20442_TELIN):
147 return V253_VLS_L;
148 case (1 << CX20442_TELOUT) | (1 << CX20442_MIC):
149 return V253_VLS_NONE;
150 }
151 return -EINVAL;
152}
153static int cx20442_pm_to_v253_vsp(u8 value)
154{
155 switch (value & ~(1 << CX20442_AGC)) {
156 case (1 << CX20442_SPKOUT):
157 case (1 << CX20442_MIC):
158 case (1 << CX20442_SPKOUT) | (1 << CX20442_MIC):
159 return (bool)(value & (1 << CX20442_AGC));
160 }
161 return (value & (1 << CX20442_AGC)) ? -EINVAL : 0;
162}
163
164static int cx20442_write(struct snd_soc_codec *codec, unsigned int reg,
165 unsigned int value)
166{
167 u8 *reg_cache = codec->reg_cache;
168 int vls, vsp, old, len;
169 char buf[18];
170
171 if (reg >= codec->reg_cache_size)
172 return -EINVAL;
173
174 /* hw_write and control_data pointers required for talking to the modem
175 * are expected to be set by the line discipline initialization code */
176 if (!codec->hw_write || !codec->control_data)
177 return -EIO;
178
179 old = reg_cache[reg];
180 reg_cache[reg] = value;
181
182 vls = cx20442_pm_to_v253_vls(value);
183 if (vls < 0)
184 return vls;
185
186 vsp = cx20442_pm_to_v253_vsp(value);
187 if (vsp < 0)
188 return vsp;
189
190 if ((vls == V253_VLS_T) ||
191 (vls == cx20442_pm_to_v253_vls(old))) {
192 if (vsp == cx20442_pm_to_v253_vsp(old))
193 return 0;
194 len = snprintf(buf, ARRAY_SIZE(buf), "at+vsp=%d\r", vsp);
195 } else if (vsp == cx20442_pm_to_v253_vsp(old))
196 len = snprintf(buf, ARRAY_SIZE(buf), "at+vls=%d\r", vls);
197 else
198 len = snprintf(buf, ARRAY_SIZE(buf),
199 "at+vls=%d;+vsp=%d\r", vls, vsp);
200
201 if (unlikely(len > (ARRAY_SIZE(buf) - 1)))
202 return -ENOMEM;
203
204 dev_dbg(codec->dev, "%s: %s\n", __func__, buf);
205 if (codec->hw_write(codec->control_data, buf, len) != len)
206 return -EIO;
207
208 return 0;
209}
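/*
 * Editor's worked example (illustrative, not part of this patch): with the
 * enum above, powering up SPKOUT together with MIC while AGC is requested,
 * i.e. value = (1 << CX20442_SPKOUT) | (1 << CX20442_MIC) | (1 << CX20442_AGC),
 * gives cx20442_pm_to_v253_vls() == V253_VLS_M1S1 (19) and
 * cx20442_pm_to_v253_vsp() == 1, so cx20442_write() sends the modem
 * "at+vls=19;+vsp=1\r" (assuming both values differ from the cached old
 * register contents).
 */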
210
211
 212/* Moved up here as the line discipline refers to it during initialization */
213static struct snd_soc_codec *cx20442_codec;
214
215
216/*
 217 * Line discipline related code
 218 *
 219 * Any of the callback functions below can be used in two ways:
 220 * 1) registered by a machine driver as one of the line discipline operations,
 221 * 2) called from a machine's own line discipline callback function
 222 *    when extra machine-specific code must be run as well.
223 */
224
225/* Modem init: echo off, digital speaker off, quiet off, voice mode */
226static const char *v253_init = "ate0m0q0+fclass=8\r";
227
228/* Line discipline .open() */
229static int v253_open(struct tty_struct *tty)
230{
231 struct snd_soc_codec *codec = cx20442_codec;
232 int ret, len = strlen(v253_init);
233
234 /* Doesn't make sense without write callback */
235 if (!tty->ops->write)
236 return -EINVAL;
237
238 /* Pass the codec structure address for use by other ldisc callbacks */
239 tty->disc_data = codec;
240
241 if (tty->ops->write(tty, v253_init, len) != len) {
242 ret = -EIO;
243 goto err;
244 }
245 /* Actual setup will be performed after the modem responds. */
246 return 0;
247err:
248 tty->disc_data = NULL;
249 return ret;
250}
251
252/* Line discipline .close() */
253static void v253_close(struct tty_struct *tty)
254{
255 struct snd_soc_codec *codec = tty->disc_data;
256
257 tty->disc_data = NULL;
258
259 if (!codec)
260 return;
261
262 /* Prevent the codec driver from further accessing the modem */
263 codec->hw_write = NULL;
264 codec->control_data = NULL;
265 codec->pop_time = 0;
266}
267
268/* Line discipline .hangup() */
269static int v253_hangup(struct tty_struct *tty)
270{
271 v253_close(tty);
272 return 0;
273}
274
275/* Line discipline .receive_buf() */
276static void v253_receive(struct tty_struct *tty,
277 const unsigned char *cp, char *fp, int count)
278{
279 struct snd_soc_codec *codec = tty->disc_data;
280
281 if (!codec)
282 return;
283
284 if (!codec->control_data) {
285 /* First modem response, complete setup procedure */
286
287 /* Set up codec driver access to modem controls */
288 codec->control_data = tty;
289 codec->hw_write = (hw_write_t)tty->ops->write;
290 codec->pop_time = 1;
291 }
292}
293
294/* Line discipline .write_wakeup() */
295static void v253_wakeup(struct tty_struct *tty)
296{
297}
298
299struct tty_ldisc_ops v253_ops = {
300 .magic = TTY_LDISC_MAGIC,
301 .name = "cx20442",
302 .owner = THIS_MODULE,
303 .open = v253_open,
304 .close = v253_close,
305 .hangup = v253_hangup,
306 .receive_buf = v253_receive,
307 .write_wakeup = v253_wakeup,
308};
309EXPORT_SYMBOL_GPL(v253_ops);
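/*
 * Editor's sketch (not part of this patch): a machine driver may register
 * the exported ops directly, or wrap selected callbacks in its own
 * tty_ldisc_ops when board-specific work is needed.  The function name is
 * made up for illustration, and this assumes the N_V253 line discipline
 * number is available in this tree.
 */
static int __init example_install_v253_ldisc(void)
{
	/* make the codec's modem-control callbacks available to the tty layer */
	return tty_register_ldisc(N_V253, &v253_ops);
}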
310
311
312/*
313 * Codec DAI
314 */
315
316struct snd_soc_dai cx20442_dai = {
317 .name = "CX20442",
318 .playback = {
319 .stream_name = "Playback",
320 .channels_min = 1,
321 .channels_max = 1,
322 .rates = SNDRV_PCM_RATE_8000,
323 .formats = SNDRV_PCM_FMTBIT_S16_LE,
324 },
325 .capture = {
326 .stream_name = "Capture",
327 .channels_min = 1,
328 .channels_max = 1,
329 .rates = SNDRV_PCM_RATE_8000,
330 .formats = SNDRV_PCM_FMTBIT_S16_LE,
331 },
332};
333EXPORT_SYMBOL_GPL(cx20442_dai);
334
335static int cx20442_codec_probe(struct platform_device *pdev)
336{
337 struct snd_soc_device *socdev = platform_get_drvdata(pdev);
338 struct snd_soc_codec *codec;
339 int ret;
340
341 if (!cx20442_codec) {
342 dev_err(&pdev->dev, "cx20442 not yet discovered\n");
343 return -ENODEV;
344 }
345 codec = cx20442_codec;
346
347 socdev->card->codec = codec;
348
349 /* register pcms */
350 ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
351 if (ret < 0) {
352 dev_err(&pdev->dev, "failed to create pcms\n");
353 goto pcm_err;
354 }
355
356 cx20442_add_widgets(codec);
357
358 ret = snd_soc_init_card(socdev);
359 if (ret < 0) {
360 dev_err(&pdev->dev, "failed to register card\n");
361 goto card_err;
362 }
363
364 return ret;
365
366card_err:
367 snd_soc_free_pcms(socdev);
368 snd_soc_dapm_free(socdev);
369pcm_err:
370 return ret;
371}
372
373/* power down chip */
374static int cx20442_codec_remove(struct platform_device *pdev)
375{
376 struct snd_soc_device *socdev = platform_get_drvdata(pdev);
377
378 snd_soc_free_pcms(socdev);
379 snd_soc_dapm_free(socdev);
380
381 return 0;
382}
383
384struct snd_soc_codec_device cx20442_codec_dev = {
385 .probe = cx20442_codec_probe,
386 .remove = cx20442_codec_remove,
387};
388EXPORT_SYMBOL_GPL(cx20442_codec_dev);
389
390static int cx20442_register(struct cx20442_priv *cx20442)
391{
392 struct snd_soc_codec *codec = &cx20442->codec;
393 int ret;
394
395 mutex_init(&codec->mutex);
396 INIT_LIST_HEAD(&codec->dapm_widgets);
397 INIT_LIST_HEAD(&codec->dapm_paths);
398
399 codec->name = "CX20442";
400 codec->owner = THIS_MODULE;
401 codec->private_data = cx20442;
402
403 codec->dai = &cx20442_dai;
404 codec->num_dai = 1;
405
406 codec->reg_cache = &cx20442->reg_cache;
407 codec->reg_cache_size = ARRAY_SIZE(cx20442->reg_cache);
408 codec->read = cx20442_read_reg_cache;
409 codec->write = cx20442_write;
410
411 codec->bias_level = SND_SOC_BIAS_OFF;
412
413 cx20442_dai.dev = codec->dev;
414
415 cx20442_codec = codec;
416
417 ret = snd_soc_register_codec(codec);
418 if (ret != 0) {
419 dev_err(codec->dev, "Failed to register codec: %d\n", ret);
420 goto err;
421 }
422
423 ret = snd_soc_register_dai(&cx20442_dai);
424 if (ret != 0) {
425 dev_err(codec->dev, "Failed to register DAI: %d\n", ret);
426 goto err_codec;
427 }
428
429 return 0;
430
431err_codec:
432 snd_soc_unregister_codec(codec);
433err:
434 cx20442_codec = NULL;
435 kfree(cx20442);
436 return ret;
437}
438
439static void cx20442_unregister(struct cx20442_priv *cx20442)
440{
441 snd_soc_unregister_dai(&cx20442_dai);
442 snd_soc_unregister_codec(&cx20442->codec);
443
444 cx20442_codec = NULL;
445 kfree(cx20442);
446}
447
448static int cx20442_platform_probe(struct platform_device *pdev)
449{
450 struct cx20442_priv *cx20442;
451 struct snd_soc_codec *codec;
452
453 cx20442 = kzalloc(sizeof(struct cx20442_priv), GFP_KERNEL);
454 if (cx20442 == NULL)
455 return -ENOMEM;
456
457 codec = &cx20442->codec;
458
459 codec->control_data = NULL;
460 codec->hw_write = NULL;
461 codec->pop_time = 0;
462
463 codec->dev = &pdev->dev;
464 platform_set_drvdata(pdev, cx20442);
465
466 return cx20442_register(cx20442);
467}
468
469static int __exit cx20442_platform_remove(struct platform_device *pdev)
470{
471 struct cx20442_priv *cx20442 = platform_get_drvdata(pdev);
472
473 cx20442_unregister(cx20442);
474 return 0;
475}
476
477static struct platform_driver cx20442_platform_driver = {
478 .driver = {
479 .name = "cx20442",
480 .owner = THIS_MODULE,
481 },
482 .probe = cx20442_platform_probe,
483 .remove = __exit_p(cx20442_platform_remove),
484};
485
486static int __init cx20442_init(void)
487{
488 return platform_driver_register(&cx20442_platform_driver);
489}
490module_init(cx20442_init);
491
492static void __exit cx20442_exit(void)
493{
494 platform_driver_unregister(&cx20442_platform_driver);
495}
496module_exit(cx20442_exit);
497
498MODULE_DESCRIPTION("ASoC CX20442-11 voice modem codec driver");
499MODULE_AUTHOR("Janusz Krzysztofik");
500MODULE_LICENSE("GPL");
501MODULE_ALIAS("platform:cx20442");
diff --git a/sound/soc/codecs/cx20442.h b/sound/soc/codecs/cx20442.h
new file mode 100644
index 000000000000..688a5eb62e17
--- /dev/null
+++ b/sound/soc/codecs/cx20442.h
@@ -0,0 +1,20 @@
1/*
2 * cx20442.h -- audio driver for CX20442
3 *
4 * Copyright 2009 Janusz Krzysztofik <jkrzyszt@tis.icnet.pl>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 *
11 */
12
13#ifndef _CX20442_CODEC_H
14#define _CX20442_CODEC_H
15
16extern struct snd_soc_dai cx20442_dai;
17extern struct snd_soc_codec_device cx20442_codec_dev;
18extern struct tty_ldisc_ops v253_ops;
19
20#endif
diff --git a/sound/soc/codecs/max9877.c b/sound/soc/codecs/max9877.c
new file mode 100644
index 000000000000..9e7e964a5fa3
--- /dev/null
+++ b/sound/soc/codecs/max9877.c
@@ -0,0 +1,308 @@
1/*
2 * max9877.c -- amp driver for max9877
3 *
4 * Copyright (C) 2009 Samsung Electronics Co.Ltd
5 * Author: Joonyoung Shim <jy0922.shim@samsung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/init.h>
16#include <linux/i2c.h>
17#include <sound/soc.h>
18#include <sound/tlv.h>
19
20#include "max9877.h"
21
22static struct i2c_client *i2c;
23
24static u8 max9877_regs[5] = { 0x40, 0x00, 0x00, 0x00, 0x49 };
25
26static void max9877_write_regs(void)
27{
28 unsigned int i;
29 u8 data[6];
30
31 data[0] = MAX9877_INPUT_MODE;
32 for (i = 0; i < ARRAY_SIZE(max9877_regs); i++)
33 data[i + 1] = max9877_regs[i];
34
35 if (i2c_master_send(i2c, data, 6) != 6)
36 dev_err(&i2c->dev, "i2c write failed\n");
37}
38
39static int max9877_get_reg(struct snd_kcontrol *kcontrol,
40 struct snd_ctl_elem_value *ucontrol)
41{
42 struct soc_mixer_control *mc =
43 (struct soc_mixer_control *)kcontrol->private_value;
44 unsigned int reg = mc->reg;
45 unsigned int shift = mc->shift;
46 unsigned int mask = mc->max;
47 unsigned int invert = mc->invert;
48
49 ucontrol->value.integer.value[0] = (max9877_regs[reg] >> shift) & mask;
50
51 if (invert)
52 ucontrol->value.integer.value[0] =
53 mask - ucontrol->value.integer.value[0];
54
55 return 0;
56}
57
58static int max9877_set_reg(struct snd_kcontrol *kcontrol,
59 struct snd_ctl_elem_value *ucontrol)
60{
61 struct soc_mixer_control *mc =
62 (struct soc_mixer_control *)kcontrol->private_value;
63 unsigned int reg = mc->reg;
64 unsigned int shift = mc->shift;
65 unsigned int mask = mc->max;
66 unsigned int invert = mc->invert;
67 unsigned int val = (ucontrol->value.integer.value[0] & mask);
68
69 if (invert)
70 val = mask - val;
71
72 if (((max9877_regs[reg] >> shift) & mask) == val)
73 return 0;
74
75 max9877_regs[reg] &= ~(mask << shift);
76 max9877_regs[reg] |= val << shift;
77 max9877_write_regs();
78
79 return 1;
80}
81
82static int max9877_get_2reg(struct snd_kcontrol *kcontrol,
83 struct snd_ctl_elem_value *ucontrol)
84{
85 struct soc_mixer_control *mc =
86 (struct soc_mixer_control *)kcontrol->private_value;
87 unsigned int reg = mc->reg;
88 unsigned int reg2 = mc->rreg;
89 unsigned int shift = mc->shift;
90 unsigned int mask = mc->max;
91
92 ucontrol->value.integer.value[0] = (max9877_regs[reg] >> shift) & mask;
93 ucontrol->value.integer.value[1] = (max9877_regs[reg2] >> shift) & mask;
94
95 return 0;
96}
97
98static int max9877_set_2reg(struct snd_kcontrol *kcontrol,
99 struct snd_ctl_elem_value *ucontrol)
100{
101 struct soc_mixer_control *mc =
102 (struct soc_mixer_control *)kcontrol->private_value;
103 unsigned int reg = mc->reg;
104 unsigned int reg2 = mc->rreg;
105 unsigned int shift = mc->shift;
106 unsigned int mask = mc->max;
107 unsigned int val = (ucontrol->value.integer.value[0] & mask);
108 unsigned int val2 = (ucontrol->value.integer.value[1] & mask);
109 unsigned int change = 1;
110
111 if (((max9877_regs[reg] >> shift) & mask) == val)
112 change = 0;
113
114 if (((max9877_regs[reg2] >> shift) & mask) == val2)
115 change = 0;
116
117 if (change) {
118 max9877_regs[reg] &= ~(mask << shift);
119 max9877_regs[reg] |= val << shift;
120 max9877_regs[reg2] &= ~(mask << shift);
121 max9877_regs[reg2] |= val2 << shift;
122 max9877_write_regs();
123 }
124
125 return change;
126}
127
128static int max9877_get_out_mode(struct snd_kcontrol *kcontrol,
129 struct snd_ctl_elem_value *ucontrol)
130{
131 u8 value = max9877_regs[MAX9877_OUTPUT_MODE] & MAX9877_OUTMODE_MASK;
132
133 if (value)
134 value -= 1;
135
136 ucontrol->value.integer.value[0] = value;
137 return 0;
138}
139
140static int max9877_set_out_mode(struct snd_kcontrol *kcontrol,
141 struct snd_ctl_elem_value *ucontrol)
142{
143 u8 value = ucontrol->value.integer.value[0];
144
145 value += 1;
146
147 if ((max9877_regs[MAX9877_OUTPUT_MODE] & MAX9877_OUTMODE_MASK) == value)
148 return 0;
149
150 max9877_regs[MAX9877_OUTPUT_MODE] &= ~MAX9877_OUTMODE_MASK;
151 max9877_regs[MAX9877_OUTPUT_MODE] |= value;
152 max9877_write_regs();
153 return 1;
154}
155
156static int max9877_get_osc_mode(struct snd_kcontrol *kcontrol,
157 struct snd_ctl_elem_value *ucontrol)
158{
159 u8 value = (max9877_regs[MAX9877_OUTPUT_MODE] & MAX9877_OSC_MASK);
160
161 value = value >> MAX9877_OSC_OFFSET;
162
163 ucontrol->value.integer.value[0] = value;
164 return 0;
165}
166
167static int max9877_set_osc_mode(struct snd_kcontrol *kcontrol,
168 struct snd_ctl_elem_value *ucontrol)
169{
170 u8 value = ucontrol->value.integer.value[0];
171
172 value = value << MAX9877_OSC_OFFSET;
173 if ((max9877_regs[MAX9877_OUTPUT_MODE] & MAX9877_OSC_MASK) == value)
174 return 0;
175
176 max9877_regs[MAX9877_OUTPUT_MODE] &= ~MAX9877_OSC_MASK;
177 max9877_regs[MAX9877_OUTPUT_MODE] |= value;
178 max9877_write_regs();
179 return 1;
180}
181
182static const unsigned int max9877_pgain_tlv[] = {
183 TLV_DB_RANGE_HEAD(2),
184 0, 1, TLV_DB_SCALE_ITEM(0, 900, 0),
185 2, 2, TLV_DB_SCALE_ITEM(2000, 0, 0),
186};
187
188static const unsigned int max9877_output_tlv[] = {
189 TLV_DB_RANGE_HEAD(4),
190 0, 7, TLV_DB_SCALE_ITEM(-7900, 400, 1),
191 8, 15, TLV_DB_SCALE_ITEM(-4700, 300, 0),
192 16, 23, TLV_DB_SCALE_ITEM(-2300, 200, 0),
193 24, 31, TLV_DB_SCALE_ITEM(-700, 100, 0),
194};
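/*
 * Editor's worked example (illustrative, not part of this patch): with the
 * ranges above, a raw volume register value of 28 falls in the 24..31 range,
 * i.e. -7.00 dB + (28 - 24) * 1.00 dB = -3.00 dB, while value 0 reports
 * -79.00 dB and is flagged as mute.
 */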
195
196static const char *max9877_out_mode[] = {
197 "INA -> SPK",
198 "INA -> HP",
199 "INA -> SPK and HP",
200 "INB -> SPK",
201 "INB -> HP",
202 "INB -> SPK and HP",
203 "INA + INB -> SPK",
204 "INA + INB -> HP",
205 "INA + INB -> SPK and HP",
206};
207
208static const char *max9877_osc_mode[] = {
209 "1176KHz",
210 "1100KHz",
211 "700KHz",
212};
213
214static const struct soc_enum max9877_enum[] = {
215 SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(max9877_out_mode), max9877_out_mode),
216 SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(max9877_osc_mode), max9877_osc_mode),
217};
218
219static const struct snd_kcontrol_new max9877_controls[] = {
220 SOC_SINGLE_EXT_TLV("MAX9877 PGAINA Playback Volume",
221 MAX9877_INPUT_MODE, 0, 2, 0,
222 max9877_get_reg, max9877_set_reg, max9877_pgain_tlv),
223 SOC_SINGLE_EXT_TLV("MAX9877 PGAINB Playback Volume",
224 MAX9877_INPUT_MODE, 2, 2, 0,
225 max9877_get_reg, max9877_set_reg, max9877_pgain_tlv),
226 SOC_SINGLE_EXT_TLV("MAX9877 Amp Speaker Playback Volume",
227 MAX9877_SPK_VOLUME, 0, 31, 0,
228 max9877_get_reg, max9877_set_reg, max9877_output_tlv),
229 SOC_DOUBLE_R_EXT_TLV("MAX9877 Amp HP Playback Volume",
230 MAX9877_HPL_VOLUME, MAX9877_HPR_VOLUME, 0, 31, 0,
231 max9877_get_2reg, max9877_set_2reg, max9877_output_tlv),
232 SOC_SINGLE_EXT("MAX9877 INB Stereo Switch",
233 MAX9877_INPUT_MODE, 4, 1, 1,
234 max9877_get_reg, max9877_set_reg),
235 SOC_SINGLE_EXT("MAX9877 INA Stereo Switch",
236 MAX9877_INPUT_MODE, 5, 1, 1,
237 max9877_get_reg, max9877_set_reg),
238 SOC_SINGLE_EXT("MAX9877 Zero-crossing detection Switch",
239 MAX9877_INPUT_MODE, 6, 1, 0,
240 max9877_get_reg, max9877_set_reg),
241 SOC_SINGLE_EXT("MAX9877 Bypass Mode Switch",
242 MAX9877_OUTPUT_MODE, 6, 1, 0,
243 max9877_get_reg, max9877_set_reg),
244 SOC_SINGLE_EXT("MAX9877 Shutdown Mode Switch",
245 MAX9877_OUTPUT_MODE, 7, 1, 1,
246 max9877_get_reg, max9877_set_reg),
247 SOC_ENUM_EXT("MAX9877 Output Mode", max9877_enum[0],
248 max9877_get_out_mode, max9877_set_out_mode),
249 SOC_ENUM_EXT("MAX9877 Oscillator Mode", max9877_enum[1],
250 max9877_get_osc_mode, max9877_set_osc_mode),
251};
252
 253/* This function is called from the ASoC machine driver */
254int max9877_add_controls(struct snd_soc_codec *codec)
255{
256 return snd_soc_add_controls(codec, max9877_controls,
257 ARRAY_SIZE(max9877_controls));
258}
259EXPORT_SYMBOL_GPL(max9877_add_controls);
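/*
 * Editor's sketch (not part of this patch): the exported helper is intended
 * to be called from a machine driver, typically from its snd_soc_dai_link
 * .init callback once the card's codec is known.  The function name below
 * is made up for illustration.
 */
static int example_machine_init(struct snd_soc_codec *codec)
{
	/* expose the MAX9877 amplifier controls on this card */
	return max9877_add_controls(codec);
}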
260
261static int __devinit max9877_i2c_probe(struct i2c_client *client,
262 const struct i2c_device_id *id)
263{
264 i2c = client;
265
266 max9877_write_regs();
267
268 return 0;
269}
270
271static __devexit int max9877_i2c_remove(struct i2c_client *client)
272{
273 i2c = NULL;
274
275 return 0;
276}
277
278static const struct i2c_device_id max9877_i2c_id[] = {
279 { "max9877", 0 },
280 { }
281};
282MODULE_DEVICE_TABLE(i2c, max9877_i2c_id);
283
284static struct i2c_driver max9877_i2c_driver = {
285 .driver = {
286 .name = "max9877",
287 .owner = THIS_MODULE,
288 },
289 .probe = max9877_i2c_probe,
290 .remove = __devexit_p(max9877_i2c_remove),
291 .id_table = max9877_i2c_id,
292};
293
294static int __init max9877_init(void)
295{
296 return i2c_add_driver(&max9877_i2c_driver);
297}
298module_init(max9877_init);
299
300static void __exit max9877_exit(void)
301{
302 i2c_del_driver(&max9877_i2c_driver);
303}
304module_exit(max9877_exit);
305
306MODULE_DESCRIPTION("ASoC MAX9877 amp driver");
307MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
308MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/max9877.h b/sound/soc/codecs/max9877.h
new file mode 100644
index 000000000000..6da72290ac58
--- /dev/null
+++ b/sound/soc/codecs/max9877.h
@@ -0,0 +1,37 @@
1/*
2 * max9877.h -- amp driver for max9877
3 *
4 * Copyright (C) 2009 Samsung Electronics Co.Ltd
5 * Author: Joonyoung Shim <jy0922.shim@samsung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 *
12 */
13
14#ifndef _MAX9877_H
15#define _MAX9877_H
16
17#define MAX9877_INPUT_MODE 0x00
18#define MAX9877_SPK_VOLUME 0x01
19#define MAX9877_HPL_VOLUME 0x02
20#define MAX9877_HPR_VOLUME 0x03
21#define MAX9877_OUTPUT_MODE 0x04
22
23/* MAX9877_INPUT_MODE */
24#define MAX9877_INB (1 << 4)
25#define MAX9877_INA (1 << 5)
26#define MAX9877_ZCD (1 << 6)
27
28/* MAX9877_OUTPUT_MODE */
29#define MAX9877_OUTMODE_MASK (15 << 0)
30#define MAX9877_OSC_MASK (3 << 4)
31#define MAX9877_OSC_OFFSET 4
32#define MAX9877_BYPASS (1 << 6)
33#define MAX9877_SHDN (1 << 7)
34
35extern int max9877_add_controls(struct snd_soc_codec *codec);
36
37#endif
diff --git a/sound/soc/codecs/spdif_transciever.c b/sound/soc/codecs/spdif_transciever.c
index 218b33adad90..a63191141052 100644
--- a/sound/soc/codecs/spdif_transciever.c
+++ b/sound/soc/codecs/spdif_transciever.c
@@ -21,6 +21,8 @@
21 21
22#include "spdif_transciever.h" 22#include "spdif_transciever.h"
23 23
24MODULE_LICENSE("GPL");
25
24#define STUB_RATES SNDRV_PCM_RATE_8000_96000 26#define STUB_RATES SNDRV_PCM_RATE_8000_96000
25#define STUB_FORMATS SNDRV_PCM_FMTBIT_S16_LE 27#define STUB_FORMATS SNDRV_PCM_FMTBIT_S16_LE
26 28
@@ -34,6 +36,7 @@ struct snd_soc_dai dit_stub_dai = {
34 .formats = STUB_FORMATS, 36 .formats = STUB_FORMATS,
35 }, 37 },
36}; 38};
39EXPORT_SYMBOL_GPL(dit_stub_dai);
37 40
38static int spdif_dit_probe(struct platform_device *pdev) 41static int spdif_dit_probe(struct platform_device *pdev)
39{ 42{
diff --git a/sound/soc/codecs/stac9766.c b/sound/soc/codecs/stac9766.c
index 8ad4b7b3e3ba..befc6488c39a 100644
--- a/sound/soc/codecs/stac9766.c
+++ b/sound/soc/codecs/stac9766.c
@@ -149,7 +149,7 @@ static int stac9766_ac97_write(struct snd_soc_codec *codec, unsigned int reg,
149 stac9766_ac97_write(codec, AC97_INT_PAGING, 1); 149 stac9766_ac97_write(codec, AC97_INT_PAGING, 1);
150 return 0; 150 return 0;
151 } 151 }
152 if (reg / 2 > ARRAY_SIZE(stac9766_reg)) 152 if (reg / 2 >= ARRAY_SIZE(stac9766_reg))
153 return -EIO; 153 return -EIO;
154 154
155 soc_ac97_ops.write(codec->ac97, reg, val); 155 soc_ac97_ops.write(codec->ac97, reg, val);
@@ -168,7 +168,7 @@ static unsigned int stac9766_ac97_read(struct snd_soc_codec *codec,
168 stac9766_ac97_write(codec, AC97_INT_PAGING, 1); 168 stac9766_ac97_write(codec, AC97_INT_PAGING, 1);
169 return val; 169 return val;
170 } 170 }
171 if (reg / 2 > ARRAY_SIZE(stac9766_reg)) 171 if (reg / 2 >= ARRAY_SIZE(stac9766_reg))
172 return -EIO; 172 return -EIO;
173 173
174 if (reg == AC97_RESET || reg == AC97_GPIO_STATUS || 174 if (reg == AC97_RESET || reg == AC97_GPIO_STATUS ||
diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
index cb0d1bf34b57..3395cf945d56 100644
--- a/sound/soc/codecs/tlv320aic3x.c
+++ b/sound/soc/codecs/tlv320aic3x.c
@@ -53,6 +53,7 @@
53 53
54/* codec private data */ 54/* codec private data */
55struct aic3x_priv { 55struct aic3x_priv {
56 struct snd_soc_codec codec;
56 unsigned int sysclk; 57 unsigned int sysclk;
57 int master; 58 int master;
58}; 59};
@@ -145,8 +146,8 @@ static int aic3x_read(struct snd_soc_codec *codec, unsigned int reg,
145 u8 *value) 146 u8 *value)
146{ 147{
147 *value = reg & 0xff; 148 *value = reg & 0xff;
148 if (codec->hw_read(codec->control_data, value, 1) != 1) 149
149 return -EIO; 150 value[0] = i2c_smbus_read_byte_data(codec->control_data, value[0]);
150 151
151 aic3x_write_reg_cache(codec, reg, *value); 152 aic3x_write_reg_cache(codec, reg, *value);
152 return 0; 153 return 0;
@@ -1156,11 +1157,13 @@ static int aic3x_resume(struct platform_device *pdev)
1156 * initialise the AIC3X driver 1157 * initialise the AIC3X driver
1157 * register the mixer and dsp interfaces with the kernel 1158 * register the mixer and dsp interfaces with the kernel
1158 */ 1159 */
1159static int aic3x_init(struct snd_soc_device *socdev) 1160static int aic3x_init(struct snd_soc_codec *codec)
1160{ 1161{
1161 struct snd_soc_codec *codec = socdev->card->codec; 1162 int reg;
1162 struct aic3x_setup_data *setup = socdev->codec_data; 1163
1163 int reg, ret = 0; 1164 mutex_init(&codec->mutex);
1165 INIT_LIST_HEAD(&codec->dapm_widgets);
1166 INIT_LIST_HEAD(&codec->dapm_paths);
1164 1167
1165 codec->name = "tlv320aic3x"; 1168 codec->name = "tlv320aic3x";
1166 codec->owner = THIS_MODULE; 1169 codec->owner = THIS_MODULE;
@@ -1177,13 +1180,6 @@ static int aic3x_init(struct snd_soc_device *socdev)
1177 aic3x_write(codec, AIC3X_PAGE_SELECT, PAGE0_SELECT); 1180 aic3x_write(codec, AIC3X_PAGE_SELECT, PAGE0_SELECT);
1178 aic3x_write(codec, AIC3X_RESET, SOFT_RESET); 1181 aic3x_write(codec, AIC3X_RESET, SOFT_RESET);
1179 1182
1180 /* register pcms */
1181 ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
1182 if (ret < 0) {
1183 printk(KERN_ERR "aic3x: failed to create pcms\n");
1184 goto pcm_err;
1185 }
1186
1187 /* DAC default volume and mute */ 1183 /* DAC default volume and mute */
1188 aic3x_write(codec, LDAC_VOL, DEFAULT_VOL | MUTE_ON); 1184 aic3x_write(codec, LDAC_VOL, DEFAULT_VOL | MUTE_ON);
1189 aic3x_write(codec, RDAC_VOL, DEFAULT_VOL | MUTE_ON); 1185 aic3x_write(codec, RDAC_VOL, DEFAULT_VOL | MUTE_ON);
@@ -1250,30 +1246,51 @@ static int aic3x_init(struct snd_soc_device *socdev)
1250 /* off, with power on */ 1246 /* off, with power on */
1251 aic3x_set_bias_level(codec, SND_SOC_BIAS_STANDBY); 1247 aic3x_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
1252 1248
1253 /* setup GPIO functions */ 1249 return 0;
1254 aic3x_write(codec, AIC3X_GPIO1_REG, (setup->gpio_func[0] & 0xf) << 4); 1250}
1255 aic3x_write(codec, AIC3X_GPIO2_REG, (setup->gpio_func[1] & 0xf) << 4);
1256 1251
1257 snd_soc_add_controls(codec, aic3x_snd_controls, 1252static struct snd_soc_codec *aic3x_codec;
1258 ARRAY_SIZE(aic3x_snd_controls)); 1253
1259 aic3x_add_widgets(codec); 1254static int aic3x_register(struct snd_soc_codec *codec)
1260 ret = snd_soc_init_card(socdev); 1255{
1256 int ret;
1257
1258 ret = aic3x_init(codec);
1261 if (ret < 0) { 1259 if (ret < 0) {
1262 printk(KERN_ERR "aic3x: failed to register card\n"); 1260 dev_err(codec->dev, "Failed to initialise device\n");
1263 goto card_err; 1261 return ret;
1264 } 1262 }
1265 1263
1266 return ret; 1264 aic3x_codec = codec;
1267 1265
1268card_err: 1266 ret = snd_soc_register_codec(codec);
1269 snd_soc_free_pcms(socdev); 1267 if (ret) {
1270 snd_soc_dapm_free(socdev); 1268 dev_err(codec->dev, "Failed to register codec\n");
1271pcm_err: 1269 return ret;
1272 kfree(codec->reg_cache); 1270 }
1273 return ret; 1271
1272 ret = snd_soc_register_dai(&aic3x_dai);
1273 if (ret) {
1274 dev_err(codec->dev, "Failed to register dai\n");
1275 snd_soc_unregister_codec(codec);
1276 return ret;
1277 }
1278
1279 return 0;
1274} 1280}
1275 1281
1276static struct snd_soc_device *aic3x_socdev; 1282static int aic3x_unregister(struct aic3x_priv *aic3x)
1283{
1284 aic3x_set_bias_level(&aic3x->codec, SND_SOC_BIAS_OFF);
1285
1286 snd_soc_unregister_dai(&aic3x_dai);
1287 snd_soc_unregister_codec(&aic3x->codec);
1288
1289 kfree(aic3x);
1290 aic3x_codec = NULL;
1291
1292 return 0;
1293}
1277 1294
1278#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) 1295#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
1279/* 1296/*
@@ -1288,28 +1305,36 @@ static struct snd_soc_device *aic3x_socdev;
1288static int aic3x_i2c_probe(struct i2c_client *i2c, 1305static int aic3x_i2c_probe(struct i2c_client *i2c,
1289 const struct i2c_device_id *id) 1306 const struct i2c_device_id *id)
1290{ 1307{
1291 struct snd_soc_device *socdev = aic3x_socdev; 1308 struct snd_soc_codec *codec;
1292 struct snd_soc_codec *codec = socdev->card->codec; 1309 struct aic3x_priv *aic3x;
1293 int ret;
1294 1310
1295 i2c_set_clientdata(i2c, codec); 1311 aic3x = kzalloc(sizeof(struct aic3x_priv), GFP_KERNEL);
1312 if (aic3x == NULL) {
1313 dev_err(&i2c->dev, "failed to create private data\n");
1314 return -ENOMEM;
1315 }
1316
1317 codec = &aic3x->codec;
1318 codec->dev = &i2c->dev;
1319 codec->private_data = aic3x;
1296 codec->control_data = i2c; 1320 codec->control_data = i2c;
1321 codec->hw_write = (hw_write_t) i2c_master_send;
1297 1322
1298 ret = aic3x_init(socdev); 1323 i2c_set_clientdata(i2c, aic3x);
1299 if (ret < 0) 1324
1300 printk(KERN_ERR "aic3x: failed to initialise AIC3X\n"); 1325 return aic3x_register(codec);
1301 return ret;
1302} 1326}
1303 1327
1304static int aic3x_i2c_remove(struct i2c_client *client) 1328static int aic3x_i2c_remove(struct i2c_client *client)
1305{ 1329{
1306 struct snd_soc_codec *codec = i2c_get_clientdata(client); 1330 struct aic3x_priv *aic3x = i2c_get_clientdata(client);
1307 kfree(codec->reg_cache); 1331
1308 return 0; 1332 return aic3x_unregister(aic3x);
1309} 1333}
1310 1334
1311static const struct i2c_device_id aic3x_i2c_id[] = { 1335static const struct i2c_device_id aic3x_i2c_id[] = {
1312 { "tlv320aic3x", 0 }, 1336 { "tlv320aic3x", 0 },
1337 { "tlv320aic33", 0 },
1313 { } 1338 { }
1314}; 1339};
1315MODULE_DEVICE_TABLE(i2c, aic3x_i2c_id); 1340MODULE_DEVICE_TABLE(i2c, aic3x_i2c_id);
@@ -1320,56 +1345,28 @@ static struct i2c_driver aic3x_i2c_driver = {
1320 .name = "aic3x I2C Codec", 1345 .name = "aic3x I2C Codec",
1321 .owner = THIS_MODULE, 1346 .owner = THIS_MODULE,
1322 }, 1347 },
1323 .probe = aic3x_i2c_probe, 1348 .probe = aic3x_i2c_probe,
1324 .remove = aic3x_i2c_remove, 1349 .remove = aic3x_i2c_remove,
1325 .id_table = aic3x_i2c_id, 1350 .id_table = aic3x_i2c_id,
1326}; 1351};
1327 1352
1328static int aic3x_i2c_read(struct i2c_client *client, u8 *value, int len) 1353static inline void aic3x_i2c_init(void)
1329{ 1354{
1330 value[0] = i2c_smbus_read_byte_data(client, value[0]);
1331 return (len == 1);
1332}
1333
1334static int aic3x_add_i2c_device(struct platform_device *pdev,
1335 const struct aic3x_setup_data *setup)
1336{
1337 struct i2c_board_info info;
1338 struct i2c_adapter *adapter;
1339 struct i2c_client *client;
1340 int ret; 1355 int ret;
1341 1356
1342 ret = i2c_add_driver(&aic3x_i2c_driver); 1357 ret = i2c_add_driver(&aic3x_i2c_driver);
1343 if (ret != 0) { 1358 if (ret)
1344 dev_err(&pdev->dev, "can't add i2c driver\n"); 1359 printk(KERN_ERR "%s: error registering i2c driver, %d\n",
1345 return ret; 1360 __func__, ret);
1346 } 1361}
1347
1348 memset(&info, 0, sizeof(struct i2c_board_info));
1349 info.addr = setup->i2c_address;
1350 strlcpy(info.type, "tlv320aic3x", I2C_NAME_SIZE);
1351
1352 adapter = i2c_get_adapter(setup->i2c_bus);
1353 if (!adapter) {
1354 dev_err(&pdev->dev, "can't get i2c adapter %d\n",
1355 setup->i2c_bus);
1356 goto err_driver;
1357 }
1358
1359 client = i2c_new_device(adapter, &info);
1360 i2c_put_adapter(adapter);
1361 if (!client) {
1362 dev_err(&pdev->dev, "can't add i2c device at 0x%x\n",
1363 (unsigned int)info.addr);
1364 goto err_driver;
1365 }
1366
1367 return 0;
1368 1362
1369err_driver: 1363static inline void aic3x_i2c_exit(void)
1364{
1370 i2c_del_driver(&aic3x_i2c_driver); 1365 i2c_del_driver(&aic3x_i2c_driver);
1371 return -ENODEV;
1372} 1366}
1367#else
1368static inline void aic3x_i2c_init(void) { }
1369static inline void aic3x_i2c_exit(void) { }
1373#endif 1370#endif
1374 1371
1375static int aic3x_probe(struct platform_device *pdev) 1372static int aic3x_probe(struct platform_device *pdev)
@@ -1377,43 +1374,51 @@ static int aic3x_probe(struct platform_device *pdev)
1377 struct snd_soc_device *socdev = platform_get_drvdata(pdev); 1374 struct snd_soc_device *socdev = platform_get_drvdata(pdev);
1378 struct aic3x_setup_data *setup; 1375 struct aic3x_setup_data *setup;
1379 struct snd_soc_codec *codec; 1376 struct snd_soc_codec *codec;
1380 struct aic3x_priv *aic3x;
1381 int ret = 0; 1377 int ret = 0;
1382 1378
1383 printk(KERN_INFO "AIC3X Audio Codec %s\n", AIC3X_VERSION); 1379 codec = aic3x_codec;
1380 if (!codec) {
1381 dev_err(&pdev->dev, "Codec not registered\n");
1382 return -ENODEV;
1383 }
1384 1384
1385 socdev->card->codec = codec;
1385 setup = socdev->codec_data; 1386 setup = socdev->codec_data;
1386 codec = kzalloc(sizeof(struct snd_soc_codec), GFP_KERNEL);
1387 if (codec == NULL)
1388 return -ENOMEM;
1389 1387
1390 aic3x = kzalloc(sizeof(struct aic3x_priv), GFP_KERNEL); 1388 if (setup) {
1391 if (aic3x == NULL) { 1389 /* setup GPIO functions */
1392 kfree(codec); 1390 aic3x_write(codec, AIC3X_GPIO1_REG,
1393 return -ENOMEM; 1391 (setup->gpio_func[0] & 0xf) << 4);
1392 aic3x_write(codec, AIC3X_GPIO2_REG,
1393 (setup->gpio_func[1] & 0xf) << 4);
1394 } 1394 }
1395 1395
1396 codec->private_data = aic3x; 1396 /* register pcms */
1397 socdev->card->codec = codec; 1397 ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
1398 mutex_init(&codec->mutex); 1398 if (ret < 0) {
1399 INIT_LIST_HEAD(&codec->dapm_widgets); 1399 printk(KERN_ERR "aic3x: failed to create pcms\n");
1400 INIT_LIST_HEAD(&codec->dapm_paths); 1400 goto pcm_err;
1401
1402 aic3x_socdev = socdev;
1403#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
1404 if (setup->i2c_address) {
1405 codec->hw_write = (hw_write_t) i2c_master_send;
1406 codec->hw_read = (hw_read_t) aic3x_i2c_read;
1407 ret = aic3x_add_i2c_device(pdev, setup);
1408 } 1401 }
1409#else
1410 /* Add other interfaces here */
1411#endif
1412 1402
1413 if (ret != 0) { 1403 snd_soc_add_controls(codec, aic3x_snd_controls,
1414 kfree(codec->private_data); 1404 ARRAY_SIZE(aic3x_snd_controls));
1415 kfree(codec); 1405
1406 aic3x_add_widgets(codec);
1407
1408 ret = snd_soc_init_card(socdev);
1409 if (ret < 0) {
1410 printk(KERN_ERR "aic3x: failed to register card\n");
1411 goto card_err;
1416 } 1412 }
1413
1414 return ret;
1415
1416card_err:
1417 snd_soc_free_pcms(socdev);
1418 snd_soc_dapm_free(socdev);
1419
1420pcm_err:
1421 kfree(codec->reg_cache);
1417 return ret; 1422 return ret;
1418} 1423}
1419 1424
@@ -1428,12 +1433,8 @@ static int aic3x_remove(struct platform_device *pdev)
1428 1433
1429 snd_soc_free_pcms(socdev); 1434 snd_soc_free_pcms(socdev);
1430 snd_soc_dapm_free(socdev); 1435 snd_soc_dapm_free(socdev);
1431#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) 1436
1432 i2c_unregister_device(codec->control_data); 1437 kfree(codec->reg_cache);
1433 i2c_del_driver(&aic3x_i2c_driver);
1434#endif
1435 kfree(codec->private_data);
1436 kfree(codec);
1437 1438
1438 return 0; 1439 return 0;
1439} 1440}
@@ -1448,13 +1449,15 @@ EXPORT_SYMBOL_GPL(soc_codec_dev_aic3x);
1448 1449
1449static int __init aic3x_modinit(void) 1450static int __init aic3x_modinit(void)
1450{ 1451{
1451 return snd_soc_register_dai(&aic3x_dai); 1452 aic3x_i2c_init();
1453
1454 return 0;
1452} 1455}
1453module_init(aic3x_modinit); 1456module_init(aic3x_modinit);
1454 1457
1455static void __exit aic3x_exit(void) 1458static void __exit aic3x_exit(void)
1456{ 1459{
1457 snd_soc_unregister_dai(&aic3x_dai); 1460 aic3x_i2c_exit();
1458} 1461}
1459module_exit(aic3x_exit); 1462module_exit(aic3x_exit);
1460 1463
diff --git a/sound/soc/codecs/tlv320aic3x.h b/sound/soc/codecs/tlv320aic3x.h
index ac827e578c4d..9af1c886213c 100644
--- a/sound/soc/codecs/tlv320aic3x.h
+++ b/sound/soc/codecs/tlv320aic3x.h
@@ -282,8 +282,6 @@ int aic3x_headset_detected(struct snd_soc_codec *codec);
282int aic3x_button_pressed(struct snd_soc_codec *codec); 282int aic3x_button_pressed(struct snd_soc_codec *codec);
283 283
284struct aic3x_setup_data { 284struct aic3x_setup_data {
285 int i2c_bus;
286 unsigned short i2c_address;
287 unsigned int gpio_func[2]; 285 unsigned int gpio_func[2];
288}; 286};
289 287
diff --git a/sound/soc/codecs/twl4030.c b/sound/soc/codecs/twl4030.c
index 4dbb853eef5a..4df7c6c61c76 100644
--- a/sound/soc/codecs/twl4030.c
+++ b/sound/soc/codecs/twl4030.c
@@ -225,55 +225,11 @@ static void twl4030_codec_mute(struct snd_soc_codec *codec, int mute)
225 return; 225 return;
226 226
227 if (mute) { 227 if (mute) {
228 /* Bypass the reg_cache and mute the volumes
229 * Headset mute is done in it's own event handler
230 * Things to mute: Earpiece, PreDrivL/R, CarkitL/R
231 */
232 reg_val = twl4030_read_reg_cache(codec, TWL4030_REG_EAR_CTL);
233 twl4030_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
234 reg_val & (~TWL4030_EAR_GAIN),
235 TWL4030_REG_EAR_CTL);
236
237 reg_val = twl4030_read_reg_cache(codec, TWL4030_REG_PREDL_CTL);
238 twl4030_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
239 reg_val & (~TWL4030_PREDL_GAIN),
240 TWL4030_REG_PREDL_CTL);
241 reg_val = twl4030_read_reg_cache(codec, TWL4030_REG_PREDR_CTL);
242 twl4030_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
243 reg_val & (~TWL4030_PREDR_GAIN),
244 TWL4030_REG_PREDL_CTL);
245
246 reg_val = twl4030_read_reg_cache(codec, TWL4030_REG_PRECKL_CTL);
247 twl4030_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
248 reg_val & (~TWL4030_PRECKL_GAIN),
249 TWL4030_REG_PRECKL_CTL);
250 reg_val = twl4030_read_reg_cache(codec, TWL4030_REG_PRECKR_CTL);
251 twl4030_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
252 reg_val & (~TWL4030_PRECKR_GAIN),
253 TWL4030_REG_PRECKR_CTL);
254
255 /* Disable PLL */ 228 /* Disable PLL */
256 reg_val = twl4030_read_reg_cache(codec, TWL4030_REG_APLL_CTL); 229 reg_val = twl4030_read_reg_cache(codec, TWL4030_REG_APLL_CTL);
257 reg_val &= ~TWL4030_APLL_EN; 230 reg_val &= ~TWL4030_APLL_EN;
258 twl4030_write(codec, TWL4030_REG_APLL_CTL, reg_val); 231 twl4030_write(codec, TWL4030_REG_APLL_CTL, reg_val);
259 } else { 232 } else {
260 /* Restore the volumes
261 * Headset mute is done in it's own event handler
262 * Things to restore: Earpiece, PreDrivL/R, CarkitL/R
263 */
264 twl4030_write(codec, TWL4030_REG_EAR_CTL,
265 twl4030_read_reg_cache(codec, TWL4030_REG_EAR_CTL));
266
267 twl4030_write(codec, TWL4030_REG_PREDL_CTL,
268 twl4030_read_reg_cache(codec, TWL4030_REG_PREDL_CTL));
269 twl4030_write(codec, TWL4030_REG_PREDR_CTL,
270 twl4030_read_reg_cache(codec, TWL4030_REG_PREDR_CTL));
271
272 twl4030_write(codec, TWL4030_REG_PRECKL_CTL,
273 twl4030_read_reg_cache(codec, TWL4030_REG_PRECKL_CTL));
274 twl4030_write(codec, TWL4030_REG_PRECKR_CTL,
275 twl4030_read_reg_cache(codec, TWL4030_REG_PRECKR_CTL));
276
277 /* Enable PLL */ 233 /* Enable PLL */
278 reg_val = twl4030_read_reg_cache(codec, TWL4030_REG_APLL_CTL); 234 reg_val = twl4030_read_reg_cache(codec, TWL4030_REG_APLL_CTL);
279 reg_val |= TWL4030_APLL_EN; 235 reg_val |= TWL4030_APLL_EN;
@@ -443,16 +399,20 @@ SOC_DAPM_ENUM("Route", twl4030_vibrapath_enum);
443 399
444/* Left analog microphone selection */ 400/* Left analog microphone selection */
445static const struct snd_kcontrol_new twl4030_dapm_analoglmic_controls[] = { 401static const struct snd_kcontrol_new twl4030_dapm_analoglmic_controls[] = {
446 SOC_DAPM_SINGLE("Main mic", TWL4030_REG_ANAMICL, 0, 1, 0), 402 SOC_DAPM_SINGLE("Main Mic Capture Switch",
447 SOC_DAPM_SINGLE("Headset mic", TWL4030_REG_ANAMICL, 1, 1, 0), 403 TWL4030_REG_ANAMICL, 0, 1, 0),
448 SOC_DAPM_SINGLE("AUXL", TWL4030_REG_ANAMICL, 2, 1, 0), 404 SOC_DAPM_SINGLE("Headset Mic Capture Switch",
449 SOC_DAPM_SINGLE("Carkit mic", TWL4030_REG_ANAMICL, 3, 1, 0), 405 TWL4030_REG_ANAMICL, 1, 1, 0),
406 SOC_DAPM_SINGLE("AUXL Capture Switch",
407 TWL4030_REG_ANAMICL, 2, 1, 0),
408 SOC_DAPM_SINGLE("Carkit Mic Capture Switch",
409 TWL4030_REG_ANAMICL, 3, 1, 0),
450}; 410};
451 411
452/* Right analog microphone selection */ 412/* Right analog microphone selection */
453static const struct snd_kcontrol_new twl4030_dapm_analogrmic_controls[] = { 413static const struct snd_kcontrol_new twl4030_dapm_analogrmic_controls[] = {
454 SOC_DAPM_SINGLE("Sub mic", TWL4030_REG_ANAMICR, 0, 1, 0), 414 SOC_DAPM_SINGLE("Sub Mic Capture Switch", TWL4030_REG_ANAMICR, 0, 1, 0),
455 SOC_DAPM_SINGLE("AUXR", TWL4030_REG_ANAMICR, 2, 1, 0), 415 SOC_DAPM_SINGLE("AUXR Capture Switch", TWL4030_REG_ANAMICR, 2, 1, 0),
456}; 416};
457 417
458/* TX1 L/R Analog/Digital microphone selection */ 418/* TX1 L/R Analog/Digital microphone selection */
@@ -560,6 +520,41 @@ static int micpath_event(struct snd_soc_dapm_widget *w,
560 return 0; 520 return 0;
561} 521}
562 522
523/*
524 * Output PGA builder:
525 * Handle the muting and unmuting of the given output (turning off the
526 * amplifier associated with the output pin)
527 * On mute bypass the reg_cache and mute the volume
528 * On unmute: restore the register content
529 * Outputs handled in this way: Earpiece, PreDrivL/R, CarkitL/R
530 */
531#define TWL4030_OUTPUT_PGA(pin_name, reg, mask) \
532static int pin_name##pga_event(struct snd_soc_dapm_widget *w, \
533 struct snd_kcontrol *kcontrol, int event) \
534{ \
535 u8 reg_val; \
536 \
537 switch (event) { \
538 case SND_SOC_DAPM_POST_PMU: \
539 twl4030_write(w->codec, reg, \
540 twl4030_read_reg_cache(w->codec, reg)); \
541 break; \
542 case SND_SOC_DAPM_POST_PMD: \
543 reg_val = twl4030_read_reg_cache(w->codec, reg); \
544 twl4030_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE, \
545 reg_val & (~mask), \
546 reg); \
547 break; \
548 } \
549 return 0; \
550}
551
552TWL4030_OUTPUT_PGA(earpiece, TWL4030_REG_EAR_CTL, TWL4030_EAR_GAIN);
553TWL4030_OUTPUT_PGA(predrivel, TWL4030_REG_PREDL_CTL, TWL4030_PREDL_GAIN);
554TWL4030_OUTPUT_PGA(predriver, TWL4030_REG_PREDR_CTL, TWL4030_PREDR_GAIN);
555TWL4030_OUTPUT_PGA(carkitl, TWL4030_REG_PRECKL_CTL, TWL4030_PRECKL_GAIN);
556TWL4030_OUTPUT_PGA(carkitr, TWL4030_REG_PRECKR_CTL, TWL4030_PRECKR_GAIN);
557
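/*
 * Editor's note (not part of this patch): for reference, the macro above
 * expands for the earpiece pin to roughly the handler below, which the
 * "Earpiece PGA" DAPM widget added further down invokes on power-up and
 * power-down.  The name carries an _expanded suffix only to avoid clashing
 * with the macro-generated symbol.
 */
static int earpiecepga_event_expanded(struct snd_soc_dapm_widget *w,
	struct snd_kcontrol *kcontrol, int event)
{
	u8 reg_val;

	switch (event) {
	case SND_SOC_DAPM_POST_PMU:
		/* restore the cached gain once the amplifier is powered */
		twl4030_write(w->codec, TWL4030_REG_EAR_CTL,
			twl4030_read_reg_cache(w->codec, TWL4030_REG_EAR_CTL));
		break;
	case SND_SOC_DAPM_POST_PMD:
		/* mute by clearing the gain bits behind the reg_cache's back */
		reg_val = twl4030_read_reg_cache(w->codec, TWL4030_REG_EAR_CTL);
		twl4030_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
				reg_val & (~TWL4030_EAR_GAIN),
				TWL4030_REG_EAR_CTL);
		break;
	}
	return 0;
}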
563static void handsfree_ramp(struct snd_soc_codec *codec, int reg, int ramp) 558static void handsfree_ramp(struct snd_soc_codec *codec, int reg, int ramp)
564{ 559{
565 unsigned char hs_ctl; 560 unsigned char hs_ctl;
@@ -620,6 +615,9 @@ static int handsfreerpga_event(struct snd_soc_dapm_widget *w,
620 615
621static void headset_ramp(struct snd_soc_codec *codec, int ramp) 616static void headset_ramp(struct snd_soc_codec *codec, int ramp)
622{ 617{
618 struct snd_soc_device *socdev = codec->socdev;
619 struct twl4030_setup_data *setup = socdev->codec_data;
620
623 unsigned char hs_gain, hs_pop; 621 unsigned char hs_gain, hs_pop;
624 struct twl4030_priv *twl4030 = codec->private_data; 622 struct twl4030_priv *twl4030 = codec->private_data;
625 /* Base values for ramp delay calculation: 2^19 - 2^26 */ 623 /* Base values for ramp delay calculation: 2^19 - 2^26 */
@@ -629,6 +627,17 @@ static void headset_ramp(struct snd_soc_codec *codec, int ramp)
629 hs_gain = twl4030_read_reg_cache(codec, TWL4030_REG_HS_GAIN_SET); 627 hs_gain = twl4030_read_reg_cache(codec, TWL4030_REG_HS_GAIN_SET);
630 hs_pop = twl4030_read_reg_cache(codec, TWL4030_REG_HS_POPN_SET); 628 hs_pop = twl4030_read_reg_cache(codec, TWL4030_REG_HS_POPN_SET);
631 629
 630 /* Enable external mute control, which dramatically reduces
 631 * the pop noise */
632 if (setup && setup->hs_extmute) {
633 if (setup->set_hs_extmute) {
634 setup->set_hs_extmute(1);
635 } else {
636 hs_pop |= TWL4030_EXTMUTE;
637 twl4030_write(codec, TWL4030_REG_HS_POPN_SET, hs_pop);
638 }
639 }
640
632 if (ramp) { 641 if (ramp) {
633 /* Headset ramp-up according to the TRM */ 642 /* Headset ramp-up according to the TRM */
634 hs_pop |= TWL4030_VMID_EN; 643 hs_pop |= TWL4030_VMID_EN;
@@ -636,6 +645,9 @@ static void headset_ramp(struct snd_soc_codec *codec, int ramp)
636 twl4030_write(codec, TWL4030_REG_HS_GAIN_SET, hs_gain); 645 twl4030_write(codec, TWL4030_REG_HS_GAIN_SET, hs_gain);
637 hs_pop |= TWL4030_RAMP_EN; 646 hs_pop |= TWL4030_RAMP_EN;
638 twl4030_write(codec, TWL4030_REG_HS_POPN_SET, hs_pop); 647 twl4030_write(codec, TWL4030_REG_HS_POPN_SET, hs_pop);
648 /* Wait ramp delay time + 1, so the VMID can settle */
649 mdelay((ramp_base[(hs_pop & TWL4030_RAMP_DELAY) >> 2] /
650 twl4030->sysclk) + 1);
639 } else { 651 } else {
640 /* Headset ramp-down _not_ according to 652 /* Headset ramp-down _not_ according to
641 * the TRM, but in a way that it is working */ 653 * the TRM, but in a way that it is working */
@@ -652,6 +664,16 @@ static void headset_ramp(struct snd_soc_codec *codec, int ramp)
652 hs_pop &= ~TWL4030_VMID_EN; 664 hs_pop &= ~TWL4030_VMID_EN;
653 twl4030_write(codec, TWL4030_REG_HS_POPN_SET, hs_pop); 665 twl4030_write(codec, TWL4030_REG_HS_POPN_SET, hs_pop);
654 } 666 }
667
668 /* Disable external mute */
669 if (setup && setup->hs_extmute) {
670 if (setup->set_hs_extmute) {
671 setup->set_hs_extmute(0);
672 } else {
673 hs_pop &= ~TWL4030_EXTMUTE;
674 twl4030_write(codec, TWL4030_REG_HS_POPN_SET, hs_pop);
675 }
676 }
655} 677}
656 678
657static int headsetlpga_event(struct snd_soc_dapm_widget *w, 679static int headsetlpga_event(struct snd_soc_dapm_widget *w,
@@ -712,7 +734,19 @@ static int bypass_event(struct snd_soc_dapm_widget *w,
712 734
713 reg = twl4030_read_reg_cache(w->codec, m->reg); 735 reg = twl4030_read_reg_cache(w->codec, m->reg);
714 736
715 if (m->reg <= TWL4030_REG_ARXR2_APGA_CTL) { 737 /*
738 * bypass_state[0:3] - analog HiFi bypass
739 * bypass_state[4] - analog voice bypass
740 * bypass_state[5] - digital voice bypass
741 * bypass_state[6:7] - digital HiFi bypass
742 */
743 if (m->reg == TWL4030_REG_VSTPGA) {
744 /* Voice digital bypass */
745 if (reg)
746 twl4030->bypass_state |= (1 << 5);
747 else
748 twl4030->bypass_state &= ~(1 << 5);
749 } else if (m->reg <= TWL4030_REG_ARXR2_APGA_CTL) {
716 /* Analog bypass */ 750 /* Analog bypass */
717 if (reg & (1 << m->shift)) 751 if (reg & (1 << m->shift))
718 twl4030->bypass_state |= 752 twl4030->bypass_state |=
@@ -726,12 +760,6 @@ static int bypass_event(struct snd_soc_dapm_widget *w,
726 twl4030->bypass_state |= (1 << 4); 760 twl4030->bypass_state |= (1 << 4);
727 else 761 else
728 twl4030->bypass_state &= ~(1 << 4); 762 twl4030->bypass_state &= ~(1 << 4);
729 } else if (m->reg == TWL4030_REG_VSTPGA) {
730 /* Voice digital bypass */
731 if (reg)
732 twl4030->bypass_state |= (1 << 5);
733 else
734 twl4030->bypass_state &= ~(1 << 5);
735 } else { 763 } else {
736 /* Digital bypass */ 764 /* Digital bypass */
737 if (reg & (0x7 << m->shift)) 765 if (reg & (0x7 << m->shift))
@@ -924,7 +952,7 @@ static const struct soc_enum twl4030_op_modes_enum =
924 ARRAY_SIZE(twl4030_op_modes_texts), 952 ARRAY_SIZE(twl4030_op_modes_texts),
925 twl4030_op_modes_texts); 953 twl4030_op_modes_texts);
926 954
927int snd_soc_put_twl4030_opmode_enum_double(struct snd_kcontrol *kcontrol, 955static int snd_soc_put_twl4030_opmode_enum_double(struct snd_kcontrol *kcontrol,
928 struct snd_ctl_elem_value *ucontrol) 956 struct snd_ctl_elem_value *ucontrol)
929{ 957{
930 struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); 958 struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
@@ -1005,6 +1033,16 @@ static DECLARE_TLV_DB_SCALE(digital_capture_tlv, 0, 100, 0);
1005 */ 1033 */
1006static DECLARE_TLV_DB_SCALE(input_gain_tlv, 0, 600, 0); 1034static DECLARE_TLV_DB_SCALE(input_gain_tlv, 0, 600, 0);
1007 1035
1036/* AVADC clock priority */
1037static const char *twl4030_avadc_clk_priority_texts[] = {
1038 "Voice high priority", "HiFi high priority"
1039};
1040
1041static const struct soc_enum twl4030_avadc_clk_priority_enum =
1042 SOC_ENUM_SINGLE(TWL4030_REG_AVADC_CTL, 2,
1043 ARRAY_SIZE(twl4030_avadc_clk_priority_texts),
1044 twl4030_avadc_clk_priority_texts);
1045
1008static const char *twl4030_rampdelay_texts[] = { 1046static const char *twl4030_rampdelay_texts[] = {
1009 "27/20/14 ms", "55/40/27 ms", "109/81/55 ms", "218/161/109 ms", 1047 "27/20/14 ms", "55/40/27 ms", "109/81/55 ms", "218/161/109 ms",
1010 "437/323/218 ms", "874/645/437 ms", "1748/1291/874 ms", 1048 "437/323/218 ms", "874/645/437 ms", "1748/1291/874 ms",
@@ -1106,6 +1144,8 @@ static const struct snd_kcontrol_new twl4030_snd_controls[] = {
1106 SOC_DOUBLE_TLV("Analog Capture Volume", TWL4030_REG_ANAMIC_GAIN, 1144 SOC_DOUBLE_TLV("Analog Capture Volume", TWL4030_REG_ANAMIC_GAIN,
1107 0, 3, 5, 0, input_gain_tlv), 1145 0, 3, 5, 0, input_gain_tlv),
1108 1146
1147 SOC_ENUM("AVADC Clock Priority", twl4030_avadc_clk_priority_enum),
1148
1109 SOC_ENUM("HS ramp delay", twl4030_rampdelay_enum), 1149 SOC_ENUM("HS ramp delay", twl4030_rampdelay_enum),
1110 1150
1111 SOC_ENUM("Vibra H-bridge mode", twl4030_vibradirmode_enum), 1151 SOC_ENUM("Vibra H-bridge mode", twl4030_vibradirmode_enum),
@@ -1208,13 +1248,22 @@ static const struct snd_soc_dapm_widget twl4030_dapm_widgets[] = {
1208 SND_SOC_DAPM_MIXER("Earpiece Mixer", SND_SOC_NOPM, 0, 0, 1248 SND_SOC_DAPM_MIXER("Earpiece Mixer", SND_SOC_NOPM, 0, 0,
1209 &twl4030_dapm_earpiece_controls[0], 1249 &twl4030_dapm_earpiece_controls[0],
1210 ARRAY_SIZE(twl4030_dapm_earpiece_controls)), 1250 ARRAY_SIZE(twl4030_dapm_earpiece_controls)),
1251 SND_SOC_DAPM_PGA_E("Earpiece PGA", SND_SOC_NOPM,
1252 0, 0, NULL, 0, earpiecepga_event,
1253 SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_POST_PMD),
1211 /* PreDrivL/R */ 1254 /* PreDrivL/R */
1212 SND_SOC_DAPM_MIXER("PredriveL Mixer", SND_SOC_NOPM, 0, 0, 1255 SND_SOC_DAPM_MIXER("PredriveL Mixer", SND_SOC_NOPM, 0, 0,
1213 &twl4030_dapm_predrivel_controls[0], 1256 &twl4030_dapm_predrivel_controls[0],
1214 ARRAY_SIZE(twl4030_dapm_predrivel_controls)), 1257 ARRAY_SIZE(twl4030_dapm_predrivel_controls)),
1258 SND_SOC_DAPM_PGA_E("PredriveL PGA", SND_SOC_NOPM,
1259 0, 0, NULL, 0, predrivelpga_event,
1260 SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_POST_PMD),
1215 SND_SOC_DAPM_MIXER("PredriveR Mixer", SND_SOC_NOPM, 0, 0, 1261 SND_SOC_DAPM_MIXER("PredriveR Mixer", SND_SOC_NOPM, 0, 0,
1216 &twl4030_dapm_predriver_controls[0], 1262 &twl4030_dapm_predriver_controls[0],
1217 ARRAY_SIZE(twl4030_dapm_predriver_controls)), 1263 ARRAY_SIZE(twl4030_dapm_predriver_controls)),
1264 SND_SOC_DAPM_PGA_E("PredriveR PGA", SND_SOC_NOPM,
1265 0, 0, NULL, 0, predriverpga_event,
1266 SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_POST_PMD),
1218 /* HeadsetL/R */ 1267 /* HeadsetL/R */
1219 SND_SOC_DAPM_MIXER("HeadsetL Mixer", SND_SOC_NOPM, 0, 0, 1268 SND_SOC_DAPM_MIXER("HeadsetL Mixer", SND_SOC_NOPM, 0, 0,
1220 &twl4030_dapm_hsol_controls[0], 1269 &twl4030_dapm_hsol_controls[0],
@@ -1232,22 +1281,28 @@ static const struct snd_soc_dapm_widget twl4030_dapm_widgets[] = {
1232 SND_SOC_DAPM_MIXER("CarkitL Mixer", SND_SOC_NOPM, 0, 0, 1281 SND_SOC_DAPM_MIXER("CarkitL Mixer", SND_SOC_NOPM, 0, 0,
1233 &twl4030_dapm_carkitl_controls[0], 1282 &twl4030_dapm_carkitl_controls[0],
1234 ARRAY_SIZE(twl4030_dapm_carkitl_controls)), 1283 ARRAY_SIZE(twl4030_dapm_carkitl_controls)),
1284 SND_SOC_DAPM_PGA_E("CarkitL PGA", SND_SOC_NOPM,
1285 0, 0, NULL, 0, carkitlpga_event,
1286 SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_POST_PMD),
1235 SND_SOC_DAPM_MIXER("CarkitR Mixer", SND_SOC_NOPM, 0, 0, 1287 SND_SOC_DAPM_MIXER("CarkitR Mixer", SND_SOC_NOPM, 0, 0,
1236 &twl4030_dapm_carkitr_controls[0], 1288 &twl4030_dapm_carkitr_controls[0],
1237 ARRAY_SIZE(twl4030_dapm_carkitr_controls)), 1289 ARRAY_SIZE(twl4030_dapm_carkitr_controls)),
1290 SND_SOC_DAPM_PGA_E("CarkitR PGA", SND_SOC_NOPM,
1291 0, 0, NULL, 0, carkitrpga_event,
1292 SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_POST_PMD),
1238 1293
1239 /* Output MUX controls */ 1294 /* Output MUX controls */
1240 /* HandsfreeL/R */ 1295 /* HandsfreeL/R */
1241 SND_SOC_DAPM_MUX("HandsfreeL Mux", SND_SOC_NOPM, 0, 0, 1296 SND_SOC_DAPM_MUX("HandsfreeL Mux", SND_SOC_NOPM, 0, 0,
1242 &twl4030_dapm_handsfreel_control), 1297 &twl4030_dapm_handsfreel_control),
1243 SND_SOC_DAPM_SWITCH("HandsfreeL Switch", SND_SOC_NOPM, 0, 0, 1298 SND_SOC_DAPM_SWITCH("HandsfreeL", SND_SOC_NOPM, 0, 0,
1244 &twl4030_dapm_handsfreelmute_control), 1299 &twl4030_dapm_handsfreelmute_control),
1245 SND_SOC_DAPM_PGA_E("HandsfreeL PGA", SND_SOC_NOPM, 1300 SND_SOC_DAPM_PGA_E("HandsfreeL PGA", SND_SOC_NOPM,
1246 0, 0, NULL, 0, handsfreelpga_event, 1301 0, 0, NULL, 0, handsfreelpga_event,
1247 SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_POST_PMD), 1302 SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_POST_PMD),
1248 SND_SOC_DAPM_MUX("HandsfreeR Mux", SND_SOC_NOPM, 5, 0, 1303 SND_SOC_DAPM_MUX("HandsfreeR Mux", SND_SOC_NOPM, 5, 0,
1249 &twl4030_dapm_handsfreer_control), 1304 &twl4030_dapm_handsfreer_control),
1250 SND_SOC_DAPM_SWITCH("HandsfreeR Switch", SND_SOC_NOPM, 0, 0, 1305 SND_SOC_DAPM_SWITCH("HandsfreeR", SND_SOC_NOPM, 0, 0,
1251 &twl4030_dapm_handsfreermute_control), 1306 &twl4030_dapm_handsfreermute_control),
1252 SND_SOC_DAPM_PGA_E("HandsfreeR PGA", SND_SOC_NOPM, 1307 SND_SOC_DAPM_PGA_E("HandsfreeR PGA", SND_SOC_NOPM,
1253 0, 0, NULL, 0, handsfreerpga_event, 1308 0, 0, NULL, 0, handsfreerpga_event,
@@ -1282,11 +1337,11 @@ static const struct snd_soc_dapm_widget twl4030_dapm_widgets[] = {
1282 SND_SOC_DAPM_POST_REG), 1337 SND_SOC_DAPM_POST_REG),
1283 1338
1284 /* Analog input mixers for the capture amplifiers */ 1339 /* Analog input mixers for the capture amplifiers */
1285 SND_SOC_DAPM_MIXER("Analog Left Capture Route", 1340 SND_SOC_DAPM_MIXER("Analog Left",
1286 TWL4030_REG_ANAMICL, 4, 0, 1341 TWL4030_REG_ANAMICL, 4, 0,
1287 &twl4030_dapm_analoglmic_controls[0], 1342 &twl4030_dapm_analoglmic_controls[0],
1288 ARRAY_SIZE(twl4030_dapm_analoglmic_controls)), 1343 ARRAY_SIZE(twl4030_dapm_analoglmic_controls)),
1289 SND_SOC_DAPM_MIXER("Analog Right Capture Route", 1344 SND_SOC_DAPM_MIXER("Analog Right",
1290 TWL4030_REG_ANAMICR, 4, 0, 1345 TWL4030_REG_ANAMICR, 4, 0,
1291 &twl4030_dapm_analogrmic_controls[0], 1346 &twl4030_dapm_analogrmic_controls[0],
1292 ARRAY_SIZE(twl4030_dapm_analogrmic_controls)), 1347 ARRAY_SIZE(twl4030_dapm_analogrmic_controls)),
@@ -1326,16 +1381,19 @@ static const struct snd_soc_dapm_route intercon[] = {
1326 {"Earpiece Mixer", "AudioL1", "Analog L1 Playback Mixer"}, 1381 {"Earpiece Mixer", "AudioL1", "Analog L1 Playback Mixer"},
1327 {"Earpiece Mixer", "AudioL2", "Analog L2 Playback Mixer"}, 1382 {"Earpiece Mixer", "AudioL2", "Analog L2 Playback Mixer"},
1328 {"Earpiece Mixer", "AudioR1", "Analog R1 Playback Mixer"}, 1383 {"Earpiece Mixer", "AudioR1", "Analog R1 Playback Mixer"},
1384 {"Earpiece PGA", NULL, "Earpiece Mixer"},
1329 /* PreDrivL */ 1385 /* PreDrivL */
1330 {"PredriveL Mixer", "Voice", "Analog Voice Playback Mixer"}, 1386 {"PredriveL Mixer", "Voice", "Analog Voice Playback Mixer"},
1331 {"PredriveL Mixer", "AudioL1", "Analog L1 Playback Mixer"}, 1387 {"PredriveL Mixer", "AudioL1", "Analog L1 Playback Mixer"},
1332 {"PredriveL Mixer", "AudioL2", "Analog L2 Playback Mixer"}, 1388 {"PredriveL Mixer", "AudioL2", "Analog L2 Playback Mixer"},
1333 {"PredriveL Mixer", "AudioR2", "Analog R2 Playback Mixer"}, 1389 {"PredriveL Mixer", "AudioR2", "Analog R2 Playback Mixer"},
1390 {"PredriveL PGA", NULL, "PredriveL Mixer"},
1334 /* PreDrivR */ 1391 /* PreDrivR */
1335 {"PredriveR Mixer", "Voice", "Analog Voice Playback Mixer"}, 1392 {"PredriveR Mixer", "Voice", "Analog Voice Playback Mixer"},
1336 {"PredriveR Mixer", "AudioR1", "Analog R1 Playback Mixer"}, 1393 {"PredriveR Mixer", "AudioR1", "Analog R1 Playback Mixer"},
1337 {"PredriveR Mixer", "AudioR2", "Analog R2 Playback Mixer"}, 1394 {"PredriveR Mixer", "AudioR2", "Analog R2 Playback Mixer"},
1338 {"PredriveR Mixer", "AudioL2", "Analog L2 Playback Mixer"}, 1395 {"PredriveR Mixer", "AudioL2", "Analog L2 Playback Mixer"},
1396 {"PredriveR PGA", NULL, "PredriveR Mixer"},
1339 /* HeadsetL */ 1397 /* HeadsetL */
1340 {"HeadsetL Mixer", "Voice", "Analog Voice Playback Mixer"}, 1398 {"HeadsetL Mixer", "Voice", "Analog Voice Playback Mixer"},
1341 {"HeadsetL Mixer", "AudioL1", "Analog L1 Playback Mixer"}, 1399 {"HeadsetL Mixer", "AudioL1", "Analog L1 Playback Mixer"},
@@ -1350,24 +1408,26 @@ static const struct snd_soc_dapm_route intercon[] = {
1350 {"CarkitL Mixer", "Voice", "Analog Voice Playback Mixer"}, 1408 {"CarkitL Mixer", "Voice", "Analog Voice Playback Mixer"},
1351 {"CarkitL Mixer", "AudioL1", "Analog L1 Playback Mixer"}, 1409 {"CarkitL Mixer", "AudioL1", "Analog L1 Playback Mixer"},
1352 {"CarkitL Mixer", "AudioL2", "Analog L2 Playback Mixer"}, 1410 {"CarkitL Mixer", "AudioL2", "Analog L2 Playback Mixer"},
1411 {"CarkitL PGA", NULL, "CarkitL Mixer"},
1353 /* CarkitR */ 1412 /* CarkitR */
1354 {"CarkitR Mixer", "Voice", "Analog Voice Playback Mixer"}, 1413 {"CarkitR Mixer", "Voice", "Analog Voice Playback Mixer"},
1355 {"CarkitR Mixer", "AudioR1", "Analog R1 Playback Mixer"}, 1414 {"CarkitR Mixer", "AudioR1", "Analog R1 Playback Mixer"},
1356 {"CarkitR Mixer", "AudioR2", "Analog R2 Playback Mixer"}, 1415 {"CarkitR Mixer", "AudioR2", "Analog R2 Playback Mixer"},
1416 {"CarkitR PGA", NULL, "CarkitR Mixer"},
1357 /* HandsfreeL */ 1417 /* HandsfreeL */
1358 {"HandsfreeL Mux", "Voice", "Analog Voice Playback Mixer"}, 1418 {"HandsfreeL Mux", "Voice", "Analog Voice Playback Mixer"},
1359 {"HandsfreeL Mux", "AudioL1", "Analog L1 Playback Mixer"}, 1419 {"HandsfreeL Mux", "AudioL1", "Analog L1 Playback Mixer"},
1360 {"HandsfreeL Mux", "AudioL2", "Analog L2 Playback Mixer"}, 1420 {"HandsfreeL Mux", "AudioL2", "Analog L2 Playback Mixer"},
1361 {"HandsfreeL Mux", "AudioR2", "Analog R2 Playback Mixer"}, 1421 {"HandsfreeL Mux", "AudioR2", "Analog R2 Playback Mixer"},
1362 {"HandsfreeL Switch", "Switch", "HandsfreeL Mux"}, 1422 {"HandsfreeL", "Switch", "HandsfreeL Mux"},
1363 {"HandsfreeL PGA", NULL, "HandsfreeL Switch"}, 1423 {"HandsfreeL PGA", NULL, "HandsfreeL"},
1364 /* HandsfreeR */ 1424 /* HandsfreeR */
1365 {"HandsfreeR Mux", "Voice", "Analog Voice Playback Mixer"}, 1425 {"HandsfreeR Mux", "Voice", "Analog Voice Playback Mixer"},
1366 {"HandsfreeR Mux", "AudioR1", "Analog R1 Playback Mixer"}, 1426 {"HandsfreeR Mux", "AudioR1", "Analog R1 Playback Mixer"},
1367 {"HandsfreeR Mux", "AudioR2", "Analog R2 Playback Mixer"}, 1427 {"HandsfreeR Mux", "AudioR2", "Analog R2 Playback Mixer"},
1368 {"HandsfreeR Mux", "AudioL2", "Analog L2 Playback Mixer"}, 1428 {"HandsfreeR Mux", "AudioL2", "Analog L2 Playback Mixer"},
1369 {"HandsfreeR Switch", "Switch", "HandsfreeR Mux"}, 1429 {"HandsfreeR", "Switch", "HandsfreeR Mux"},
1370 {"HandsfreeR PGA", NULL, "HandsfreeR Switch"}, 1430 {"HandsfreeR PGA", NULL, "HandsfreeR"},
1371 /* Vibra */ 1431 /* Vibra */
1372 {"Vibra Mux", "AudioL1", "DAC Left1"}, 1432 {"Vibra Mux", "AudioL1", "DAC Left1"},
1373 {"Vibra Mux", "AudioR1", "DAC Right1"}, 1433 {"Vibra Mux", "AudioR1", "DAC Right1"},
@@ -1377,29 +1437,29 @@ static const struct snd_soc_dapm_route intercon[] = {
1377 /* outputs */ 1437 /* outputs */
1378 {"OUTL", NULL, "Analog L2 Playback Mixer"}, 1438 {"OUTL", NULL, "Analog L2 Playback Mixer"},
1379 {"OUTR", NULL, "Analog R2 Playback Mixer"}, 1439 {"OUTR", NULL, "Analog R2 Playback Mixer"},
1380 {"EARPIECE", NULL, "Earpiece Mixer"}, 1440 {"EARPIECE", NULL, "Earpiece PGA"},
1381 {"PREDRIVEL", NULL, "PredriveL Mixer"}, 1441 {"PREDRIVEL", NULL, "PredriveL PGA"},
1382 {"PREDRIVER", NULL, "PredriveR Mixer"}, 1442 {"PREDRIVER", NULL, "PredriveR PGA"},
1383 {"HSOL", NULL, "HeadsetL PGA"}, 1443 {"HSOL", NULL, "HeadsetL PGA"},
1384 {"HSOR", NULL, "HeadsetR PGA"}, 1444 {"HSOR", NULL, "HeadsetR PGA"},
1385 {"CARKITL", NULL, "CarkitL Mixer"}, 1445 {"CARKITL", NULL, "CarkitL PGA"},
1386 {"CARKITR", NULL, "CarkitR Mixer"}, 1446 {"CARKITR", NULL, "CarkitR PGA"},
1387 {"HFL", NULL, "HandsfreeL PGA"}, 1447 {"HFL", NULL, "HandsfreeL PGA"},
1388 {"HFR", NULL, "HandsfreeR PGA"}, 1448 {"HFR", NULL, "HandsfreeR PGA"},
1389 {"Vibra Route", "Audio", "Vibra Mux"}, 1449 {"Vibra Route", "Audio", "Vibra Mux"},
1390 {"VIBRA", NULL, "Vibra Route"}, 1450 {"VIBRA", NULL, "Vibra Route"},
1391 1451
1392 /* Capture path */ 1452 /* Capture path */
1393 {"Analog Left Capture Route", "Main mic", "MAINMIC"}, 1453 {"Analog Left", "Main Mic Capture Switch", "MAINMIC"},
1394 {"Analog Left Capture Route", "Headset mic", "HSMIC"}, 1454 {"Analog Left", "Headset Mic Capture Switch", "HSMIC"},
1395 {"Analog Left Capture Route", "AUXL", "AUXL"}, 1455 {"Analog Left", "AUXL Capture Switch", "AUXL"},
1396 {"Analog Left Capture Route", "Carkit mic", "CARKITMIC"}, 1456 {"Analog Left", "Carkit Mic Capture Switch", "CARKITMIC"},
1397 1457
1398 {"Analog Right Capture Route", "Sub mic", "SUBMIC"}, 1458 {"Analog Right", "Sub Mic Capture Switch", "SUBMIC"},
1399 {"Analog Right Capture Route", "AUXR", "AUXR"}, 1459 {"Analog Right", "AUXR Capture Switch", "AUXR"},
1400 1460
1401 {"ADC Physical Left", NULL, "Analog Left Capture Route"}, 1461 {"ADC Physical Left", NULL, "Analog Left"},
1402 {"ADC Physical Right", NULL, "Analog Right Capture Route"}, 1462 {"ADC Physical Right", NULL, "Analog Right"},
1403 1463
1404 {"Digimic0 Enable", NULL, "DIGIMIC0"}, 1464 {"Digimic0 Enable", NULL, "DIGIMIC0"},
1405 {"Digimic1 Enable", NULL, "DIGIMIC1"}, 1465 {"Digimic1 Enable", NULL, "DIGIMIC1"},
@@ -1423,11 +1483,11 @@ static const struct snd_soc_dapm_route intercon[] = {
1423 {"ADC Virtual Right2", NULL, "TX2 Capture Route"}, 1483 {"ADC Virtual Right2", NULL, "TX2 Capture Route"},
1424 1484
1425 /* Analog bypass routes */ 1485 /* Analog bypass routes */
1426 {"Right1 Analog Loopback", "Switch", "Analog Right Capture Route"}, 1486 {"Right1 Analog Loopback", "Switch", "Analog Right"},
1427 {"Left1 Analog Loopback", "Switch", "Analog Left Capture Route"}, 1487 {"Left1 Analog Loopback", "Switch", "Analog Left"},
1428 {"Right2 Analog Loopback", "Switch", "Analog Right Capture Route"}, 1488 {"Right2 Analog Loopback", "Switch", "Analog Right"},
1429 {"Left2 Analog Loopback", "Switch", "Analog Left Capture Route"}, 1489 {"Left2 Analog Loopback", "Switch", "Analog Left"},
1430 {"Voice Analog Loopback", "Switch", "Analog Left Capture Route"}, 1490 {"Voice Analog Loopback", "Switch", "Analog Left"},
1431 1491
1432 {"Analog R1 Playback Mixer", NULL, "Right1 Analog Loopback"}, 1492 {"Analog R1 Playback Mixer", NULL, "Right1 Analog Loopback"},
1433 {"Analog L1 Playback Mixer", NULL, "Left1 Analog Loopback"}, 1493 {"Analog L1 Playback Mixer", NULL, "Left1 Analog Loopback"},
@@ -1609,8 +1669,6 @@ static int twl4030_hw_params(struct snd_pcm_substream *substream,
1609 1669
1610 /* If the substream has 4 channel, do the necessary setup */ 1670 /* If the substream has 4 channel, do the necessary setup */
1611 if (params_channels(params) == 4) { 1671 if (params_channels(params) == 4) {
1612 u8 format, mode;
1613
1614 format = twl4030_read_reg_cache(codec, TWL4030_REG_AUDIO_IF); 1672 format = twl4030_read_reg_cache(codec, TWL4030_REG_AUDIO_IF);
1615 mode = twl4030_read_reg_cache(codec, TWL4030_REG_CODEC_MODE); 1673 mode = twl4030_read_reg_cache(codec, TWL4030_REG_CODEC_MODE);
1616 1674
@@ -1806,6 +1864,19 @@ static int twl4030_set_dai_fmt(struct snd_soc_dai *codec_dai,
1806 return 0; 1864 return 0;
1807} 1865}
1808 1866
1867static int twl4030_set_tristate(struct snd_soc_dai *dai, int tristate)
1868{
1869 struct snd_soc_codec *codec = dai->codec;
1870 u8 reg = twl4030_read_reg_cache(codec, TWL4030_REG_AUDIO_IF);
1871
1872 if (tristate)
1873 reg |= TWL4030_AIF_TRI_EN;
1874 else
1875 reg &= ~TWL4030_AIF_TRI_EN;
1876
1877 return twl4030_write(codec, TWL4030_REG_AUDIO_IF, reg);
1878}
1879
1809/* In case of voice mode, the RX1 L(VRX) for downlink and the TX2 L/R 1880/* In case of voice mode, the RX1 L(VRX) for downlink and the TX2 L/R
1810 * (VTXL, VTXR) for uplink has to be enabled/disabled. */ 1881 * (VTXL, VTXR) for uplink has to be enabled/disabled. */
1811static void twl4030_voice_enable(struct snd_soc_codec *codec, int direction, 1882static void twl4030_voice_enable(struct snd_soc_codec *codec, int direction,
@@ -1948,7 +2019,7 @@ static int twl4030_voice_set_dai_fmt(struct snd_soc_dai *codec_dai,
1948 2019
1949 /* set master/slave audio interface */ 2020 /* set master/slave audio interface */
1950 switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { 2021 switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
1951 case SND_SOC_DAIFMT_CBS_CFM: 2022 case SND_SOC_DAIFMT_CBM_CFM:
1952 format &= ~(TWL4030_VIF_SLAVE_EN); 2023 format &= ~(TWL4030_VIF_SLAVE_EN);
1953 break; 2024 break;
1954 case SND_SOC_DAIFMT_CBS_CFS: 2025 case SND_SOC_DAIFMT_CBS_CFS:
@@ -1980,6 +2051,19 @@ static int twl4030_voice_set_dai_fmt(struct snd_soc_dai *codec_dai,
1980 return 0; 2051 return 0;
1981} 2052}
1982 2053
2054static int twl4030_voice_set_tristate(struct snd_soc_dai *dai, int tristate)
2055{
2056 struct snd_soc_codec *codec = dai->codec;
2057 u8 reg = twl4030_read_reg_cache(codec, TWL4030_REG_VOICE_IF);
2058
2059 if (tristate)
2060 reg |= TWL4030_VIF_TRI_EN;
2061 else
2062 reg &= ~TWL4030_VIF_TRI_EN;
2063
2064 return twl4030_write(codec, TWL4030_REG_VOICE_IF, reg);
2065}
2066
1983#define TWL4030_RATES (SNDRV_PCM_RATE_8000_48000) 2067#define TWL4030_RATES (SNDRV_PCM_RATE_8000_48000)
1984#define TWL4030_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FORMAT_S24_LE) 2068#define TWL4030_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FORMAT_S24_LE)
1985 2069
@@ -1989,6 +2073,7 @@ static struct snd_soc_dai_ops twl4030_dai_ops = {
1989 .hw_params = twl4030_hw_params, 2073 .hw_params = twl4030_hw_params,
1990 .set_sysclk = twl4030_set_dai_sysclk, 2074 .set_sysclk = twl4030_set_dai_sysclk,
1991 .set_fmt = twl4030_set_dai_fmt, 2075 .set_fmt = twl4030_set_dai_fmt,
2076 .set_tristate = twl4030_set_tristate,
1992}; 2077};
1993 2078
1994static struct snd_soc_dai_ops twl4030_dai_voice_ops = { 2079static struct snd_soc_dai_ops twl4030_dai_voice_ops = {
@@ -1997,6 +2082,7 @@ static struct snd_soc_dai_ops twl4030_dai_voice_ops = {
1997 .hw_params = twl4030_voice_hw_params, 2082 .hw_params = twl4030_voice_hw_params,
1998 .set_sysclk = twl4030_voice_set_dai_sysclk, 2083 .set_sysclk = twl4030_voice_set_dai_sysclk,
1999 .set_fmt = twl4030_voice_set_dai_fmt, 2084 .set_fmt = twl4030_voice_set_dai_fmt,
2085 .set_tristate = twl4030_voice_set_tristate,
2000}; 2086};
2001 2087
2002struct snd_soc_dai twl4030_dai[] = { 2088struct snd_soc_dai twl4030_dai[] = {
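For reference, the new .set_tristate callbacks above are reached through the generic ASoC helper snd_soc_dai_set_tristate(); a minimal machine-driver sketch (hypothetical, not part of this patch) that parks the codec's audio interface pins before another device drives the bus could look like:

#include <sound/soc.h>
#include <sound/soc-dai.h>

/* Hypothetical helper: tri-state the TWL4030 HiFi DAI while the I2S
 * lines are handed over to another master, then release them again.
 * snd_soc_dai_set_tristate() dispatches to twl4030_set_tristate(),
 * which toggles TWL4030_AIF_TRI_EN in TWL4030_REG_AUDIO_IF.
 */
static int example_park_codec_dai(struct snd_soc_dai *codec_dai, int park)
{
	return snd_soc_dai_set_tristate(codec_dai, park);
}
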
diff --git a/sound/soc/codecs/twl4030.h b/sound/soc/codecs/twl4030.h
index fe5f395d9e4f..2b4bfa23f985 100644
--- a/sound/soc/codecs/twl4030.h
+++ b/sound/soc/codecs/twl4030.h
@@ -274,6 +274,8 @@ extern struct snd_soc_codec_device soc_codec_dev_twl4030;
274struct twl4030_setup_data { 274struct twl4030_setup_data {
275 unsigned int ramp_delay_value; 275 unsigned int ramp_delay_value;
276 unsigned int sysclk; 276 unsigned int sysclk;
277 unsigned int hs_extmute:1;
278 void (*set_hs_extmute)(int mute);
277}; 279};
278 280
279#endif /* End of __TWL4030_AUDIO_H__ */ 281#endif /* End of __TWL4030_AUDIO_H__ */
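The two fields added to struct twl4030_setup_data let a board either toggle an external headset mute line through its own callback or fall back to the codec's EXTMUTE control. A board-level sketch, with a made-up GPIO number and otherwise illustrative values:

#include <linux/gpio.h>
#include "../codecs/twl4030.h"

/* EXAMPLE_HS_MUTE_GPIO is a hypothetical board GPIO, for illustration only. */
#define EXAMPLE_HS_MUTE_GPIO	60

static void example_set_hs_extmute(int mute)
{
	/* Polarity of the external mute transistor is board specific. */
	gpio_set_value(EXAMPLE_HS_MUTE_GPIO, mute);
}

static struct twl4030_setup_data example_twl4030_setup = {
	.ramp_delay_value	= 3,		/* index into the HS ramp delay table */
	.sysclk			= 26000,	/* kHz */
	.hs_extmute		= 1,
	.set_hs_extmute		= example_set_hs_extmute,
};

When .set_hs_extmute is left NULL but .hs_extmute is set, headset_ramp() instead drives TWL4030_EXTMUTE in TWL4030_REG_HS_POPN_SET, as shown in the hunks above.
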
diff --git a/sound/soc/codecs/uda134x.c b/sound/soc/codecs/uda134x.c
index 269b108e1de6..c33b92edbded 100644
--- a/sound/soc/codecs/uda134x.c
+++ b/sound/soc/codecs/uda134x.c
@@ -163,7 +163,7 @@ static int uda134x_mute(struct snd_soc_dai *dai, int mute)
163 else 163 else
164 mute_reg &= ~(1<<2); 164 mute_reg &= ~(1<<2);
165 165
166 uda134x_write(codec, UDA134X_DATA010, mute_reg & ~(1<<2)); 166 uda134x_write(codec, UDA134X_DATA010, mute_reg);
167 167
168 return 0; 168 return 0;
169} 169}
diff --git a/sound/soc/codecs/uda1380.c b/sound/soc/codecs/uda1380.c
index 5b21594e0e58..92ec03442154 100644
--- a/sound/soc/codecs/uda1380.c
+++ b/sound/soc/codecs/uda1380.c
@@ -5,9 +5,7 @@
5 * it under the terms of the GNU General Public License version 2 as 5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation. 6 * published by the Free Software Foundation.
7 * 7 *
8 * Copyright (c) 2007 Philipp Zabel <philipp.zabel@gmail.com> 8 * Copyright (c) 2007-2009 Philipp Zabel <philipp.zabel@gmail.com>
9 * Improved support for DAPM and audio routing/mixing capabilities,
10 * added TLV support.
11 * 9 *
12 * Modified by Richard Purdie <richard@openedhand.com> to fit into SoC 10 * Modified by Richard Purdie <richard@openedhand.com> to fit into SoC
13 * codec model. 11 * codec model.
@@ -19,26 +17,32 @@
19#include <linux/module.h> 17#include <linux/module.h>
20#include <linux/init.h> 18#include <linux/init.h>
21#include <linux/types.h> 19#include <linux/types.h>
22#include <linux/string.h>
23#include <linux/slab.h> 20#include <linux/slab.h>
24#include <linux/errno.h> 21#include <linux/errno.h>
25#include <linux/ioctl.h> 22#include <linux/gpio.h>
26#include <linux/delay.h> 23#include <linux/delay.h>
27#include <linux/i2c.h> 24#include <linux/i2c.h>
28#include <linux/workqueue.h> 25#include <linux/workqueue.h>
29#include <sound/core.h> 26#include <sound/core.h>
30#include <sound/control.h> 27#include <sound/control.h>
31#include <sound/initval.h> 28#include <sound/initval.h>
32#include <sound/info.h>
33#include <sound/soc.h> 29#include <sound/soc.h>
34#include <sound/soc-dapm.h> 30#include <sound/soc-dapm.h>
35#include <sound/tlv.h> 31#include <sound/tlv.h>
32#include <sound/uda1380.h>
36 33
37#include "uda1380.h" 34#include "uda1380.h"
38 35
39static struct work_struct uda1380_work;
40static struct snd_soc_codec *uda1380_codec; 36static struct snd_soc_codec *uda1380_codec;
41 37
38/* codec private data */
39struct uda1380_priv {
40 struct snd_soc_codec codec;
41 u16 reg_cache[UDA1380_CACHEREGNUM];
42 unsigned int dac_clk;
43 struct work_struct work;
44};
45
42/* 46/*
43 * uda1380 register cache 47 * uda1380 register cache
44 */ 48 */
@@ -473,6 +477,7 @@ static int uda1380_trigger(struct snd_pcm_substream *substream, int cmd,
473 struct snd_soc_pcm_runtime *rtd = substream->private_data; 477 struct snd_soc_pcm_runtime *rtd = substream->private_data;
474 struct snd_soc_device *socdev = rtd->socdev; 478 struct snd_soc_device *socdev = rtd->socdev;
475 struct snd_soc_codec *codec = socdev->card->codec; 479 struct snd_soc_codec *codec = socdev->card->codec;
480 struct uda1380_priv *uda1380 = codec->private_data;
476 int mixer = uda1380_read_reg_cache(codec, UDA1380_MIXER); 481 int mixer = uda1380_read_reg_cache(codec, UDA1380_MIXER);
477 482
478 switch (cmd) { 483 switch (cmd) {
@@ -480,13 +485,13 @@ static int uda1380_trigger(struct snd_pcm_substream *substream, int cmd,
480 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: 485 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
481 uda1380_write_reg_cache(codec, UDA1380_MIXER, 486 uda1380_write_reg_cache(codec, UDA1380_MIXER,
482 mixer & ~R14_SILENCE); 487 mixer & ~R14_SILENCE);
483 schedule_work(&uda1380_work); 488 schedule_work(&uda1380->work);
484 break; 489 break;
485 case SNDRV_PCM_TRIGGER_STOP: 490 case SNDRV_PCM_TRIGGER_STOP:
486 case SNDRV_PCM_TRIGGER_PAUSE_PUSH: 491 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
487 uda1380_write_reg_cache(codec, UDA1380_MIXER, 492 uda1380_write_reg_cache(codec, UDA1380_MIXER,
488 mixer | R14_SILENCE); 493 mixer | R14_SILENCE);
489 schedule_work(&uda1380_work); 494 schedule_work(&uda1380->work);
490 break; 495 break;
491 } 496 }
492 return 0; 497 return 0;
@@ -670,44 +675,33 @@ static int uda1380_resume(struct platform_device *pdev)
670 return 0; 675 return 0;
671} 676}
672 677
673/* 678static int uda1380_probe(struct platform_device *pdev)
674 * initialise the UDA1380 driver
675 * register mixer and dsp interfaces with the kernel
676 */
677static int uda1380_init(struct snd_soc_device *socdev, int dac_clk)
678{ 679{
679 struct snd_soc_codec *codec = socdev->card->codec; 680 struct snd_soc_device *socdev = platform_get_drvdata(pdev);
681 struct snd_soc_codec *codec;
682 struct uda1380_platform_data *pdata;
680 int ret = 0; 683 int ret = 0;
681 684
682 codec->name = "UDA1380"; 685 if (uda1380_codec == NULL) {
683 codec->owner = THIS_MODULE; 686 dev_err(&pdev->dev, "Codec device not registered\n");
684 codec->read = uda1380_read_reg_cache; 687 return -ENODEV;
685 codec->write = uda1380_write; 688 }
686 codec->set_bias_level = uda1380_set_bias_level;
687 codec->dai = uda1380_dai;
688 codec->num_dai = ARRAY_SIZE(uda1380_dai);
689 codec->reg_cache = kmemdup(uda1380_reg, sizeof(uda1380_reg),
690 GFP_KERNEL);
691 if (codec->reg_cache == NULL)
692 return -ENOMEM;
693 codec->reg_cache_size = ARRAY_SIZE(uda1380_reg);
694 codec->reg_cache_step = 1;
695 uda1380_reset(codec);
696 689
697 uda1380_codec = codec; 690 socdev->card->codec = uda1380_codec;
698 INIT_WORK(&uda1380_work, uda1380_flush_work); 691 codec = uda1380_codec;
692 pdata = codec->dev->platform_data;
699 693
700 /* register pcms */ 694 /* register pcms */
701 ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1); 695 ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
702 if (ret < 0) { 696 if (ret < 0) {
703 pr_err("uda1380: failed to create pcms\n"); 697 dev_err(codec->dev, "failed to create pcms: %d\n", ret);
704 goto pcm_err; 698 goto pcm_err;
705 } 699 }
706 700
707 /* power on device */ 701 /* power on device */
708 uda1380_set_bias_level(codec, SND_SOC_BIAS_STANDBY); 702 uda1380_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
709 /* set clock input */ 703 /* set clock input */
710 switch (dac_clk) { 704 switch (pdata->dac_clk) {
711 case UDA1380_DAC_CLK_SYSCLK: 705 case UDA1380_DAC_CLK_SYSCLK:
712 uda1380_write(codec, UDA1380_CLK, 0); 706 uda1380_write(codec, UDA1380_CLK, 0);
713 break; 707 break;
@@ -716,13 +710,12 @@ static int uda1380_init(struct snd_soc_device *socdev, int dac_clk)
716 break; 710 break;
717 } 711 }
718 712
719 /* uda1380 init */
720 snd_soc_add_controls(codec, uda1380_snd_controls, 713 snd_soc_add_controls(codec, uda1380_snd_controls,
721 ARRAY_SIZE(uda1380_snd_controls)); 714 ARRAY_SIZE(uda1380_snd_controls));
722 uda1380_add_widgets(codec); 715 uda1380_add_widgets(codec);
723 ret = snd_soc_init_card(socdev); 716 ret = snd_soc_init_card(socdev);
724 if (ret < 0) { 717 if (ret < 0) {
725 pr_err("uda1380: failed to register card\n"); 718 dev_err(codec->dev, "failed to register card: %d\n", ret);
726 goto card_err; 719 goto card_err;
727 } 720 }
728 721
@@ -732,165 +725,201 @@ card_err:
732 snd_soc_free_pcms(socdev); 725 snd_soc_free_pcms(socdev);
733 snd_soc_dapm_free(socdev); 726 snd_soc_dapm_free(socdev);
734pcm_err: 727pcm_err:
735 kfree(codec->reg_cache);
736 return ret; 728 return ret;
737} 729}
738 730
739static struct snd_soc_device *uda1380_socdev; 731/* power down chip */
740 732static int uda1380_remove(struct platform_device *pdev)
741#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
742
743static int uda1380_i2c_probe(struct i2c_client *i2c,
744 const struct i2c_device_id *id)
745{ 733{
746 struct snd_soc_device *socdev = uda1380_socdev; 734 struct snd_soc_device *socdev = platform_get_drvdata(pdev);
747 struct uda1380_setup_data *setup = socdev->codec_data;
748 struct snd_soc_codec *codec = socdev->card->codec; 735 struct snd_soc_codec *codec = socdev->card->codec;
749 int ret;
750
751 i2c_set_clientdata(i2c, codec);
752 codec->control_data = i2c;
753 736
754 ret = uda1380_init(socdev, setup->dac_clk); 737 if (codec->control_data)
755 if (ret < 0) 738 uda1380_set_bias_level(codec, SND_SOC_BIAS_OFF);
756 pr_err("uda1380: failed to initialise UDA1380\n");
757 739
758 return ret; 740 snd_soc_free_pcms(socdev);
759} 741 snd_soc_dapm_free(socdev);
760 742
761static int uda1380_i2c_remove(struct i2c_client *client)
762{
763 struct snd_soc_codec *codec = i2c_get_clientdata(client);
764 kfree(codec->reg_cache);
765 return 0; 743 return 0;
766} 744}
767 745
768static const struct i2c_device_id uda1380_i2c_id[] = { 746struct snd_soc_codec_device soc_codec_dev_uda1380 = {
769 { "uda1380", 0 }, 747 .probe = uda1380_probe,
770 { } 748 .remove = uda1380_remove,
771}; 749 .suspend = uda1380_suspend,
772MODULE_DEVICE_TABLE(i2c, uda1380_i2c_id); 750 .resume = uda1380_resume,
773
774static struct i2c_driver uda1380_i2c_driver = {
775 .driver = {
776 .name = "UDA1380 I2C Codec",
777 .owner = THIS_MODULE,
778 },
779 .probe = uda1380_i2c_probe,
780 .remove = uda1380_i2c_remove,
781 .id_table = uda1380_i2c_id,
782}; 751};
752EXPORT_SYMBOL_GPL(soc_codec_dev_uda1380);
783 753
784static int uda1380_add_i2c_device(struct platform_device *pdev, 754static int uda1380_register(struct uda1380_priv *uda1380)
785 const struct uda1380_setup_data *setup)
786{ 755{
787 struct i2c_board_info info; 756 int ret, i;
788 struct i2c_adapter *adapter; 757 struct snd_soc_codec *codec = &uda1380->codec;
789 struct i2c_client *client; 758 struct uda1380_platform_data *pdata = codec->dev->platform_data;
790 int ret;
791 759
792 ret = i2c_add_driver(&uda1380_i2c_driver); 760 if (uda1380_codec) {
793 if (ret != 0) { 761 dev_err(codec->dev, "Another UDA1380 is registered\n");
794 dev_err(&pdev->dev, "can't add i2c driver\n"); 762 return -EINVAL;
795 return ret; 763 }
764
765 if (!pdata || !pdata->gpio_power || !pdata->gpio_reset)
766 return -EINVAL;
767
768 ret = gpio_request(pdata->gpio_power, "uda1380 power");
769 if (ret)
770 goto err_out;
771 ret = gpio_request(pdata->gpio_reset, "uda1380 reset");
772 if (ret)
773 goto err_gpio;
774
775 gpio_direction_output(pdata->gpio_power, 1);
776
777 /* we may need to have the clock running here - pH5 */
778 gpio_direction_output(pdata->gpio_reset, 1);
779 udelay(5);
780 gpio_set_value(pdata->gpio_reset, 0);
781
782 mutex_init(&codec->mutex);
783 INIT_LIST_HEAD(&codec->dapm_widgets);
784 INIT_LIST_HEAD(&codec->dapm_paths);
785
786 codec->private_data = uda1380;
787 codec->name = "UDA1380";
788 codec->owner = THIS_MODULE;
789 codec->read = uda1380_read_reg_cache;
790 codec->write = uda1380_write;
791 codec->bias_level = SND_SOC_BIAS_OFF;
792 codec->set_bias_level = uda1380_set_bias_level;
793 codec->dai = uda1380_dai;
794 codec->num_dai = ARRAY_SIZE(uda1380_dai);
795 codec->reg_cache_size = ARRAY_SIZE(uda1380_reg);
796 codec->reg_cache = &uda1380->reg_cache;
797 codec->reg_cache_step = 1;
798
799 memcpy(codec->reg_cache, uda1380_reg, sizeof(uda1380_reg));
800
801 ret = uda1380_reset(codec);
802 if (ret < 0) {
803 dev_err(codec->dev, "Failed to issue reset\n");
804 goto err_reset;
796 } 805 }
797 806
798 memset(&info, 0, sizeof(struct i2c_board_info)); 807 INIT_WORK(&uda1380->work, uda1380_flush_work);
799 info.addr = setup->i2c_address; 808
800 strlcpy(info.type, "uda1380", I2C_NAME_SIZE); 809 for (i = 0; i < ARRAY_SIZE(uda1380_dai); i++)
810 uda1380_dai[i].dev = codec->dev;
801 811
802 adapter = i2c_get_adapter(setup->i2c_bus); 812 uda1380_codec = codec;
803 if (!adapter) { 813
804 dev_err(&pdev->dev, "can't get i2c adapter %d\n", 814 ret = snd_soc_register_codec(codec);
805 setup->i2c_bus); 815 if (ret != 0) {
806 goto err_driver; 816 dev_err(codec->dev, "Failed to register codec: %d\n", ret);
817 goto err_reset;
807 } 818 }
808 819
809 client = i2c_new_device(adapter, &info); 820 ret = snd_soc_register_dais(uda1380_dai, ARRAY_SIZE(uda1380_dai));
810 i2c_put_adapter(adapter); 821 if (ret != 0) {
811 if (!client) { 822 dev_err(codec->dev, "Failed to register DAIs: %d\n", ret);
812 dev_err(&pdev->dev, "can't add i2c device at 0x%x\n", 823 goto err_dai;
813 (unsigned int)info.addr);
814 goto err_driver;
815 } 824 }
816 825
817 return 0; 826 return 0;
818 827
819err_driver: 828err_dai:
820 i2c_del_driver(&uda1380_i2c_driver); 829 snd_soc_unregister_codec(codec);
821 return -ENODEV; 830err_reset:
831 gpio_set_value(pdata->gpio_power, 0);
832 gpio_free(pdata->gpio_reset);
833err_gpio:
834 gpio_free(pdata->gpio_power);
835err_out:
836 return ret;
822} 837}
823#endif
824 838
825static int uda1380_probe(struct platform_device *pdev) 839static void uda1380_unregister(struct uda1380_priv *uda1380)
826{ 840{
827 struct snd_soc_device *socdev = platform_get_drvdata(pdev); 841 struct snd_soc_codec *codec = &uda1380->codec;
828 struct uda1380_setup_data *setup; 842 struct uda1380_platform_data *pdata = codec->dev->platform_data;
843
844 snd_soc_unregister_dais(uda1380_dai, ARRAY_SIZE(uda1380_dai));
845 snd_soc_unregister_codec(&uda1380->codec);
846
847 gpio_set_value(pdata->gpio_power, 0);
848 gpio_free(pdata->gpio_reset);
849 gpio_free(pdata->gpio_power);
850
851 kfree(uda1380);
852 uda1380_codec = NULL;
853}
854
855#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
856static __devinit int uda1380_i2c_probe(struct i2c_client *i2c,
857 const struct i2c_device_id *id)
858{
859 struct uda1380_priv *uda1380;
829 struct snd_soc_codec *codec; 860 struct snd_soc_codec *codec;
830 int ret; 861 int ret;
831 862
832 setup = socdev->codec_data; 863 uda1380 = kzalloc(sizeof(struct uda1380_priv), GFP_KERNEL);
833 codec = kzalloc(sizeof(struct snd_soc_codec), GFP_KERNEL); 864 if (uda1380 == NULL)
834 if (codec == NULL)
835 return -ENOMEM; 865 return -ENOMEM;
836 866
837 socdev->card->codec = codec; 867 codec = &uda1380->codec;
838 mutex_init(&codec->mutex); 868 codec->hw_write = (hw_write_t)i2c_master_send;
839 INIT_LIST_HEAD(&codec->dapm_widgets);
840 INIT_LIST_HEAD(&codec->dapm_paths);
841 869
842 uda1380_socdev = socdev; 870 i2c_set_clientdata(i2c, uda1380);
843 ret = -ENODEV; 871 codec->control_data = i2c;
844 872
845#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) 873 codec->dev = &i2c->dev;
846 if (setup->i2c_address) {
847 codec->hw_write = (hw_write_t)i2c_master_send;
848 ret = uda1380_add_i2c_device(pdev, setup);
849 }
850#endif
851 874
875 ret = uda1380_register(uda1380);
852 if (ret != 0) 876 if (ret != 0)
853 kfree(codec); 877 kfree(uda1380);
878
854 return ret; 879 return ret;
855} 880}
856 881
857/* power down chip */ 882static int __devexit uda1380_i2c_remove(struct i2c_client *i2c)
858static int uda1380_remove(struct platform_device *pdev)
859{ 883{
860 struct snd_soc_device *socdev = platform_get_drvdata(pdev); 884 struct uda1380_priv *uda1380 = i2c_get_clientdata(i2c);
861 struct snd_soc_codec *codec = socdev->card->codec; 885 uda1380_unregister(uda1380);
862
863 if (codec->control_data)
864 uda1380_set_bias_level(codec, SND_SOC_BIAS_OFF);
865
866 snd_soc_free_pcms(socdev);
867 snd_soc_dapm_free(socdev);
868#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
869 i2c_unregister_device(codec->control_data);
870 i2c_del_driver(&uda1380_i2c_driver);
871#endif
872 kfree(codec);
873
874 return 0; 886 return 0;
875} 887}
876 888
877struct snd_soc_codec_device soc_codec_dev_uda1380 = { 889static const struct i2c_device_id uda1380_i2c_id[] = {
878 .probe = uda1380_probe, 890 { "uda1380", 0 },
879 .remove = uda1380_remove, 891 { }
880 .suspend = uda1380_suspend,
881 .resume = uda1380_resume,
882}; 892};
883EXPORT_SYMBOL_GPL(soc_codec_dev_uda1380); 893MODULE_DEVICE_TABLE(i2c, uda1380_i2c_id);
894
895static struct i2c_driver uda1380_i2c_driver = {
896 .driver = {
897 .name = "UDA1380 I2C Codec",
898 .owner = THIS_MODULE,
899 },
900 .probe = uda1380_i2c_probe,
901 .remove = __devexit_p(uda1380_i2c_remove),
902 .id_table = uda1380_i2c_id,
903};
904#endif
884 905
885static int __init uda1380_modinit(void) 906static int __init uda1380_modinit(void)
886{ 907{
887 return snd_soc_register_dais(uda1380_dai, ARRAY_SIZE(uda1380_dai)); 908 int ret;
909#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
910 ret = i2c_add_driver(&uda1380_i2c_driver);
911 if (ret != 0)
912 pr_err("Failed to register UDA1380 I2C driver: %d\n", ret);
913#endif
914 return 0;
888} 915}
889module_init(uda1380_modinit); 916module_init(uda1380_modinit);
890 917
891static void __exit uda1380_exit(void) 918static void __exit uda1380_exit(void)
892{ 919{
893 snd_soc_unregister_dais(uda1380_dai, ARRAY_SIZE(uda1380_dai)); 920#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
921 i2c_del_driver(&uda1380_i2c_driver);
922#endif
894} 923}
895module_exit(uda1380_exit); 924module_exit(uda1380_exit);
896 925
diff --git a/sound/soc/codecs/uda1380.h b/sound/soc/codecs/uda1380.h
index c55c17a52a12..9cefa8a54770 100644
--- a/sound/soc/codecs/uda1380.h
+++ b/sound/soc/codecs/uda1380.h
@@ -72,14 +72,6 @@
72#define R22_SKIP_DCFIL 0x0002 72#define R22_SKIP_DCFIL 0x0002
73#define R23_AGC_EN 0x0001 73#define R23_AGC_EN 0x0001
74 74
75struct uda1380_setup_data {
76 int i2c_bus;
77 unsigned short i2c_address;
78 int dac_clk;
79#define UDA1380_DAC_CLK_SYSCLK 0
80#define UDA1380_DAC_CLK_WSPLL 1
81};
82
83#define UDA1380_DAI_DUPLEX 0 /* playback and capture on single DAI */ 75#define UDA1380_DAI_DUPLEX 0 /* playback and capture on single DAI */
84#define UDA1380_DAI_PLAYBACK 1 /* playback DAI */ 76#define UDA1380_DAI_PLAYBACK 1 /* playback DAI */
85#define UDA1380_DAI_CAPTURE 2 /* capture DAI */ 77#define UDA1380_DAI_CAPTURE 2 /* capture DAI */
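With uda1380_setup_data gone, the UDA1380 is instantiated as a regular I2C device and picks up struct uda1380_platform_data (gpio_power, gpio_reset, dac_clk, declared in the new <sound/uda1380.h> header) through the client's platform data. A board registration sketch, with illustrative GPIO numbers, bus and address:

#include <linux/i2c.h>
#include <sound/uda1380.h>

static struct uda1380_platform_data example_uda1380_info = {
	.gpio_power	= 22,			/* illustrative GPIO numbers */
	.gpio_reset	= 23,
	.dac_clk	= UDA1380_DAC_CLK_WSPLL,
};

static struct i2c_board_info example_i2c_board_info[] __initdata = {
	{
		I2C_BOARD_INFO("uda1380", 0x18),	/* address is board specific */
		.platform_data = &example_uda1380_info,
	},
};

/* from the board's init code, e.g.:
 *	i2c_register_board_info(0, example_i2c_board_info,
 *				ARRAY_SIZE(example_i2c_board_info));
 */
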
diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c
index e7348d341b76..3ff0373dff89 100644
--- a/sound/soc/codecs/wm8350.c
+++ b/sound/soc/codecs/wm8350.c
@@ -63,6 +63,8 @@ struct wm8350_data {
63 struct wm8350_jack_data hpl; 63 struct wm8350_jack_data hpl;
64 struct wm8350_jack_data hpr; 64 struct wm8350_jack_data hpr;
65 struct regulator_bulk_data supplies[ARRAY_SIZE(supply_names)]; 65 struct regulator_bulk_data supplies[ARRAY_SIZE(supply_names)];
66 int fll_freq_out;
67 int fll_freq_in;
66}; 68};
67 69
68static unsigned int wm8350_codec_cache_read(struct snd_soc_codec *codec, 70static unsigned int wm8350_codec_cache_read(struct snd_soc_codec *codec,
@@ -406,7 +408,6 @@ static const char *wm8350_deemp[] = { "None", "32kHz", "44.1kHz", "48kHz" };
406static const char *wm8350_pol[] = { "Normal", "Inv R", "Inv L", "Inv L & R" }; 408static const char *wm8350_pol[] = { "Normal", "Inv R", "Inv L", "Inv L & R" };
407static const char *wm8350_dacmutem[] = { "Normal", "Soft" }; 409static const char *wm8350_dacmutem[] = { "Normal", "Soft" };
408static const char *wm8350_dacmutes[] = { "Fast", "Slow" }; 410static const char *wm8350_dacmutes[] = { "Fast", "Slow" };
409static const char *wm8350_dacfilter[] = { "Normal", "Sloping" };
410static const char *wm8350_adcfilter[] = { "None", "High Pass" }; 411static const char *wm8350_adcfilter[] = { "None", "High Pass" };
411static const char *wm8350_adchp[] = { "44.1kHz", "8kHz", "16kHz", "32kHz" }; 412static const char *wm8350_adchp[] = { "44.1kHz", "8kHz", "16kHz", "32kHz" };
412static const char *wm8350_lr[] = { "Left", "Right" }; 413static const char *wm8350_lr[] = { "Left", "Right" };
@@ -416,7 +417,6 @@ static const struct soc_enum wm8350_enum[] = {
416 SOC_ENUM_SINGLE(WM8350_DAC_CONTROL, 0, 4, wm8350_pol), 417 SOC_ENUM_SINGLE(WM8350_DAC_CONTROL, 0, 4, wm8350_pol),
417 SOC_ENUM_SINGLE(WM8350_DAC_MUTE_VOLUME, 14, 2, wm8350_dacmutem), 418 SOC_ENUM_SINGLE(WM8350_DAC_MUTE_VOLUME, 14, 2, wm8350_dacmutem),
418 SOC_ENUM_SINGLE(WM8350_DAC_MUTE_VOLUME, 13, 2, wm8350_dacmutes), 419 SOC_ENUM_SINGLE(WM8350_DAC_MUTE_VOLUME, 13, 2, wm8350_dacmutes),
419 SOC_ENUM_SINGLE(WM8350_DAC_MUTE_VOLUME, 12, 2, wm8350_dacfilter),
420 SOC_ENUM_SINGLE(WM8350_ADC_CONTROL, 15, 2, wm8350_adcfilter), 420 SOC_ENUM_SINGLE(WM8350_ADC_CONTROL, 15, 2, wm8350_adcfilter),
421 SOC_ENUM_SINGLE(WM8350_ADC_CONTROL, 8, 4, wm8350_adchp), 421 SOC_ENUM_SINGLE(WM8350_ADC_CONTROL, 8, 4, wm8350_adchp),
422 SOC_ENUM_SINGLE(WM8350_ADC_CONTROL, 0, 4, wm8350_pol), 422 SOC_ENUM_SINGLE(WM8350_ADC_CONTROL, 0, 4, wm8350_pol),
@@ -444,10 +444,9 @@ static const struct snd_kcontrol_new wm8350_snd_controls[] = {
444 0, 255, 0, dac_pcm_tlv), 444 0, 255, 0, dac_pcm_tlv),
445 SOC_ENUM("Playback PCM Mute Function", wm8350_enum[2]), 445 SOC_ENUM("Playback PCM Mute Function", wm8350_enum[2]),
446 SOC_ENUM("Playback PCM Mute Speed", wm8350_enum[3]), 446 SOC_ENUM("Playback PCM Mute Speed", wm8350_enum[3]),
447 SOC_ENUM("Playback PCM Filter", wm8350_enum[4]), 447 SOC_ENUM("Capture PCM Filter", wm8350_enum[4]),
448 SOC_ENUM("Capture PCM Filter", wm8350_enum[5]), 448 SOC_ENUM("Capture PCM HP Filter", wm8350_enum[5]),
449 SOC_ENUM("Capture PCM HP Filter", wm8350_enum[6]), 449 SOC_ENUM("Capture ADC Inversion", wm8350_enum[6]),
450 SOC_ENUM("Capture ADC Inversion", wm8350_enum[7]),
451 SOC_WM8350_DOUBLE_R_TLV("Capture PCM Volume", 450 SOC_WM8350_DOUBLE_R_TLV("Capture PCM Volume",
452 WM8350_ADC_DIGITAL_VOLUME_L, 451 WM8350_ADC_DIGITAL_VOLUME_L,
453 WM8350_ADC_DIGITAL_VOLUME_R, 452 WM8350_ADC_DIGITAL_VOLUME_R,
@@ -613,7 +612,7 @@ SOC_DAPM_SINGLE("Switch", WM8350_BEEP_VOLUME, 15, 1, 1);
613 612
614/* Out4 Capture Mux */ 613/* Out4 Capture Mux */
615static const struct snd_kcontrol_new wm8350_out4_capture_controls = 614static const struct snd_kcontrol_new wm8350_out4_capture_controls =
616SOC_DAPM_ENUM("Route", wm8350_enum[8]); 615SOC_DAPM_ENUM("Route", wm8350_enum[7]);
617 616
618static const struct snd_soc_dapm_widget wm8350_dapm_widgets[] = { 617static const struct snd_soc_dapm_widget wm8350_dapm_widgets[] = {
619 618
@@ -993,6 +992,7 @@ static int wm8350_pcm_hw_params(struct snd_pcm_substream *substream,
993 struct snd_soc_dai *codec_dai) 992 struct snd_soc_dai *codec_dai)
994{ 993{
995 struct snd_soc_codec *codec = codec_dai->codec; 994 struct snd_soc_codec *codec = codec_dai->codec;
995 struct wm8350 *wm8350 = codec->control_data;
996 u16 iface = wm8350_codec_read(codec, WM8350_AI_FORMATING) & 996 u16 iface = wm8350_codec_read(codec, WM8350_AI_FORMATING) &
997 ~WM8350_AIF_WL_MASK; 997 ~WM8350_AIF_WL_MASK;
998 998
@@ -1012,6 +1012,19 @@ static int wm8350_pcm_hw_params(struct snd_pcm_substream *substream,
1012 } 1012 }
1013 1013
1014 wm8350_codec_write(codec, WM8350_AI_FORMATING, iface); 1014 wm8350_codec_write(codec, WM8350_AI_FORMATING, iface);
1015
1016 /* The sloping stopband filter is recommended for use with
1017 * lower sample rates to improve performance.
1018 */
1019 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
1020 if (params_rate(params) < 24000)
1021 wm8350_set_bits(wm8350, WM8350_DAC_MUTE_VOLUME,
1022 WM8350_DAC_SB_FILT);
1023 else
1024 wm8350_clear_bits(wm8350, WM8350_DAC_MUTE_VOLUME,
1025 WM8350_DAC_SB_FILT);
1026 }
1027
1015 return 0; 1028 return 0;
1016} 1029}
1017 1030
@@ -1093,10 +1106,14 @@ static int wm8350_set_fll(struct snd_soc_dai *codec_dai,
1093{ 1106{
1094 struct snd_soc_codec *codec = codec_dai->codec; 1107 struct snd_soc_codec *codec = codec_dai->codec;
1095 struct wm8350 *wm8350 = codec->control_data; 1108 struct wm8350 *wm8350 = codec->control_data;
1109 struct wm8350_data *priv = codec->private_data;
1096 struct _fll_div fll_div; 1110 struct _fll_div fll_div;
1097 int ret = 0; 1111 int ret = 0;
1098 u16 fll_1, fll_4; 1112 u16 fll_1, fll_4;
1099 1113
1114 if (freq_in == priv->fll_freq_in && freq_out == priv->fll_freq_out)
1115 return 0;
1116
1100 /* power down FLL - we need to do this for reconfiguration */ 1117 /* power down FLL - we need to do this for reconfiguration */
1101 wm8350_clear_bits(wm8350, WM8350_POWER_MGMT_4, 1118 wm8350_clear_bits(wm8350, WM8350_POWER_MGMT_4,
1102 WM8350_FLL_ENA | WM8350_FLL_OSC_ENA); 1119 WM8350_FLL_ENA | WM8350_FLL_OSC_ENA);
@@ -1131,6 +1148,9 @@ static int wm8350_set_fll(struct snd_soc_dai *codec_dai,
1131 wm8350_set_bits(wm8350, WM8350_POWER_MGMT_4, WM8350_FLL_OSC_ENA); 1148 wm8350_set_bits(wm8350, WM8350_POWER_MGMT_4, WM8350_FLL_OSC_ENA);
1132 wm8350_set_bits(wm8350, WM8350_POWER_MGMT_4, WM8350_FLL_ENA); 1149 wm8350_set_bits(wm8350, WM8350_POWER_MGMT_4, WM8350_FLL_ENA);
1133 1150
1151 priv->fll_freq_out = freq_out;
1152 priv->fll_freq_in = freq_in;
1153
1134 return 0; 1154 return 0;
1135} 1155}
1136 1156
@@ -1660,6 +1680,21 @@ static int __devexit wm8350_codec_remove(struct platform_device *pdev)
1660 return 0; 1680 return 0;
1661} 1681}
1662 1682
1683#ifdef CONFIG_PM
1684static int wm8350_codec_suspend(struct platform_device *pdev, pm_message_t m)
1685{
1686 return snd_soc_suspend_device(&pdev->dev);
1687}
1688
1689static int wm8350_codec_resume(struct platform_device *pdev)
1690{
1691 return snd_soc_resume_device(&pdev->dev);
1692}
1693#else
1694#define wm8350_codec_suspend NULL
1695#define wm8350_codec_resume NULL
1696#endif
1697
1663static struct platform_driver wm8350_codec_driver = { 1698static struct platform_driver wm8350_codec_driver = {
1664 .driver = { 1699 .driver = {
1665 .name = "wm8350-codec", 1700 .name = "wm8350-codec",
@@ -1667,6 +1702,8 @@ static struct platform_driver wm8350_codec_driver = {
1667 }, 1702 },
1668 .probe = wm8350_codec_probe, 1703 .probe = wm8350_codec_probe,
1669 .remove = __devexit_p(wm8350_codec_remove), 1704 .remove = __devexit_p(wm8350_codec_remove),
1705 .suspend = wm8350_codec_suspend,
1706 .resume = wm8350_codec_resume,
1670}; 1707};
1671 1708
1672static __init int wm8350_init(void) 1709static __init int wm8350_init(void)
diff --git a/sound/soc/codecs/wm8400.c b/sound/soc/codecs/wm8400.c
index 502eefac1ecd..b9ef4d915221 100644
--- a/sound/soc/codecs/wm8400.c
+++ b/sound/soc/codecs/wm8400.c
@@ -1022,10 +1022,15 @@ static int wm8400_set_dai_pll(struct snd_soc_dai *codec_dai, int pll_id,
1022 if (freq_in == wm8400->fll_in && freq_out == wm8400->fll_out) 1022 if (freq_in == wm8400->fll_in && freq_out == wm8400->fll_out)
1023 return 0; 1023 return 0;
1024 1024
1025 if (freq_out != 0) { 1025 if (freq_out) {
1026 ret = fll_factors(wm8400, &factors, freq_in, freq_out); 1026 ret = fll_factors(wm8400, &factors, freq_in, freq_out);
1027 if (ret != 0) 1027 if (ret != 0)
1028 return ret; 1028 return ret;
1029 } else {
1030 /* Bodge GCC 4.4.0 uninitialised variable warning - it
1031 * doesn't seem capable of working out that we exit if
1032 * freq_out is 0 before any of the uses. */
1033 memset(&factors, 0, sizeof(factors));
1029 } 1034 }
1030 1035
1031 wm8400->fll_out = freq_out; 1036 wm8400->fll_out = freq_out;
@@ -1040,7 +1045,7 @@ static int wm8400_set_dai_pll(struct snd_soc_dai *codec_dai, int pll_id,
1040 reg &= ~WM8400_FLL_OSC_ENA; 1045 reg &= ~WM8400_FLL_OSC_ENA;
1041 wm8400_write(codec, WM8400_FLL_CONTROL_1, reg); 1046 wm8400_write(codec, WM8400_FLL_CONTROL_1, reg);
1042 1047
1043 if (freq_out == 0) 1048 if (!freq_out)
1044 return 0; 1049 return 0;
1045 1050
1046 reg &= ~(WM8400_FLL_REF_FREQ | WM8400_FLL_FRATIO_MASK); 1051 reg &= ~(WM8400_FLL_REF_FREQ | WM8400_FLL_FRATIO_MASK);
@@ -1553,6 +1558,21 @@ static int __exit wm8400_codec_remove(struct platform_device *dev)
1553 return 0; 1558 return 0;
1554} 1559}
1555 1560
1561#ifdef CONFIG_PM
1562static int wm8400_pdev_suspend(struct platform_device *pdev, pm_message_t msg)
1563{
1564 return snd_soc_suspend_device(&pdev->dev);
1565}
1566
1567static int wm8400_pdev_resume(struct platform_device *pdev)
1568{
1569 return snd_soc_resume_device(&pdev->dev);
1570}
1571#else
1572#define wm8400_pdev_suspend NULL
1573#define wm8400_pdev_resume NULL
1574#endif
1575
1556static struct platform_driver wm8400_codec_driver = { 1576static struct platform_driver wm8400_codec_driver = {
1557 .driver = { 1577 .driver = {
1558 .name = "wm8400-codec", 1578 .name = "wm8400-codec",
@@ -1560,6 +1580,8 @@ static struct platform_driver wm8400_codec_driver = {
1560 }, 1580 },
1561 .probe = wm8400_codec_probe, 1581 .probe = wm8400_codec_probe,
1562 .remove = __exit_p(wm8400_codec_remove), 1582 .remove = __exit_p(wm8400_codec_remove),
1583 .suspend = wm8400_pdev_suspend,
1584 .resume = wm8400_pdev_resume,
1563}; 1585};
1564 1586
1565static int __init wm8400_codec_init(void) 1587static int __init wm8400_codec_init(void)
diff --git a/sound/soc/codecs/wm8510.c b/sound/soc/codecs/wm8510.c
index c8b8dba85890..060d5d06ba95 100644
--- a/sound/soc/codecs/wm8510.c
+++ b/sound/soc/codecs/wm8510.c
@@ -58,55 +58,7 @@ static const u16 wm8510_reg[WM8510_CACHEREGNUM] = {
58#define WM8510_POWER1_BIASEN 0x08 58#define WM8510_POWER1_BIASEN 0x08
59#define WM8510_POWER1_BUFIOEN 0x10 59#define WM8510_POWER1_BUFIOEN 0x10
60 60
61/* 61#define wm8510_reset(c) snd_soc_write(c, WM8510_RESET, 0)
62 * read wm8510 register cache
63 */
64static inline unsigned int wm8510_read_reg_cache(struct snd_soc_codec *codec,
65 unsigned int reg)
66{
67 u16 *cache = codec->reg_cache;
68 if (reg == WM8510_RESET)
69 return 0;
70 if (reg >= WM8510_CACHEREGNUM)
71 return -1;
72 return cache[reg];
73}
74
75/*
76 * write wm8510 register cache
77 */
78static inline void wm8510_write_reg_cache(struct snd_soc_codec *codec,
79 u16 reg, unsigned int value)
80{
81 u16 *cache = codec->reg_cache;
82 if (reg >= WM8510_CACHEREGNUM)
83 return;
84 cache[reg] = value;
85}
86
87/*
88 * write to the WM8510 register space
89 */
90static int wm8510_write(struct snd_soc_codec *codec, unsigned int reg,
91 unsigned int value)
92{
93 u8 data[2];
94
95 /* data is
96 * D15..D9 WM8510 register offset
97 * D8...D0 register data
98 */
99 data[0] = (reg << 1) | ((value >> 8) & 0x0001);
100 data[1] = value & 0x00ff;
101
102 wm8510_write_reg_cache(codec, reg, value);
103 if (codec->hw_write(codec->control_data, data, 2) == 2)
104 return 0;
105 else
106 return -EIO;
107}
108
109#define wm8510_reset(c) wm8510_write(c, WM8510_RESET, 0)
110 62
111static const char *wm8510_companding[] = { "Off", "NC", "u-law", "A-law" }; 63static const char *wm8510_companding[] = { "Off", "NC", "u-law", "A-law" };
112static const char *wm8510_deemp[] = { "None", "32kHz", "44.1kHz", "48kHz" }; 64static const char *wm8510_deemp[] = { "None", "32kHz", "44.1kHz", "48kHz" };
@@ -327,27 +279,27 @@ static int wm8510_set_dai_pll(struct snd_soc_dai *codec_dai,
327 279
328 if (freq_in == 0 || freq_out == 0) { 280 if (freq_in == 0 || freq_out == 0) {
329 /* Clock CODEC directly from MCLK */ 281 /* Clock CODEC directly from MCLK */
330 reg = wm8510_read_reg_cache(codec, WM8510_CLOCK); 282 reg = snd_soc_read(codec, WM8510_CLOCK);
331 wm8510_write(codec, WM8510_CLOCK, reg & 0x0ff); 283 snd_soc_write(codec, WM8510_CLOCK, reg & 0x0ff);
332 284
333 /* Turn off PLL */ 285 /* Turn off PLL */
334 reg = wm8510_read_reg_cache(codec, WM8510_POWER1); 286 reg = snd_soc_read(codec, WM8510_POWER1);
335 wm8510_write(codec, WM8510_POWER1, reg & 0x1df); 287 snd_soc_write(codec, WM8510_POWER1, reg & 0x1df);
336 return 0; 288 return 0;
337 } 289 }
338 290
339 pll_factors(freq_out*4, freq_in); 291 pll_factors(freq_out*4, freq_in);
340 292
341 wm8510_write(codec, WM8510_PLLN, (pll_div.pre_div << 4) | pll_div.n); 293 snd_soc_write(codec, WM8510_PLLN, (pll_div.pre_div << 4) | pll_div.n);
342 wm8510_write(codec, WM8510_PLLK1, pll_div.k >> 18); 294 snd_soc_write(codec, WM8510_PLLK1, pll_div.k >> 18);
343 wm8510_write(codec, WM8510_PLLK2, (pll_div.k >> 9) & 0x1ff); 295 snd_soc_write(codec, WM8510_PLLK2, (pll_div.k >> 9) & 0x1ff);
344 wm8510_write(codec, WM8510_PLLK3, pll_div.k & 0x1ff); 296 snd_soc_write(codec, WM8510_PLLK3, pll_div.k & 0x1ff);
345 reg = wm8510_read_reg_cache(codec, WM8510_POWER1); 297 reg = snd_soc_read(codec, WM8510_POWER1);
346 wm8510_write(codec, WM8510_POWER1, reg | 0x020); 298 snd_soc_write(codec, WM8510_POWER1, reg | 0x020);
347 299
348 /* Run CODEC from PLL instead of MCLK */ 300 /* Run CODEC from PLL instead of MCLK */
349 reg = wm8510_read_reg_cache(codec, WM8510_CLOCK); 301 reg = snd_soc_read(codec, WM8510_CLOCK);
350 wm8510_write(codec, WM8510_CLOCK, reg | 0x100); 302 snd_soc_write(codec, WM8510_CLOCK, reg | 0x100);
351 303
352 return 0; 304 return 0;
353} 305}
@@ -363,24 +315,24 @@ static int wm8510_set_dai_clkdiv(struct snd_soc_dai *codec_dai,
363 315
364 switch (div_id) { 316 switch (div_id) {
365 case WM8510_OPCLKDIV: 317 case WM8510_OPCLKDIV:
366 reg = wm8510_read_reg_cache(codec, WM8510_GPIO) & 0x1cf; 318 reg = snd_soc_read(codec, WM8510_GPIO) & 0x1cf;
367 wm8510_write(codec, WM8510_GPIO, reg | div); 319 snd_soc_write(codec, WM8510_GPIO, reg | div);
368 break; 320 break;
369 case WM8510_MCLKDIV: 321 case WM8510_MCLKDIV:
370 reg = wm8510_read_reg_cache(codec, WM8510_CLOCK) & 0x11f; 322 reg = snd_soc_read(codec, WM8510_CLOCK) & 0x11f;
371 wm8510_write(codec, WM8510_CLOCK, reg | div); 323 snd_soc_write(codec, WM8510_CLOCK, reg | div);
372 break; 324 break;
373 case WM8510_ADCCLK: 325 case WM8510_ADCCLK:
374 reg = wm8510_read_reg_cache(codec, WM8510_ADC) & 0x1f7; 326 reg = snd_soc_read(codec, WM8510_ADC) & 0x1f7;
375 wm8510_write(codec, WM8510_ADC, reg | div); 327 snd_soc_write(codec, WM8510_ADC, reg | div);
376 break; 328 break;
377 case WM8510_DACCLK: 329 case WM8510_DACCLK:
378 reg = wm8510_read_reg_cache(codec, WM8510_DAC) & 0x1f7; 330 reg = snd_soc_read(codec, WM8510_DAC) & 0x1f7;
379 wm8510_write(codec, WM8510_DAC, reg | div); 331 snd_soc_write(codec, WM8510_DAC, reg | div);
380 break; 332 break;
381 case WM8510_BCLKDIV: 333 case WM8510_BCLKDIV:
382 reg = wm8510_read_reg_cache(codec, WM8510_CLOCK) & 0x1e3; 334 reg = snd_soc_read(codec, WM8510_CLOCK) & 0x1e3;
383 wm8510_write(codec, WM8510_CLOCK, reg | div); 335 snd_soc_write(codec, WM8510_CLOCK, reg | div);
384 break; 336 break;
385 default: 337 default:
386 return -EINVAL; 338 return -EINVAL;
@@ -394,7 +346,7 @@ static int wm8510_set_dai_fmt(struct snd_soc_dai *codec_dai,
394{ 346{
395 struct snd_soc_codec *codec = codec_dai->codec; 347 struct snd_soc_codec *codec = codec_dai->codec;
396 u16 iface = 0; 348 u16 iface = 0;
397 u16 clk = wm8510_read_reg_cache(codec, WM8510_CLOCK) & 0x1fe; 349 u16 clk = snd_soc_read(codec, WM8510_CLOCK) & 0x1fe;
398 350
399 /* set master/slave audio interface */ 351 /* set master/slave audio interface */
400 switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { 352 switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
@@ -441,8 +393,8 @@ static int wm8510_set_dai_fmt(struct snd_soc_dai *codec_dai,
441 return -EINVAL; 393 return -EINVAL;
442 } 394 }
443 395
444 wm8510_write(codec, WM8510_IFACE, iface); 396 snd_soc_write(codec, WM8510_IFACE, iface);
445 wm8510_write(codec, WM8510_CLOCK, clk); 397 snd_soc_write(codec, WM8510_CLOCK, clk);
446 return 0; 398 return 0;
447} 399}
448 400
@@ -453,8 +405,8 @@ static int wm8510_pcm_hw_params(struct snd_pcm_substream *substream,
453 struct snd_soc_pcm_runtime *rtd = substream->private_data; 405 struct snd_soc_pcm_runtime *rtd = substream->private_data;
454 struct snd_soc_device *socdev = rtd->socdev; 406 struct snd_soc_device *socdev = rtd->socdev;
455 struct snd_soc_codec *codec = socdev->card->codec; 407 struct snd_soc_codec *codec = socdev->card->codec;
456 u16 iface = wm8510_read_reg_cache(codec, WM8510_IFACE) & 0x19f; 408 u16 iface = snd_soc_read(codec, WM8510_IFACE) & 0x19f;
457 u16 adn = wm8510_read_reg_cache(codec, WM8510_ADD) & 0x1f1; 409 u16 adn = snd_soc_read(codec, WM8510_ADD) & 0x1f1;
458 410
459 /* bit size */ 411 /* bit size */
460 switch (params_format(params)) { 412 switch (params_format(params)) {
@@ -493,20 +445,20 @@ static int wm8510_pcm_hw_params(struct snd_pcm_substream *substream,
493 break; 445 break;
494 } 446 }
495 447
496 wm8510_write(codec, WM8510_IFACE, iface); 448 snd_soc_write(codec, WM8510_IFACE, iface);
497 wm8510_write(codec, WM8510_ADD, adn); 449 snd_soc_write(codec, WM8510_ADD, adn);
498 return 0; 450 return 0;
499} 451}
500 452
501static int wm8510_mute(struct snd_soc_dai *dai, int mute) 453static int wm8510_mute(struct snd_soc_dai *dai, int mute)
502{ 454{
503 struct snd_soc_codec *codec = dai->codec; 455 struct snd_soc_codec *codec = dai->codec;
504 u16 mute_reg = wm8510_read_reg_cache(codec, WM8510_DAC) & 0xffbf; 456 u16 mute_reg = snd_soc_read(codec, WM8510_DAC) & 0xffbf;
505 457
506 if (mute) 458 if (mute)
507 wm8510_write(codec, WM8510_DAC, mute_reg | 0x40); 459 snd_soc_write(codec, WM8510_DAC, mute_reg | 0x40);
508 else 460 else
509 wm8510_write(codec, WM8510_DAC, mute_reg); 461 snd_soc_write(codec, WM8510_DAC, mute_reg);
510 return 0; 462 return 0;
511} 463}
512 464
@@ -514,13 +466,13 @@ static int wm8510_mute(struct snd_soc_dai *dai, int mute)
514static int wm8510_set_bias_level(struct snd_soc_codec *codec, 466static int wm8510_set_bias_level(struct snd_soc_codec *codec,
515 enum snd_soc_bias_level level) 467 enum snd_soc_bias_level level)
516{ 468{
517 u16 power1 = wm8510_read_reg_cache(codec, WM8510_POWER1) & ~0x3; 469 u16 power1 = snd_soc_read(codec, WM8510_POWER1) & ~0x3;
518 470
519 switch (level) { 471 switch (level) {
520 case SND_SOC_BIAS_ON: 472 case SND_SOC_BIAS_ON:
521 case SND_SOC_BIAS_PREPARE: 473 case SND_SOC_BIAS_PREPARE:
522 power1 |= 0x1; /* VMID 50k */ 474 power1 |= 0x1; /* VMID 50k */
523 wm8510_write(codec, WM8510_POWER1, power1); 475 snd_soc_write(codec, WM8510_POWER1, power1);
524 break; 476 break;
525 477
526 case SND_SOC_BIAS_STANDBY: 478 case SND_SOC_BIAS_STANDBY:
@@ -528,18 +480,18 @@ static int wm8510_set_bias_level(struct snd_soc_codec *codec,
528 480
529 if (codec->bias_level == SND_SOC_BIAS_OFF) { 481 if (codec->bias_level == SND_SOC_BIAS_OFF) {
530 /* Initial cap charge at VMID 5k */ 482 /* Initial cap charge at VMID 5k */
531 wm8510_write(codec, WM8510_POWER1, power1 | 0x3); 483 snd_soc_write(codec, WM8510_POWER1, power1 | 0x3);
532 mdelay(100); 484 mdelay(100);
533 } 485 }
534 486
535 power1 |= 0x2; /* VMID 500k */ 487 power1 |= 0x2; /* VMID 500k */
536 wm8510_write(codec, WM8510_POWER1, power1); 488 snd_soc_write(codec, WM8510_POWER1, power1);
537 break; 489 break;
538 490
539 case SND_SOC_BIAS_OFF: 491 case SND_SOC_BIAS_OFF:
540 wm8510_write(codec, WM8510_POWER1, 0); 492 snd_soc_write(codec, WM8510_POWER1, 0);
541 wm8510_write(codec, WM8510_POWER2, 0); 493 snd_soc_write(codec, WM8510_POWER2, 0);
542 wm8510_write(codec, WM8510_POWER3, 0); 494 snd_soc_write(codec, WM8510_POWER3, 0);
543 break; 495 break;
544 } 496 }
545 497
@@ -577,6 +529,7 @@ struct snd_soc_dai wm8510_dai = {
577 .rates = WM8510_RATES, 529 .rates = WM8510_RATES,
578 .formats = WM8510_FORMATS,}, 530 .formats = WM8510_FORMATS,},
579 .ops = &wm8510_dai_ops, 531 .ops = &wm8510_dai_ops,
532 .symmetric_rates = 1,
580}; 533};
581EXPORT_SYMBOL_GPL(wm8510_dai); 534EXPORT_SYMBOL_GPL(wm8510_dai);
582 535
@@ -612,15 +565,14 @@ static int wm8510_resume(struct platform_device *pdev)
612 * initialise the WM8510 driver 565 * initialise the WM8510 driver
613 * register the mixer and dsp interfaces with the kernel 566 * register the mixer and dsp interfaces with the kernel
614 */ 567 */
615static int wm8510_init(struct snd_soc_device *socdev) 568static int wm8510_init(struct snd_soc_device *socdev,
569 enum snd_soc_control_type control)
616{ 570{
617 struct snd_soc_codec *codec = socdev->card->codec; 571 struct snd_soc_codec *codec = socdev->card->codec;
618 int ret = 0; 572 int ret = 0;
619 573
620 codec->name = "WM8510"; 574 codec->name = "WM8510";
621 codec->owner = THIS_MODULE; 575 codec->owner = THIS_MODULE;
622 codec->read = wm8510_read_reg_cache;
623 codec->write = wm8510_write;
624 codec->set_bias_level = wm8510_set_bias_level; 576 codec->set_bias_level = wm8510_set_bias_level;
625 codec->dai = &wm8510_dai; 577 codec->dai = &wm8510_dai;
626 codec->num_dai = 1; 578 codec->num_dai = 1;
@@ -630,13 +582,20 @@ static int wm8510_init(struct snd_soc_device *socdev)
630 if (codec->reg_cache == NULL) 582 if (codec->reg_cache == NULL)
631 return -ENOMEM; 583 return -ENOMEM;
632 584
585 ret = snd_soc_codec_set_cache_io(codec, 7, 9, control);
586 if (ret < 0) {
587 printk(KERN_ERR "wm8510: failed to set cache I/O: %d\n",
588 ret);
589 goto err;
590 }
591
633 wm8510_reset(codec); 592 wm8510_reset(codec);
634 593
635 /* register pcms */ 594 /* register pcms */
636 ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1); 595 ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
637 if (ret < 0) { 596 if (ret < 0) {
638 printk(KERN_ERR "wm8510: failed to create pcms\n"); 597 printk(KERN_ERR "wm8510: failed to create pcms\n");
639 goto pcm_err; 598 goto err;
640 } 599 }
641 600
642 /* power on device */ 601 /* power on device */
@@ -655,7 +614,7 @@ static int wm8510_init(struct snd_soc_device *socdev)
655card_err: 614card_err:
656 snd_soc_free_pcms(socdev); 615 snd_soc_free_pcms(socdev);
657 snd_soc_dapm_free(socdev); 616 snd_soc_dapm_free(socdev);
658pcm_err: 617err:
659 kfree(codec->reg_cache); 618 kfree(codec->reg_cache);
660 return ret; 619 return ret;
661} 620}
@@ -678,7 +637,7 @@ static int wm8510_i2c_probe(struct i2c_client *i2c,
678 i2c_set_clientdata(i2c, codec); 637 i2c_set_clientdata(i2c, codec);
679 codec->control_data = i2c; 638 codec->control_data = i2c;
680 639
681 ret = wm8510_init(socdev); 640 ret = wm8510_init(socdev, SND_SOC_I2C);
682 if (ret < 0) 641 if (ret < 0)
683 pr_err("failed to initialise WM8510\n"); 642 pr_err("failed to initialise WM8510\n");
684 643
@@ -758,7 +717,7 @@ static int __devinit wm8510_spi_probe(struct spi_device *spi)
758 717
759 codec->control_data = spi; 718 codec->control_data = spi;
760 719
761 ret = wm8510_init(socdev); 720 ret = wm8510_init(socdev, SND_SOC_SPI);
762 if (ret < 0) 721 if (ret < 0)
763 dev_err(&spi->dev, "failed to initialise WM8510\n"); 722 dev_err(&spi->dev, "failed to initialise WM8510\n");
764 723
@@ -779,30 +738,6 @@ static struct spi_driver wm8510_spi_driver = {
779 .probe = wm8510_spi_probe, 738 .probe = wm8510_spi_probe,
780 .remove = __devexit_p(wm8510_spi_remove), 739 .remove = __devexit_p(wm8510_spi_remove),
781}; 740};
782
783static int wm8510_spi_write(struct spi_device *spi, const char *data, int len)
784{
785 struct spi_transfer t;
786 struct spi_message m;
787 u8 msg[2];
788
789 if (len <= 0)
790 return 0;
791
792 msg[0] = data[0];
793 msg[1] = data[1];
794
795 spi_message_init(&m);
796 memset(&t, 0, (sizeof t));
797
798 t.tx_buf = &msg[0];
799 t.len = len;
800
801 spi_message_add_tail(&t, &m);
802 spi_sync(spi, &m);
803
804 return len;
805}
806#endif /* CONFIG_SPI_MASTER */ 741#endif /* CONFIG_SPI_MASTER */
807 742
808static int wm8510_probe(struct platform_device *pdev) 743static int wm8510_probe(struct platform_device *pdev)
@@ -827,13 +762,11 @@ static int wm8510_probe(struct platform_device *pdev)
827 wm8510_socdev = socdev; 762 wm8510_socdev = socdev;
828#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) 763#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
829 if (setup->i2c_address) { 764 if (setup->i2c_address) {
830 codec->hw_write = (hw_write_t)i2c_master_send;
831 ret = wm8510_add_i2c_device(pdev, setup); 765 ret = wm8510_add_i2c_device(pdev, setup);
832 } 766 }
833#endif 767#endif
834#if defined(CONFIG_SPI_MASTER) 768#if defined(CONFIG_SPI_MASTER)
835 if (setup->spi) { 769 if (setup->spi) {
836 codec->hw_write = (hw_write_t)wm8510_spi_write;
837 ret = spi_register_driver(&wm8510_spi_driver); 770 ret = spi_register_driver(&wm8510_spi_driver);
838 if (ret != 0) 771 if (ret != 0)
839			printk(KERN_ERR "can't add spi driver\n"); 772			printk(KERN_ERR "can't add spi driver\n");
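
The wm8510.c changes above follow the pattern used throughout this series: the driver-private register cache read/write helpers and the hw_write/SPI plumbing are deleted, and the codec instead asks the ASoC core to provide register I/O via snd_soc_codec_set_cache_io(), after which snd_soc_read(), snd_soc_write() and snd_soc_update_bits() operate on the shared cache and bus. A minimal sketch of that conversion, assuming the 2.6.31-era ASoC API (the codec name and register macro are hypothetical, not taken from this patch):

	#include <sound/soc.h>

	#define MYCODEC_POWER1	0x01	/* hypothetical register address */

	/* Hypothetical codec init showing the cache I/O conversion */
	static int mycodec_init(struct snd_soc_codec *codec,
				enum snd_soc_control_type control)
	{
		int ret;

		/* 7-bit register addresses, 9-bit values, I2C or SPI bus */
		ret = snd_soc_codec_set_cache_io(codec, 7, 9, control);
		if (ret < 0)
			return ret;

		/* The core now reads from the cache and writes to the bus */
		snd_soc_update_bits(codec, MYCODEC_POWER1, 0x3, 0x1);
		return 0;
	}
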
diff --git a/sound/soc/codecs/wm8523.c b/sound/soc/codecs/wm8523.c
new file mode 100644
index 000000000000..25870a4652fb
--- /dev/null
+++ b/sound/soc/codecs/wm8523.c
@@ -0,0 +1,699 @@
1/*
2 * wm8523.c -- WM8523 ALSA SoC Audio driver
3 *
4 * Copyright 2009 Wolfson Microelectronics plc
5 *
6 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
7 *
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/module.h>
15#include <linux/moduleparam.h>
16#include <linux/init.h>
17#include <linux/delay.h>
18#include <linux/pm.h>
19#include <linux/i2c.h>
20#include <linux/platform_device.h>
21#include <linux/regulator/consumer.h>
22#include <sound/core.h>
23#include <sound/pcm.h>
24#include <sound/pcm_params.h>
25#include <sound/soc.h>
26#include <sound/soc-dapm.h>
27#include <sound/initval.h>
28#include <sound/tlv.h>
29
30#include "wm8523.h"
31
32static struct snd_soc_codec *wm8523_codec;
33struct snd_soc_codec_device soc_codec_dev_wm8523;
34
35#define WM8523_NUM_SUPPLIES 2
36static const char *wm8523_supply_names[WM8523_NUM_SUPPLIES] = {
37 "AVDD",
38 "LINEVDD",
39};
40
41#define WM8523_NUM_RATES 7
42
43/* codec private data */
44struct wm8523_priv {
45 struct snd_soc_codec codec;
46 u16 reg_cache[WM8523_REGISTER_COUNT];
47 struct regulator_bulk_data supplies[WM8523_NUM_SUPPLIES];
48 unsigned int sysclk;
49 unsigned int rate_constraint_list[WM8523_NUM_RATES];
50 struct snd_pcm_hw_constraint_list rate_constraint;
51};
52
53static const u16 wm8523_reg[WM8523_REGISTER_COUNT] = {
54 0x8523, /* R0 - DEVICE_ID */
55 0x0001, /* R1 - REVISION */
56 0x0000, /* R2 - PSCTRL1 */
57 0x1812, /* R3 - AIF_CTRL1 */
58 0x0000, /* R4 - AIF_CTRL2 */
59 0x0001, /* R5 - DAC_CTRL3 */
60 0x0190, /* R6 - DAC_GAINL */
61 0x0190, /* R7 - DAC_GAINR */
62 0x0000, /* R8 - ZERO_DETECT */
63};
64
65static int wm8523_volatile_register(unsigned int reg)
66{
67 switch (reg) {
68 case WM8523_DEVICE_ID:
69 case WM8523_REVISION:
70 return 1;
71 default:
72 return 0;
73 }
74}
75
76static int wm8523_reset(struct snd_soc_codec *codec)
77{
78 return snd_soc_write(codec, WM8523_DEVICE_ID, 0);
79}
80
81static const DECLARE_TLV_DB_SCALE(dac_tlv, -10000, 25, 0);
82
83static const char *wm8523_zd_count_text[] = {
84 "1024",
85 "2048",
86};
87
88static const struct soc_enum wm8523_zc_count =
89 SOC_ENUM_SINGLE(WM8523_ZERO_DETECT, 0, 2, wm8523_zd_count_text);
90
91static const struct snd_kcontrol_new wm8523_snd_controls[] = {
92SOC_DOUBLE_R_TLV("Playback Volume", WM8523_DAC_GAINL, WM8523_DAC_GAINR,
93 0, 448, 0, dac_tlv),
94SOC_SINGLE("ZC Switch", WM8523_DAC_CTRL3, 4, 1, 0),
95SOC_SINGLE("Playback Deemphasis Switch", WM8523_AIF_CTRL1, 8, 1, 0),
96SOC_DOUBLE("Playback Switch", WM8523_DAC_CTRL3, 2, 3, 1, 1),
97SOC_SINGLE("Volume Ramp Up Switch", WM8523_DAC_CTRL3, 1, 1, 0),
98SOC_SINGLE("Volume Ramp Down Switch", WM8523_DAC_CTRL3, 0, 1, 0),
99SOC_ENUM("Zero Detect Count", wm8523_zc_count),
100};
101
102static const struct snd_soc_dapm_widget wm8523_dapm_widgets[] = {
103SND_SOC_DAPM_DAC("DAC", "Playback", SND_SOC_NOPM, 0, 0),
104SND_SOC_DAPM_OUTPUT("LINEVOUTL"),
105SND_SOC_DAPM_OUTPUT("LINEVOUTR"),
106};
107
108static const struct snd_soc_dapm_route intercon[] = {
109 { "LINEVOUTL", NULL, "DAC" },
110 { "LINEVOUTR", NULL, "DAC" },
111};
112
113static int wm8523_add_widgets(struct snd_soc_codec *codec)
114{
115 snd_soc_dapm_new_controls(codec, wm8523_dapm_widgets,
116 ARRAY_SIZE(wm8523_dapm_widgets));
117
118 snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon));
119
120 snd_soc_dapm_new_widgets(codec);
121 return 0;
122}
123
124static struct {
125 int value;
126 int ratio;
127} lrclk_ratios[WM8523_NUM_RATES] = {
128 { 1, 128 },
129 { 2, 192 },
130 { 3, 256 },
131 { 4, 384 },
132 { 5, 512 },
133 { 6, 768 },
134 { 7, 1152 },
135};
136
137static int wm8523_startup(struct snd_pcm_substream *substream,
138 struct snd_soc_dai *dai)
139{
140 struct snd_soc_codec *codec = dai->codec;
141 struct wm8523_priv *wm8523 = codec->private_data;
142
143 /* The set of sample rates that can be supported depends on the
144 * MCLK supplied to the CODEC - enforce this.
145 */
146 if (!wm8523->sysclk) {
147 dev_err(codec->dev,
148 "No MCLK configured, call set_sysclk() on init\n");
149 return -EINVAL;
150 }
151
152 return 0;
153 snd_pcm_hw_constraint_list(substream->runtime, 0,
154 SNDRV_PCM_HW_PARAM_RATE,
155 &wm8523->rate_constraint);
156
157 return 0;
158}
159
160static int wm8523_hw_params(struct snd_pcm_substream *substream,
161 struct snd_pcm_hw_params *params,
162 struct snd_soc_dai *dai)
163{
164 struct snd_soc_pcm_runtime *rtd = substream->private_data;
165 struct snd_soc_device *socdev = rtd->socdev;
166 struct snd_soc_codec *codec = socdev->card->codec;
167 struct wm8523_priv *wm8523 = codec->private_data;
168 int i;
169 u16 aifctrl1 = snd_soc_read(codec, WM8523_AIF_CTRL1);
170 u16 aifctrl2 = snd_soc_read(codec, WM8523_AIF_CTRL2);
171
172 /* Find a supported LRCLK ratio */
173 for (i = 0; i < ARRAY_SIZE(lrclk_ratios); i++) {
174 if (wm8523->sysclk / params_rate(params) ==
175 lrclk_ratios[i].ratio)
176 break;
177 }
178
179 /* Should never happen, should be handled by constraints */
180 if (i == ARRAY_SIZE(lrclk_ratios)) {
181 dev_err(codec->dev, "MCLK/fs ratio %d unsupported\n",
182 wm8523->sysclk / params_rate(params));
183 return -EINVAL;
184 }
185
186 aifctrl2 &= ~WM8523_SR_MASK;
187 aifctrl2 |= lrclk_ratios[i].value;
188
189 aifctrl1 &= ~WM8523_WL_MASK;
190 switch (params_format(params)) {
191 case SNDRV_PCM_FORMAT_S16_LE:
192 break;
193 case SNDRV_PCM_FORMAT_S20_3LE:
194 aifctrl1 |= 0x8;
195 break;
196 case SNDRV_PCM_FORMAT_S24_LE:
197 aifctrl1 |= 0x10;
198 break;
199 case SNDRV_PCM_FORMAT_S32_LE:
200 aifctrl1 |= 0x18;
201 break;
202 }
203
204 snd_soc_write(codec, WM8523_AIF_CTRL1, aifctrl1);
205 snd_soc_write(codec, WM8523_AIF_CTRL2, aifctrl2);
206
207 return 0;
208}
209
210static int wm8523_set_dai_sysclk(struct snd_soc_dai *codec_dai,
211 int clk_id, unsigned int freq, int dir)
212{
213 struct snd_soc_codec *codec = codec_dai->codec;
214 struct wm8523_priv *wm8523 = codec->private_data;
215 unsigned int val;
216 int i;
217
218 wm8523->sysclk = freq;
219
220 wm8523->rate_constraint.count = 0;
221 for (i = 0; i < ARRAY_SIZE(lrclk_ratios); i++) {
222 val = freq / lrclk_ratios[i].ratio;
223 /* Check that it's a standard rate since core can't
224 * cope with others and having the odd rates confuses
225 * constraint matching.
226 */
227 switch (val) {
228 case 8000:
229 case 11025:
230 case 16000:
231 case 22050:
232 case 32000:
233 case 44100:
234 case 48000:
235 case 64000:
236 case 88200:
237 case 96000:
238 case 176400:
239 case 192000:
240 dev_dbg(codec->dev, "Supported sample rate: %dHz\n",
241 val);
242			wm8523->rate_constraint_list[wm8523->rate_constraint.count] = val;
243			wm8523->rate_constraint.count++;
244 break;
245 default:
246 dev_dbg(codec->dev, "Skipping sample rate: %dHz\n",
247 val);
248 }
249 }
250
251 /* Need at least one supported rate... */
252 if (wm8523->rate_constraint.count == 0)
253 return -EINVAL;
254
255 return 0;
256}
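
wm8523_set_dai_sysclk() above derives the usable sample rates from the MCLK the machine driver reports rather than hard-coding them. As a worked example (the MCLK value is illustrative, not from the patch): with MCLK = 12.288MHz, dividing by the supported MCLK/LRCLK ratios 128, 192, 256, 384, 512, 768 and 1152 gives 96000, 64000, 48000, 32000, 24000, 16000 and 10666Hz; only the standard rates 96000, 64000, 48000, 32000 and 16000 pass the switch statement, so those five end up in rate_constraint for wm8523_startup() to apply with snd_pcm_hw_constraint_list().
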
257
258
259static int wm8523_set_dai_fmt(struct snd_soc_dai *codec_dai,
260 unsigned int fmt)
261{
262 struct snd_soc_codec *codec = codec_dai->codec;
263 u16 aifctrl1 = snd_soc_read(codec, WM8523_AIF_CTRL1);
264
265 aifctrl1 &= ~(WM8523_BCLK_INV_MASK | WM8523_LRCLK_INV_MASK |
266 WM8523_FMT_MASK | WM8523_AIF_MSTR_MASK);
267
268 switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
269 case SND_SOC_DAIFMT_CBM_CFM:
270 aifctrl1 |= WM8523_AIF_MSTR;
271 break;
272 case SND_SOC_DAIFMT_CBS_CFS:
273 break;
274 default:
275 return -EINVAL;
276 }
277
278 switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
279 case SND_SOC_DAIFMT_I2S:
280 aifctrl1 |= 0x0002;
281 break;
282 case SND_SOC_DAIFMT_RIGHT_J:
283 break;
284 case SND_SOC_DAIFMT_LEFT_J:
285 aifctrl1 |= 0x0001;
286 break;
287 case SND_SOC_DAIFMT_DSP_A:
288 aifctrl1 |= 0x0003;
289 break;
290 case SND_SOC_DAIFMT_DSP_B:
291 aifctrl1 |= 0x0023;
292 break;
293 default:
294 return -EINVAL;
295 }
296
297 switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
298 case SND_SOC_DAIFMT_NB_NF:
299 break;
300 case SND_SOC_DAIFMT_IB_IF:
301 aifctrl1 |= WM8523_BCLK_INV | WM8523_LRCLK_INV;
302 break;
303 case SND_SOC_DAIFMT_IB_NF:
304 aifctrl1 |= WM8523_BCLK_INV;
305 break;
306 case SND_SOC_DAIFMT_NB_IF:
307 aifctrl1 |= WM8523_LRCLK_INV;
308 break;
309 default:
310 return -EINVAL;
311 }
312
313 snd_soc_write(codec, WM8523_AIF_CTRL1, aifctrl1);
314
315 return 0;
316}
317
318static int wm8523_set_bias_level(struct snd_soc_codec *codec,
319 enum snd_soc_bias_level level)
320{
321 struct wm8523_priv *wm8523 = codec->private_data;
322 int ret, i;
323
324 switch (level) {
325 case SND_SOC_BIAS_ON:
326 break;
327
328 case SND_SOC_BIAS_PREPARE:
329 /* Full power on */
330 snd_soc_update_bits(codec, WM8523_PSCTRL1,
331 WM8523_SYS_ENA_MASK, 3);
332 break;
333
334 case SND_SOC_BIAS_STANDBY:
335 if (codec->bias_level == SND_SOC_BIAS_OFF) {
336 ret = regulator_bulk_enable(ARRAY_SIZE(wm8523->supplies),
337 wm8523->supplies);
338 if (ret != 0) {
339 dev_err(codec->dev,
340 "Failed to enable supplies: %d\n",
341 ret);
342 return ret;
343 }
344
345 /* Initial power up */
346 snd_soc_update_bits(codec, WM8523_PSCTRL1,
347 WM8523_SYS_ENA_MASK, 1);
348
349 /* Sync back default/cached values */
350 for (i = WM8523_AIF_CTRL1;
351 i < WM8523_MAX_REGISTER; i++)
352 snd_soc_write(codec, i, wm8523->reg_cache[i]);
353
354
355 msleep(100);
356 }
357
358 /* Power up to mute */
359 snd_soc_update_bits(codec, WM8523_PSCTRL1,
360 WM8523_SYS_ENA_MASK, 2);
361
362 break;
363
364 case SND_SOC_BIAS_OFF:
365 /* The chip runs through the power down sequence for us. */
366 snd_soc_update_bits(codec, WM8523_PSCTRL1,
367 WM8523_SYS_ENA_MASK, 0);
368 msleep(100);
369
370 regulator_bulk_disable(ARRAY_SIZE(wm8523->supplies),
371 wm8523->supplies);
372 break;
373 }
374 codec->bias_level = level;
375 return 0;
376}
377
378#define WM8523_RATES SNDRV_PCM_RATE_8000_192000
379
380#define WM8523_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
381 SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
382
383static struct snd_soc_dai_ops wm8523_dai_ops = {
384 .startup = wm8523_startup,
385 .hw_params = wm8523_hw_params,
386 .set_sysclk = wm8523_set_dai_sysclk,
387 .set_fmt = wm8523_set_dai_fmt,
388};
389
390struct snd_soc_dai wm8523_dai = {
391 .name = "WM8523",
392 .playback = {
393 .stream_name = "Playback",
394 .channels_min = 2, /* Mono modes not yet supported */
395 .channels_max = 2,
396 .rates = WM8523_RATES,
397 .formats = WM8523_FORMATS,
398 },
399 .ops = &wm8523_dai_ops,
400};
401EXPORT_SYMBOL_GPL(wm8523_dai);
402
403#ifdef CONFIG_PM
404static int wm8523_suspend(struct platform_device *pdev, pm_message_t state)
405{
406 struct snd_soc_device *socdev = platform_get_drvdata(pdev);
407 struct snd_soc_codec *codec = socdev->card->codec;
408
409 wm8523_set_bias_level(codec, SND_SOC_BIAS_OFF);
410 return 0;
411}
412
413static int wm8523_resume(struct platform_device *pdev)
414{
415 struct snd_soc_device *socdev = platform_get_drvdata(pdev);
416 struct snd_soc_codec *codec = socdev->card->codec;
417
418 wm8523_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
419
420 return 0;
421}
422#else
423#define wm8523_suspend NULL
424#define wm8523_resume NULL
425#endif
426
427static int wm8523_probe(struct platform_device *pdev)
428{
429 struct snd_soc_device *socdev = platform_get_drvdata(pdev);
430 struct snd_soc_codec *codec;
431 int ret = 0;
432
433 if (wm8523_codec == NULL) {
434 dev_err(&pdev->dev, "Codec device not registered\n");
435 return -ENODEV;
436 }
437
438 socdev->card->codec = wm8523_codec;
439 codec = wm8523_codec;
440
441 /* register pcms */
442 ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
443 if (ret < 0) {
444 dev_err(codec->dev, "failed to create pcms: %d\n", ret);
445 goto pcm_err;
446 }
447
448 snd_soc_add_controls(codec, wm8523_snd_controls,
449 ARRAY_SIZE(wm8523_snd_controls));
450 wm8523_add_widgets(codec);
451 ret = snd_soc_init_card(socdev);
452 if (ret < 0) {
453 dev_err(codec->dev, "failed to register card: %d\n", ret);
454 goto card_err;
455 }
456
457 return ret;
458
459card_err:
460 snd_soc_free_pcms(socdev);
461 snd_soc_dapm_free(socdev);
462pcm_err:
463 return ret;
464}
465
466static int wm8523_remove(struct platform_device *pdev)
467{
468 struct snd_soc_device *socdev = platform_get_drvdata(pdev);
469
470 snd_soc_free_pcms(socdev);
471 snd_soc_dapm_free(socdev);
472
473 return 0;
474}
475
476struct snd_soc_codec_device soc_codec_dev_wm8523 = {
477 .probe = wm8523_probe,
478 .remove = wm8523_remove,
479 .suspend = wm8523_suspend,
480 .resume = wm8523_resume,
481};
482EXPORT_SYMBOL_GPL(soc_codec_dev_wm8523);
483
484static int wm8523_register(struct wm8523_priv *wm8523,
485 enum snd_soc_control_type control)
486{
487 int ret;
488 struct snd_soc_codec *codec = &wm8523->codec;
489 int i;
490
491 if (wm8523_codec) {
492 dev_err(codec->dev, "Another WM8523 is registered\n");
493 return -EINVAL;
494 }
495
496 mutex_init(&codec->mutex);
497 INIT_LIST_HEAD(&codec->dapm_widgets);
498 INIT_LIST_HEAD(&codec->dapm_paths);
499
500 codec->private_data = wm8523;
501 codec->name = "WM8523";
502 codec->owner = THIS_MODULE;
503 codec->bias_level = SND_SOC_BIAS_OFF;
504 codec->set_bias_level = wm8523_set_bias_level;
505 codec->dai = &wm8523_dai;
506 codec->num_dai = 1;
507 codec->reg_cache_size = WM8523_REGISTER_COUNT;
508 codec->reg_cache = &wm8523->reg_cache;
509 codec->volatile_register = wm8523_volatile_register;
510
511 wm8523->rate_constraint.list = &wm8523->rate_constraint_list[0];
512 wm8523->rate_constraint.count =
513 ARRAY_SIZE(wm8523->rate_constraint_list);
514
515 memcpy(codec->reg_cache, wm8523_reg, sizeof(wm8523_reg));
516
517 ret = snd_soc_codec_set_cache_io(codec, 8, 16, control);
518 if (ret != 0) {
519 dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
520 goto err;
521 }
522
523 for (i = 0; i < ARRAY_SIZE(wm8523->supplies); i++)
524 wm8523->supplies[i].supply = wm8523_supply_names[i];
525
526 ret = regulator_bulk_get(codec->dev, ARRAY_SIZE(wm8523->supplies),
527 wm8523->supplies);
528 if (ret != 0) {
529 dev_err(codec->dev, "Failed to request supplies: %d\n", ret);
530 goto err;
531 }
532
533 ret = regulator_bulk_enable(ARRAY_SIZE(wm8523->supplies),
534 wm8523->supplies);
535 if (ret != 0) {
536 dev_err(codec->dev, "Failed to enable supplies: %d\n", ret);
537 goto err_get;
538 }
539
540 ret = snd_soc_read(codec, WM8523_DEVICE_ID);
541 if (ret < 0) {
542 dev_err(codec->dev, "Failed to read ID register\n");
543 goto err_enable;
544 }
545 if (ret != wm8523_reg[WM8523_DEVICE_ID]) {
546 dev_err(codec->dev, "Device is not a WM8523, ID is %x\n", ret);
547 ret = -EINVAL;
548 goto err_enable;
549 }
550
551 ret = snd_soc_read(codec, WM8523_REVISION);
552 if (ret < 0) {
553 dev_err(codec->dev, "Failed to read revision register\n");
554 goto err_enable;
555 }
556 dev_info(codec->dev, "revision %c\n",
557 (ret & WM8523_CHIP_REV_MASK) + 'A');
558
559 ret = wm8523_reset(codec);
560 if (ret < 0) {
561 dev_err(codec->dev, "Failed to issue reset\n");
562 goto err_enable;
563 }
564
565 wm8523_dai.dev = codec->dev;
566
567 /* Change some default settings - latch VU and enable ZC */
568 wm8523->reg_cache[WM8523_DAC_GAINR] |= WM8523_DACR_VU;
569 wm8523->reg_cache[WM8523_DAC_CTRL3] |= WM8523_ZC;
570
571 wm8523_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
572
573 /* Bias level configuration will have done an extra enable */
574 regulator_bulk_disable(ARRAY_SIZE(wm8523->supplies), wm8523->supplies);
575
576 wm8523_codec = codec;
577
578 ret = snd_soc_register_codec(codec);
579 if (ret != 0) {
580 dev_err(codec->dev, "Failed to register codec: %d\n", ret);
581 return ret;
582 }
583
584 ret = snd_soc_register_dai(&wm8523_dai);
585 if (ret != 0) {
586 dev_err(codec->dev, "Failed to register DAI: %d\n", ret);
587 snd_soc_unregister_codec(codec);
588 return ret;
589 }
590
591 return 0;
592
593err_enable:
594 regulator_bulk_disable(ARRAY_SIZE(wm8523->supplies), wm8523->supplies);
595err_get:
596 regulator_bulk_free(ARRAY_SIZE(wm8523->supplies), wm8523->supplies);
597err:
598 kfree(wm8523);
599 return ret;
600}
601
602static void wm8523_unregister(struct wm8523_priv *wm8523)
603{
604 wm8523_set_bias_level(&wm8523->codec, SND_SOC_BIAS_OFF);
605 regulator_bulk_free(ARRAY_SIZE(wm8523->supplies), wm8523->supplies);
606 snd_soc_unregister_dai(&wm8523_dai);
607 snd_soc_unregister_codec(&wm8523->codec);
608 kfree(wm8523);
609 wm8523_codec = NULL;
610}
611
612#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
613static __devinit int wm8523_i2c_probe(struct i2c_client *i2c,
614 const struct i2c_device_id *id)
615{
616 struct wm8523_priv *wm8523;
617 struct snd_soc_codec *codec;
618
619 wm8523 = kzalloc(sizeof(struct wm8523_priv), GFP_KERNEL);
620 if (wm8523 == NULL)
621 return -ENOMEM;
622
623 codec = &wm8523->codec;
624 codec->hw_write = (hw_write_t)i2c_master_send;
625
626 i2c_set_clientdata(i2c, wm8523);
627 codec->control_data = i2c;
628
629 codec->dev = &i2c->dev;
630
631 return wm8523_register(wm8523, SND_SOC_I2C);
632}
633
634static __devexit int wm8523_i2c_remove(struct i2c_client *client)
635{
636 struct wm8523_priv *wm8523 = i2c_get_clientdata(client);
637 wm8523_unregister(wm8523);
638 return 0;
639}
640
641#ifdef CONFIG_PM
642static int wm8523_i2c_suspend(struct i2c_client *i2c, pm_message_t msg)
643{
644 return snd_soc_suspend_device(&i2c->dev);
645}
646
647static int wm8523_i2c_resume(struct i2c_client *i2c)
648{
649 return snd_soc_resume_device(&i2c->dev);
650}
651#else
652#define wm8523_i2c_suspend NULL
653#define wm8523_i2c_resume NULL
654#endif
655
656static const struct i2c_device_id wm8523_i2c_id[] = {
657 { "wm8523", 0 },
658 { }
659};
660MODULE_DEVICE_TABLE(i2c, wm8523_i2c_id);
661
662static struct i2c_driver wm8523_i2c_driver = {
663 .driver = {
664 .name = "WM8523",
665 .owner = THIS_MODULE,
666 },
667 .probe = wm8523_i2c_probe,
668 .remove = __devexit_p(wm8523_i2c_remove),
669 .suspend = wm8523_i2c_suspend,
670 .resume = wm8523_i2c_resume,
671 .id_table = wm8523_i2c_id,
672};
673#endif
674
675static int __init wm8523_modinit(void)
676{
677 int ret;
678#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
679 ret = i2c_add_driver(&wm8523_i2c_driver);
680 if (ret != 0) {
681 printk(KERN_ERR "Failed to register WM8523 I2C driver: %d\n",
682 ret);
683 }
684#endif
685 return 0;
686}
687module_init(wm8523_modinit);
688
689static void __exit wm8523_exit(void)
690{
691#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
692 i2c_del_driver(&wm8523_i2c_driver);
693#endif
694}
695module_exit(wm8523_exit);
696
697MODULE_DESCRIPTION("ASoC WM8523 driver");
698MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
699MODULE_LICENSE("GPL");
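
Because wm8523_startup() refuses to run without a configured MCLK, a machine driver using wm8523_dai must call set_sysclk() before streaming starts. A minimal sketch of such a hook, assuming a hypothetical board with a fixed 12.288MHz MCLK (the names below are illustrative, not part of this patch):

	#include <sound/soc.h>
	#include <sound/soc-dai.h>

	#define MYBOARD_MCLK_HZ	12288000	/* hypothetical fixed MCLK */

	/* Machine driver hw_params: tell the WM8523 what MCLK it receives */
	static int myboard_hw_params(struct snd_pcm_substream *substream,
				     struct snd_pcm_hw_params *params)
	{
		struct snd_soc_pcm_runtime *rtd = substream->private_data;
		struct snd_soc_dai *codec_dai = rtd->dai->codec_dai;

		return snd_soc_dai_set_sysclk(codec_dai, 0, MYBOARD_MCLK_HZ,
					      SND_SOC_CLOCK_IN);
	}
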
diff --git a/sound/soc/codecs/wm8523.h b/sound/soc/codecs/wm8523.h
new file mode 100644
index 000000000000..1aa9ce3e1357
--- /dev/null
+++ b/sound/soc/codecs/wm8523.h
@@ -0,0 +1,160 @@
1/*
 2 * wm8523.h -- WM8523 ASoC driver
3 *
4 * Copyright 2009 Wolfson Microelectronics, plc
5 *
6 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
7 *
8 * Based on wm8753.h
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */
14
15#ifndef _WM8523_H
16#define _WM8523_H
17
18/*
19 * Register values.
20 */
21#define WM8523_DEVICE_ID 0x00
22#define WM8523_REVISION 0x01
23#define WM8523_PSCTRL1 0x02
24#define WM8523_AIF_CTRL1 0x03
25#define WM8523_AIF_CTRL2 0x04
26#define WM8523_DAC_CTRL3 0x05
27#define WM8523_DAC_GAINL 0x06
28#define WM8523_DAC_GAINR 0x07
29#define WM8523_ZERO_DETECT 0x08
30
31#define WM8523_REGISTER_COUNT 9
32#define WM8523_MAX_REGISTER 0x08
33
34/*
35 * Field Definitions.
36 */
37
38/*
39 * R0 (0x00) - DEVICE_ID
40 */
41#define WM8523_CHIP_ID_MASK 0xFFFF /* CHIP_ID - [15:0] */
42#define WM8523_CHIP_ID_SHIFT 0 /* CHIP_ID - [15:0] */
43#define WM8523_CHIP_ID_WIDTH 16 /* CHIP_ID - [15:0] */
44
45/*
46 * R1 (0x01) - REVISION
47 */
48#define WM8523_CHIP_REV_MASK 0x0007 /* CHIP_REV - [2:0] */
49#define WM8523_CHIP_REV_SHIFT 0 /* CHIP_REV - [2:0] */
50#define WM8523_CHIP_REV_WIDTH 3 /* CHIP_REV - [2:0] */
51
52/*
53 * R2 (0x02) - PSCTRL1
54 */
55#define WM8523_SYS_ENA_MASK 0x0003 /* SYS_ENA - [1:0] */
56#define WM8523_SYS_ENA_SHIFT 0 /* SYS_ENA - [1:0] */
57#define WM8523_SYS_ENA_WIDTH 2 /* SYS_ENA - [1:0] */
58
59/*
60 * R3 (0x03) - AIF_CTRL1
61 */
62#define WM8523_TDM_MODE_MASK 0x1800 /* TDM_MODE - [12:11] */
63#define WM8523_TDM_MODE_SHIFT 11 /* TDM_MODE - [12:11] */
64#define WM8523_TDM_MODE_WIDTH 2 /* TDM_MODE - [12:11] */
65#define WM8523_TDM_SLOT_MASK 0x0600 /* TDM_SLOT - [10:9] */
66#define WM8523_TDM_SLOT_SHIFT 9 /* TDM_SLOT - [10:9] */
67#define WM8523_TDM_SLOT_WIDTH 2 /* TDM_SLOT - [10:9] */
68#define WM8523_DEEMPH 0x0100 /* DEEMPH */
69#define WM8523_DEEMPH_MASK 0x0100 /* DEEMPH */
70#define WM8523_DEEMPH_SHIFT 8 /* DEEMPH */
71#define WM8523_DEEMPH_WIDTH 1 /* DEEMPH */
72#define WM8523_AIF_MSTR 0x0080 /* AIF_MSTR */
73#define WM8523_AIF_MSTR_MASK 0x0080 /* AIF_MSTR */
74#define WM8523_AIF_MSTR_SHIFT 7 /* AIF_MSTR */
75#define WM8523_AIF_MSTR_WIDTH 1 /* AIF_MSTR */
76#define WM8523_LRCLK_INV 0x0040 /* LRCLK_INV */
77#define WM8523_LRCLK_INV_MASK 0x0040 /* LRCLK_INV */
78#define WM8523_LRCLK_INV_SHIFT 6 /* LRCLK_INV */
79#define WM8523_LRCLK_INV_WIDTH 1 /* LRCLK_INV */
80#define WM8523_BCLK_INV 0x0020 /* BCLK_INV */
81#define WM8523_BCLK_INV_MASK 0x0020 /* BCLK_INV */
82#define WM8523_BCLK_INV_SHIFT 5 /* BCLK_INV */
83#define WM8523_BCLK_INV_WIDTH 1 /* BCLK_INV */
84#define WM8523_WL_MASK 0x0018 /* WL - [4:3] */
85#define WM8523_WL_SHIFT 3 /* WL - [4:3] */
86#define WM8523_WL_WIDTH 2 /* WL - [4:3] */
87#define WM8523_FMT_MASK 0x0007 /* FMT - [2:0] */
88#define WM8523_FMT_SHIFT 0 /* FMT - [2:0] */
89#define WM8523_FMT_WIDTH 3 /* FMT - [2:0] */
90
91/*
92 * R4 (0x04) - AIF_CTRL2
93 */
94#define WM8523_DAC_OP_MUX_MASK 0x00C0 /* DAC_OP_MUX - [7:6] */
95#define WM8523_DAC_OP_MUX_SHIFT 6 /* DAC_OP_MUX - [7:6] */
96#define WM8523_DAC_OP_MUX_WIDTH 2 /* DAC_OP_MUX - [7:6] */
97#define WM8523_BCLKDIV_MASK 0x0038 /* BCLKDIV - [5:3] */
98#define WM8523_BCLKDIV_SHIFT 3 /* BCLKDIV - [5:3] */
99#define WM8523_BCLKDIV_WIDTH 3 /* BCLKDIV - [5:3] */
100#define WM8523_SR_MASK 0x0007 /* SR - [2:0] */
101#define WM8523_SR_SHIFT 0 /* SR - [2:0] */
102#define WM8523_SR_WIDTH 3 /* SR - [2:0] */
103
104/*
105 * R5 (0x05) - DAC_CTRL3
106 */
107#define WM8523_ZC 0x0010 /* ZC */
108#define WM8523_ZC_MASK 0x0010 /* ZC */
109#define WM8523_ZC_SHIFT 4 /* ZC */
110#define WM8523_ZC_WIDTH 1 /* ZC */
111#define WM8523_DACR 0x0008 /* DACR */
112#define WM8523_DACR_MASK 0x0008 /* DACR */
113#define WM8523_DACR_SHIFT 3 /* DACR */
114#define WM8523_DACR_WIDTH 1 /* DACR */
115#define WM8523_DACL 0x0004 /* DACL */
116#define WM8523_DACL_MASK 0x0004 /* DACL */
117#define WM8523_DACL_SHIFT 2 /* DACL */
118#define WM8523_DACL_WIDTH 1 /* DACL */
119#define WM8523_VOL_UP_RAMP 0x0002 /* VOL_UP_RAMP */
120#define WM8523_VOL_UP_RAMP_MASK 0x0002 /* VOL_UP_RAMP */
121#define WM8523_VOL_UP_RAMP_SHIFT 1 /* VOL_UP_RAMP */
122#define WM8523_VOL_UP_RAMP_WIDTH 1 /* VOL_UP_RAMP */
123#define WM8523_VOL_DOWN_RAMP 0x0001 /* VOL_DOWN_RAMP */
124#define WM8523_VOL_DOWN_RAMP_MASK 0x0001 /* VOL_DOWN_RAMP */
125#define WM8523_VOL_DOWN_RAMP_SHIFT 0 /* VOL_DOWN_RAMP */
126#define WM8523_VOL_DOWN_RAMP_WIDTH 1 /* VOL_DOWN_RAMP */
127
128/*
129 * R6 (0x06) - DAC_GAINL
130 */
131#define WM8523_DACL_VU 0x0200 /* DACL_VU */
132#define WM8523_DACL_VU_MASK 0x0200 /* DACL_VU */
133#define WM8523_DACL_VU_SHIFT 9 /* DACL_VU */
134#define WM8523_DACL_VU_WIDTH 1 /* DACL_VU */
135#define WM8523_DACL_VOL_MASK 0x01FF /* DACL_VOL - [8:0] */
136#define WM8523_DACL_VOL_SHIFT 0 /* DACL_VOL - [8:0] */
137#define WM8523_DACL_VOL_WIDTH 9 /* DACL_VOL - [8:0] */
138
139/*
140 * R7 (0x07) - DAC_GAINR
141 */
142#define WM8523_DACR_VU 0x0200 /* DACR_VU */
143#define WM8523_DACR_VU_MASK 0x0200 /* DACR_VU */
144#define WM8523_DACR_VU_SHIFT 9 /* DACR_VU */
145#define WM8523_DACR_VU_WIDTH 1 /* DACR_VU */
146#define WM8523_DACR_VOL_MASK 0x01FF /* DACR_VOL - [8:0] */
147#define WM8523_DACR_VOL_SHIFT 0 /* DACR_VOL - [8:0] */
148#define WM8523_DACR_VOL_WIDTH 9 /* DACR_VOL - [8:0] */
149
150/*
151 * R8 (0x08) - ZERO_DETECT
152 */
153#define WM8523_ZD_COUNT_MASK 0x0003 /* ZD_COUNT - [1:0] */
154#define WM8523_ZD_COUNT_SHIFT 0 /* ZD_COUNT - [1:0] */
155#define WM8523_ZD_COUNT_WIDTH 2 /* ZD_COUNT - [1:0] */
156
157extern struct snd_soc_dai wm8523_dai;
158extern struct snd_soc_codec_device soc_codec_dev_wm8523;
159
160#endif
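
The _MASK/_SHIFT/_WIDTH triplets above are the usual ASoC register field helpers; paired with snd_soc_update_bits() they let the driver change one field without disturbing the rest of the register. For instance, the "power up to mute" write in wm8523_set_bias_level() can be read as the following (an illustrative restatement of the existing call, not new driver code; SYS_ENA_SHIFT is 0, so the shifted value is just 2):

	/* Set PSCTRL1.SYS_ENA = 2 (powered up, muted), leave other bits alone */
	snd_soc_update_bits(codec, WM8523_PSCTRL1, WM8523_SYS_ENA_MASK,
			    2 << WM8523_SYS_ENA_SHIFT);
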
diff --git a/sound/soc/codecs/wm8580.c b/sound/soc/codecs/wm8580.c
index 86c4b24db817..6bded8c78150 100644
--- a/sound/soc/codecs/wm8580.c
+++ b/sound/soc/codecs/wm8580.c
@@ -24,6 +24,8 @@
24#include <linux/pm.h> 24#include <linux/pm.h>
25#include <linux/i2c.h> 25#include <linux/i2c.h>
26#include <linux/platform_device.h> 26#include <linux/platform_device.h>
27#include <linux/regulator/consumer.h>
28
27#include <sound/core.h> 29#include <sound/core.h>
28#include <sound/pcm.h> 30#include <sound/pcm.h>
29#include <sound/pcm_params.h> 31#include <sound/pcm_params.h>
@@ -187,82 +189,22 @@ struct pll_state {
187 unsigned int out; 189 unsigned int out;
188}; 190};
189 191
192#define WM8580_NUM_SUPPLIES 3
193static const char *wm8580_supply_names[WM8580_NUM_SUPPLIES] = {
194 "AVDD",
195 "DVDD",
196 "PVDD",
197};
198
190/* codec private data */ 199/* codec private data */
191struct wm8580_priv { 200struct wm8580_priv {
192 struct snd_soc_codec codec; 201 struct snd_soc_codec codec;
202 struct regulator_bulk_data supplies[WM8580_NUM_SUPPLIES];
193 u16 reg_cache[WM8580_MAX_REGISTER + 1]; 203 u16 reg_cache[WM8580_MAX_REGISTER + 1];
194 struct pll_state a; 204 struct pll_state a;
195 struct pll_state b; 205 struct pll_state b;
196}; 206};
197 207
198
199/*
200 * read wm8580 register cache
201 */
202static inline unsigned int wm8580_read_reg_cache(struct snd_soc_codec *codec,
203 unsigned int reg)
204{
205 u16 *cache = codec->reg_cache;
206 BUG_ON(reg >= ARRAY_SIZE(wm8580_reg));
207 return cache[reg];
208}
209
210/*
211 * write wm8580 register cache
212 */
213static inline void wm8580_write_reg_cache(struct snd_soc_codec *codec,
214 unsigned int reg, unsigned int value)
215{
216 u16 *cache = codec->reg_cache;
217
218 cache[reg] = value;
219}
220
221/*
222 * write to the WM8580 register space
223 */
224static int wm8580_write(struct snd_soc_codec *codec, unsigned int reg,
225 unsigned int value)
226{
227 u8 data[2];
228
229 BUG_ON(reg >= ARRAY_SIZE(wm8580_reg));
230
231 /* Registers are 9 bits wide */
232 value &= 0x1ff;
233
234 switch (reg) {
235 case WM8580_RESET:
236 /* Uncached */
237 break;
238 default:
239 if (value == wm8580_read_reg_cache(codec, reg))
240 return 0;
241 }
242
243 /* data is
244 * D15..D9 WM8580 register offset
245 * D8...D0 register data
246 */
247 data[0] = (reg << 1) | ((value >> 8) & 0x0001);
248 data[1] = value & 0x00ff;
249
250 wm8580_write_reg_cache(codec, reg, value);
251 if (codec->hw_write(codec->control_data, data, 2) == 2)
252 return 0;
253 else
254 return -EIO;
255}
256
257static inline unsigned int wm8580_read(struct snd_soc_codec *codec,
258 unsigned int reg)
259{
260 switch (reg) {
261 default:
262 return wm8580_read_reg_cache(codec, reg);
263 }
264}
265
266static const DECLARE_TLV_DB_SCALE(dac_tlv, -12750, 50, 1); 208static const DECLARE_TLV_DB_SCALE(dac_tlv, -12750, 50, 1);
267 209
268static int wm8580_out_vu(struct snd_kcontrol *kcontrol, 210static int wm8580_out_vu(struct snd_kcontrol *kcontrol,
@@ -271,25 +213,22 @@ static int wm8580_out_vu(struct snd_kcontrol *kcontrol,
271 struct soc_mixer_control *mc = 213 struct soc_mixer_control *mc =
272 (struct soc_mixer_control *)kcontrol->private_value; 214 (struct soc_mixer_control *)kcontrol->private_value;
273 struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); 215 struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
216 u16 *reg_cache = codec->reg_cache;
274 unsigned int reg = mc->reg; 217 unsigned int reg = mc->reg;
275 unsigned int reg2 = mc->rreg; 218 unsigned int reg2 = mc->rreg;
276 int ret; 219 int ret;
277 u16 val;
278 220
279 /* Clear the register cache so we write without VU set */ 221 /* Clear the register cache so we write without VU set */
280 wm8580_write_reg_cache(codec, reg, 0); 222 reg_cache[reg] = 0;
281 wm8580_write_reg_cache(codec, reg2, 0); 223 reg_cache[reg2] = 0;
282 224
283 ret = snd_soc_put_volsw_2r(kcontrol, ucontrol); 225 ret = snd_soc_put_volsw_2r(kcontrol, ucontrol);
284 if (ret < 0) 226 if (ret < 0)
285 return ret; 227 return ret;
286 228
287 /* Now write again with the volume update bit set */ 229 /* Now write again with the volume update bit set */
288 val = wm8580_read_reg_cache(codec, reg); 230 snd_soc_update_bits(codec, reg, 0x100, 0x100);
289 wm8580_write(codec, reg, val | 0x0100); 231 snd_soc_update_bits(codec, reg2, 0x100, 0x100);
290
291 val = wm8580_read_reg_cache(codec, reg2);
292 wm8580_write(codec, reg2, val | 0x0100);
293 232
294 return 0; 233 return 0;
295} 234}
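
The rewritten wm8580_out_vu() keeps the original two-step volume-update sequence while going through the core helpers: the cached values of both gain registers are cleared so that snd_soc_put_volsw_2r() writes the new volumes with the VU bit (0x100) still clear, and snd_soc_update_bits() then sets VU on each register so the left and right updates take effect simultaneously.
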
@@ -512,27 +451,27 @@ static int wm8580_set_dai_pll(struct snd_soc_dai *codec_dai,
512 /* Always disable the PLL - it is not safe to leave it running 451 /* Always disable the PLL - it is not safe to leave it running
513 * while reprogramming it. 452 * while reprogramming it.
514 */ 453 */
515 reg = wm8580_read(codec, WM8580_PWRDN2); 454 reg = snd_soc_read(codec, WM8580_PWRDN2);
516 wm8580_write(codec, WM8580_PWRDN2, reg | pwr_mask); 455 snd_soc_write(codec, WM8580_PWRDN2, reg | pwr_mask);
517 456
518 if (!freq_in || !freq_out) 457 if (!freq_in || !freq_out)
519 return 0; 458 return 0;
520 459
521 wm8580_write(codec, WM8580_PLLA1 + offset, pll_div.k & 0x1ff); 460 snd_soc_write(codec, WM8580_PLLA1 + offset, pll_div.k & 0x1ff);
522 wm8580_write(codec, WM8580_PLLA2 + offset, (pll_div.k >> 9) & 0xff); 461 snd_soc_write(codec, WM8580_PLLA2 + offset, (pll_div.k >> 9) & 0x1ff);
523 wm8580_write(codec, WM8580_PLLA3 + offset, 462 snd_soc_write(codec, WM8580_PLLA3 + offset,
524 (pll_div.k >> 18 & 0xf) | (pll_div.n << 4)); 463 (pll_div.k >> 18 & 0xf) | (pll_div.n << 4));
525 464
526 reg = wm8580_read(codec, WM8580_PLLA4 + offset); 465 reg = snd_soc_read(codec, WM8580_PLLA4 + offset);
527 reg &= ~0x3f; 466 reg &= ~0x1b;
528 reg |= pll_div.prescale | pll_div.postscale << 1 | 467 reg |= pll_div.prescale | pll_div.postscale << 1 |
529 pll_div.freqmode << 3; 468 pll_div.freqmode << 3;
530 469
531 wm8580_write(codec, WM8580_PLLA4 + offset, reg); 470 snd_soc_write(codec, WM8580_PLLA4 + offset, reg);
532 471
533 /* All done, turn it on */ 472 /* All done, turn it on */
534 reg = wm8580_read(codec, WM8580_PWRDN2); 473 reg = snd_soc_read(codec, WM8580_PWRDN2);
535 wm8580_write(codec, WM8580_PWRDN2, reg & ~pwr_mask); 474 snd_soc_write(codec, WM8580_PWRDN2, reg & ~pwr_mask);
536 475
537 return 0; 476 return 0;
538} 477}
@@ -547,7 +486,7 @@ static int wm8580_paif_hw_params(struct snd_pcm_substream *substream,
547 struct snd_soc_pcm_runtime *rtd = substream->private_data; 486 struct snd_soc_pcm_runtime *rtd = substream->private_data;
548 struct snd_soc_device *socdev = rtd->socdev; 487 struct snd_soc_device *socdev = rtd->socdev;
549 struct snd_soc_codec *codec = socdev->card->codec; 488 struct snd_soc_codec *codec = socdev->card->codec;
550 u16 paifb = wm8580_read(codec, WM8580_PAIF3 + dai->id); 489 u16 paifb = snd_soc_read(codec, WM8580_PAIF3 + dai->id);
551 490
552 paifb &= ~WM8580_AIF_LENGTH_MASK; 491 paifb &= ~WM8580_AIF_LENGTH_MASK;
553 /* bit size */ 492 /* bit size */
@@ -567,7 +506,7 @@ static int wm8580_paif_hw_params(struct snd_pcm_substream *substream,
567 return -EINVAL; 506 return -EINVAL;
568 } 507 }
569 508
570 wm8580_write(codec, WM8580_PAIF3 + dai->id, paifb); 509 snd_soc_write(codec, WM8580_PAIF3 + dai->id, paifb);
571 return 0; 510 return 0;
572} 511}
573 512
@@ -579,8 +518,8 @@ static int wm8580_set_paif_dai_fmt(struct snd_soc_dai *codec_dai,
579 unsigned int aifb; 518 unsigned int aifb;
580 int can_invert_lrclk; 519 int can_invert_lrclk;
581 520
582 aifa = wm8580_read(codec, WM8580_PAIF1 + codec_dai->id); 521 aifa = snd_soc_read(codec, WM8580_PAIF1 + codec_dai->id);
583 aifb = wm8580_read(codec, WM8580_PAIF3 + codec_dai->id); 522 aifb = snd_soc_read(codec, WM8580_PAIF3 + codec_dai->id);
584 523
585 aifb &= ~(WM8580_AIF_FMT_MASK | WM8580_AIF_LRP | WM8580_AIF_BCP); 524 aifb &= ~(WM8580_AIF_FMT_MASK | WM8580_AIF_LRP | WM8580_AIF_BCP);
586 525
@@ -646,8 +585,8 @@ static int wm8580_set_paif_dai_fmt(struct snd_soc_dai *codec_dai,
646 return -EINVAL; 585 return -EINVAL;
647 } 586 }
648 587
649 wm8580_write(codec, WM8580_PAIF1 + codec_dai->id, aifa); 588 snd_soc_write(codec, WM8580_PAIF1 + codec_dai->id, aifa);
650 wm8580_write(codec, WM8580_PAIF3 + codec_dai->id, aifb); 589 snd_soc_write(codec, WM8580_PAIF3 + codec_dai->id, aifb);
651 590
652 return 0; 591 return 0;
653} 592}
@@ -660,7 +599,7 @@ static int wm8580_set_dai_clkdiv(struct snd_soc_dai *codec_dai,
660 599
661 switch (div_id) { 600 switch (div_id) {
662 case WM8580_MCLK: 601 case WM8580_MCLK:
663 reg = wm8580_read(codec, WM8580_PLLB4); 602 reg = snd_soc_read(codec, WM8580_PLLB4);
664 reg &= ~WM8580_PLLB4_MCLKOUTSRC_MASK; 603 reg &= ~WM8580_PLLB4_MCLKOUTSRC_MASK;
665 604
666 switch (div) { 605 switch (div) {
@@ -682,11 +621,11 @@ static int wm8580_set_dai_clkdiv(struct snd_soc_dai *codec_dai,
682 default: 621 default:
683 return -EINVAL; 622 return -EINVAL;
684 } 623 }
685 wm8580_write(codec, WM8580_PLLB4, reg); 624 snd_soc_write(codec, WM8580_PLLB4, reg);
686 break; 625 break;
687 626
688 case WM8580_DAC_CLKSEL: 627 case WM8580_DAC_CLKSEL:
689 reg = wm8580_read(codec, WM8580_CLKSEL); 628 reg = snd_soc_read(codec, WM8580_CLKSEL);
690 reg &= ~WM8580_CLKSEL_DAC_CLKSEL_MASK; 629 reg &= ~WM8580_CLKSEL_DAC_CLKSEL_MASK;
691 630
692 switch (div) { 631 switch (div) {
@@ -704,11 +643,11 @@ static int wm8580_set_dai_clkdiv(struct snd_soc_dai *codec_dai,
704 default: 643 default:
705 return -EINVAL; 644 return -EINVAL;
706 } 645 }
707 wm8580_write(codec, WM8580_CLKSEL, reg); 646 snd_soc_write(codec, WM8580_CLKSEL, reg);
708 break; 647 break;
709 648
710 case WM8580_CLKOUTSRC: 649 case WM8580_CLKOUTSRC:
711 reg = wm8580_read(codec, WM8580_PLLB4); 650 reg = snd_soc_read(codec, WM8580_PLLB4);
712 reg &= ~WM8580_PLLB4_CLKOUTSRC_MASK; 651 reg &= ~WM8580_PLLB4_CLKOUTSRC_MASK;
713 652
714 switch (div) { 653 switch (div) {
@@ -730,7 +669,7 @@ static int wm8580_set_dai_clkdiv(struct snd_soc_dai *codec_dai,
730 default: 669 default:
731 return -EINVAL; 670 return -EINVAL;
732 } 671 }
733 wm8580_write(codec, WM8580_PLLB4, reg); 672 snd_soc_write(codec, WM8580_PLLB4, reg);
734 break; 673 break;
735 674
736 default: 675 default:
@@ -745,14 +684,14 @@ static int wm8580_digital_mute(struct snd_soc_dai *codec_dai, int mute)
745 struct snd_soc_codec *codec = codec_dai->codec; 684 struct snd_soc_codec *codec = codec_dai->codec;
746 unsigned int reg; 685 unsigned int reg;
747 686
748 reg = wm8580_read(codec, WM8580_DAC_CONTROL5); 687 reg = snd_soc_read(codec, WM8580_DAC_CONTROL5);
749 688
750 if (mute) 689 if (mute)
751 reg |= WM8580_DAC_CONTROL5_MUTEALL; 690 reg |= WM8580_DAC_CONTROL5_MUTEALL;
752 else 691 else
753 reg &= ~WM8580_DAC_CONTROL5_MUTEALL; 692 reg &= ~WM8580_DAC_CONTROL5_MUTEALL;
754 693
755 wm8580_write(codec, WM8580_DAC_CONTROL5, reg); 694 snd_soc_write(codec, WM8580_DAC_CONTROL5, reg);
756 695
757 return 0; 696 return 0;
758} 697}
@@ -769,20 +708,20 @@ static int wm8580_set_bias_level(struct snd_soc_codec *codec,
769 case SND_SOC_BIAS_STANDBY: 708 case SND_SOC_BIAS_STANDBY:
770 if (codec->bias_level == SND_SOC_BIAS_OFF) { 709 if (codec->bias_level == SND_SOC_BIAS_OFF) {
771 /* Power up and get individual control of the DACs */ 710 /* Power up and get individual control of the DACs */
772 reg = wm8580_read(codec, WM8580_PWRDN1); 711 reg = snd_soc_read(codec, WM8580_PWRDN1);
773 reg &= ~(WM8580_PWRDN1_PWDN | WM8580_PWRDN1_ALLDACPD); 712 reg &= ~(WM8580_PWRDN1_PWDN | WM8580_PWRDN1_ALLDACPD);
774 wm8580_write(codec, WM8580_PWRDN1, reg); 713 snd_soc_write(codec, WM8580_PWRDN1, reg);
775 714
776			/* Make VMID high impedance */ 715			/* Make VMID high impedance */
777 reg = wm8580_read(codec, WM8580_ADC_CONTROL1); 716 reg = snd_soc_read(codec, WM8580_ADC_CONTROL1);
778 reg &= ~0x100; 717 reg &= ~0x100;
779 wm8580_write(codec, WM8580_ADC_CONTROL1, reg); 718 snd_soc_write(codec, WM8580_ADC_CONTROL1, reg);
780 } 719 }
781 break; 720 break;
782 721
783 case SND_SOC_BIAS_OFF: 722 case SND_SOC_BIAS_OFF:
784 reg = wm8580_read(codec, WM8580_PWRDN1); 723 reg = snd_soc_read(codec, WM8580_PWRDN1);
785 wm8580_write(codec, WM8580_PWRDN1, reg | WM8580_PWRDN1_PWDN); 724 snd_soc_write(codec, WM8580_PWRDN1, reg | WM8580_PWRDN1_PWDN);
786 break; 725 break;
787 } 726 }
788 codec->bias_level = level; 727 codec->bias_level = level;
@@ -893,7 +832,8 @@ struct snd_soc_codec_device soc_codec_dev_wm8580 = {
893}; 832};
894EXPORT_SYMBOL_GPL(soc_codec_dev_wm8580); 833EXPORT_SYMBOL_GPL(soc_codec_dev_wm8580);
895 834
896static int wm8580_register(struct wm8580_priv *wm8580) 835static int wm8580_register(struct wm8580_priv *wm8580,
836 enum snd_soc_control_type control)
897{ 837{
898 int ret, i; 838 int ret, i;
899 struct snd_soc_codec *codec = &wm8580->codec; 839 struct snd_soc_codec *codec = &wm8580->codec;
@@ -911,8 +851,6 @@ static int wm8580_register(struct wm8580_priv *wm8580)
911 codec->private_data = wm8580; 851 codec->private_data = wm8580;
912 codec->name = "WM8580"; 852 codec->name = "WM8580";
913 codec->owner = THIS_MODULE; 853 codec->owner = THIS_MODULE;
914 codec->read = wm8580_read_reg_cache;
915 codec->write = wm8580_write;
916 codec->bias_level = SND_SOC_BIAS_OFF; 854 codec->bias_level = SND_SOC_BIAS_OFF;
917 codec->set_bias_level = wm8580_set_bias_level; 855 codec->set_bias_level = wm8580_set_bias_level;
918 codec->dai = wm8580_dai; 856 codec->dai = wm8580_dai;
@@ -922,11 +860,34 @@ static int wm8580_register(struct wm8580_priv *wm8580)
922 860
923 memcpy(codec->reg_cache, wm8580_reg, sizeof(wm8580_reg)); 861 memcpy(codec->reg_cache, wm8580_reg, sizeof(wm8580_reg));
924 862
863 ret = snd_soc_codec_set_cache_io(codec, 7, 9, control);
864 if (ret < 0) {
865 dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
866 goto err;
867 }
868
869 for (i = 0; i < ARRAY_SIZE(wm8580->supplies); i++)
870 wm8580->supplies[i].supply = wm8580_supply_names[i];
871
872 ret = regulator_bulk_get(codec->dev, ARRAY_SIZE(wm8580->supplies),
873 wm8580->supplies);
874 if (ret != 0) {
875 dev_err(codec->dev, "Failed to request supplies: %d\n", ret);
876 goto err;
877 }
878
879 ret = regulator_bulk_enable(ARRAY_SIZE(wm8580->supplies),
880 wm8580->supplies);
881 if (ret != 0) {
882 dev_err(codec->dev, "Failed to enable supplies: %d\n", ret);
883 goto err_regulator_get;
884 }
885
925 /* Get the codec into a known state */ 886 /* Get the codec into a known state */
926 ret = wm8580_write(codec, WM8580_RESET, 0); 887 ret = snd_soc_write(codec, WM8580_RESET, 0);
927 if (ret != 0) { 888 if (ret != 0) {
928 dev_err(codec->dev, "Failed to reset codec: %d\n", ret); 889 dev_err(codec->dev, "Failed to reset codec: %d\n", ret);
929 goto err; 890 goto err_regulator_enable;
930 } 891 }
931 892
932 for (i = 0; i < ARRAY_SIZE(wm8580_dai); i++) 893 for (i = 0; i < ARRAY_SIZE(wm8580_dai); i++)
@@ -939,7 +900,7 @@ static int wm8580_register(struct wm8580_priv *wm8580)
939 ret = snd_soc_register_codec(codec); 900 ret = snd_soc_register_codec(codec);
940 if (ret != 0) { 901 if (ret != 0) {
941 dev_err(codec->dev, "Failed to register codec: %d\n", ret); 902 dev_err(codec->dev, "Failed to register codec: %d\n", ret);
942 goto err; 903 goto err_regulator_enable;
943 } 904 }
944 905
945 ret = snd_soc_register_dais(wm8580_dai, ARRAY_SIZE(wm8580_dai)); 906 ret = snd_soc_register_dais(wm8580_dai, ARRAY_SIZE(wm8580_dai));
@@ -952,6 +913,10 @@ static int wm8580_register(struct wm8580_priv *wm8580)
952 913
953err_codec: 914err_codec:
954 snd_soc_unregister_codec(codec); 915 snd_soc_unregister_codec(codec);
916err_regulator_enable:
917 regulator_bulk_disable(ARRAY_SIZE(wm8580->supplies), wm8580->supplies);
918err_regulator_get:
919 regulator_bulk_free(ARRAY_SIZE(wm8580->supplies), wm8580->supplies);
955err: 920err:
956 kfree(wm8580); 921 kfree(wm8580);
957 return ret; 922 return ret;
@@ -962,6 +927,8 @@ static void wm8580_unregister(struct wm8580_priv *wm8580)
962 wm8580_set_bias_level(&wm8580->codec, SND_SOC_BIAS_OFF); 927 wm8580_set_bias_level(&wm8580->codec, SND_SOC_BIAS_OFF);
963 snd_soc_unregister_dais(wm8580_dai, ARRAY_SIZE(wm8580_dai)); 928 snd_soc_unregister_dais(wm8580_dai, ARRAY_SIZE(wm8580_dai));
964 snd_soc_unregister_codec(&wm8580->codec); 929 snd_soc_unregister_codec(&wm8580->codec);
930 regulator_bulk_disable(ARRAY_SIZE(wm8580->supplies), wm8580->supplies);
931 regulator_bulk_free(ARRAY_SIZE(wm8580->supplies), wm8580->supplies);
965 kfree(wm8580); 932 kfree(wm8580);
966 wm8580_codec = NULL; 933 wm8580_codec = NULL;
967} 934}
@@ -978,14 +945,13 @@ static int wm8580_i2c_probe(struct i2c_client *i2c,
978 return -ENOMEM; 945 return -ENOMEM;
979 946
980 codec = &wm8580->codec; 947 codec = &wm8580->codec;
981 codec->hw_write = (hw_write_t)i2c_master_send;
982 948
983 i2c_set_clientdata(i2c, wm8580); 949 i2c_set_clientdata(i2c, wm8580);
984 codec->control_data = i2c; 950 codec->control_data = i2c;
985 951
986 codec->dev = &i2c->dev; 952 codec->dev = &i2c->dev;
987 953
988 return wm8580_register(wm8580); 954 return wm8580_register(wm8580, SND_SOC_I2C);
989} 955}
990 956
991static int wm8580_i2c_remove(struct i2c_client *client) 957static int wm8580_i2c_remove(struct i2c_client *client)
@@ -995,6 +961,21 @@ static int wm8580_i2c_remove(struct i2c_client *client)
995 return 0; 961 return 0;
996} 962}
997 963
964#ifdef CONFIG_PM
965static int wm8580_i2c_suspend(struct i2c_client *client, pm_message_t msg)
966{
967 return snd_soc_suspend_device(&client->dev);
968}
969
970static int wm8580_i2c_resume(struct i2c_client *client)
971{
972 return snd_soc_resume_device(&client->dev);
973}
974#else
975#define wm8580_i2c_suspend NULL
976#define wm8580_i2c_resume NULL
977#endif
978
998static const struct i2c_device_id wm8580_i2c_id[] = { 979static const struct i2c_device_id wm8580_i2c_id[] = {
999 { "wm8580", 0 }, 980 { "wm8580", 0 },
1000 { } 981 { }
@@ -1008,6 +989,8 @@ static struct i2c_driver wm8580_i2c_driver = {
1008 }, 989 },
1009 .probe = wm8580_i2c_probe, 990 .probe = wm8580_i2c_probe,
1010 .remove = wm8580_i2c_remove, 991 .remove = wm8580_i2c_remove,
992 .suspend = wm8580_i2c_suspend,
993 .resume = wm8580_i2c_resume,
1011 .id_table = wm8580_i2c_id, 994 .id_table = wm8580_i2c_id,
1012}; 995};
1013#endif 996#endif
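
Both the new WM8523 driver and the WM8580 changes above add supply handling through the regulator bulk API: the driver names its supplies once, requests them as a group, and enables or disables them around bias level transitions. A minimal, self-contained sketch of that pattern, assuming hypothetical supply names and without the per-driver error paths:

	#include <linux/regulator/consumer.h>

	#define NUM_SUPPLIES 2

	static const char *supply_names[NUM_SUPPLIES] = { "AVDD", "DVDD" };

	/* Request and enable a group of supplies; caller owns "supplies" */
	static int power_up(struct device *dev,
			    struct regulator_bulk_data *supplies)
	{
		int i, ret;

		for (i = 0; i < NUM_SUPPLIES; i++)
			supplies[i].supply = supply_names[i];

		ret = regulator_bulk_get(dev, NUM_SUPPLIES, supplies);
		if (ret != 0)
			return ret;

		ret = regulator_bulk_enable(NUM_SUPPLIES, supplies);
		if (ret != 0)
			regulator_bulk_free(NUM_SUPPLIES, supplies);

		return ret;
	}
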
diff --git a/sound/soc/codecs/wm8728.c b/sound/soc/codecs/wm8728.c
index e7ff2121ede9..16e969a762c3 100644
--- a/sound/soc/codecs/wm8728.c
+++ b/sound/soc/codecs/wm8728.c
@@ -43,45 +43,6 @@ static const u16 wm8728_reg_defaults[] = {
43 0x100, 43 0x100,
44}; 44};
45 45
46static inline unsigned int wm8728_read_reg_cache(struct snd_soc_codec *codec,
47 unsigned int reg)
48{
49 u16 *cache = codec->reg_cache;
50 BUG_ON(reg >= ARRAY_SIZE(wm8728_reg_defaults));
51 return cache[reg];
52}
53
54static inline void wm8728_write_reg_cache(struct snd_soc_codec *codec,
55 u16 reg, unsigned int value)
56{
57 u16 *cache = codec->reg_cache;
58 BUG_ON(reg >= ARRAY_SIZE(wm8728_reg_defaults));
59 cache[reg] = value;
60}
61
62/*
63 * write to the WM8728 register space
64 */
65static int wm8728_write(struct snd_soc_codec *codec, unsigned int reg,
66 unsigned int value)
67{
68 u8 data[2];
69
70 /* data is
71 * D15..D9 WM8728 register offset
72 * D8...D0 register data
73 */
74 data[0] = (reg << 1) | ((value >> 8) & 0x0001);
75 data[1] = value & 0x00ff;
76
77 wm8728_write_reg_cache(codec, reg, value);
78
79 if (codec->hw_write(codec->control_data, data, 2) == 2)
80 return 0;
81 else
82 return -EIO;
83}
84
85static const DECLARE_TLV_DB_SCALE(wm8728_tlv, -12750, 50, 1); 46static const DECLARE_TLV_DB_SCALE(wm8728_tlv, -12750, 50, 1);
86 47
87static const struct snd_kcontrol_new wm8728_snd_controls[] = { 48static const struct snd_kcontrol_new wm8728_snd_controls[] = {
@@ -121,12 +82,12 @@ static int wm8728_add_widgets(struct snd_soc_codec *codec)
121static int wm8728_mute(struct snd_soc_dai *dai, int mute) 82static int wm8728_mute(struct snd_soc_dai *dai, int mute)
122{ 83{
123 struct snd_soc_codec *codec = dai->codec; 84 struct snd_soc_codec *codec = dai->codec;
124 u16 mute_reg = wm8728_read_reg_cache(codec, WM8728_DACCTL); 85 u16 mute_reg = snd_soc_read(codec, WM8728_DACCTL);
125 86
126 if (mute) 87 if (mute)
127 wm8728_write(codec, WM8728_DACCTL, mute_reg | 1); 88 snd_soc_write(codec, WM8728_DACCTL, mute_reg | 1);
128 else 89 else
129 wm8728_write(codec, WM8728_DACCTL, mute_reg & ~1); 90 snd_soc_write(codec, WM8728_DACCTL, mute_reg & ~1);
130 91
131 return 0; 92 return 0;
132} 93}
@@ -138,7 +99,7 @@ static int wm8728_hw_params(struct snd_pcm_substream *substream,
138 struct snd_soc_pcm_runtime *rtd = substream->private_data; 99 struct snd_soc_pcm_runtime *rtd = substream->private_data;
139 struct snd_soc_device *socdev = rtd->socdev; 100 struct snd_soc_device *socdev = rtd->socdev;
140 struct snd_soc_codec *codec = socdev->card->codec; 101 struct snd_soc_codec *codec = socdev->card->codec;
141 u16 dac = wm8728_read_reg_cache(codec, WM8728_DACCTL); 102 u16 dac = snd_soc_read(codec, WM8728_DACCTL);
142 103
143 dac &= ~0x18; 104 dac &= ~0x18;
144 105
@@ -155,7 +116,7 @@ static int wm8728_hw_params(struct snd_pcm_substream *substream,
155 return -EINVAL; 116 return -EINVAL;
156 } 117 }
157 118
158 wm8728_write(codec, WM8728_DACCTL, dac); 119 snd_soc_write(codec, WM8728_DACCTL, dac);
159 120
160 return 0; 121 return 0;
161} 122}
@@ -164,7 +125,7 @@ static int wm8728_set_dai_fmt(struct snd_soc_dai *codec_dai,
164 unsigned int fmt) 125 unsigned int fmt)
165{ 126{
166 struct snd_soc_codec *codec = codec_dai->codec; 127 struct snd_soc_codec *codec = codec_dai->codec;
167 u16 iface = wm8728_read_reg_cache(codec, WM8728_IFCTL); 128 u16 iface = snd_soc_read(codec, WM8728_IFCTL);
168 129
169 /* Currently only I2S is supported by the driver, though the 130 /* Currently only I2S is supported by the driver, though the
170 * hardware is more flexible. 131 * hardware is more flexible.
@@ -204,7 +165,7 @@ static int wm8728_set_dai_fmt(struct snd_soc_dai *codec_dai,
204 return -EINVAL; 165 return -EINVAL;
205 } 166 }
206 167
207 wm8728_write(codec, WM8728_IFCTL, iface); 168 snd_soc_write(codec, WM8728_IFCTL, iface);
208 return 0; 169 return 0;
209} 170}
210 171
@@ -220,19 +181,19 @@ static int wm8728_set_bias_level(struct snd_soc_codec *codec,
220 case SND_SOC_BIAS_STANDBY: 181 case SND_SOC_BIAS_STANDBY:
221 if (codec->bias_level == SND_SOC_BIAS_OFF) { 182 if (codec->bias_level == SND_SOC_BIAS_OFF) {
222 /* Power everything up... */ 183 /* Power everything up... */
223 reg = wm8728_read_reg_cache(codec, WM8728_DACCTL); 184 reg = snd_soc_read(codec, WM8728_DACCTL);
224 wm8728_write(codec, WM8728_DACCTL, reg & ~0x4); 185 snd_soc_write(codec, WM8728_DACCTL, reg & ~0x4);
225 186
226 /* ..then sync in the register cache. */ 187 /* ..then sync in the register cache. */
227 for (i = 0; i < ARRAY_SIZE(wm8728_reg_defaults); i++) 188 for (i = 0; i < ARRAY_SIZE(wm8728_reg_defaults); i++)
228 wm8728_write(codec, i, 189 snd_soc_write(codec, i,
229 wm8728_read_reg_cache(codec, i)); 190 snd_soc_read(codec, i));
230 } 191 }
231 break; 192 break;
232 193
233 case SND_SOC_BIAS_OFF: 194 case SND_SOC_BIAS_OFF:
234 reg = wm8728_read_reg_cache(codec, WM8728_DACCTL); 195 reg = snd_soc_read(codec, WM8728_DACCTL);
235 wm8728_write(codec, WM8728_DACCTL, reg | 0x4); 196 snd_soc_write(codec, WM8728_DACCTL, reg | 0x4);
236 break; 197 break;
237 } 198 }
238 codec->bias_level = level; 199 codec->bias_level = level;
@@ -287,15 +248,14 @@ static int wm8728_resume(struct platform_device *pdev)
287 * initialise the WM8728 driver 248 * initialise the WM8728 driver
288 * register the mixer and dsp interfaces with the kernel 249 * register the mixer and dsp interfaces with the kernel
289 */ 250 */
290static int wm8728_init(struct snd_soc_device *socdev) 251static int wm8728_init(struct snd_soc_device *socdev,
252 enum snd_soc_control_type control)
291{ 253{
292 struct snd_soc_codec *codec = socdev->card->codec; 254 struct snd_soc_codec *codec = socdev->card->codec;
293 int ret = 0; 255 int ret = 0;
294 256
295 codec->name = "WM8728"; 257 codec->name = "WM8728";
296 codec->owner = THIS_MODULE; 258 codec->owner = THIS_MODULE;
297 codec->read = wm8728_read_reg_cache;
298 codec->write = wm8728_write;
299 codec->set_bias_level = wm8728_set_bias_level; 259 codec->set_bias_level = wm8728_set_bias_level;
300 codec->dai = &wm8728_dai; 260 codec->dai = &wm8728_dai;
301 codec->num_dai = 1; 261 codec->num_dai = 1;
@@ -307,11 +267,18 @@ static int wm8728_init(struct snd_soc_device *socdev)
307 if (codec->reg_cache == NULL) 267 if (codec->reg_cache == NULL)
308 return -ENOMEM; 268 return -ENOMEM;
309 269
270 ret = snd_soc_codec_set_cache_io(codec, 7, 9, control);
271 if (ret < 0) {
272 printk(KERN_ERR "wm8728: failed to configure cache I/O: %d\n",
273 ret);
274 goto err;
275 }
276
310 /* register pcms */ 277 /* register pcms */
311 ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1); 278 ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
312 if (ret < 0) { 279 if (ret < 0) {
313 printk(KERN_ERR "wm8728: failed to create pcms\n"); 280 printk(KERN_ERR "wm8728: failed to create pcms\n");
314 goto pcm_err; 281 goto err;
315 } 282 }
316 283
317 /* power on device */ 284 /* power on device */
@@ -331,7 +298,7 @@ static int wm8728_init(struct snd_soc_device *socdev)
331card_err: 298card_err:
332 snd_soc_free_pcms(socdev); 299 snd_soc_free_pcms(socdev);
333 snd_soc_dapm_free(socdev); 300 snd_soc_dapm_free(socdev);
334pcm_err: 301err:
335 kfree(codec->reg_cache); 302 kfree(codec->reg_cache);
336 return ret; 303 return ret;
337} 304}
@@ -357,7 +324,7 @@ static int wm8728_i2c_probe(struct i2c_client *i2c,
357 i2c_set_clientdata(i2c, codec); 324 i2c_set_clientdata(i2c, codec);
358 codec->control_data = i2c; 325 codec->control_data = i2c;
359 326
360 ret = wm8728_init(socdev); 327 ret = wm8728_init(socdev, SND_SOC_I2C);
361 if (ret < 0) 328 if (ret < 0)
362 pr_err("failed to initialise WM8728\n"); 329 pr_err("failed to initialise WM8728\n");
363 330
@@ -437,7 +404,7 @@ static int __devinit wm8728_spi_probe(struct spi_device *spi)
437 404
438 codec->control_data = spi; 405 codec->control_data = spi;
439 406
440 ret = wm8728_init(socdev); 407 ret = wm8728_init(socdev, SND_SOC_SPI);
441 if (ret < 0) 408 if (ret < 0)
442 dev_err(&spi->dev, "failed to initialise WM8728\n"); 409 dev_err(&spi->dev, "failed to initialise WM8728\n");
443 410
@@ -458,30 +425,6 @@ static struct spi_driver wm8728_spi_driver = {
458 .probe = wm8728_spi_probe, 425 .probe = wm8728_spi_probe,
459 .remove = __devexit_p(wm8728_spi_remove), 426 .remove = __devexit_p(wm8728_spi_remove),
460}; 427};
461
462static int wm8728_spi_write(struct spi_device *spi, const char *data, int len)
463{
464 struct spi_transfer t;
465 struct spi_message m;
466 u8 msg[2];
467
468 if (len <= 0)
469 return 0;
470
471 msg[0] = data[0];
472 msg[1] = data[1];
473
474 spi_message_init(&m);
475 memset(&t, 0, (sizeof t));
476
477 t.tx_buf = &msg[0];
478 t.len = len;
479
480 spi_message_add_tail(&t, &m);
481 spi_sync(spi, &m);
482
483 return len;
484}
485#endif /* CONFIG_SPI_MASTER */ 428#endif /* CONFIG_SPI_MASTER */
486 429
487static int wm8728_probe(struct platform_device *pdev) 430static int wm8728_probe(struct platform_device *pdev)
@@ -506,13 +449,11 @@ static int wm8728_probe(struct platform_device *pdev)
506 449
507#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) 450#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
508 if (setup->i2c_address) { 451 if (setup->i2c_address) {
509 codec->hw_write = (hw_write_t)i2c_master_send;
510 ret = wm8728_add_i2c_device(pdev, setup); 452 ret = wm8728_add_i2c_device(pdev, setup);
511 } 453 }
512#endif 454#endif
513#if defined(CONFIG_SPI_MASTER) 455#if defined(CONFIG_SPI_MASTER)
514 if (setup->spi) { 456 if (setup->spi) {
515 codec->hw_write = (hw_write_t)wm8728_spi_write;
516 ret = spi_register_driver(&wm8728_spi_driver); 457 ret = spi_register_driver(&wm8728_spi_driver);
517 if (ret != 0) 458 if (ret != 0)
518 printk(KERN_ERR "can't add spi driver"); 459 printk(KERN_ERR "can't add spi driver");
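
The wm8728 hunks above set the template the rest of this series follows: the driver-private read/write/hw_write plumbing goes away and register I/O is handed to the ASoC cache layer. A minimal sketch of that pattern, not taken verbatim from the patch (example_codec_init is a placeholder name; the snd_soc_* helpers, WM8728_DACCTL and the 7 address / 9 data bit split are the ones used in the hunks above, and the usual <sound/soc.h> includes are assumed):

static int example_codec_init(struct snd_soc_codec *codec,
			      enum snd_soc_control_type control)
{
	u16 reg;
	int ret;

	/* Describe the wire format once: 7 register-address bits,
	 * 9 data bits, carried over I2C or SPI depending on 'control'
	 * (SND_SOC_I2C or SND_SOC_SPI). */
	ret = snd_soc_codec_set_cache_io(codec, 7, 9, control);
	if (ret < 0)
		return ret;

	/* From here on every access goes through the core helpers,
	 * which keep codec->reg_cache coherent automatically. */
	reg = snd_soc_read(codec, WM8728_DACCTL);
	return snd_soc_write(codec, WM8728_DACCTL, reg | 0x4);
}

This is also why the hw_write assignments in the platform probe paths can be removed: the control-bus transport is picked by the SND_SOC_I2C/SND_SOC_SPI argument instead of a function pointer cast.
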
diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c
index 7a205876ef4f..d3fd4f28d96e 100644
--- a/sound/soc/codecs/wm8731.c
+++ b/sound/soc/codecs/wm8731.c
@@ -26,6 +26,7 @@
26#include <sound/soc.h> 26#include <sound/soc.h>
27#include <sound/soc-dapm.h> 27#include <sound/soc-dapm.h>
28#include <sound/initval.h> 28#include <sound/initval.h>
29#include <sound/tlv.h>
29 30
30#include "wm8731.h" 31#include "wm8731.h"
31 32
@@ -39,9 +40,6 @@ struct wm8731_priv {
39 unsigned int sysclk; 40 unsigned int sysclk;
40}; 41};
41 42
42#ifdef CONFIG_SPI_MASTER
43static int wm8731_spi_write(struct spi_device *spi, const char *data, int len);
44#endif
45 43
46/* 44/*
47 * wm8731 register cache 45 * wm8731 register cache
@@ -50,60 +48,12 @@ static int wm8731_spi_write(struct spi_device *spi, const char *data, int len);
50 * There is no point in caching the reset register 48 * There is no point in caching the reset register
51 */ 49 */
52static const u16 wm8731_reg[WM8731_CACHEREGNUM] = { 50static const u16 wm8731_reg[WM8731_CACHEREGNUM] = {
53 0x0097, 0x0097, 0x0079, 0x0079, 51 0x0097, 0x0097, 0x0079, 0x0079,
54 0x000a, 0x0008, 0x009f, 0x000a, 52 0x000a, 0x0008, 0x009f, 0x000a,
55 0x0000, 0x0000 53 0x0000, 0x0000
56}; 54};
57 55
58/* 56#define wm8731_reset(c) snd_soc_write(c, WM8731_RESET, 0)
59 * read wm8731 register cache
60 */
61static inline unsigned int wm8731_read_reg_cache(struct snd_soc_codec *codec,
62 unsigned int reg)
63{
64 u16 *cache = codec->reg_cache;
65 if (reg == WM8731_RESET)
66 return 0;
67 if (reg >= WM8731_CACHEREGNUM)
68 return -1;
69 return cache[reg];
70}
71
72/*
73 * write wm8731 register cache
74 */
75static inline void wm8731_write_reg_cache(struct snd_soc_codec *codec,
76 u16 reg, unsigned int value)
77{
78 u16 *cache = codec->reg_cache;
79 if (reg >= WM8731_CACHEREGNUM)
80 return;
81 cache[reg] = value;
82}
83
84/*
85 * write to the WM8731 register space
86 */
87static int wm8731_write(struct snd_soc_codec *codec, unsigned int reg,
88 unsigned int value)
89{
90 u8 data[2];
91
92 /* data is
93 * D15..D9 WM8731 register offset
94 * D8...D0 register data
95 */
96 data[0] = (reg << 1) | ((value >> 8) & 0x0001);
97 data[1] = value & 0x00ff;
98
99 wm8731_write_reg_cache(codec, reg, value);
100 if (codec->hw_write(codec->control_data, data, 2) == 2)
101 return 0;
102 else
103 return -EIO;
104}
105
106#define wm8731_reset(c) wm8731_write(c, WM8731_RESET, 0)
107 57
108static const char *wm8731_input_select[] = {"Line In", "Mic"}; 58static const char *wm8731_input_select[] = {"Line In", "Mic"};
109static const char *wm8731_deemph[] = {"None", "32Khz", "44.1Khz", "48Khz"}; 59static const char *wm8731_deemph[] = {"None", "32Khz", "44.1Khz", "48Khz"};
@@ -113,20 +63,26 @@ static const struct soc_enum wm8731_enum[] = {
113 SOC_ENUM_SINGLE(WM8731_APDIGI, 1, 4, wm8731_deemph), 63 SOC_ENUM_SINGLE(WM8731_APDIGI, 1, 4, wm8731_deemph),
114}; 64};
115 65
66static const DECLARE_TLV_DB_SCALE(in_tlv, -3450, 150, 0);
67static const DECLARE_TLV_DB_SCALE(sidetone_tlv, -1500, 300, 0);
68static const DECLARE_TLV_DB_SCALE(out_tlv, -12100, 100, 1);
69
116static const struct snd_kcontrol_new wm8731_snd_controls[] = { 70static const struct snd_kcontrol_new wm8731_snd_controls[] = {
117 71
118SOC_DOUBLE_R("Master Playback Volume", WM8731_LOUT1V, WM8731_ROUT1V, 72SOC_DOUBLE_R_TLV("Master Playback Volume", WM8731_LOUT1V, WM8731_ROUT1V,
119 0, 127, 0), 73 0, 127, 0, out_tlv),
120SOC_DOUBLE_R("Master Playback ZC Switch", WM8731_LOUT1V, WM8731_ROUT1V, 74SOC_DOUBLE_R("Master Playback ZC Switch", WM8731_LOUT1V, WM8731_ROUT1V,
121 7, 1, 0), 75 7, 1, 0),
122 76
123SOC_DOUBLE_R("Capture Volume", WM8731_LINVOL, WM8731_RINVOL, 0, 31, 0), 77SOC_DOUBLE_R_TLV("Capture Volume", WM8731_LINVOL, WM8731_RINVOL, 0, 31, 0,
78 in_tlv),
124SOC_DOUBLE_R("Line Capture Switch", WM8731_LINVOL, WM8731_RINVOL, 7, 1, 1), 79SOC_DOUBLE_R("Line Capture Switch", WM8731_LINVOL, WM8731_RINVOL, 7, 1, 1),
125 80
126SOC_SINGLE("Mic Boost (+20dB)", WM8731_APANA, 0, 1, 0), 81SOC_SINGLE("Mic Boost (+20dB)", WM8731_APANA, 0, 1, 0),
127SOC_SINGLE("Capture Mic Switch", WM8731_APANA, 1, 1, 1), 82SOC_SINGLE("Mic Capture Switch", WM8731_APANA, 1, 1, 1),
128 83
129SOC_SINGLE("Sidetone Playback Volume", WM8731_APANA, 6, 3, 1), 84SOC_SINGLE_TLV("Sidetone Playback Volume", WM8731_APANA, 6, 3, 1,
85 sidetone_tlv),
130 86
131SOC_SINGLE("ADC High Pass Filter Switch", WM8731_APDIGI, 0, 1, 1), 87SOC_SINGLE("ADC High Pass Filter Switch", WM8731_APDIGI, 0, 1, 1),
132SOC_SINGLE("Store DC Offset Switch", WM8731_APDIGI, 4, 1, 0), 88SOC_SINGLE("Store DC Offset Switch", WM8731_APDIGI, 4, 1, 0),
@@ -260,12 +216,12 @@ static int wm8731_hw_params(struct snd_pcm_substream *substream,
260 struct snd_soc_device *socdev = rtd->socdev; 216 struct snd_soc_device *socdev = rtd->socdev;
261 struct snd_soc_codec *codec = socdev->card->codec; 217 struct snd_soc_codec *codec = socdev->card->codec;
262 struct wm8731_priv *wm8731 = codec->private_data; 218 struct wm8731_priv *wm8731 = codec->private_data;
263 u16 iface = wm8731_read_reg_cache(codec, WM8731_IFACE) & 0xfff3; 219 u16 iface = snd_soc_read(codec, WM8731_IFACE) & 0xfff3;
264 int i = get_coeff(wm8731->sysclk, params_rate(params)); 220 int i = get_coeff(wm8731->sysclk, params_rate(params));
265 u16 srate = (coeff_div[i].sr << 2) | 221 u16 srate = (coeff_div[i].sr << 2) |
266 (coeff_div[i].bosr << 1) | coeff_div[i].usb; 222 (coeff_div[i].bosr << 1) | coeff_div[i].usb;
267 223
268 wm8731_write(codec, WM8731_SRATE, srate); 224 snd_soc_write(codec, WM8731_SRATE, srate);
269 225
270 /* bit size */ 226 /* bit size */
271 switch (params_format(params)) { 227 switch (params_format(params)) {
@@ -279,7 +235,7 @@ static int wm8731_hw_params(struct snd_pcm_substream *substream,
279 break; 235 break;
280 } 236 }
281 237
282 wm8731_write(codec, WM8731_IFACE, iface); 238 snd_soc_write(codec, WM8731_IFACE, iface);
283 return 0; 239 return 0;
284} 240}
285 241
@@ -291,7 +247,7 @@ static int wm8731_pcm_prepare(struct snd_pcm_substream *substream,
291 struct snd_soc_codec *codec = socdev->card->codec; 247 struct snd_soc_codec *codec = socdev->card->codec;
292 248
293 /* set active */ 249 /* set active */
294 wm8731_write(codec, WM8731_ACTIVE, 0x0001); 250 snd_soc_write(codec, WM8731_ACTIVE, 0x0001);
295 251
296 return 0; 252 return 0;
297} 253}
@@ -306,19 +262,19 @@ static void wm8731_shutdown(struct snd_pcm_substream *substream,
306 /* deactivate */ 262 /* deactivate */
307 if (!codec->active) { 263 if (!codec->active) {
308 udelay(50); 264 udelay(50);
309 wm8731_write(codec, WM8731_ACTIVE, 0x0); 265 snd_soc_write(codec, WM8731_ACTIVE, 0x0);
310 } 266 }
311} 267}
312 268
313static int wm8731_mute(struct snd_soc_dai *dai, int mute) 269static int wm8731_mute(struct snd_soc_dai *dai, int mute)
314{ 270{
315 struct snd_soc_codec *codec = dai->codec; 271 struct snd_soc_codec *codec = dai->codec;
316 u16 mute_reg = wm8731_read_reg_cache(codec, WM8731_APDIGI) & 0xfff7; 272 u16 mute_reg = snd_soc_read(codec, WM8731_APDIGI) & 0xfff7;
317 273
318 if (mute) 274 if (mute)
319 wm8731_write(codec, WM8731_APDIGI, mute_reg | 0x8); 275 snd_soc_write(codec, WM8731_APDIGI, mute_reg | 0x8);
320 else 276 else
321 wm8731_write(codec, WM8731_APDIGI, mute_reg); 277 snd_soc_write(codec, WM8731_APDIGI, mute_reg);
322 return 0; 278 return 0;
323} 279}
324 280
@@ -396,7 +352,7 @@ static int wm8731_set_dai_fmt(struct snd_soc_dai *codec_dai,
396 } 352 }
397 353
398 /* set iface */ 354 /* set iface */
399 wm8731_write(codec, WM8731_IFACE, iface); 355 snd_soc_write(codec, WM8731_IFACE, iface);
400 return 0; 356 return 0;
401} 357}
402 358
@@ -412,12 +368,12 @@ static int wm8731_set_bias_level(struct snd_soc_codec *codec,
412 break; 368 break;
413 case SND_SOC_BIAS_STANDBY: 369 case SND_SOC_BIAS_STANDBY:
414 /* Clear PWROFF, gate CLKOUT, everything else as-is */ 370 /* Clear PWROFF, gate CLKOUT, everything else as-is */
415 reg = wm8731_read_reg_cache(codec, WM8731_PWR) & 0xff7f; 371 reg = snd_soc_read(codec, WM8731_PWR) & 0xff7f;
416 wm8731_write(codec, WM8731_PWR, reg | 0x0040); 372 snd_soc_write(codec, WM8731_PWR, reg | 0x0040);
417 break; 373 break;
418 case SND_SOC_BIAS_OFF: 374 case SND_SOC_BIAS_OFF:
419 wm8731_write(codec, WM8731_ACTIVE, 0x0); 375 snd_soc_write(codec, WM8731_ACTIVE, 0x0);
420 wm8731_write(codec, WM8731_PWR, 0xffff); 376 snd_soc_write(codec, WM8731_PWR, 0xffff);
421 break; 377 break;
422 } 378 }
423 codec->bias_level = level; 379 codec->bias_level = level;
@@ -457,15 +413,17 @@ struct snd_soc_dai wm8731_dai = {
457 .rates = WM8731_RATES, 413 .rates = WM8731_RATES,
458 .formats = WM8731_FORMATS,}, 414 .formats = WM8731_FORMATS,},
459 .ops = &wm8731_dai_ops, 415 .ops = &wm8731_dai_ops,
416 .symmetric_rates = 1,
460}; 417};
461EXPORT_SYMBOL_GPL(wm8731_dai); 418EXPORT_SYMBOL_GPL(wm8731_dai);
462 419
420#ifdef CONFIG_PM
463static int wm8731_suspend(struct platform_device *pdev, pm_message_t state) 421static int wm8731_suspend(struct platform_device *pdev, pm_message_t state)
464{ 422{
465 struct snd_soc_device *socdev = platform_get_drvdata(pdev); 423 struct snd_soc_device *socdev = platform_get_drvdata(pdev);
466 struct snd_soc_codec *codec = socdev->card->codec; 424 struct snd_soc_codec *codec = socdev->card->codec;
467 425
468 wm8731_write(codec, WM8731_ACTIVE, 0x0); 426 snd_soc_write(codec, WM8731_ACTIVE, 0x0);
469 wm8731_set_bias_level(codec, SND_SOC_BIAS_OFF); 427 wm8731_set_bias_level(codec, SND_SOC_BIAS_OFF);
470 return 0; 428 return 0;
471} 429}
@@ -488,6 +446,10 @@ static int wm8731_resume(struct platform_device *pdev)
488 wm8731_set_bias_level(codec, codec->suspend_bias_level); 446 wm8731_set_bias_level(codec, codec->suspend_bias_level);
489 return 0; 447 return 0;
490} 448}
449#else
450#define wm8731_suspend NULL
451#define wm8731_resume NULL
452#endif
491 453
492static int wm8731_probe(struct platform_device *pdev) 454static int wm8731_probe(struct platform_device *pdev)
493{ 455{
@@ -547,15 +509,16 @@ struct snd_soc_codec_device soc_codec_dev_wm8731 = {
547}; 509};
548EXPORT_SYMBOL_GPL(soc_codec_dev_wm8731); 510EXPORT_SYMBOL_GPL(soc_codec_dev_wm8731);
549 511
550static int wm8731_register(struct wm8731_priv *wm8731) 512static int wm8731_register(struct wm8731_priv *wm8731,
513 enum snd_soc_control_type control)
551{ 514{
552 int ret; 515 int ret;
553 struct snd_soc_codec *codec = &wm8731->codec; 516 struct snd_soc_codec *codec = &wm8731->codec;
554 u16 reg;
555 517
556 if (wm8731_codec) { 518 if (wm8731_codec) {
557 dev_err(codec->dev, "Another WM8731 is registered\n"); 519 dev_err(codec->dev, "Another WM8731 is registered\n");
558 return -EINVAL; 520 ret = -EINVAL;
521 goto err;
559 } 522 }
560 523
561 mutex_init(&codec->mutex); 524 mutex_init(&codec->mutex);
@@ -565,8 +528,6 @@ static int wm8731_register(struct wm8731_priv *wm8731)
565 codec->private_data = wm8731; 528 codec->private_data = wm8731;
566 codec->name = "WM8731"; 529 codec->name = "WM8731";
567 codec->owner = THIS_MODULE; 530 codec->owner = THIS_MODULE;
568 codec->read = wm8731_read_reg_cache;
569 codec->write = wm8731_write;
570 codec->bias_level = SND_SOC_BIAS_OFF; 531 codec->bias_level = SND_SOC_BIAS_OFF;
571 codec->set_bias_level = wm8731_set_bias_level; 532 codec->set_bias_level = wm8731_set_bias_level;
572 codec->dai = &wm8731_dai; 533 codec->dai = &wm8731_dai;
@@ -576,10 +537,16 @@ static int wm8731_register(struct wm8731_priv *wm8731)
576 537
577 memcpy(codec->reg_cache, wm8731_reg, sizeof(wm8731_reg)); 538 memcpy(codec->reg_cache, wm8731_reg, sizeof(wm8731_reg));
578 539
540 ret = snd_soc_codec_set_cache_io(codec, 7, 9, control);
541 if (ret < 0) {
542 dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
543 goto err;
544 }
545
579 ret = wm8731_reset(codec); 546 ret = wm8731_reset(codec);
580 if (ret < 0) { 547 if (ret < 0) {
581 dev_err(codec->dev, "Failed to issue reset\n"); 548 dev_err(codec->dev, "Failed to issue reset: %d\n", ret);
582 return ret; 549 goto err;
583 } 550 }
584 551
585 wm8731_dai.dev = codec->dev; 552 wm8731_dai.dev = codec->dev;
@@ -587,35 +554,36 @@ static int wm8731_register(struct wm8731_priv *wm8731)
587 wm8731_set_bias_level(codec, SND_SOC_BIAS_STANDBY); 554 wm8731_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
588 555
589 /* Latch the update bits */ 556 /* Latch the update bits */
590 reg = wm8731_read_reg_cache(codec, WM8731_LOUT1V); 557 snd_soc_update_bits(codec, WM8731_LOUT1V, 0x100, 0);
591 wm8731_write(codec, WM8731_LOUT1V, reg & ~0x0100); 558 snd_soc_update_bits(codec, WM8731_ROUT1V, 0x100, 0);
592 reg = wm8731_read_reg_cache(codec, WM8731_ROUT1V); 559 snd_soc_update_bits(codec, WM8731_LINVOL, 0x100, 0);
593 wm8731_write(codec, WM8731_ROUT1V, reg & ~0x0100); 560 snd_soc_update_bits(codec, WM8731_RINVOL, 0x100, 0);
594 reg = wm8731_read_reg_cache(codec, WM8731_LINVOL);
595 wm8731_write(codec, WM8731_LINVOL, reg & ~0x0100);
596 reg = wm8731_read_reg_cache(codec, WM8731_RINVOL);
597 wm8731_write(codec, WM8731_RINVOL, reg & ~0x0100);
598 561
599 /* Disable bypass path by default */ 562 /* Disable bypass path by default */
600 reg = wm8731_read_reg_cache(codec, WM8731_APANA); 563 snd_soc_update_bits(codec, WM8731_APANA, 0x4, 0);
601 wm8731_write(codec, WM8731_APANA, reg & ~0x4);
602 564
603 wm8731_codec = codec; 565 wm8731_codec = codec;
604 566
605 ret = snd_soc_register_codec(codec); 567 ret = snd_soc_register_codec(codec);
606 if (ret != 0) { 568 if (ret != 0) {
607 dev_err(codec->dev, "Failed to register codec: %d\n", ret); 569 dev_err(codec->dev, "Failed to register codec: %d\n", ret);
608 return ret; 570 goto err;
609 } 571 }
610 572
611 ret = snd_soc_register_dai(&wm8731_dai); 573 ret = snd_soc_register_dai(&wm8731_dai);
612 if (ret != 0) { 574 if (ret != 0) {
613 dev_err(codec->dev, "Failed to register DAI: %d\n", ret); 575 dev_err(codec->dev, "Failed to register DAI: %d\n", ret);
614 snd_soc_unregister_codec(codec); 576 snd_soc_unregister_codec(codec);
615 return ret; 577 goto err_codec;
616 } 578 }
617 579
618 return 0; 580 return 0;
581
582err_codec:
583 snd_soc_unregister_codec(codec);
584err:
585 kfree(wm8731);
586 return ret;
619} 587}
620 588
621static void wm8731_unregister(struct wm8731_priv *wm8731) 589static void wm8731_unregister(struct wm8731_priv *wm8731)
@@ -628,30 +596,6 @@ static void wm8731_unregister(struct wm8731_priv *wm8731)
628} 596}
629 597
630#if defined(CONFIG_SPI_MASTER) 598#if defined(CONFIG_SPI_MASTER)
631static int wm8731_spi_write(struct spi_device *spi, const char *data, int len)
632{
633 struct spi_transfer t;
634 struct spi_message m;
635 u8 msg[2];
636
637 if (len <= 0)
638 return 0;
639
640 msg[0] = data[0];
641 msg[1] = data[1];
642
643 spi_message_init(&m);
644 memset(&t, 0, (sizeof t));
645
646 t.tx_buf = &msg[0];
647 t.len = len;
648
649 spi_message_add_tail(&t, &m);
650 spi_sync(spi, &m);
651
652 return len;
653}
654
655static int __devinit wm8731_spi_probe(struct spi_device *spi) 599static int __devinit wm8731_spi_probe(struct spi_device *spi)
656{ 600{
657 struct snd_soc_codec *codec; 601 struct snd_soc_codec *codec;
@@ -663,12 +607,11 @@ static int __devinit wm8731_spi_probe(struct spi_device *spi)
663 607
664 codec = &wm8731->codec; 608 codec = &wm8731->codec;
665 codec->control_data = spi; 609 codec->control_data = spi;
666 codec->hw_write = (hw_write_t)wm8731_spi_write;
667 codec->dev = &spi->dev; 610 codec->dev = &spi->dev;
668 611
669 dev_set_drvdata(&spi->dev, wm8731); 612 dev_set_drvdata(&spi->dev, wm8731);
670 613
671 return wm8731_register(wm8731); 614 return wm8731_register(wm8731, SND_SOC_SPI);
672} 615}
673 616
674static int __devexit wm8731_spi_remove(struct spi_device *spi) 617static int __devexit wm8731_spi_remove(struct spi_device *spi)
@@ -680,6 +623,21 @@ static int __devexit wm8731_spi_remove(struct spi_device *spi)
680 return 0; 623 return 0;
681} 624}
682 625
626#ifdef CONFIG_PM
627static int wm8731_spi_suspend(struct spi_device *spi, pm_message_t msg)
628{
629 return snd_soc_suspend_device(&spi->dev);
630}
631
632static int wm8731_spi_resume(struct spi_device *spi)
633{
634 return snd_soc_resume_device(&spi->dev);
635}
636#else
637#define wm8731_spi_suspend NULL
638#define wm8731_spi_resume NULL
639#endif
640
683static struct spi_driver wm8731_spi_driver = { 641static struct spi_driver wm8731_spi_driver = {
684 .driver = { 642 .driver = {
685 .name = "wm8731", 643 .name = "wm8731",
@@ -687,6 +645,8 @@ static struct spi_driver wm8731_spi_driver = {
687 .owner = THIS_MODULE, 645 .owner = THIS_MODULE,
688 }, 646 },
689 .probe = wm8731_spi_probe, 647 .probe = wm8731_spi_probe,
648 .suspend = wm8731_spi_suspend,
649 .resume = wm8731_spi_resume,
690 .remove = __devexit_p(wm8731_spi_remove), 650 .remove = __devexit_p(wm8731_spi_remove),
691}; 651};
692#endif /* CONFIG_SPI_MASTER */ 652#endif /* CONFIG_SPI_MASTER */
@@ -703,14 +663,13 @@ static __devinit int wm8731_i2c_probe(struct i2c_client *i2c,
703 return -ENOMEM; 663 return -ENOMEM;
704 664
705 codec = &wm8731->codec; 665 codec = &wm8731->codec;
706 codec->hw_write = (hw_write_t)i2c_master_send;
707 666
708 i2c_set_clientdata(i2c, wm8731); 667 i2c_set_clientdata(i2c, wm8731);
709 codec->control_data = i2c; 668 codec->control_data = i2c;
710 669
711 codec->dev = &i2c->dev; 670 codec->dev = &i2c->dev;
712 671
713 return wm8731_register(wm8731); 672 return wm8731_register(wm8731, SND_SOC_I2C);
714} 673}
715 674
716static __devexit int wm8731_i2c_remove(struct i2c_client *client) 675static __devexit int wm8731_i2c_remove(struct i2c_client *client)
@@ -720,6 +679,21 @@ static __devexit int wm8731_i2c_remove(struct i2c_client *client)
720 return 0; 679 return 0;
721} 680}
722 681
682#ifdef CONFIG_PM
683static int wm8731_i2c_suspend(struct i2c_client *i2c, pm_message_t msg)
684{
685 return snd_soc_suspend_device(&i2c->dev);
686}
687
688static int wm8731_i2c_resume(struct i2c_client *i2c)
689{
690 return snd_soc_resume_device(&i2c->dev);
691}
692#else
693#define wm8731_i2c_suspend NULL
694#define wm8731_i2c_resume NULL
695#endif
696
723static const struct i2c_device_id wm8731_i2c_id[] = { 697static const struct i2c_device_id wm8731_i2c_id[] = {
724 { "wm8731", 0 }, 698 { "wm8731", 0 },
725 { } 699 { }
@@ -733,6 +707,8 @@ static struct i2c_driver wm8731_i2c_driver = {
733 }, 707 },
734 .probe = wm8731_i2c_probe, 708 .probe = wm8731_i2c_probe,
735 .remove = __devexit_p(wm8731_i2c_remove), 709 .remove = __devexit_p(wm8731_i2c_remove),
710 .suspend = wm8731_i2c_suspend,
711 .resume = wm8731_i2c_resume,
736 .id_table = wm8731_i2c_id, 712 .id_table = wm8731_i2c_id,
737}; 713};
738#endif 714#endif
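
Besides the I/O conversion, wm8731 gains TLV metadata on its volume controls. DECLARE_TLV_DB_SCALE() takes (name, min, step, mute) in 0.01 dB units, so out_tlv above describes a range starting at -121.00 dB in 1.00 dB steps with the lowest code muting, and in_tlv starts at -34.50 dB in 1.50 dB steps. A hedged sketch of how such a scale attaches to a control, mirroring the hunk above (example_* names are placeholders; the WM8731_* register defines come from wm8731.h):

#include <sound/tlv.h>

/* -34.50 dB minimum, 1.50 dB per step, no mute at minimum. */
static const DECLARE_TLV_DB_SCALE(example_in_tlv, -3450, 150, 0);

static const struct snd_kcontrol_new example_controls[] = {
	/* left/right register pair, shift 0, max 31, not inverted */
	SOC_DOUBLE_R_TLV("Capture Volume", WM8731_LINVOL, WM8731_RINVOL,
			 0, 31, 0, example_in_tlv),
};

The TLV data lets userspace tools such as alsamixer display the control in dB rather than raw register steps.
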
diff --git a/sound/soc/codecs/wm8750.c b/sound/soc/codecs/wm8750.c
index b64509b01a49..4ba1e7e93fb4 100644
--- a/sound/soc/codecs/wm8750.c
+++ b/sound/soc/codecs/wm8750.c
@@ -55,50 +55,7 @@ static const u16 wm8750_reg[] = {
55 0x0079, 0x0079, 0x0079, /* 40 */ 55 0x0079, 0x0079, 0x0079, /* 40 */
56}; 56};
57 57
58/* 58#define wm8750_reset(c) snd_soc_write(c, WM8750_RESET, 0)
59 * read wm8750 register cache
60 */
61static inline unsigned int wm8750_read_reg_cache(struct snd_soc_codec *codec,
62 unsigned int reg)
63{
64 u16 *cache = codec->reg_cache;
65 if (reg > WM8750_CACHE_REGNUM)
66 return -1;
67 return cache[reg];
68}
69
70/*
71 * write wm8750 register cache
72 */
73static inline void wm8750_write_reg_cache(struct snd_soc_codec *codec,
74 unsigned int reg, unsigned int value)
75{
76 u16 *cache = codec->reg_cache;
77 if (reg > WM8750_CACHE_REGNUM)
78 return;
79 cache[reg] = value;
80}
81
82static int wm8750_write(struct snd_soc_codec *codec, unsigned int reg,
83 unsigned int value)
84{
85 u8 data[2];
86
87 /* data is
88 * D15..D9 WM8753 register offset
89 * D8...D0 register data
90 */
91 data[0] = (reg << 1) | ((value >> 8) & 0x0001);
92 data[1] = value & 0x00ff;
93
94 wm8750_write_reg_cache(codec, reg, value);
95 if (codec->hw_write(codec->control_data, data, 2) == 2)
96 return 0;
97 else
98 return -EIO;
99}
100
101#define wm8750_reset(c) wm8750_write(c, WM8750_RESET, 0)
102 59
103/* 60/*
104 * WM8750 Controls 61 * WM8750 Controls
@@ -594,7 +551,7 @@ static int wm8750_set_dai_fmt(struct snd_soc_dai *codec_dai,
594 return -EINVAL; 551 return -EINVAL;
595 } 552 }
596 553
597 wm8750_write(codec, WM8750_IFACE, iface); 554 snd_soc_write(codec, WM8750_IFACE, iface);
598 return 0; 555 return 0;
599} 556}
600 557
@@ -606,8 +563,8 @@ static int wm8750_pcm_hw_params(struct snd_pcm_substream *substream,
606 struct snd_soc_device *socdev = rtd->socdev; 563 struct snd_soc_device *socdev = rtd->socdev;
607 struct snd_soc_codec *codec = socdev->card->codec; 564 struct snd_soc_codec *codec = socdev->card->codec;
608 struct wm8750_priv *wm8750 = codec->private_data; 565 struct wm8750_priv *wm8750 = codec->private_data;
609 u16 iface = wm8750_read_reg_cache(codec, WM8750_IFACE) & 0x1f3; 566 u16 iface = snd_soc_read(codec, WM8750_IFACE) & 0x1f3;
610 u16 srate = wm8750_read_reg_cache(codec, WM8750_SRATE) & 0x1c0; 567 u16 srate = snd_soc_read(codec, WM8750_SRATE) & 0x1c0;
611 int coeff = get_coeff(wm8750->sysclk, params_rate(params)); 568 int coeff = get_coeff(wm8750->sysclk, params_rate(params));
612 569
613 /* bit size */ 570 /* bit size */
@@ -626,9 +583,9 @@ static int wm8750_pcm_hw_params(struct snd_pcm_substream *substream,
626 } 583 }
627 584
628 /* set iface & srate */ 585 /* set iface & srate */
629 wm8750_write(codec, WM8750_IFACE, iface); 586 snd_soc_write(codec, WM8750_IFACE, iface);
630 if (coeff >= 0) 587 if (coeff >= 0)
631 wm8750_write(codec, WM8750_SRATE, srate | 588 snd_soc_write(codec, WM8750_SRATE, srate |
632 (coeff_div[coeff].sr << 1) | coeff_div[coeff].usb); 589 (coeff_div[coeff].sr << 1) | coeff_div[coeff].usb);
633 590
634 return 0; 591 return 0;
@@ -637,35 +594,35 @@ static int wm8750_pcm_hw_params(struct snd_pcm_substream *substream,
637static int wm8750_mute(struct snd_soc_dai *dai, int mute) 594static int wm8750_mute(struct snd_soc_dai *dai, int mute)
638{ 595{
639 struct snd_soc_codec *codec = dai->codec; 596 struct snd_soc_codec *codec = dai->codec;
640 u16 mute_reg = wm8750_read_reg_cache(codec, WM8750_ADCDAC) & 0xfff7; 597 u16 mute_reg = snd_soc_read(codec, WM8750_ADCDAC) & 0xfff7;
641 598
642 if (mute) 599 if (mute)
643 wm8750_write(codec, WM8750_ADCDAC, mute_reg | 0x8); 600 snd_soc_write(codec, WM8750_ADCDAC, mute_reg | 0x8);
644 else 601 else
645 wm8750_write(codec, WM8750_ADCDAC, mute_reg); 602 snd_soc_write(codec, WM8750_ADCDAC, mute_reg);
646 return 0; 603 return 0;
647} 604}
648 605
649static int wm8750_set_bias_level(struct snd_soc_codec *codec, 606static int wm8750_set_bias_level(struct snd_soc_codec *codec,
650 enum snd_soc_bias_level level) 607 enum snd_soc_bias_level level)
651{ 608{
652 u16 pwr_reg = wm8750_read_reg_cache(codec, WM8750_PWR1) & 0xfe3e; 609 u16 pwr_reg = snd_soc_read(codec, WM8750_PWR1) & 0xfe3e;
653 610
654 switch (level) { 611 switch (level) {
655 case SND_SOC_BIAS_ON: 612 case SND_SOC_BIAS_ON:
656 /* set vmid to 50k and unmute dac */ 613 /* set vmid to 50k and unmute dac */
657 wm8750_write(codec, WM8750_PWR1, pwr_reg | 0x00c0); 614 snd_soc_write(codec, WM8750_PWR1, pwr_reg | 0x00c0);
658 break; 615 break;
659 case SND_SOC_BIAS_PREPARE: 616 case SND_SOC_BIAS_PREPARE:
660 /* set vmid to 5k for quick power up */ 617 /* set vmid to 5k for quick power up */
661 wm8750_write(codec, WM8750_PWR1, pwr_reg | 0x01c1); 618 snd_soc_write(codec, WM8750_PWR1, pwr_reg | 0x01c1);
662 break; 619 break;
663 case SND_SOC_BIAS_STANDBY: 620 case SND_SOC_BIAS_STANDBY:
664 /* mute dac and set vmid to 500k, enable VREF */ 621 /* mute dac and set vmid to 500k, enable VREF */
665 wm8750_write(codec, WM8750_PWR1, pwr_reg | 0x0141); 622 snd_soc_write(codec, WM8750_PWR1, pwr_reg | 0x0141);
666 break; 623 break;
667 case SND_SOC_BIAS_OFF: 624 case SND_SOC_BIAS_OFF:
668 wm8750_write(codec, WM8750_PWR1, 0x0001); 625 snd_soc_write(codec, WM8750_PWR1, 0x0001);
669 break; 626 break;
670 } 627 }
671 codec->bias_level = level; 628 codec->bias_level = level;
@@ -754,15 +711,14 @@ static int wm8750_resume(struct platform_device *pdev)
754 * initialise the WM8750 driver 711 * initialise the WM8750 driver
755 * register the mixer and dsp interfaces with the kernel 712 * register the mixer and dsp interfaces with the kernel
756 */ 713 */
757static int wm8750_init(struct snd_soc_device *socdev) 714static int wm8750_init(struct snd_soc_device *socdev,
715 enum snd_soc_control_type control)
758{ 716{
759 struct snd_soc_codec *codec = socdev->card->codec; 717 struct snd_soc_codec *codec = socdev->card->codec;
760 int reg, ret = 0; 718 int reg, ret = 0;
761 719
762 codec->name = "WM8750"; 720 codec->name = "WM8750";
763 codec->owner = THIS_MODULE; 721 codec->owner = THIS_MODULE;
764 codec->read = wm8750_read_reg_cache;
765 codec->write = wm8750_write;
766 codec->set_bias_level = wm8750_set_bias_level; 722 codec->set_bias_level = wm8750_set_bias_level;
767 codec->dai = &wm8750_dai; 723 codec->dai = &wm8750_dai;
768 codec->num_dai = 1; 724 codec->num_dai = 1;
@@ -771,13 +727,23 @@ static int wm8750_init(struct snd_soc_device *socdev)
771 if (codec->reg_cache == NULL) 727 if (codec->reg_cache == NULL)
772 return -ENOMEM; 728 return -ENOMEM;
773 729
774 wm8750_reset(codec); 730 ret = snd_soc_codec_set_cache_io(codec, 7, 9, control);
731 if (ret < 0) {
732 printk(KERN_ERR "wm8750: failed to set cache I/O: %d\n", ret);
733 goto err;
734 }
735
736 ret = wm8750_reset(codec);
737 if (ret < 0) {
738 printk(KERN_ERR "wm8750: failed to reset: %d\n", ret);
739 goto err;
740 }
775 741
776 /* register pcms */ 742 /* register pcms */
777 ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1); 743 ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
778 if (ret < 0) { 744 if (ret < 0) {
779 printk(KERN_ERR "wm8750: failed to create pcms\n"); 745 printk(KERN_ERR "wm8750: failed to create pcms\n");
780 goto pcm_err; 746 goto err;
781 } 747 }
782 748
783 /* charge output caps */ 749 /* charge output caps */
@@ -786,22 +752,22 @@ static int wm8750_init(struct snd_soc_device *socdev)
786 schedule_delayed_work(&codec->delayed_work, msecs_to_jiffies(1000)); 752 schedule_delayed_work(&codec->delayed_work, msecs_to_jiffies(1000));
787 753
788 /* set the update bits */ 754 /* set the update bits */
789 reg = wm8750_read_reg_cache(codec, WM8750_LDAC); 755 reg = snd_soc_read(codec, WM8750_LDAC);
790 wm8750_write(codec, WM8750_LDAC, reg | 0x0100); 756 snd_soc_write(codec, WM8750_LDAC, reg | 0x0100);
791 reg = wm8750_read_reg_cache(codec, WM8750_RDAC); 757 reg = snd_soc_read(codec, WM8750_RDAC);
792 wm8750_write(codec, WM8750_RDAC, reg | 0x0100); 758 snd_soc_write(codec, WM8750_RDAC, reg | 0x0100);
793 reg = wm8750_read_reg_cache(codec, WM8750_LOUT1V); 759 reg = snd_soc_read(codec, WM8750_LOUT1V);
794 wm8750_write(codec, WM8750_LOUT1V, reg | 0x0100); 760 snd_soc_write(codec, WM8750_LOUT1V, reg | 0x0100);
795 reg = wm8750_read_reg_cache(codec, WM8750_ROUT1V); 761 reg = snd_soc_read(codec, WM8750_ROUT1V);
796 wm8750_write(codec, WM8750_ROUT1V, reg | 0x0100); 762 snd_soc_write(codec, WM8750_ROUT1V, reg | 0x0100);
797 reg = wm8750_read_reg_cache(codec, WM8750_LOUT2V); 763 reg = snd_soc_read(codec, WM8750_LOUT2V);
798 wm8750_write(codec, WM8750_LOUT2V, reg | 0x0100); 764 snd_soc_write(codec, WM8750_LOUT2V, reg | 0x0100);
799 reg = wm8750_read_reg_cache(codec, WM8750_ROUT2V); 765 reg = snd_soc_read(codec, WM8750_ROUT2V);
800 wm8750_write(codec, WM8750_ROUT2V, reg | 0x0100); 766 snd_soc_write(codec, WM8750_ROUT2V, reg | 0x0100);
801 reg = wm8750_read_reg_cache(codec, WM8750_LINVOL); 767 reg = snd_soc_read(codec, WM8750_LINVOL);
802 wm8750_write(codec, WM8750_LINVOL, reg | 0x0100); 768 snd_soc_write(codec, WM8750_LINVOL, reg | 0x0100);
803 reg = wm8750_read_reg_cache(codec, WM8750_RINVOL); 769 reg = snd_soc_read(codec, WM8750_RINVOL);
804 wm8750_write(codec, WM8750_RINVOL, reg | 0x0100); 770 snd_soc_write(codec, WM8750_RINVOL, reg | 0x0100);
805 771
806 snd_soc_add_controls(codec, wm8750_snd_controls, 772 snd_soc_add_controls(codec, wm8750_snd_controls,
807 ARRAY_SIZE(wm8750_snd_controls)); 773 ARRAY_SIZE(wm8750_snd_controls));
@@ -816,7 +782,7 @@ static int wm8750_init(struct snd_soc_device *socdev)
816card_err: 782card_err:
817 snd_soc_free_pcms(socdev); 783 snd_soc_free_pcms(socdev);
818 snd_soc_dapm_free(socdev); 784 snd_soc_dapm_free(socdev);
819pcm_err: 785err:
820 kfree(codec->reg_cache); 786 kfree(codec->reg_cache);
821 return ret; 787 return ret;
822} 788}
@@ -844,7 +810,7 @@ static int wm8750_i2c_probe(struct i2c_client *i2c,
844 i2c_set_clientdata(i2c, codec); 810 i2c_set_clientdata(i2c, codec);
845 codec->control_data = i2c; 811 codec->control_data = i2c;
846 812
847 ret = wm8750_init(socdev); 813 ret = wm8750_init(socdev, SND_SOC_I2C);
848 if (ret < 0) 814 if (ret < 0)
849 pr_err("failed to initialise WM8750\n"); 815 pr_err("failed to initialise WM8750\n");
850 816
@@ -924,7 +890,7 @@ static int __devinit wm8750_spi_probe(struct spi_device *spi)
924 890
925 codec->control_data = spi; 891 codec->control_data = spi;
926 892
927 ret = wm8750_init(socdev); 893 ret = wm8750_init(socdev, SND_SOC_SPI);
928 if (ret < 0) 894 if (ret < 0)
929 dev_err(&spi->dev, "failed to initialise WM8750\n"); 895 dev_err(&spi->dev, "failed to initialise WM8750\n");
930 896
@@ -945,30 +911,6 @@ static struct spi_driver wm8750_spi_driver = {
945 .probe = wm8750_spi_probe, 911 .probe = wm8750_spi_probe,
946 .remove = __devexit_p(wm8750_spi_remove), 912 .remove = __devexit_p(wm8750_spi_remove),
947}; 913};
948
949static int wm8750_spi_write(struct spi_device *spi, const char *data, int len)
950{
951 struct spi_transfer t;
952 struct spi_message m;
953 u8 msg[2];
954
955 if (len <= 0)
956 return 0;
957
958 msg[0] = data[0];
959 msg[1] = data[1];
960
961 spi_message_init(&m);
962 memset(&t, 0, (sizeof t));
963
964 t.tx_buf = &msg[0];
965 t.len = len;
966
967 spi_message_add_tail(&t, &m);
968 spi_sync(spi, &m);
969
970 return len;
971}
972#endif 914#endif
973 915
974static int wm8750_probe(struct platform_device *pdev) 916static int wm8750_probe(struct platform_device *pdev)
@@ -1002,13 +944,11 @@ static int wm8750_probe(struct platform_device *pdev)
1002 944
1003#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) 945#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
1004 if (setup->i2c_address) { 946 if (setup->i2c_address) {
1005 codec->hw_write = (hw_write_t)i2c_master_send;
1006 ret = wm8750_add_i2c_device(pdev, setup); 947 ret = wm8750_add_i2c_device(pdev, setup);
1007 } 948 }
1008#endif 949#endif
1009#if defined(CONFIG_SPI_MASTER) 950#if defined(CONFIG_SPI_MASTER)
1010 if (setup->spi) { 951 if (setup->spi) {
1011 codec->hw_write = (hw_write_t)wm8750_spi_write;
1012 ret = spi_register_driver(&wm8750_spi_driver); 952 ret = spi_register_driver(&wm8750_spi_driver);
1013 if (ret != 0) 953 if (ret != 0)
1014 printk(KERN_ERR "can't add spi driver"); 954 printk(KERN_ERR "can't add spi driver");
diff --git a/sound/soc/codecs/wm8753.c b/sound/soc/codecs/wm8753.c
index 49c4b2898aff..d80d414cfbbd 100644
--- a/sound/soc/codecs/wm8753.c
+++ b/sound/soc/codecs/wm8753.c
@@ -1766,6 +1766,21 @@ static int wm8753_i2c_remove(struct i2c_client *client)
1766 return 0; 1766 return 0;
1767} 1767}
1768 1768
1769#ifdef CONFIG_PM
1770static int wm8753_i2c_suspend(struct i2c_client *client, pm_message_t msg)
1771{
1772 return snd_soc_suspend_device(&client->dev);
1773}
1774
1775static int wm8753_i2c_resume(struct i2c_client *client)
1776{
1777 return snd_soc_resume_device(&client->dev);
1778}
1779#else
1780#define wm8753_i2c_suspend NULL
1781#define wm8753_i2c_resume NULL
1782#endif
1783
1769static const struct i2c_device_id wm8753_i2c_id[] = { 1784static const struct i2c_device_id wm8753_i2c_id[] = {
1770 { "wm8753", 0 }, 1785 { "wm8753", 0 },
1771 { } 1786 { }
@@ -1779,6 +1794,8 @@ static struct i2c_driver wm8753_i2c_driver = {
1779 }, 1794 },
1780 .probe = wm8753_i2c_probe, 1795 .probe = wm8753_i2c_probe,
1781 .remove = wm8753_i2c_remove, 1796 .remove = wm8753_i2c_remove,
1797 .suspend = wm8753_i2c_suspend,
1798 .resume = wm8753_i2c_resume,
1782 .id_table = wm8753_i2c_id, 1799 .id_table = wm8753_i2c_id,
1783}; 1800};
1784#endif 1801#endif
@@ -1834,6 +1851,22 @@ static int __devexit wm8753_spi_remove(struct spi_device *spi)
1834 return 0; 1851 return 0;
1835} 1852}
1836 1853
1854#ifdef CONFIG_PM
1855static int wm8753_spi_suspend(struct spi_device *spi, pm_message_t msg)
1856{
1857 return snd_soc_suspend_device(&spi->dev);
1858}
1859
1860static int wm8753_spi_resume(struct spi_device *spi)
1861{
1862 return snd_soc_resume_device(&spi->dev);
1863}
1864
1865#else
1866#define wm8753_spi_suspend NULL
1867#define wm8753_spi_resume NULL
1868#endif
1869
1837static struct spi_driver wm8753_spi_driver = { 1870static struct spi_driver wm8753_spi_driver = {
1838 .driver = { 1871 .driver = {
1839 .name = "wm8753", 1872 .name = "wm8753",
@@ -1842,6 +1875,8 @@ static struct spi_driver wm8753_spi_driver = {
1842 }, 1875 },
1843 .probe = wm8753_spi_probe, 1876 .probe = wm8753_spi_probe,
1844 .remove = __devexit_p(wm8753_spi_remove), 1877 .remove = __devexit_p(wm8753_spi_remove),
1878 .suspend = wm8753_spi_suspend,
1879 .resume = wm8753_spi_resume,
1845}; 1880};
1846#endif 1881#endif
1847 1882
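
The wm8753 hunks add the same bus-level power management hooks the other codecs in this series gain: the I2C/SPI driver forwards suspend/resume to the ASoC device so the codec's suspend path runs even when it was probed through the control bus. A condensed sketch of the pattern (the example_ driver name is a placeholder; snd_soc_suspend_device/snd_soc_resume_device are the helpers used above):

#ifdef CONFIG_PM
static int example_i2c_suspend(struct i2c_client *i2c, pm_message_t msg)
{
	/* Hand off to ASoC, which walks the card and invokes the
	 * codec-level suspend callback (wm8753_suspend and friends). */
	return snd_soc_suspend_device(&i2c->dev);
}

static int example_i2c_resume(struct i2c_client *i2c)
{
	return snd_soc_resume_device(&i2c->dev);
}
#else
/* Keep the driver struct valid when CONFIG_PM is disabled. */
#define example_i2c_suspend NULL
#define example_i2c_resume NULL
#endif
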
diff --git a/sound/soc/codecs/wm8776.c b/sound/soc/codecs/wm8776.c
new file mode 100644
index 000000000000..a9829aa26e53
--- /dev/null
+++ b/sound/soc/codecs/wm8776.c
@@ -0,0 +1,744 @@
1/*
2 * wm8776.c -- WM8776 ALSA SoC Audio driver
3 *
4 * Copyright 2009 Wolfson Microelectronics plc
5 *
6 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * TODO: Input ALC/limiter support
13 */
14
15#include <linux/module.h>
16#include <linux/moduleparam.h>
17#include <linux/init.h>
18#include <linux/delay.h>
19#include <linux/pm.h>
20#include <linux/i2c.h>
21#include <linux/platform_device.h>
22#include <linux/spi/spi.h>
23#include <sound/core.h>
24#include <sound/pcm.h>
25#include <sound/pcm_params.h>
26#include <sound/soc.h>
27#include <sound/soc-dapm.h>
28#include <sound/initval.h>
29#include <sound/tlv.h>
30
31#include "wm8776.h"
32
33static struct snd_soc_codec *wm8776_codec;
34struct snd_soc_codec_device soc_codec_dev_wm8776;
35
36/* codec private data */
37struct wm8776_priv {
38 struct snd_soc_codec codec;
39 u16 reg_cache[WM8776_CACHEREGNUM];
40 int sysclk[2];
41};
42
43#ifdef CONFIG_SPI_MASTER
44static int wm8776_spi_write(struct spi_device *spi, const char *data, int len);
45#endif
46
47static const u16 wm8776_reg[WM8776_CACHEREGNUM] = {
48 0x79, 0x79, 0x79, 0xff, 0xff, /* 4 */
49 0xff, 0x00, 0x90, 0x00, 0x00, /* 9 */
50 0x22, 0x22, 0x22, 0x08, 0xcf, /* 14 */
51 0xcf, 0x7b, 0x00, 0x32, 0x00, /* 19 */
52 0xa6, 0x01, 0x01
53};
54
55static int wm8776_reset(struct snd_soc_codec *codec)
56{
57 return snd_soc_write(codec, WM8776_RESET, 0);
58}
59
60static const DECLARE_TLV_DB_SCALE(hp_tlv, -12100, 100, 1);
61static const DECLARE_TLV_DB_SCALE(dac_tlv, -12750, 50, 1);
62static const DECLARE_TLV_DB_SCALE(adc_tlv, -10350, 50, 1);
63
64static const struct snd_kcontrol_new wm8776_snd_controls[] = {
65SOC_DOUBLE_R_TLV("Headphone Playback Volume", WM8776_HPLVOL, WM8776_HPRVOL,
66 0, 127, 0, hp_tlv),
67SOC_DOUBLE_R_TLV("Digital Playback Volume", WM8776_DACLVOL, WM8776_DACRVOL,
68 0, 255, 0, dac_tlv),
69SOC_SINGLE("Digital Playback ZC Switch", WM8776_DACCTRL1, 0, 1, 0),
70
71SOC_SINGLE("Deemphasis Switch", WM8776_DACCTRL2, 0, 1, 0),
72
73SOC_DOUBLE_R_TLV("Capture Volume", WM8776_ADCLVOL, WM8776_ADCRVOL,
74 0, 255, 0, adc_tlv),
75SOC_DOUBLE("Capture Switch", WM8776_ADCMUX, 7, 6, 1, 1),
76SOC_DOUBLE_R("Capture ZC Switch", WM8776_ADCLVOL, WM8776_ADCRVOL, 8, 1, 0),
77SOC_SINGLE("Capture HPF Switch", WM8776_ADCIFCTRL, 8, 1, 1),
78};
79
80static const struct snd_kcontrol_new inmix_controls[] = {
81SOC_DAPM_SINGLE("AIN1 Switch", WM8776_ADCMUX, 0, 1, 0),
82SOC_DAPM_SINGLE("AIN2 Switch", WM8776_ADCMUX, 1, 1, 0),
83SOC_DAPM_SINGLE("AIN3 Switch", WM8776_ADCMUX, 2, 1, 0),
84SOC_DAPM_SINGLE("AIN4 Switch", WM8776_ADCMUX, 3, 1, 0),
85SOC_DAPM_SINGLE("AIN5 Switch", WM8776_ADCMUX, 4, 1, 0),
86};
87
88static const struct snd_kcontrol_new outmix_controls[] = {
89SOC_DAPM_SINGLE("DAC Switch", WM8776_OUTMUX, 0, 1, 0),
90SOC_DAPM_SINGLE("AUX Switch", WM8776_OUTMUX, 1, 1, 0),
91SOC_DAPM_SINGLE("Bypass Switch", WM8776_OUTMUX, 2, 1, 0),
92};
93
94static const struct snd_soc_dapm_widget wm8776_dapm_widgets[] = {
95SND_SOC_DAPM_INPUT("AUX"),
96SND_SOC_DAPM_INPUT("AUX"),
97
98SND_SOC_DAPM_INPUT("AIN1"),
99SND_SOC_DAPM_INPUT("AIN2"),
100SND_SOC_DAPM_INPUT("AIN3"),
101SND_SOC_DAPM_INPUT("AIN4"),
102SND_SOC_DAPM_INPUT("AIN5"),
103
104SND_SOC_DAPM_MIXER("Input Mixer", WM8776_PWRDOWN, 6, 1,
105 inmix_controls, ARRAY_SIZE(inmix_controls)),
106
107SND_SOC_DAPM_ADC("ADC", "Capture", WM8776_PWRDOWN, 1, 1),
108SND_SOC_DAPM_DAC("DAC", "Playback", WM8776_PWRDOWN, 2, 1),
109
110SND_SOC_DAPM_MIXER("Output Mixer", SND_SOC_NOPM, 0, 0,
111 outmix_controls, ARRAY_SIZE(outmix_controls)),
112
113SND_SOC_DAPM_PGA("Headphone PGA", WM8776_PWRDOWN, 3, 1, NULL, 0),
114
115SND_SOC_DAPM_OUTPUT("VOUT"),
116
117SND_SOC_DAPM_OUTPUT("HPOUTL"),
118SND_SOC_DAPM_OUTPUT("HPOUTR"),
119};
120
121static const struct snd_soc_dapm_route routes[] = {
122 { "Input Mixer", "AIN1 Switch", "AIN1" },
123 { "Input Mixer", "AIN2 Switch", "AIN2" },
124 { "Input Mixer", "AIN3 Switch", "AIN3" },
125 { "Input Mixer", "AIN4 Switch", "AIN4" },
126 { "Input Mixer", "AIN5 Switch", "AIN5" },
127
128 { "ADC", NULL, "Input Mixer" },
129
130 { "Output Mixer", "DAC Switch", "DAC" },
131 { "Output Mixer", "AUX Switch", "AUX" },
132 { "Output Mixer", "Bypass Switch", "Input Mixer" },
133
134 { "VOUT", NULL, "Output Mixer" },
135
136 { "Headphone PGA", NULL, "Output Mixer" },
137
138 { "HPOUTL", NULL, "Headphone PGA" },
139 { "HPOUTR", NULL, "Headphone PGA" },
140};
141
142static int wm8776_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
143{
144 struct snd_soc_codec *codec = dai->codec;
145 int reg, iface, master;
146
147 switch (dai->id) {
148 case WM8776_DAI_DAC:
149 reg = WM8776_DACIFCTRL;
150 master = 0x80;
151 break;
152 case WM8776_DAI_ADC:
153 reg = WM8776_ADCIFCTRL;
154 master = 0x100;
155 break;
156 default:
157 return -EINVAL;
158 }
159
160 iface = 0;
161
162 switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
163 case SND_SOC_DAIFMT_CBM_CFM:
164 break;
165 case SND_SOC_DAIFMT_CBS_CFS:
166 master = 0;
167 break;
168 default:
169 return -EINVAL;
170 }
171
172 switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
173 case SND_SOC_DAIFMT_I2S:
174 iface |= 0x0002;
175 break;
176 case SND_SOC_DAIFMT_RIGHT_J:
177 break;
178 case SND_SOC_DAIFMT_LEFT_J:
179 iface |= 0x0001;
180 break;
181 /* FIXME: CHECK A/B */
182 case SND_SOC_DAIFMT_DSP_A:
183 iface |= 0x0003;
184 break;
185 case SND_SOC_DAIFMT_DSP_B:
186 iface |= 0x0007;
187 break;
188 default:
189 return -EINVAL;
190 }
191
192 switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
193 case SND_SOC_DAIFMT_NB_NF:
194 break;
195 case SND_SOC_DAIFMT_IB_IF:
196 iface |= 0x00c;
197 break;
198 case SND_SOC_DAIFMT_IB_NF:
199 iface |= 0x008;
200 break;
201 case SND_SOC_DAIFMT_NB_IF:
202 iface |= 0x004;
203 break;
204 default:
205 return -EINVAL;
206 }
207
208 /* Finally, write out the values */
209 snd_soc_update_bits(codec, reg, 0xf, iface);
210 snd_soc_update_bits(codec, WM8776_MSTRCTRL, 0x180, master);
211
212 return 0;
213}
214
215static int mclk_ratios[] = {
216 128,
217 192,
218 256,
219 384,
220 512,
221 768,
222};
223
224static int wm8776_hw_params(struct snd_pcm_substream *substream,
225 struct snd_pcm_hw_params *params,
226 struct snd_soc_dai *dai)
227{
228 struct snd_soc_codec *codec = dai->codec;
229 struct wm8776_priv *wm8776 = codec->private_data;
230 int iface_reg, iface;
231 int ratio_shift, master;
232 int i;
233
234 iface = 0;
235
236 switch (dai->id) {
237 case WM8776_DAI_DAC:
238 iface_reg = WM8776_DACIFCTRL;
239 master = 0x80;
240 ratio_shift = 4;
241 break;
242 case WM8776_DAI_ADC:
243 iface_reg = WM8776_ADCIFCTRL;
244 master = 0x100;
245 ratio_shift = 0;
246 break;
247 default:
248 return -EINVAL;
249 }
250
251
252 /* Set word length */
253 switch (params_format(params)) {
254 case SNDRV_PCM_FORMAT_S16_LE:
255 break;
256 case SNDRV_PCM_FORMAT_S20_3LE:
257 iface |= 0x10;
258 break;
259 case SNDRV_PCM_FORMAT_S24_LE:
260 iface |= 0x20;
261 break;
262 case SNDRV_PCM_FORMAT_S32_LE:
263 iface |= 0x30;
264 break;
265 }
266
267 /* Only need to set MCLK/LRCLK ratio if we're master */
268 if (snd_soc_read(codec, WM8776_MSTRCTRL) & master) {
269 for (i = 0; i < ARRAY_SIZE(mclk_ratios); i++) {
270 if (wm8776->sysclk[dai->id] / params_rate(params)
271 == mclk_ratios[i])
272 break;
273 }
274
275 if (i == ARRAY_SIZE(mclk_ratios)) {
276 dev_err(codec->dev,
277 "Unable to configure MCLK ratio %d/%d\n",
278 wm8776->sysclk[dai->id], params_rate(params));
279 return -EINVAL;
280 }
281
282 dev_dbg(codec->dev, "MCLK is %dfs\n", mclk_ratios[i]);
283
284 snd_soc_update_bits(codec, WM8776_MSTRCTRL,
285 0x7 << ratio_shift, i << ratio_shift);
286 } else {
287 dev_dbg(codec->dev, "DAI in slave mode\n");
288 }
289
290 snd_soc_update_bits(codec, iface_reg, 0x30, iface);
291
292 return 0;
293}
294
295static int wm8776_mute(struct snd_soc_dai *dai, int mute)
296{
297 struct snd_soc_codec *codec = dai->codec;
298
299 return snd_soc_write(codec, WM8776_DACMUTE, !!mute);
300}
301
302static int wm8776_set_sysclk(struct snd_soc_dai *dai,
303 int clk_id, unsigned int freq, int dir)
304{
305 struct snd_soc_codec *codec = dai->codec;
306 struct wm8776_priv *wm8776 = codec->private_data;
307
308 BUG_ON(dai->id >= ARRAY_SIZE(wm8776->sysclk));
309
310 wm8776->sysclk[dai->id] = freq;
311
312 return 0;
313}
314
315static int wm8776_set_bias_level(struct snd_soc_codec *codec,
316 enum snd_soc_bias_level level)
317{
318 switch (level) {
319 case SND_SOC_BIAS_ON:
320 break;
321 case SND_SOC_BIAS_PREPARE:
322 break;
323 case SND_SOC_BIAS_STANDBY:
324 if (codec->bias_level == SND_SOC_BIAS_OFF) {
325 /* Disable the global powerdown; DAPM does the rest */
326 snd_soc_update_bits(codec, WM8776_PWRDOWN, 1, 0);
327 }
328
329 break;
330 case SND_SOC_BIAS_OFF:
331 snd_soc_update_bits(codec, WM8776_PWRDOWN, 1, 1);
332 break;
333 }
334
335 codec->bias_level = level;
336 return 0;
337}
338
339#define WM8776_RATES (SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |\
340 SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 |\
341 SNDRV_PCM_RATE_96000)
342
343
344#define WM8776_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
345 SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
346
347static struct snd_soc_dai_ops wm8776_dac_ops = {
348 .digital_mute = wm8776_mute,
349 .hw_params = wm8776_hw_params,
350 .set_fmt = wm8776_set_fmt,
351 .set_sysclk = wm8776_set_sysclk,
352};
353
354static struct snd_soc_dai_ops wm8776_adc_ops = {
355 .hw_params = wm8776_hw_params,
356 .set_fmt = wm8776_set_fmt,
357 .set_sysclk = wm8776_set_sysclk,
358};
359
360struct snd_soc_dai wm8776_dai[] = {
361 {
362 .name = "WM8776 Playback",
363 .id = WM8776_DAI_DAC,
364 .playback = {
365 .stream_name = "Playback",
366 .channels_min = 2,
367 .channels_max = 2,
368 .rates = WM8776_RATES,
369 .formats = WM8776_FORMATS,
370 },
371 .ops = &wm8776_dac_ops,
372 },
373 {
374 .name = "WM8776 Capture",
375 .id = WM8776_DAI_ADC,
376 .capture = {
377 .stream_name = "Capture",
378 .channels_min = 2,
379 .channels_max = 2,
380 .rates = WM8776_RATES,
381 .formats = WM8776_FORMATS,
382 },
383 .ops = &wm8776_adc_ops,
384 },
385};
386EXPORT_SYMBOL_GPL(wm8776_dai);
387
388#ifdef CONFIG_PM
389static int wm8776_suspend(struct platform_device *pdev, pm_message_t state)
390{
391 struct snd_soc_device *socdev = platform_get_drvdata(pdev);
392 struct snd_soc_codec *codec = socdev->card->codec;
393
394 wm8776_set_bias_level(codec, SND_SOC_BIAS_OFF);
395
396 return 0;
397}
398
399static int wm8776_resume(struct platform_device *pdev)
400{
401 struct snd_soc_device *socdev = platform_get_drvdata(pdev);
402 struct snd_soc_codec *codec = socdev->card->codec;
403 int i;
404 u8 data[2];
405 u16 *cache = codec->reg_cache;
406
407 /* Sync reg_cache with the hardware */
408 for (i = 0; i < ARRAY_SIZE(wm8776_reg); i++) {
409 data[0] = (i << 1) | ((cache[i] >> 8) & 0x0001);
410 data[1] = cache[i] & 0x00ff;
411 codec->hw_write(codec->control_data, data, 2);
412 }
413
414 wm8776_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
415
416 return 0;
417}
418#else
419#define wm8776_suspend NULL
420#define wm8776_resume NULL
421#endif
422
423static int wm8776_probe(struct platform_device *pdev)
424{
425 struct snd_soc_device *socdev = platform_get_drvdata(pdev);
426 struct snd_soc_codec *codec;
427 int ret = 0;
428
429 if (wm8776_codec == NULL) {
430 dev_err(&pdev->dev, "Codec device not registered\n");
431 return -ENODEV;
432 }
433
434 socdev->card->codec = wm8776_codec;
435 codec = wm8776_codec;
436
437 /* register pcms */
438 ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
439 if (ret < 0) {
440 dev_err(codec->dev, "failed to create pcms: %d\n", ret);
441 goto pcm_err;
442 }
443
444 snd_soc_add_controls(codec, wm8776_snd_controls,
445 ARRAY_SIZE(wm8776_snd_controls));
446 snd_soc_dapm_new_controls(codec, wm8776_dapm_widgets,
447 ARRAY_SIZE(wm8776_dapm_widgets));
448 snd_soc_dapm_add_routes(codec, routes, ARRAY_SIZE(routes));
449
450 ret = snd_soc_init_card(socdev);
451 if (ret < 0) {
452 dev_err(codec->dev, "failed to register card: %d\n", ret);
453 goto card_err;
454 }
455
456 return ret;
457
458card_err:
459 snd_soc_free_pcms(socdev);
460 snd_soc_dapm_free(socdev);
461pcm_err:
462 return ret;
463}
464
465/* power down chip */
466static int wm8776_remove(struct platform_device *pdev)
467{
468 struct snd_soc_device *socdev = platform_get_drvdata(pdev);
469
470 snd_soc_free_pcms(socdev);
471 snd_soc_dapm_free(socdev);
472
473 return 0;
474}
475
476struct snd_soc_codec_device soc_codec_dev_wm8776 = {
477 .probe = wm8776_probe,
478 .remove = wm8776_remove,
479 .suspend = wm8776_suspend,
480 .resume = wm8776_resume,
481};
482EXPORT_SYMBOL_GPL(soc_codec_dev_wm8776);
483
484static int wm8776_register(struct wm8776_priv *wm8776,
485 enum snd_soc_control_type control)
486{
487 int ret, i;
488 struct snd_soc_codec *codec = &wm8776->codec;
489
490 if (wm8776_codec) {
491 dev_err(codec->dev, "Another WM8776 is registered\n");
492 ret = -EINVAL;
493 goto err;
494 }
495
496 mutex_init(&codec->mutex);
497 INIT_LIST_HEAD(&codec->dapm_widgets);
498 INIT_LIST_HEAD(&codec->dapm_paths);
499
500 codec->private_data = wm8776;
501 codec->name = "WM8776";
502 codec->owner = THIS_MODULE;
503 codec->bias_level = SND_SOC_BIAS_OFF;
504 codec->set_bias_level = wm8776_set_bias_level;
505 codec->dai = wm8776_dai;
506 codec->num_dai = ARRAY_SIZE(wm8776_dai);
507 codec->reg_cache_size = WM8776_CACHEREGNUM;
508 codec->reg_cache = &wm8776->reg_cache;
509
510 memcpy(codec->reg_cache, wm8776_reg, sizeof(wm8776_reg));
511
512 ret = snd_soc_codec_set_cache_io(codec, 7, 9, control);
513 if (ret < 0) {
514 dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
515 goto err;
516 }
517
518 for (i = 0; i < ARRAY_SIZE(wm8776_dai); i++)
519 wm8776_dai[i].dev = codec->dev;
520
521 ret = wm8776_reset(codec);
522 if (ret < 0) {
523 dev_err(codec->dev, "Failed to issue reset: %d\n", ret);
524 goto err;
525 }
526
527 wm8776_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
528
529 /* Latch the update bits; right channel only since we always
530 * update both. */
531 snd_soc_update_bits(codec, WM8776_HPRVOL, 0x100, 0x100);
532 snd_soc_update_bits(codec, WM8776_DACRVOL, 0x100, 0x100);
533
534 wm8776_codec = codec;
535
536 ret = snd_soc_register_codec(codec);
537 if (ret != 0) {
538 dev_err(codec->dev, "Failed to register codec: %d\n", ret);
539 goto err;
540 }
541
542 ret = snd_soc_register_dais(wm8776_dai, ARRAY_SIZE(wm8776_dai));
543 if (ret != 0) {
544 dev_err(codec->dev, "Failed to register DAIs: %d\n", ret);
545 goto err_codec;
546 }
547
548 return 0;
549
550err_codec:
551 snd_soc_unregister_codec(codec);
552err:
553 kfree(wm8776);
554 return ret;
555}
556
557static void wm8776_unregister(struct wm8776_priv *wm8776)
558{
559 wm8776_set_bias_level(&wm8776->codec, SND_SOC_BIAS_OFF);
560 snd_soc_unregister_dais(wm8776_dai, ARRAY_SIZE(wm8776_dai));
561 snd_soc_unregister_codec(&wm8776->codec);
562 kfree(wm8776);
563 wm8776_codec = NULL;
564}
565
566#if defined(CONFIG_SPI_MASTER)
567static int wm8776_spi_write(struct spi_device *spi, const char *data, int len)
568{
569 struct spi_transfer t;
570 struct spi_message m;
571 u8 msg[2];
572
573 if (len <= 0)
574 return 0;
575
576 msg[0] = data[0];
577 msg[1] = data[1];
578
579 spi_message_init(&m);
580 memset(&t, 0, (sizeof t));
581
582 t.tx_buf = &msg[0];
583 t.len = len;
584
585 spi_message_add_tail(&t, &m);
586 spi_sync(spi, &m);
587
588 return len;
589}
590
591static int __devinit wm8776_spi_probe(struct spi_device *spi)
592{
593 struct snd_soc_codec *codec;
594 struct wm8776_priv *wm8776;
595
596 wm8776 = kzalloc(sizeof(struct wm8776_priv), GFP_KERNEL);
597 if (wm8776 == NULL)
598 return -ENOMEM;
599
600 codec = &wm8776->codec;
601 codec->control_data = spi;
602 codec->hw_write = (hw_write_t)wm8776_spi_write;
603 codec->dev = &spi->dev;
604
605 dev_set_drvdata(&spi->dev, wm8776);
606
607 return wm8776_register(wm8776, SND_SOC_SPI);
608}
609
610static int __devexit wm8776_spi_remove(struct spi_device *spi)
611{
612 struct wm8776_priv *wm8776 = dev_get_drvdata(&spi->dev);
613
614 wm8776_unregister(wm8776);
615
616 return 0;
617}
618
619#ifdef CONFIG_PM
620static int wm8776_spi_suspend(struct spi_device *spi, pm_message_t msg)
621{
622 return snd_soc_suspend_device(&spi->dev);
623}
624
625static int wm8776_spi_resume(struct spi_device *spi)
626{
627 return snd_soc_resume_device(&spi->dev);
628}
629#else
630#define wm8776_spi_suspend NULL
631#define wm8776_spi_resume NULL
632#endif
633
634static struct spi_driver wm8776_spi_driver = {
635 .driver = {
636 .name = "wm8776",
637 .bus = &spi_bus_type,
638 .owner = THIS_MODULE,
639 },
640 .probe = wm8776_spi_probe,
641 .suspend = wm8776_spi_suspend,
642 .resume = wm8776_spi_resume,
643 .remove = __devexit_p(wm8776_spi_remove),
644};
645#endif /* CONFIG_SPI_MASTER */
646
647#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
648static __devinit int wm8776_i2c_probe(struct i2c_client *i2c,
649 const struct i2c_device_id *id)
650{
651 struct wm8776_priv *wm8776;
652 struct snd_soc_codec *codec;
653
654 wm8776 = kzalloc(sizeof(struct wm8776_priv), GFP_KERNEL);
655 if (wm8776 == NULL)
656 return -ENOMEM;
657
658 codec = &wm8776->codec;
659 codec->hw_write = (hw_write_t)i2c_master_send;
660
661 i2c_set_clientdata(i2c, wm8776);
662 codec->control_data = i2c;
663
664 codec->dev = &i2c->dev;
665
666 return wm8776_register(wm8776, SND_SOC_I2C);
667}
668
669static __devexit int wm8776_i2c_remove(struct i2c_client *client)
670{
671 struct wm8776_priv *wm8776 = i2c_get_clientdata(client);
672 wm8776_unregister(wm8776);
673 return 0;
674}
675
676#ifdef CONFIG_PM
677static int wm8776_i2c_suspend(struct i2c_client *i2c, pm_message_t msg)
678{
679 return snd_soc_suspend_device(&i2c->dev);
680}
681
682static int wm8776_i2c_resume(struct i2c_client *i2c)
683{
684 return snd_soc_resume_device(&i2c->dev);
685}
686#else
687#define wm8776_i2c_suspend NULL
688#define wm8776_i2c_resume NULL
689#endif
690
691static const struct i2c_device_id wm8776_i2c_id[] = {
692 { "wm8776", 0 },
693 { }
694};
695MODULE_DEVICE_TABLE(i2c, wm8776_i2c_id);
696
697static struct i2c_driver wm8776_i2c_driver = {
698 .driver = {
699 .name = "wm8776",
700 .owner = THIS_MODULE,
701 },
702 .probe = wm8776_i2c_probe,
703 .remove = __devexit_p(wm8776_i2c_remove),
704 .suspend = wm8776_i2c_suspend,
705 .resume = wm8776_i2c_resume,
706 .id_table = wm8776_i2c_id,
707};
708#endif
709
710static int __init wm8776_modinit(void)
711{
712 int ret;
713#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
714 ret = i2c_add_driver(&wm8776_i2c_driver);
715 if (ret != 0) {
716 printk(KERN_ERR "Failed to register WM8776 I2C driver: %d\n",
717 ret);
718 }
719#endif
720#if defined(CONFIG_SPI_MASTER)
721 ret = spi_register_driver(&wm8776_spi_driver);
722 if (ret != 0) {
723 printk(KERN_ERR "Failed to register WM8776 SPI driver: %d\n",
724 ret);
725 }
726#endif
727 return 0;
728}
729module_init(wm8776_modinit);
730
731static void __exit wm8776_exit(void)
732{
733#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
734 i2c_del_driver(&wm8776_i2c_driver);
735#endif
736#if defined(CONFIG_SPI_MASTER)
737 spi_unregister_driver(&wm8776_spi_driver);
738#endif
739}
740module_exit(wm8776_exit);
741
742MODULE_DESCRIPTION("ASoC WM8776 driver");
743MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
744MODULE_LICENSE("GPL");
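
wm8776_hw_params() above only programs the MCLK/LRCLK ratio when the codec is clock master: it divides the sysclk stored by wm8776_set_sysclk() by the stream rate and looks the result up in mclk_ratios[]. A small worked sketch of that lookup, under the assumption of a 12.288 MHz MCLK and a 48 kHz stream (12288000 / 48000 = 256, which is index 2; example_pick_mclk_ratio is a placeholder, not a function from the patch):

static const int mclk_ratios[] = { 128, 192, 256, 384, 512, 768 };

/* Returns the mclk_ratios[] index to write at ratio_shift in
 * WM8776_MSTRCTRL, or -EINVAL if the ratio is unsupported. */
static int example_pick_mclk_ratio(unsigned int sysclk, unsigned int rate)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mclk_ratios); i++)
		if (sysclk / rate == mclk_ratios[i])
			return i;	/* e.g. 12288000 / 48000 = 256 -> 2 */

	return -EINVAL;
}
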
diff --git a/sound/soc/codecs/wm8776.h b/sound/soc/codecs/wm8776.h
new file mode 100644
index 000000000000..6606d25d2d83
--- /dev/null
+++ b/sound/soc/codecs/wm8776.h
@@ -0,0 +1,51 @@
1/*
2 * wm8776.h -- WM8776 ASoC driver
3 *
4 * Copyright 2009 Wolfson Microelectronics plc
5 *
6 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#ifndef _WM8776_H
14#define _WM8776_H
15
16/* Registers */
17
18#define WM8776_HPLVOL 0x00
19#define WM8776_HPRVOL 0x01
20#define WM8776_HPMASTER 0x02
21#define WM8776_DACLVOL 0x03
22#define WM8776_DACRVOL 0x04
23#define WM8776_DACMASTER 0x05
24#define WM8776_PHASESWAP 0x06
25#define WM8776_DACCTRL1 0x07
26#define WM8776_DACMUTE 0x08
27#define WM8776_DACCTRL2 0x09
28#define WM8776_DACIFCTRL 0x0a
29#define WM8776_ADCIFCTRL 0x0b
30#define WM8776_MSTRCTRL 0x0c
31#define WM8776_PWRDOWN 0x0d
32#define WM8776_ADCLVOL 0x0e
33#define WM8776_ADCRVOL 0x0f
34#define WM8776_ALCCTRL1 0x10
35#define WM8776_ALCCTRL2 0x11
36#define WM8776_ALCCTRL3 0x12
37#define WM8776_NOISEGATE 0x13
38#define WM8776_LIMITER 0x14
39#define WM8776_ADCMUX 0x15
40#define WM8776_OUTMUX 0x16
41#define WM8776_RESET 0x17
42
43#define WM8776_CACHEREGNUM 0x17
44
45#define WM8776_DAI_DAC 0
46#define WM8776_DAI_ADC 1
47
48extern struct snd_soc_dai wm8776_dai[];
49extern struct snd_soc_codec_device soc_codec_dev_wm8776;
50
51#endif
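
The register map above is the usual Wolfson 7-bit address / 9-bit data layout, which is why every conversion in this series calls snd_soc_codec_set_cache_io(codec, 7, 9, control). The per-driver write helpers removed earlier in the diff packed a write into two bytes by hand; a sketch of that wire format for reference (example_pack_write is a placeholder, the packing itself is copied from the removed code):

/* D15..D9 carry the register address, D8..D0 the value. */
static void example_pack_write(unsigned int reg, unsigned int value,
			       u8 buf[2])
{
	buf[0] = (reg << 1) | ((value >> 8) & 0x0001);
	buf[1] = value & 0x00ff;
}
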
diff --git a/sound/soc/codecs/wm8900.c b/sound/soc/codecs/wm8900.c
index 3c78945244b8..5e9c855c0036 100644
--- a/sound/soc/codecs/wm8900.c
+++ b/sound/soc/codecs/wm8900.c
@@ -116,6 +116,7 @@
116#define WM8900_REG_CLOCKING2_DAC_CLKDIV 0x1c 116#define WM8900_REG_CLOCKING2_DAC_CLKDIV 0x1c
117 117
118#define WM8900_REG_DACCTRL_MUTE 0x004 118#define WM8900_REG_DACCTRL_MUTE 0x004
119#define WM8900_REG_DACCTRL_DAC_SB_FILT 0x100
119#define WM8900_REG_DACCTRL_AIF_LRCLKRATE 0x400 120#define WM8900_REG_DACCTRL_AIF_LRCLKRATE 0x400
120 121
121#define WM8900_REG_AUDIO3_ADCLRC_DIR 0x0800 122#define WM8900_REG_AUDIO3_ADCLRC_DIR 0x0800
@@ -182,111 +183,20 @@ static const u16 wm8900_reg_defaults[WM8900_MAXREG] = {
182 /* Remaining registers all zero */ 183 /* Remaining registers all zero */
183}; 184};
184 185
185/* 186static int wm8900_volatile_register(unsigned int reg)
186 * read wm8900 register cache
187 */
188static inline unsigned int wm8900_read_reg_cache(struct snd_soc_codec *codec,
189 unsigned int reg)
190{
191 u16 *cache = codec->reg_cache;
192
193 BUG_ON(reg >= WM8900_MAXREG);
194
195 if (reg == WM8900_REG_ID)
196 return 0;
197
198 return cache[reg];
199}
200
201/*
202 * write wm8900 register cache
203 */
204static inline void wm8900_write_reg_cache(struct snd_soc_codec *codec,
205 u16 reg, unsigned int value)
206{
207 u16 *cache = codec->reg_cache;
208
209 BUG_ON(reg >= WM8900_MAXREG);
210
211 cache[reg] = value;
212}
213
214/*
215 * write to the WM8900 register space
216 */
217static int wm8900_write(struct snd_soc_codec *codec, unsigned int reg,
218 unsigned int value)
219{
220 u8 data[3];
221
222 if (value == wm8900_read_reg_cache(codec, reg))
223 return 0;
224
225 /* data is
226 * D15..D9 WM8900 register offset
227 * D8...D0 register data
228 */
229 data[0] = reg;
230 data[1] = value >> 8;
231 data[2] = value & 0x00ff;
232
233 wm8900_write_reg_cache(codec, reg, value);
234 if (codec->hw_write(codec->control_data, data, 3) == 3)
235 return 0;
236 else
237 return -EIO;
238}
239
240/*
241 * Read from the wm8900.
242 */
243static unsigned int wm8900_chip_read(struct snd_soc_codec *codec, u8 reg)
244{
245 struct i2c_msg xfer[2];
246 u16 data;
247 int ret;
248 struct i2c_client *client = codec->control_data;
249
250 BUG_ON(reg != WM8900_REG_ID && reg != WM8900_REG_POWER1);
251
252 /* Write register */
253 xfer[0].addr = client->addr;
254 xfer[0].flags = 0;
255 xfer[0].len = 1;
256 xfer[0].buf = &reg;
257
258 /* Read data */
259 xfer[1].addr = client->addr;
260 xfer[1].flags = I2C_M_RD;
261 xfer[1].len = 2;
262 xfer[1].buf = (u8 *)&data;
263
264 ret = i2c_transfer(client->adapter, xfer, 2);
265 if (ret != 2) {
266 printk(KERN_CRIT "i2c_transfer returned %d\n", ret);
267 return 0;
268 }
269
270 return (data >> 8) | ((data & 0xff) << 8);
271}
272
273/*
274 * Read from the WM8900 register space. Most registers can't be read
275 * and are therefore supplied from cache.
276 */
277static unsigned int wm8900_read(struct snd_soc_codec *codec, unsigned int reg)
278{ 187{
279 switch (reg) { 188 switch (reg) {
280 case WM8900_REG_ID: 189 case WM8900_REG_ID:
281 return wm8900_chip_read(codec, reg); 190 case WM8900_REG_POWER1:
191 return 1;
282 default: 192 default:
283 return wm8900_read_reg_cache(codec, reg); 193 return 0;
284 } 194 }
285} 195}
286 196
287static void wm8900_reset(struct snd_soc_codec *codec) 197static void wm8900_reset(struct snd_soc_codec *codec)
288{ 198{
289 wm8900_write(codec, WM8900_REG_RESET, 0); 199 snd_soc_write(codec, WM8900_REG_RESET, 0);
290 200
291 memcpy(codec->reg_cache, wm8900_reg_defaults, 201 memcpy(codec->reg_cache, wm8900_reg_defaults,
292 sizeof(codec->reg_cache)); 202 sizeof(codec->reg_cache));
@@ -296,14 +206,14 @@ static int wm8900_hp_event(struct snd_soc_dapm_widget *w,
296 struct snd_kcontrol *kcontrol, int event) 206 struct snd_kcontrol *kcontrol, int event)
297{ 207{
298 struct snd_soc_codec *codec = w->codec; 208 struct snd_soc_codec *codec = w->codec;
299 u16 hpctl1 = wm8900_read(codec, WM8900_REG_HPCTL1); 209 u16 hpctl1 = snd_soc_read(codec, WM8900_REG_HPCTL1);
300 210
301 switch (event) { 211 switch (event) {
302 case SND_SOC_DAPM_PRE_PMU: 212 case SND_SOC_DAPM_PRE_PMU:
303 /* Clamp headphone outputs */ 213 /* Clamp headphone outputs */
304 hpctl1 = WM8900_REG_HPCTL1_HP_CLAMP_IP | 214 hpctl1 = WM8900_REG_HPCTL1_HP_CLAMP_IP |
305 WM8900_REG_HPCTL1_HP_CLAMP_OP; 215 WM8900_REG_HPCTL1_HP_CLAMP_OP;
306 wm8900_write(codec, WM8900_REG_HPCTL1, hpctl1); 216 snd_soc_write(codec, WM8900_REG_HPCTL1, hpctl1);
307 break; 217 break;
308 218
309 case SND_SOC_DAPM_POST_PMU: 219 case SND_SOC_DAPM_POST_PMU:
@@ -312,41 +222,41 @@ static int wm8900_hp_event(struct snd_soc_dapm_widget *w,
312 hpctl1 |= WM8900_REG_HPCTL1_HP_SHORT | 222 hpctl1 |= WM8900_REG_HPCTL1_HP_SHORT |
313 WM8900_REG_HPCTL1_HP_SHORT2 | 223 WM8900_REG_HPCTL1_HP_SHORT2 |
314 WM8900_REG_HPCTL1_HP_IPSTAGE_ENA; 224 WM8900_REG_HPCTL1_HP_IPSTAGE_ENA;
315 wm8900_write(codec, WM8900_REG_HPCTL1, hpctl1); 225 snd_soc_write(codec, WM8900_REG_HPCTL1, hpctl1);
316 226
317 msleep(400); 227 msleep(400);
318 228
319 /* Enable the output stage */ 229 /* Enable the output stage */
320 hpctl1 &= ~WM8900_REG_HPCTL1_HP_CLAMP_OP; 230 hpctl1 &= ~WM8900_REG_HPCTL1_HP_CLAMP_OP;
321 hpctl1 |= WM8900_REG_HPCTL1_HP_OPSTAGE_ENA; 231 hpctl1 |= WM8900_REG_HPCTL1_HP_OPSTAGE_ENA;
322 wm8900_write(codec, WM8900_REG_HPCTL1, hpctl1); 232 snd_soc_write(codec, WM8900_REG_HPCTL1, hpctl1);
323 233
324 /* Remove the shorts */ 234 /* Remove the shorts */
325 hpctl1 &= ~WM8900_REG_HPCTL1_HP_SHORT2; 235 hpctl1 &= ~WM8900_REG_HPCTL1_HP_SHORT2;
326 wm8900_write(codec, WM8900_REG_HPCTL1, hpctl1); 236 snd_soc_write(codec, WM8900_REG_HPCTL1, hpctl1);
327 hpctl1 &= ~WM8900_REG_HPCTL1_HP_SHORT; 237 hpctl1 &= ~WM8900_REG_HPCTL1_HP_SHORT;
328 wm8900_write(codec, WM8900_REG_HPCTL1, hpctl1); 238 snd_soc_write(codec, WM8900_REG_HPCTL1, hpctl1);
329 break; 239 break;
330 240
331 case SND_SOC_DAPM_PRE_PMD: 241 case SND_SOC_DAPM_PRE_PMD:
332 /* Short the output */ 242 /* Short the output */
333 hpctl1 |= WM8900_REG_HPCTL1_HP_SHORT; 243 hpctl1 |= WM8900_REG_HPCTL1_HP_SHORT;
334 wm8900_write(codec, WM8900_REG_HPCTL1, hpctl1); 244 snd_soc_write(codec, WM8900_REG_HPCTL1, hpctl1);
335 245
336 /* Disable the output stage */ 246 /* Disable the output stage */
337 hpctl1 &= ~WM8900_REG_HPCTL1_HP_OPSTAGE_ENA; 247 hpctl1 &= ~WM8900_REG_HPCTL1_HP_OPSTAGE_ENA;
338 wm8900_write(codec, WM8900_REG_HPCTL1, hpctl1); 248 snd_soc_write(codec, WM8900_REG_HPCTL1, hpctl1);
339 249
340 /* Clamp the outputs and power down input */ 250 /* Clamp the outputs and power down input */
341 hpctl1 |= WM8900_REG_HPCTL1_HP_CLAMP_IP | 251 hpctl1 |= WM8900_REG_HPCTL1_HP_CLAMP_IP |
342 WM8900_REG_HPCTL1_HP_CLAMP_OP; 252 WM8900_REG_HPCTL1_HP_CLAMP_OP;
343 hpctl1 &= ~WM8900_REG_HPCTL1_HP_IPSTAGE_ENA; 253 hpctl1 &= ~WM8900_REG_HPCTL1_HP_IPSTAGE_ENA;
344 wm8900_write(codec, WM8900_REG_HPCTL1, hpctl1); 254 snd_soc_write(codec, WM8900_REG_HPCTL1, hpctl1);
345 break; 255 break;
346 256
347 case SND_SOC_DAPM_POST_PMD: 257 case SND_SOC_DAPM_POST_PMD:
348 /* Disable everything */ 258 /* Disable everything */
349 wm8900_write(codec, WM8900_REG_HPCTL1, 0); 259 snd_soc_write(codec, WM8900_REG_HPCTL1, 0);
350 break; 260 break;
351 261
352 default: 262 default:
@@ -439,7 +349,6 @@ SOC_SINGLE("DAC Soft Mute Switch", WM8900_REG_DACCTRL, 6, 1, 1),
439SOC_ENUM("DAC Mute Rate", dac_mute_rate), 349SOC_ENUM("DAC Mute Rate", dac_mute_rate),
440SOC_SINGLE("DAC Mono Switch", WM8900_REG_DACCTRL, 9, 1, 0), 350SOC_SINGLE("DAC Mono Switch", WM8900_REG_DACCTRL, 9, 1, 0),
441SOC_ENUM("DAC Deemphasis", dac_deemphasis), 351SOC_ENUM("DAC Deemphasis", dac_deemphasis),
442SOC_SINGLE("DAC Sloping Stopband Filter Switch", WM8900_REG_DACCTRL, 8, 1, 0),
443SOC_SINGLE("DAC Sigma-Delta Modulator Clock Switch", WM8900_REG_DACCTRL, 352SOC_SINGLE("DAC Sigma-Delta Modulator Clock Switch", WM8900_REG_DACCTRL,
444 12, 1, 0), 353 12, 1, 0),
445 354
@@ -723,7 +632,7 @@ static int wm8900_hw_params(struct snd_pcm_substream *substream,
723 struct snd_soc_codec *codec = socdev->card->codec; 632 struct snd_soc_codec *codec = socdev->card->codec;
724 u16 reg; 633 u16 reg;
725 634
726 reg = wm8900_read(codec, WM8900_REG_AUDIO1) & ~0x60; 635 reg = snd_soc_read(codec, WM8900_REG_AUDIO1) & ~0x60;
727 636
728 switch (params_format(params)) { 637 switch (params_format(params)) {
729 case SNDRV_PCM_FORMAT_S16_LE: 638 case SNDRV_PCM_FORMAT_S16_LE:
@@ -741,7 +650,18 @@ static int wm8900_hw_params(struct snd_pcm_substream *substream,
741 return -EINVAL; 650 return -EINVAL;
742 } 651 }
743 652
744 wm8900_write(codec, WM8900_REG_AUDIO1, reg); 653 snd_soc_write(codec, WM8900_REG_AUDIO1, reg);
654
655 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
656 reg = snd_soc_read(codec, WM8900_REG_DACCTRL);
657
658 if (params_rate(params) <= 24000)
659 reg |= WM8900_REG_DACCTRL_DAC_SB_FILT;
660 else
661 reg &= ~WM8900_REG_DACCTRL_DAC_SB_FILT;
662
663 snd_soc_write(codec, WM8900_REG_DACCTRL, reg);
664 }
745 665
746 return 0; 666 return 0;
747} 667}
@@ -834,18 +754,18 @@ static int wm8900_set_fll(struct snd_soc_codec *codec,
834 return 0; 754 return 0;
835 755
836 /* The digital side should be disabled during any change. */ 756 /* The digital side should be disabled during any change. */
837 reg = wm8900_read(codec, WM8900_REG_POWER1); 757 reg = snd_soc_read(codec, WM8900_REG_POWER1);
838 wm8900_write(codec, WM8900_REG_POWER1, 758 snd_soc_write(codec, WM8900_REG_POWER1,
839 reg & (~WM8900_REG_POWER1_FLL_ENA)); 759 reg & (~WM8900_REG_POWER1_FLL_ENA));
840 760
841 /* Disable the FLL? */ 761 /* Disable the FLL? */
842 if (!freq_in || !freq_out) { 762 if (!freq_in || !freq_out) {
843 reg = wm8900_read(codec, WM8900_REG_CLOCKING1); 763 reg = snd_soc_read(codec, WM8900_REG_CLOCKING1);
844 wm8900_write(codec, WM8900_REG_CLOCKING1, 764 snd_soc_write(codec, WM8900_REG_CLOCKING1,
845 reg & (~WM8900_REG_CLOCKING1_MCLK_SRC)); 765 reg & (~WM8900_REG_CLOCKING1_MCLK_SRC));
846 766
847 reg = wm8900_read(codec, WM8900_REG_FLLCTL1); 767 reg = snd_soc_read(codec, WM8900_REG_FLLCTL1);
848 wm8900_write(codec, WM8900_REG_FLLCTL1, 768 snd_soc_write(codec, WM8900_REG_FLLCTL1,
849 reg & (~WM8900_REG_FLLCTL1_OSC_ENA)); 769 reg & (~WM8900_REG_FLLCTL1_OSC_ENA));
850 770
851 wm8900->fll_in = freq_in; 771 wm8900->fll_in = freq_in;
@@ -862,33 +782,33 @@ static int wm8900_set_fll(struct snd_soc_codec *codec,
862 782
863 /* The osclilator *MUST* be enabled before we enable the 783 /* The osclilator *MUST* be enabled before we enable the
864 * digital circuit. */ 784 * digital circuit. */
865 wm8900_write(codec, WM8900_REG_FLLCTL1, 785 snd_soc_write(codec, WM8900_REG_FLLCTL1,
866 fll_div.fll_ratio | WM8900_REG_FLLCTL1_OSC_ENA); 786 fll_div.fll_ratio | WM8900_REG_FLLCTL1_OSC_ENA);
867 787
868 wm8900_write(codec, WM8900_REG_FLLCTL4, fll_div.n >> 5); 788 snd_soc_write(codec, WM8900_REG_FLLCTL4, fll_div.n >> 5);
869 wm8900_write(codec, WM8900_REG_FLLCTL5, 789 snd_soc_write(codec, WM8900_REG_FLLCTL5,
870 (fll_div.fllclk_div << 6) | (fll_div.n & 0x1f)); 790 (fll_div.fllclk_div << 6) | (fll_div.n & 0x1f));
871 791
872 if (fll_div.k) { 792 if (fll_div.k) {
873 wm8900_write(codec, WM8900_REG_FLLCTL2, 793 snd_soc_write(codec, WM8900_REG_FLLCTL2,
874 (fll_div.k >> 8) | 0x100); 794 (fll_div.k >> 8) | 0x100);
875 wm8900_write(codec, WM8900_REG_FLLCTL3, fll_div.k & 0xff); 795 snd_soc_write(codec, WM8900_REG_FLLCTL3, fll_div.k & 0xff);
876 } else 796 } else
877 wm8900_write(codec, WM8900_REG_FLLCTL2, 0); 797 snd_soc_write(codec, WM8900_REG_FLLCTL2, 0);
878 798
879 if (fll_div.fll_slow_lock_ref) 799 if (fll_div.fll_slow_lock_ref)
880 wm8900_write(codec, WM8900_REG_FLLCTL6, 800 snd_soc_write(codec, WM8900_REG_FLLCTL6,
881 WM8900_REG_FLLCTL6_FLL_SLOW_LOCK_REF); 801 WM8900_REG_FLLCTL6_FLL_SLOW_LOCK_REF);
882 else 802 else
883 wm8900_write(codec, WM8900_REG_FLLCTL6, 0); 803 snd_soc_write(codec, WM8900_REG_FLLCTL6, 0);
884 804
885 reg = wm8900_read(codec, WM8900_REG_POWER1); 805 reg = snd_soc_read(codec, WM8900_REG_POWER1);
886 wm8900_write(codec, WM8900_REG_POWER1, 806 snd_soc_write(codec, WM8900_REG_POWER1,
887 reg | WM8900_REG_POWER1_FLL_ENA); 807 reg | WM8900_REG_POWER1_FLL_ENA);
888 808
889reenable: 809reenable:
890 reg = wm8900_read(codec, WM8900_REG_CLOCKING1); 810 reg = snd_soc_read(codec, WM8900_REG_CLOCKING1);
891 wm8900_write(codec, WM8900_REG_CLOCKING1, 811 snd_soc_write(codec, WM8900_REG_CLOCKING1,
892 reg | WM8900_REG_CLOCKING1_MCLK_SRC); 812 reg | WM8900_REG_CLOCKING1_MCLK_SRC);
893 813
894 return 0; 814 return 0;
@@ -908,38 +828,38 @@ static int wm8900_set_dai_clkdiv(struct snd_soc_dai *codec_dai,
908 828
909 switch (div_id) { 829 switch (div_id) {
910 case WM8900_BCLK_DIV: 830 case WM8900_BCLK_DIV:
911 reg = wm8900_read(codec, WM8900_REG_CLOCKING1); 831 reg = snd_soc_read(codec, WM8900_REG_CLOCKING1);
912 wm8900_write(codec, WM8900_REG_CLOCKING1, 832 snd_soc_write(codec, WM8900_REG_CLOCKING1,
913 div | (reg & WM8900_REG_CLOCKING1_BCLK_MASK)); 833 div | (reg & WM8900_REG_CLOCKING1_BCLK_MASK));
914 break; 834 break;
915 case WM8900_OPCLK_DIV: 835 case WM8900_OPCLK_DIV:
916 reg = wm8900_read(codec, WM8900_REG_CLOCKING1); 836 reg = snd_soc_read(codec, WM8900_REG_CLOCKING1);
917 wm8900_write(codec, WM8900_REG_CLOCKING1, 837 snd_soc_write(codec, WM8900_REG_CLOCKING1,
918 div | (reg & WM8900_REG_CLOCKING1_OPCLK_MASK)); 838 div | (reg & WM8900_REG_CLOCKING1_OPCLK_MASK));
919 break; 839 break;
920 case WM8900_DAC_LRCLK: 840 case WM8900_DAC_LRCLK:
921 reg = wm8900_read(codec, WM8900_REG_AUDIO4); 841 reg = snd_soc_read(codec, WM8900_REG_AUDIO4);
922 wm8900_write(codec, WM8900_REG_AUDIO4, 842 snd_soc_write(codec, WM8900_REG_AUDIO4,
923 div | (reg & WM8900_LRC_MASK)); 843 div | (reg & WM8900_LRC_MASK));
924 break; 844 break;
925 case WM8900_ADC_LRCLK: 845 case WM8900_ADC_LRCLK:
926 reg = wm8900_read(codec, WM8900_REG_AUDIO3); 846 reg = snd_soc_read(codec, WM8900_REG_AUDIO3);
927 wm8900_write(codec, WM8900_REG_AUDIO3, 847 snd_soc_write(codec, WM8900_REG_AUDIO3,
928 div | (reg & WM8900_LRC_MASK)); 848 div | (reg & WM8900_LRC_MASK));
929 break; 849 break;
930 case WM8900_DAC_CLKDIV: 850 case WM8900_DAC_CLKDIV:
931 reg = wm8900_read(codec, WM8900_REG_CLOCKING2); 851 reg = snd_soc_read(codec, WM8900_REG_CLOCKING2);
932 wm8900_write(codec, WM8900_REG_CLOCKING2, 852 snd_soc_write(codec, WM8900_REG_CLOCKING2,
933 div | (reg & WM8900_REG_CLOCKING2_DAC_CLKDIV)); 853 div | (reg & WM8900_REG_CLOCKING2_DAC_CLKDIV));
934 break; 854 break;
935 case WM8900_ADC_CLKDIV: 855 case WM8900_ADC_CLKDIV:
936 reg = wm8900_read(codec, WM8900_REG_CLOCKING2); 856 reg = snd_soc_read(codec, WM8900_REG_CLOCKING2);
937 wm8900_write(codec, WM8900_REG_CLOCKING2, 857 snd_soc_write(codec, WM8900_REG_CLOCKING2,
938 div | (reg & WM8900_REG_CLOCKING2_ADC_CLKDIV)); 858 div | (reg & WM8900_REG_CLOCKING2_ADC_CLKDIV));
939 break; 859 break;
940 case WM8900_LRCLK_MODE: 860 case WM8900_LRCLK_MODE:
941 reg = wm8900_read(codec, WM8900_REG_DACCTRL); 861 reg = snd_soc_read(codec, WM8900_REG_DACCTRL);
942 wm8900_write(codec, WM8900_REG_DACCTRL, 862 snd_soc_write(codec, WM8900_REG_DACCTRL,
943 div | (reg & WM8900_REG_DACCTRL_AIF_LRCLKRATE)); 863 div | (reg & WM8900_REG_DACCTRL_AIF_LRCLKRATE));
944 break; 864 break;
945 default: 865 default:
@@ -956,10 +876,10 @@ static int wm8900_set_dai_fmt(struct snd_soc_dai *codec_dai,
956 struct snd_soc_codec *codec = codec_dai->codec; 876 struct snd_soc_codec *codec = codec_dai->codec;
957 unsigned int clocking1, aif1, aif3, aif4; 877 unsigned int clocking1, aif1, aif3, aif4;
958 878
959 clocking1 = wm8900_read(codec, WM8900_REG_CLOCKING1); 879 clocking1 = snd_soc_read(codec, WM8900_REG_CLOCKING1);
960 aif1 = wm8900_read(codec, WM8900_REG_AUDIO1); 880 aif1 = snd_soc_read(codec, WM8900_REG_AUDIO1);
961 aif3 = wm8900_read(codec, WM8900_REG_AUDIO3); 881 aif3 = snd_soc_read(codec, WM8900_REG_AUDIO3);
962 aif4 = wm8900_read(codec, WM8900_REG_AUDIO4); 882 aif4 = snd_soc_read(codec, WM8900_REG_AUDIO4);
963 883
964 /* set master/slave audio interface */ 884 /* set master/slave audio interface */
965 switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { 885 switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
@@ -1055,10 +975,10 @@ static int wm8900_set_dai_fmt(struct snd_soc_dai *codec_dai,
1055 return -EINVAL; 975 return -EINVAL;
1056 } 976 }
1057 977
1058 wm8900_write(codec, WM8900_REG_CLOCKING1, clocking1); 978 snd_soc_write(codec, WM8900_REG_CLOCKING1, clocking1);
1059 wm8900_write(codec, WM8900_REG_AUDIO1, aif1); 979 snd_soc_write(codec, WM8900_REG_AUDIO1, aif1);
1060 wm8900_write(codec, WM8900_REG_AUDIO3, aif3); 980 snd_soc_write(codec, WM8900_REG_AUDIO3, aif3);
1061 wm8900_write(codec, WM8900_REG_AUDIO4, aif4); 981 snd_soc_write(codec, WM8900_REG_AUDIO4, aif4);
1062 982
1063 return 0; 983 return 0;
1064} 984}
@@ -1068,14 +988,14 @@ static int wm8900_digital_mute(struct snd_soc_dai *codec_dai, int mute)
1068 struct snd_soc_codec *codec = codec_dai->codec; 988 struct snd_soc_codec *codec = codec_dai->codec;
1069 u16 reg; 989 u16 reg;
1070 990
1071 reg = wm8900_read(codec, WM8900_REG_DACCTRL); 991 reg = snd_soc_read(codec, WM8900_REG_DACCTRL);
1072 992
1073 if (mute) 993 if (mute)
1074 reg |= WM8900_REG_DACCTRL_MUTE; 994 reg |= WM8900_REG_DACCTRL_MUTE;
1075 else 995 else
1076 reg &= ~WM8900_REG_DACCTRL_MUTE; 996 reg &= ~WM8900_REG_DACCTRL_MUTE;
1077 997
1078 wm8900_write(codec, WM8900_REG_DACCTRL, reg); 998 snd_soc_write(codec, WM8900_REG_DACCTRL, reg);
1079 999
1080 return 0; 1000 return 0;
1081} 1001}
@@ -1124,11 +1044,11 @@ static int wm8900_set_bias_level(struct snd_soc_codec *codec,
1124 switch (level) { 1044 switch (level) {
1125 case SND_SOC_BIAS_ON: 1045 case SND_SOC_BIAS_ON:
1126 /* Enable thermal shutdown */ 1046 /* Enable thermal shutdown */
1127 reg = wm8900_read(codec, WM8900_REG_GPIO); 1047 reg = snd_soc_read(codec, WM8900_REG_GPIO);
1128 wm8900_write(codec, WM8900_REG_GPIO, 1048 snd_soc_write(codec, WM8900_REG_GPIO,
1129 reg | WM8900_REG_GPIO_TEMP_ENA); 1049 reg | WM8900_REG_GPIO_TEMP_ENA);
1130 reg = wm8900_read(codec, WM8900_REG_ADDCTL); 1050 reg = snd_soc_read(codec, WM8900_REG_ADDCTL);
1131 wm8900_write(codec, WM8900_REG_ADDCTL, 1051 snd_soc_write(codec, WM8900_REG_ADDCTL,
1132 reg | WM8900_REG_ADDCTL_TEMP_SD); 1052 reg | WM8900_REG_ADDCTL_TEMP_SD);
1133 break; 1053 break;
1134 1054
@@ -1139,69 +1059,69 @@ static int wm8900_set_bias_level(struct snd_soc_codec *codec,
1139 /* Charge capacitors if initial power up */ 1059 /* Charge capacitors if initial power up */
1140 if (codec->bias_level == SND_SOC_BIAS_OFF) { 1060 if (codec->bias_level == SND_SOC_BIAS_OFF) {
1141 /* STARTUP_BIAS_ENA on */ 1061 /* STARTUP_BIAS_ENA on */
1142 wm8900_write(codec, WM8900_REG_POWER1, 1062 snd_soc_write(codec, WM8900_REG_POWER1,
1143 WM8900_REG_POWER1_STARTUP_BIAS_ENA); 1063 WM8900_REG_POWER1_STARTUP_BIAS_ENA);
1144 1064
1145 /* Startup bias mode */ 1065 /* Startup bias mode */
1146 wm8900_write(codec, WM8900_REG_ADDCTL, 1066 snd_soc_write(codec, WM8900_REG_ADDCTL,
1147 WM8900_REG_ADDCTL_BIAS_SRC | 1067 WM8900_REG_ADDCTL_BIAS_SRC |
1148 WM8900_REG_ADDCTL_VMID_SOFTST); 1068 WM8900_REG_ADDCTL_VMID_SOFTST);
1149 1069
1150 /* VMID 2x50k */ 1070 /* VMID 2x50k */
1151 wm8900_write(codec, WM8900_REG_POWER1, 1071 snd_soc_write(codec, WM8900_REG_POWER1,
1152 WM8900_REG_POWER1_STARTUP_BIAS_ENA | 0x1); 1072 WM8900_REG_POWER1_STARTUP_BIAS_ENA | 0x1);
1153 1073
1154 /* Allow capacitors to charge */ 1074 /* Allow capacitors to charge */
1155 schedule_timeout_interruptible(msecs_to_jiffies(400)); 1075 schedule_timeout_interruptible(msecs_to_jiffies(400));
1156 1076
1157 /* Enable bias */ 1077 /* Enable bias */
1158 wm8900_write(codec, WM8900_REG_POWER1, 1078 snd_soc_write(codec, WM8900_REG_POWER1,
1159 WM8900_REG_POWER1_STARTUP_BIAS_ENA | 1079 WM8900_REG_POWER1_STARTUP_BIAS_ENA |
1160 WM8900_REG_POWER1_BIAS_ENA | 0x1); 1080 WM8900_REG_POWER1_BIAS_ENA | 0x1);
1161 1081
1162 wm8900_write(codec, WM8900_REG_ADDCTL, 0); 1082 snd_soc_write(codec, WM8900_REG_ADDCTL, 0);
1163 1083
1164 wm8900_write(codec, WM8900_REG_POWER1, 1084 snd_soc_write(codec, WM8900_REG_POWER1,
1165 WM8900_REG_POWER1_BIAS_ENA | 0x1); 1085 WM8900_REG_POWER1_BIAS_ENA | 0x1);
1166 } 1086 }
1167 1087
1168 reg = wm8900_read(codec, WM8900_REG_POWER1); 1088 reg = snd_soc_read(codec, WM8900_REG_POWER1);
1169 wm8900_write(codec, WM8900_REG_POWER1, 1089 snd_soc_write(codec, WM8900_REG_POWER1,
1170 (reg & WM8900_REG_POWER1_FLL_ENA) | 1090 (reg & WM8900_REG_POWER1_FLL_ENA) |
1171 WM8900_REG_POWER1_BIAS_ENA | 0x1); 1091 WM8900_REG_POWER1_BIAS_ENA | 0x1);
1172 wm8900_write(codec, WM8900_REG_POWER2, 1092 snd_soc_write(codec, WM8900_REG_POWER2,
1173 WM8900_REG_POWER2_SYSCLK_ENA); 1093 WM8900_REG_POWER2_SYSCLK_ENA);
1174 wm8900_write(codec, WM8900_REG_POWER3, 0); 1094 snd_soc_write(codec, WM8900_REG_POWER3, 0);
1175 break; 1095 break;
1176 1096
1177 case SND_SOC_BIAS_OFF: 1097 case SND_SOC_BIAS_OFF:
1178 /* Startup bias enable */ 1098 /* Startup bias enable */
1179 reg = wm8900_read(codec, WM8900_REG_POWER1); 1099 reg = snd_soc_read(codec, WM8900_REG_POWER1);
1180 wm8900_write(codec, WM8900_REG_POWER1, 1100 snd_soc_write(codec, WM8900_REG_POWER1,
1181 reg & WM8900_REG_POWER1_STARTUP_BIAS_ENA); 1101 reg & WM8900_REG_POWER1_STARTUP_BIAS_ENA);
1182 wm8900_write(codec, WM8900_REG_ADDCTL, 1102 snd_soc_write(codec, WM8900_REG_ADDCTL,
1183 WM8900_REG_ADDCTL_BIAS_SRC | 1103 WM8900_REG_ADDCTL_BIAS_SRC |
1184 WM8900_REG_ADDCTL_VMID_SOFTST); 1104 WM8900_REG_ADDCTL_VMID_SOFTST);
1185 1105
1186 /* Discharge caps */ 1106 /* Discharge caps */
1187 wm8900_write(codec, WM8900_REG_POWER1, 1107 snd_soc_write(codec, WM8900_REG_POWER1,
1188 WM8900_REG_POWER1_STARTUP_BIAS_ENA); 1108 WM8900_REG_POWER1_STARTUP_BIAS_ENA);
1189 schedule_timeout_interruptible(msecs_to_jiffies(500)); 1109 schedule_timeout_interruptible(msecs_to_jiffies(500));
1190 1110
1191 /* Remove clamp */ 1111 /* Remove clamp */
1192 wm8900_write(codec, WM8900_REG_HPCTL1, 0); 1112 snd_soc_write(codec, WM8900_REG_HPCTL1, 0);
1193 1113
1194 /* Power down */ 1114 /* Power down */
1195 wm8900_write(codec, WM8900_REG_ADDCTL, 0); 1115 snd_soc_write(codec, WM8900_REG_ADDCTL, 0);
1196 wm8900_write(codec, WM8900_REG_POWER1, 0); 1116 snd_soc_write(codec, WM8900_REG_POWER1, 0);
1197 wm8900_write(codec, WM8900_REG_POWER2, 0); 1117 snd_soc_write(codec, WM8900_REG_POWER2, 0);
1198 wm8900_write(codec, WM8900_REG_POWER3, 0); 1118 snd_soc_write(codec, WM8900_REG_POWER3, 0);
1199 1119
1200 /* Need to let things settle before stopping the clock 1120 /* Need to let things settle before stopping the clock
1201 * to ensure that restart works, see "Stopping the 1121 * to ensure that restart works, see "Stopping the
1202 * master clock" in the datasheet. */ 1122 * master clock" in the datasheet. */
1203 schedule_timeout_interruptible(msecs_to_jiffies(1)); 1123 schedule_timeout_interruptible(msecs_to_jiffies(1));
1204 wm8900_write(codec, WM8900_REG_POWER2, 1124 snd_soc_write(codec, WM8900_REG_POWER2,
1205 WM8900_REG_POWER2_SYSCLK_ENA); 1125 WM8900_REG_POWER2_SYSCLK_ENA);
1206 break; 1126 break;
1207 } 1127 }
@@ -1264,7 +1184,7 @@ static int wm8900_resume(struct platform_device *pdev)
1264 1184
1265 if (cache) { 1185 if (cache) {
1266 for (i = 0; i < WM8900_MAXREG; i++) 1186 for (i = 0; i < WM8900_MAXREG; i++)
1267 wm8900_write(codec, i, cache[i]); 1187 snd_soc_write(codec, i, cache[i]);
1268 kfree(cache); 1188 kfree(cache);
1269 } else 1189 } else
1270 dev_err(&pdev->dev, "Unable to allocate register cache\n"); 1190 dev_err(&pdev->dev, "Unable to allocate register cache\n");
@@ -1297,16 +1217,20 @@ static __devinit int wm8900_i2c_probe(struct i2c_client *i2c,
1297 1217
1298 codec->name = "WM8900"; 1218 codec->name = "WM8900";
1299 codec->owner = THIS_MODULE; 1219 codec->owner = THIS_MODULE;
1300 codec->read = wm8900_read;
1301 codec->write = wm8900_write;
1302 codec->dai = &wm8900_dai; 1220 codec->dai = &wm8900_dai;
1303 codec->num_dai = 1; 1221 codec->num_dai = 1;
1304 codec->hw_write = (hw_write_t)i2c_master_send;
1305 codec->control_data = i2c; 1222 codec->control_data = i2c;
1306 codec->set_bias_level = wm8900_set_bias_level; 1223 codec->set_bias_level = wm8900_set_bias_level;
1224 codec->volatile_register = wm8900_volatile_register;
1307 codec->dev = &i2c->dev; 1225 codec->dev = &i2c->dev;
1308 1226
1309 reg = wm8900_read(codec, WM8900_REG_ID); 1227 ret = snd_soc_codec_set_cache_io(codec, 8, 16, SND_SOC_I2C);
1228 if (ret != 0) {
1229 dev_err(&i2c->dev, "Failed to set cache I/O: %d\n", ret);
1230 goto err;
1231 }
1232
1233 reg = snd_soc_read(codec, WM8900_REG_ID);
1310 if (reg != 0x8900) { 1234 if (reg != 0x8900) {
1311 dev_err(&i2c->dev, "Device is not a WM8900 - ID %x\n", reg); 1235 dev_err(&i2c->dev, "Device is not a WM8900 - ID %x\n", reg);
1312 ret = -ENODEV; 1236 ret = -ENODEV;
@@ -1314,7 +1238,7 @@ static __devinit int wm8900_i2c_probe(struct i2c_client *i2c,
1314 } 1238 }
1315 1239
1316 /* Read back from the chip */ 1240 /* Read back from the chip */
1317 reg = wm8900_chip_read(codec, WM8900_REG_POWER1); 1241 reg = snd_soc_read(codec, WM8900_REG_POWER1);
1318 reg = (reg >> 12) & 0xf; 1242 reg = (reg >> 12) & 0xf;
1319 dev_info(&i2c->dev, "WM8900 revision %d\n", reg); 1243 dev_info(&i2c->dev, "WM8900 revision %d\n", reg);
1320 1244
@@ -1324,29 +1248,29 @@ static __devinit int wm8900_i2c_probe(struct i2c_client *i2c,
1324 wm8900_set_bias_level(codec, SND_SOC_BIAS_STANDBY); 1248 wm8900_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
1325 1249
1326 /* Latch the volume update bits */ 1250 /* Latch the volume update bits */
1327 wm8900_write(codec, WM8900_REG_LINVOL, 1251 snd_soc_write(codec, WM8900_REG_LINVOL,
1328 wm8900_read(codec, WM8900_REG_LINVOL) | 0x100); 1252 snd_soc_read(codec, WM8900_REG_LINVOL) | 0x100);
1329 wm8900_write(codec, WM8900_REG_RINVOL, 1253 snd_soc_write(codec, WM8900_REG_RINVOL,
1330 wm8900_read(codec, WM8900_REG_RINVOL) | 0x100); 1254 snd_soc_read(codec, WM8900_REG_RINVOL) | 0x100);
1331 wm8900_write(codec, WM8900_REG_LOUT1CTL, 1255 snd_soc_write(codec, WM8900_REG_LOUT1CTL,
1332 wm8900_read(codec, WM8900_REG_LOUT1CTL) | 0x100); 1256 snd_soc_read(codec, WM8900_REG_LOUT1CTL) | 0x100);
1333 wm8900_write(codec, WM8900_REG_ROUT1CTL, 1257 snd_soc_write(codec, WM8900_REG_ROUT1CTL,
1334 wm8900_read(codec, WM8900_REG_ROUT1CTL) | 0x100); 1258 snd_soc_read(codec, WM8900_REG_ROUT1CTL) | 0x100);
1335 wm8900_write(codec, WM8900_REG_LOUT2CTL, 1259 snd_soc_write(codec, WM8900_REG_LOUT2CTL,
1336 wm8900_read(codec, WM8900_REG_LOUT2CTL) | 0x100); 1260 snd_soc_read(codec, WM8900_REG_LOUT2CTL) | 0x100);
1337 wm8900_write(codec, WM8900_REG_ROUT2CTL, 1261 snd_soc_write(codec, WM8900_REG_ROUT2CTL,
1338 wm8900_read(codec, WM8900_REG_ROUT2CTL) | 0x100); 1262 snd_soc_read(codec, WM8900_REG_ROUT2CTL) | 0x100);
1339 wm8900_write(codec, WM8900_REG_LDAC_DV, 1263 snd_soc_write(codec, WM8900_REG_LDAC_DV,
1340 wm8900_read(codec, WM8900_REG_LDAC_DV) | 0x100); 1264 snd_soc_read(codec, WM8900_REG_LDAC_DV) | 0x100);
1341 wm8900_write(codec, WM8900_REG_RDAC_DV, 1265 snd_soc_write(codec, WM8900_REG_RDAC_DV,
1342 wm8900_read(codec, WM8900_REG_RDAC_DV) | 0x100); 1266 snd_soc_read(codec, WM8900_REG_RDAC_DV) | 0x100);
1343 wm8900_write(codec, WM8900_REG_LADC_DV, 1267 snd_soc_write(codec, WM8900_REG_LADC_DV,
1344 wm8900_read(codec, WM8900_REG_LADC_DV) | 0x100); 1268 snd_soc_read(codec, WM8900_REG_LADC_DV) | 0x100);
1345 wm8900_write(codec, WM8900_REG_RADC_DV, 1269 snd_soc_write(codec, WM8900_REG_RADC_DV,
1346 wm8900_read(codec, WM8900_REG_RADC_DV) | 0x100); 1270 snd_soc_read(codec, WM8900_REG_RADC_DV) | 0x100);
1347 1271
1348 /* Set the DAC and mixer output bias */ 1272 /* Set the DAC and mixer output bias */
1349 wm8900_write(codec, WM8900_REG_OUTBIASCTL, 0x81); 1273 snd_soc_write(codec, WM8900_REG_OUTBIASCTL, 0x81);
1350 1274
1351 wm8900_dai.dev = &i2c->dev; 1275 wm8900_dai.dev = &i2c->dev;
1352 1276
@@ -1388,6 +1312,21 @@ static __devexit int wm8900_i2c_remove(struct i2c_client *client)
1388 return 0; 1312 return 0;
1389} 1313}
1390 1314
1315#ifdef CONFIG_PM
1316static int wm8900_i2c_suspend(struct i2c_client *client, pm_message_t msg)
1317{
1318 return snd_soc_suspend_device(&client->dev);
1319}
1320
1321static int wm8900_i2c_resume(struct i2c_client *client)
1322{
1323 return snd_soc_resume_device(&client->dev);
1324}
1325#else
1326#define wm8900_i2c_suspend NULL
1327#define wm8900_i2c_resume NULL
1328#endif
1329
1391static const struct i2c_device_id wm8900_i2c_id[] = { 1330static const struct i2c_device_id wm8900_i2c_id[] = {
1392 { "wm8900", 0 }, 1331 { "wm8900", 0 },
1393 { } 1332 { }
@@ -1401,6 +1340,8 @@ static struct i2c_driver wm8900_i2c_driver = {
1401 }, 1340 },
1402 .probe = wm8900_i2c_probe, 1341 .probe = wm8900_i2c_probe,
1403 .remove = __devexit_p(wm8900_i2c_remove), 1342 .remove = __devexit_p(wm8900_i2c_remove),
1343 .suspend = wm8900_i2c_suspend,
1344 .resume = wm8900_i2c_resume,
1404 .id_table = wm8900_i2c_id, 1345 .id_table = wm8900_i2c_id,
1405}; 1346};
1406 1347
diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c
index e8d2e3e14c45..fe1307b500cf 100644
--- a/sound/soc/codecs/wm8903.c
+++ b/sound/soc/codecs/wm8903.c
@@ -225,94 +225,18 @@ struct wm8903_priv {
225 struct snd_pcm_substream *slave_substream; 225 struct snd_pcm_substream *slave_substream;
226}; 226};
227 227
228 228static int wm8903_volatile_register(unsigned int reg)
229static unsigned int wm8903_read_reg_cache(struct snd_soc_codec *codec,
230 unsigned int reg)
231{
232 u16 *cache = codec->reg_cache;
233
234 BUG_ON(reg >= ARRAY_SIZE(wm8903_reg_defaults));
235
236 return cache[reg];
237}
238
239static unsigned int wm8903_hw_read(struct snd_soc_codec *codec, u8 reg)
240{
241 struct i2c_msg xfer[2];
242 u16 data;
243 int ret;
244 struct i2c_client *client = codec->control_data;
245
246 /* Write register */
247 xfer[0].addr = client->addr;
248 xfer[0].flags = 0;
249 xfer[0].len = 1;
250 xfer[0].buf = &reg;
251
252 /* Read data */
253 xfer[1].addr = client->addr;
254 xfer[1].flags = I2C_M_RD;
255 xfer[1].len = 2;
256 xfer[1].buf = (u8 *)&data;
257
258 ret = i2c_transfer(client->adapter, xfer, 2);
259 if (ret != 2) {
260 pr_err("i2c_transfer returned %d\n", ret);
261 return 0;
262 }
263
264 return (data >> 8) | ((data & 0xff) << 8);
265}
266
267static unsigned int wm8903_read(struct snd_soc_codec *codec,
268 unsigned int reg)
269{ 229{
270 switch (reg) { 230 switch (reg) {
271 case WM8903_SW_RESET_AND_ID: 231 case WM8903_SW_RESET_AND_ID:
272 case WM8903_REVISION_NUMBER: 232 case WM8903_REVISION_NUMBER:
273 case WM8903_INTERRUPT_STATUS_1: 233 case WM8903_INTERRUPT_STATUS_1:
274 case WM8903_WRITE_SEQUENCER_4: 234 case WM8903_WRITE_SEQUENCER_4:
275 return wm8903_hw_read(codec, reg); 235 return 1;
276 236
277 default: 237 default:
278 return wm8903_read_reg_cache(codec, reg);
279 }
280}
281
282static void wm8903_write_reg_cache(struct snd_soc_codec *codec,
283 u16 reg, unsigned int value)
284{
285 u16 *cache = codec->reg_cache;
286
287 BUG_ON(reg >= ARRAY_SIZE(wm8903_reg_defaults));
288
289 switch (reg) {
290 case WM8903_SW_RESET_AND_ID:
291 case WM8903_REVISION_NUMBER:
292 break;
293
294 default:
295 cache[reg] = value;
296 break;
297 }
298}
299
300static int wm8903_write(struct snd_soc_codec *codec, unsigned int reg,
301 unsigned int value)
302{
303 u8 data[3];
304
305 wm8903_write_reg_cache(codec, reg, value);
306
307 /* Data format is 1 byte of address followed by 2 bytes of data */
308 data[0] = reg;
309 data[1] = (value >> 8) & 0xff;
310 data[2] = value & 0xff;
311
312 if (codec->hw_write(codec->control_data, data, 3) == 2)
313 return 0; 238 return 0;
314 else 239 }
315 return -EIO;
316} 240}
317 241
318static int wm8903_run_sequence(struct snd_soc_codec *codec, unsigned int start) 242static int wm8903_run_sequence(struct snd_soc_codec *codec, unsigned int start)
@@ -323,13 +247,13 @@ static int wm8903_run_sequence(struct snd_soc_codec *codec, unsigned int start)
323 BUG_ON(start > 48); 247 BUG_ON(start > 48);
324 248
325 /* Enable the sequencer */ 249 /* Enable the sequencer */
326 reg[0] = wm8903_read(codec, WM8903_WRITE_SEQUENCER_0); 250 reg[0] = snd_soc_read(codec, WM8903_WRITE_SEQUENCER_0);
327 reg[0] |= WM8903_WSEQ_ENA; 251 reg[0] |= WM8903_WSEQ_ENA;
328 wm8903_write(codec, WM8903_WRITE_SEQUENCER_0, reg[0]); 252 snd_soc_write(codec, WM8903_WRITE_SEQUENCER_0, reg[0]);
329 253
330 dev_dbg(&i2c->dev, "Starting sequence at %d\n", start); 254 dev_dbg(&i2c->dev, "Starting sequence at %d\n", start);
331 255
332 wm8903_write(codec, WM8903_WRITE_SEQUENCER_3, 256 snd_soc_write(codec, WM8903_WRITE_SEQUENCER_3,
333 start | WM8903_WSEQ_START); 257 start | WM8903_WSEQ_START);
334 258
335 /* Wait for it to complete. If we have the interrupt wired up then 259 /* Wait for it to complete. If we have the interrupt wired up then
@@ -339,13 +263,13 @@ static int wm8903_run_sequence(struct snd_soc_codec *codec, unsigned int start)
339 do { 263 do {
340 msleep(10); 264 msleep(10);
341 265
342 reg[4] = wm8903_read(codec, WM8903_WRITE_SEQUENCER_4); 266 reg[4] = snd_soc_read(codec, WM8903_WRITE_SEQUENCER_4);
343 } while (reg[4] & WM8903_WSEQ_BUSY); 267 } while (reg[4] & WM8903_WSEQ_BUSY);
344 268
345 dev_dbg(&i2c->dev, "Sequence complete\n"); 269 dev_dbg(&i2c->dev, "Sequence complete\n");
346 270
347 /* Disable the sequencer again */ 271 /* Disable the sequencer again */
348 wm8903_write(codec, WM8903_WRITE_SEQUENCER_0, 272 snd_soc_write(codec, WM8903_WRITE_SEQUENCER_0,
349 reg[0] & ~WM8903_WSEQ_ENA); 273 reg[0] & ~WM8903_WSEQ_ENA);
350 274
351 return 0; 275 return 0;
@@ -357,12 +281,12 @@ static void wm8903_sync_reg_cache(struct snd_soc_codec *codec, u16 *cache)
357 281
358 /* There really ought to be something better we can do here :/ */ 282 /* There really ought to be something better we can do here :/ */
359 for (i = 0; i < ARRAY_SIZE(wm8903_reg_defaults); i++) 283 for (i = 0; i < ARRAY_SIZE(wm8903_reg_defaults); i++)
360 cache[i] = wm8903_hw_read(codec, i); 284 cache[i] = codec->hw_read(codec, i);
361} 285}
362 286
363static void wm8903_reset(struct snd_soc_codec *codec) 287static void wm8903_reset(struct snd_soc_codec *codec)
364{ 288{
365 wm8903_write(codec, WM8903_SW_RESET_AND_ID, 0); 289 snd_soc_write(codec, WM8903_SW_RESET_AND_ID, 0);
366 memcpy(codec->reg_cache, wm8903_reg_defaults, 290 memcpy(codec->reg_cache, wm8903_reg_defaults,
367 sizeof(wm8903_reg_defaults)); 291 sizeof(wm8903_reg_defaults));
368} 292}
@@ -423,52 +347,52 @@ static int wm8903_output_event(struct snd_soc_dapm_widget *w,
423 } 347 }
424 348
425 if (event & SND_SOC_DAPM_PRE_PMU) { 349 if (event & SND_SOC_DAPM_PRE_PMU) {
426 val = wm8903_read(codec, reg); 350 val = snd_soc_read(codec, reg);
427 351
428 /* Short the output */ 352 /* Short the output */
429 val &= ~(WM8903_OUTPUT_SHORT << shift); 353 val &= ~(WM8903_OUTPUT_SHORT << shift);
430 wm8903_write(codec, reg, val); 354 snd_soc_write(codec, reg, val);
431 } 355 }
432 356
433 if (event & SND_SOC_DAPM_POST_PMU) { 357 if (event & SND_SOC_DAPM_POST_PMU) {
434 val = wm8903_read(codec, reg); 358 val = snd_soc_read(codec, reg);
435 359
436 val |= (WM8903_OUTPUT_IN << shift); 360 val |= (WM8903_OUTPUT_IN << shift);
437 wm8903_write(codec, reg, val); 361 snd_soc_write(codec, reg, val);
438 362
439 val |= (WM8903_OUTPUT_INT << shift); 363 val |= (WM8903_OUTPUT_INT << shift);
440 wm8903_write(codec, reg, val); 364 snd_soc_write(codec, reg, val);
441 365
442 /* Turn on the output ENA_OUTP */ 366 /* Turn on the output ENA_OUTP */
443 val |= (WM8903_OUTPUT_OUT << shift); 367 val |= (WM8903_OUTPUT_OUT << shift);
444 wm8903_write(codec, reg, val); 368 snd_soc_write(codec, reg, val);
445 369
446 /* Enable the DC servo */ 370 /* Enable the DC servo */
447 dcs_reg = wm8903_read(codec, WM8903_DC_SERVO_0); 371 dcs_reg = snd_soc_read(codec, WM8903_DC_SERVO_0);
448 dcs_reg |= dcs_bit; 372 dcs_reg |= dcs_bit;
449 wm8903_write(codec, WM8903_DC_SERVO_0, dcs_reg); 373 snd_soc_write(codec, WM8903_DC_SERVO_0, dcs_reg);
450 374
451 /* Remove the short */ 375 /* Remove the short */
452 val |= (WM8903_OUTPUT_SHORT << shift); 376 val |= (WM8903_OUTPUT_SHORT << shift);
453 wm8903_write(codec, reg, val); 377 snd_soc_write(codec, reg, val);
454 } 378 }
455 379
456 if (event & SND_SOC_DAPM_PRE_PMD) { 380 if (event & SND_SOC_DAPM_PRE_PMD) {
457 val = wm8903_read(codec, reg); 381 val = snd_soc_read(codec, reg);
458 382
459 /* Short the output */ 383 /* Short the output */
460 val &= ~(WM8903_OUTPUT_SHORT << shift); 384 val &= ~(WM8903_OUTPUT_SHORT << shift);
461 wm8903_write(codec, reg, val); 385 snd_soc_write(codec, reg, val);
462 386
463 /* Disable the DC servo */ 387 /* Disable the DC servo */
464 dcs_reg = wm8903_read(codec, WM8903_DC_SERVO_0); 388 dcs_reg = snd_soc_read(codec, WM8903_DC_SERVO_0);
465 dcs_reg &= ~dcs_bit; 389 dcs_reg &= ~dcs_bit;
466 wm8903_write(codec, WM8903_DC_SERVO_0, dcs_reg); 390 snd_soc_write(codec, WM8903_DC_SERVO_0, dcs_reg);
467 391
468 /* Then disable the intermediate and output stages */ 392 /* Then disable the intermediate and output stages */
469 val &= ~((WM8903_OUTPUT_OUT | WM8903_OUTPUT_INT | 393 val &= ~((WM8903_OUTPUT_OUT | WM8903_OUTPUT_INT |
470 WM8903_OUTPUT_IN) << shift); 394 WM8903_OUTPUT_IN) << shift);
471 wm8903_write(codec, reg, val); 395 snd_soc_write(codec, reg, val);
472 } 396 }
473 397
474 return 0; 398 return 0;
@@ -492,13 +416,13 @@ static int wm8903_class_w_put(struct snd_kcontrol *kcontrol,
492 u16 reg; 416 u16 reg;
493 int ret; 417 int ret;
494 418
495 reg = wm8903_read(codec, WM8903_CLASS_W_0); 419 reg = snd_soc_read(codec, WM8903_CLASS_W_0);
496 420
497 /* Turn it off if we're about to enable bypass */ 421 /* Turn it off if we're about to enable bypass */
498 if (ucontrol->value.integer.value[0]) { 422 if (ucontrol->value.integer.value[0]) {
499 if (wm8903->class_w_users == 0) { 423 if (wm8903->class_w_users == 0) {
500 dev_dbg(&i2c->dev, "Disabling Class W\n"); 424 dev_dbg(&i2c->dev, "Disabling Class W\n");
501 wm8903_write(codec, WM8903_CLASS_W_0, reg & 425 snd_soc_write(codec, WM8903_CLASS_W_0, reg &
502 ~(WM8903_CP_DYN_FREQ | WM8903_CP_DYN_V)); 426 ~(WM8903_CP_DYN_FREQ | WM8903_CP_DYN_V));
503 } 427 }
504 wm8903->class_w_users++; 428 wm8903->class_w_users++;
@@ -511,7 +435,7 @@ static int wm8903_class_w_put(struct snd_kcontrol *kcontrol,
511 if (!ucontrol->value.integer.value[0]) { 435 if (!ucontrol->value.integer.value[0]) {
512 if (wm8903->class_w_users == 1) { 436 if (wm8903->class_w_users == 1) {
513 dev_dbg(&i2c->dev, "Enabling Class W\n"); 437 dev_dbg(&i2c->dev, "Enabling Class W\n");
514 wm8903_write(codec, WM8903_CLASS_W_0, reg | 438 snd_soc_write(codec, WM8903_CLASS_W_0, reg |
515 WM8903_CP_DYN_FREQ | WM8903_CP_DYN_V); 439 WM8903_CP_DYN_FREQ | WM8903_CP_DYN_V);
516 } 440 }
517 wm8903->class_w_users--; 441 wm8903->class_w_users--;
@@ -715,8 +639,6 @@ SOC_ENUM("DAC Soft Mute Rate", soft_mute),
715SOC_ENUM("DAC Mute Mode", mute_mode), 639SOC_ENUM("DAC Mute Mode", mute_mode),
716SOC_SINGLE("DAC Mono Switch", WM8903_DAC_DIGITAL_1, 12, 1, 0), 640SOC_SINGLE("DAC Mono Switch", WM8903_DAC_DIGITAL_1, 12, 1, 0),
717SOC_ENUM("DAC De-emphasis", dac_deemphasis), 641SOC_ENUM("DAC De-emphasis", dac_deemphasis),
718SOC_SINGLE("DAC Sloping Stopband Filter Switch",
719 WM8903_DAC_DIGITAL_1, 11, 1, 0),
720SOC_ENUM("DAC Companding Mode", dac_companding), 642SOC_ENUM("DAC Companding Mode", dac_companding),
721SOC_SINGLE("DAC Companding Switch", WM8903_AUDIO_INTERFACE_0, 1, 1, 0), 643SOC_SINGLE("DAC Companding Switch", WM8903_AUDIO_INTERFACE_0, 1, 1, 0),
722 644
@@ -1011,55 +933,55 @@ static int wm8903_set_bias_level(struct snd_soc_codec *codec,
1011 switch (level) { 933 switch (level) {
1012 case SND_SOC_BIAS_ON: 934 case SND_SOC_BIAS_ON:
1013 case SND_SOC_BIAS_PREPARE: 935 case SND_SOC_BIAS_PREPARE:
1014 reg = wm8903_read(codec, WM8903_VMID_CONTROL_0); 936 reg = snd_soc_read(codec, WM8903_VMID_CONTROL_0);
1015 reg &= ~(WM8903_VMID_RES_MASK); 937 reg &= ~(WM8903_VMID_RES_MASK);
1016 reg |= WM8903_VMID_RES_50K; 938 reg |= WM8903_VMID_RES_50K;
1017 wm8903_write(codec, WM8903_VMID_CONTROL_0, reg); 939 snd_soc_write(codec, WM8903_VMID_CONTROL_0, reg);
1018 break; 940 break;
1019 941
1020 case SND_SOC_BIAS_STANDBY: 942 case SND_SOC_BIAS_STANDBY:
1021 if (codec->bias_level == SND_SOC_BIAS_OFF) { 943 if (codec->bias_level == SND_SOC_BIAS_OFF) {
1022 wm8903_write(codec, WM8903_CLOCK_RATES_2, 944 snd_soc_write(codec, WM8903_CLOCK_RATES_2,
1023 WM8903_CLK_SYS_ENA); 945 WM8903_CLK_SYS_ENA);
1024 946
1025 /* Change DC servo dither level in startup sequence */ 947 /* Change DC servo dither level in startup sequence */
1026 wm8903_write(codec, WM8903_WRITE_SEQUENCER_0, 0x11); 948 snd_soc_write(codec, WM8903_WRITE_SEQUENCER_0, 0x11);
1027 wm8903_write(codec, WM8903_WRITE_SEQUENCER_1, 0x1257); 949 snd_soc_write(codec, WM8903_WRITE_SEQUENCER_1, 0x1257);
1028 wm8903_write(codec, WM8903_WRITE_SEQUENCER_2, 0x2); 950 snd_soc_write(codec, WM8903_WRITE_SEQUENCER_2, 0x2);
1029 951
1030 wm8903_run_sequence(codec, 0); 952 wm8903_run_sequence(codec, 0);
1031 wm8903_sync_reg_cache(codec, codec->reg_cache); 953 wm8903_sync_reg_cache(codec, codec->reg_cache);
1032 954
1033 /* Enable low impedence charge pump output */ 955 /* Enable low impedence charge pump output */
1034 reg = wm8903_read(codec, 956 reg = snd_soc_read(codec,
1035 WM8903_CONTROL_INTERFACE_TEST_1); 957 WM8903_CONTROL_INTERFACE_TEST_1);
1036 wm8903_write(codec, WM8903_CONTROL_INTERFACE_TEST_1, 958 snd_soc_write(codec, WM8903_CONTROL_INTERFACE_TEST_1,
1037 reg | WM8903_TEST_KEY); 959 reg | WM8903_TEST_KEY);
1038 reg2 = wm8903_read(codec, WM8903_CHARGE_PUMP_TEST_1); 960 reg2 = snd_soc_read(codec, WM8903_CHARGE_PUMP_TEST_1);
1039 wm8903_write(codec, WM8903_CHARGE_PUMP_TEST_1, 961 snd_soc_write(codec, WM8903_CHARGE_PUMP_TEST_1,
1040 reg2 | WM8903_CP_SW_KELVIN_MODE_MASK); 962 reg2 | WM8903_CP_SW_KELVIN_MODE_MASK);
1041 wm8903_write(codec, WM8903_CONTROL_INTERFACE_TEST_1, 963 snd_soc_write(codec, WM8903_CONTROL_INTERFACE_TEST_1,
1042 reg); 964 reg);
1043 965
1044 /* By default no bypass paths are enabled so 966 /* By default no bypass paths are enabled so
1045 * enable Class W support. 967 * enable Class W support.
1046 */ 968 */
1047 dev_dbg(&i2c->dev, "Enabling Class W\n"); 969 dev_dbg(&i2c->dev, "Enabling Class W\n");
1048 wm8903_write(codec, WM8903_CLASS_W_0, reg | 970 snd_soc_write(codec, WM8903_CLASS_W_0, reg |
1049 WM8903_CP_DYN_FREQ | WM8903_CP_DYN_V); 971 WM8903_CP_DYN_FREQ | WM8903_CP_DYN_V);
1050 } 972 }
1051 973
1052 reg = wm8903_read(codec, WM8903_VMID_CONTROL_0); 974 reg = snd_soc_read(codec, WM8903_VMID_CONTROL_0);
1053 reg &= ~(WM8903_VMID_RES_MASK); 975 reg &= ~(WM8903_VMID_RES_MASK);
1054 reg |= WM8903_VMID_RES_250K; 976 reg |= WM8903_VMID_RES_250K;
1055 wm8903_write(codec, WM8903_VMID_CONTROL_0, reg); 977 snd_soc_write(codec, WM8903_VMID_CONTROL_0, reg);
1056 break; 978 break;
1057 979
1058 case SND_SOC_BIAS_OFF: 980 case SND_SOC_BIAS_OFF:
1059 wm8903_run_sequence(codec, 32); 981 wm8903_run_sequence(codec, 32);
1060 reg = wm8903_read(codec, WM8903_CLOCK_RATES_2); 982 reg = snd_soc_read(codec, WM8903_CLOCK_RATES_2);
1061 reg &= ~WM8903_CLK_SYS_ENA; 983 reg &= ~WM8903_CLK_SYS_ENA;
1062 wm8903_write(codec, WM8903_CLOCK_RATES_2, reg); 984 snd_soc_write(codec, WM8903_CLOCK_RATES_2, reg);
1063 break; 985 break;
1064 } 986 }
1065 987
@@ -1083,7 +1005,7 @@ static int wm8903_set_dai_fmt(struct snd_soc_dai *codec_dai,
1083 unsigned int fmt) 1005 unsigned int fmt)
1084{ 1006{
1085 struct snd_soc_codec *codec = codec_dai->codec; 1007 struct snd_soc_codec *codec = codec_dai->codec;
1086 u16 aif1 = wm8903_read(codec, WM8903_AUDIO_INTERFACE_1); 1008 u16 aif1 = snd_soc_read(codec, WM8903_AUDIO_INTERFACE_1);
1087 1009
1088 aif1 &= ~(WM8903_LRCLK_DIR | WM8903_BCLK_DIR | WM8903_AIF_FMT_MASK | 1010 aif1 &= ~(WM8903_LRCLK_DIR | WM8903_BCLK_DIR | WM8903_AIF_FMT_MASK |
1089 WM8903_AIF_LRCLK_INV | WM8903_AIF_BCLK_INV); 1011 WM8903_AIF_LRCLK_INV | WM8903_AIF_BCLK_INV);
@@ -1161,7 +1083,7 @@ static int wm8903_set_dai_fmt(struct snd_soc_dai *codec_dai,
1161 return -EINVAL; 1083 return -EINVAL;
1162 } 1084 }
1163 1085
1164 wm8903_write(codec, WM8903_AUDIO_INTERFACE_1, aif1); 1086 snd_soc_write(codec, WM8903_AUDIO_INTERFACE_1, aif1);
1165 1087
1166 return 0; 1088 return 0;
1167} 1089}
@@ -1171,14 +1093,14 @@ static int wm8903_digital_mute(struct snd_soc_dai *codec_dai, int mute)
1171 struct snd_soc_codec *codec = codec_dai->codec; 1093 struct snd_soc_codec *codec = codec_dai->codec;
1172 u16 reg; 1094 u16 reg;
1173 1095
1174 reg = wm8903_read(codec, WM8903_DAC_DIGITAL_1); 1096 reg = snd_soc_read(codec, WM8903_DAC_DIGITAL_1);
1175 1097
1176 if (mute) 1098 if (mute)
1177 reg |= WM8903_DAC_MUTE; 1099 reg |= WM8903_DAC_MUTE;
1178 else 1100 else
1179 reg &= ~WM8903_DAC_MUTE; 1101 reg &= ~WM8903_DAC_MUTE;
1180 1102
1181 wm8903_write(codec, WM8903_DAC_DIGITAL_1, reg); 1103 snd_soc_write(codec, WM8903_DAC_DIGITAL_1, reg);
1182 1104
1183 return 0; 1105 return 0;
1184} 1106}
@@ -1368,17 +1290,24 @@ static int wm8903_hw_params(struct snd_pcm_substream *substream,
1368 int cur_val; 1290 int cur_val;
1369 int clk_sys; 1291 int clk_sys;
1370 1292
1371 u16 aif1 = wm8903_read(codec, WM8903_AUDIO_INTERFACE_1); 1293 u16 aif1 = snd_soc_read(codec, WM8903_AUDIO_INTERFACE_1);
1372 u16 aif2 = wm8903_read(codec, WM8903_AUDIO_INTERFACE_2); 1294 u16 aif2 = snd_soc_read(codec, WM8903_AUDIO_INTERFACE_2);
1373 u16 aif3 = wm8903_read(codec, WM8903_AUDIO_INTERFACE_3); 1295 u16 aif3 = snd_soc_read(codec, WM8903_AUDIO_INTERFACE_3);
1374 u16 clock0 = wm8903_read(codec, WM8903_CLOCK_RATES_0); 1296 u16 clock0 = snd_soc_read(codec, WM8903_CLOCK_RATES_0);
1375 u16 clock1 = wm8903_read(codec, WM8903_CLOCK_RATES_1); 1297 u16 clock1 = snd_soc_read(codec, WM8903_CLOCK_RATES_1);
1298 u16 dac_digital1 = snd_soc_read(codec, WM8903_DAC_DIGITAL_1);
1376 1299
1377 if (substream == wm8903->slave_substream) { 1300 if (substream == wm8903->slave_substream) {
1378 dev_dbg(&i2c->dev, "Ignoring hw_params for slave substream\n"); 1301 dev_dbg(&i2c->dev, "Ignoring hw_params for slave substream\n");
1379 return 0; 1302 return 0;
1380 } 1303 }
1381 1304
1305 /* Enable sloping stopband filter for low sample rates */
1306 if (fs <= 24000)
1307 dac_digital1 |= WM8903_DAC_SB_FILT;
1308 else
1309 dac_digital1 &= ~WM8903_DAC_SB_FILT;
1310
1382 /* Configure sample rate logic for DSP - choose nearest rate */ 1311 /* Configure sample rate logic for DSP - choose nearest rate */
1383 dsp_config = 0; 1312 dsp_config = 0;
1384 best_val = abs(sample_rates[dsp_config].rate - fs); 1313 best_val = abs(sample_rates[dsp_config].rate - fs);
@@ -1498,11 +1427,12 @@ static int wm8903_hw_params(struct snd_pcm_substream *substream,
1498 aif2 |= bclk_divs[bclk_div].div; 1427 aif2 |= bclk_divs[bclk_div].div;
1499 aif3 |= bclk / fs; 1428 aif3 |= bclk / fs;
1500 1429
1501 wm8903_write(codec, WM8903_CLOCK_RATES_0, clock0); 1430 snd_soc_write(codec, WM8903_CLOCK_RATES_0, clock0);
1502 wm8903_write(codec, WM8903_CLOCK_RATES_1, clock1); 1431 snd_soc_write(codec, WM8903_CLOCK_RATES_1, clock1);
1503 wm8903_write(codec, WM8903_AUDIO_INTERFACE_1, aif1); 1432 snd_soc_write(codec, WM8903_AUDIO_INTERFACE_1, aif1);
1504 wm8903_write(codec, WM8903_AUDIO_INTERFACE_2, aif2); 1433 snd_soc_write(codec, WM8903_AUDIO_INTERFACE_2, aif2);
1505 wm8903_write(codec, WM8903_AUDIO_INTERFACE_3, aif3); 1434 snd_soc_write(codec, WM8903_AUDIO_INTERFACE_3, aif3);
1435 snd_soc_write(codec, WM8903_DAC_DIGITAL_1, dac_digital1);
1506 1436
1507 return 0; 1437 return 0;
1508} 1438}
@@ -1587,7 +1517,7 @@ static int wm8903_resume(struct platform_device *pdev)
1587 if (tmp_cache) { 1517 if (tmp_cache) {
1588 for (i = 2; i < ARRAY_SIZE(wm8903_reg_defaults); i++) 1518 for (i = 2; i < ARRAY_SIZE(wm8903_reg_defaults); i++)
1589 if (tmp_cache[i] != reg_cache[i]) 1519 if (tmp_cache[i] != reg_cache[i])
1590 wm8903_write(codec, i, tmp_cache[i]); 1520 snd_soc_write(codec, i, tmp_cache[i]);
1591 } else { 1521 } else {
1592 dev_err(&i2c->dev, "Failed to allocate temporary cache\n"); 1522 dev_err(&i2c->dev, "Failed to allocate temporary cache\n");
1593 } 1523 }
@@ -1618,9 +1548,6 @@ static __devinit int wm8903_i2c_probe(struct i2c_client *i2c,
1618 codec->dev = &i2c->dev; 1548 codec->dev = &i2c->dev;
1619 codec->name = "WM8903"; 1549 codec->name = "WM8903";
1620 codec->owner = THIS_MODULE; 1550 codec->owner = THIS_MODULE;
1621 codec->read = wm8903_read;
1622 codec->write = wm8903_write;
1623 codec->hw_write = (hw_write_t)i2c_master_send;
1624 codec->bias_level = SND_SOC_BIAS_OFF; 1551 codec->bias_level = SND_SOC_BIAS_OFF;
1625 codec->set_bias_level = wm8903_set_bias_level; 1552 codec->set_bias_level = wm8903_set_bias_level;
1626 codec->dai = &wm8903_dai; 1553 codec->dai = &wm8903_dai;
@@ -1628,18 +1555,25 @@ static __devinit int wm8903_i2c_probe(struct i2c_client *i2c,
1628 codec->reg_cache_size = ARRAY_SIZE(wm8903->reg_cache); 1555 codec->reg_cache_size = ARRAY_SIZE(wm8903->reg_cache);
1629 codec->reg_cache = &wm8903->reg_cache[0]; 1556 codec->reg_cache = &wm8903->reg_cache[0];
1630 codec->private_data = wm8903; 1557 codec->private_data = wm8903;
1558 codec->volatile_register = wm8903_volatile_register;
1631 1559
1632 i2c_set_clientdata(i2c, codec); 1560 i2c_set_clientdata(i2c, codec);
1633 codec->control_data = i2c; 1561 codec->control_data = i2c;
1634 1562
1635 val = wm8903_hw_read(codec, WM8903_SW_RESET_AND_ID); 1563 ret = snd_soc_codec_set_cache_io(codec, 8, 16, SND_SOC_I2C);
1564 if (ret != 0) {
1565 dev_err(&i2c->dev, "Failed to set cache I/O: %d\n", ret);
1566 goto err;
1567 }
1568
1569 val = snd_soc_read(codec, WM8903_SW_RESET_AND_ID);
1636 if (val != wm8903_reg_defaults[WM8903_SW_RESET_AND_ID]) { 1570 if (val != wm8903_reg_defaults[WM8903_SW_RESET_AND_ID]) {
1637 dev_err(&i2c->dev, 1571 dev_err(&i2c->dev,
1638 "Device with ID register %x is not a WM8903\n", val); 1572 "Device with ID register %x is not a WM8903\n", val);
1639 return -ENODEV; 1573 return -ENODEV;
1640 } 1574 }
1641 1575
1642 val = wm8903_read(codec, WM8903_REVISION_NUMBER); 1576 val = snd_soc_read(codec, WM8903_REVISION_NUMBER);
1643 dev_info(&i2c->dev, "WM8903 revision %d\n", 1577 dev_info(&i2c->dev, "WM8903 revision %d\n",
1644 val & WM8903_CHIP_REV_MASK); 1578 val & WM8903_CHIP_REV_MASK);
1645 1579
@@ -1649,35 +1583,35 @@ static __devinit int wm8903_i2c_probe(struct i2c_client *i2c,
1649 wm8903_set_bias_level(codec, SND_SOC_BIAS_STANDBY); 1583 wm8903_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
1650 1584
1651 /* Latch volume update bits */ 1585 /* Latch volume update bits */
1652 val = wm8903_read(codec, WM8903_ADC_DIGITAL_VOLUME_LEFT); 1586 val = snd_soc_read(codec, WM8903_ADC_DIGITAL_VOLUME_LEFT);
1653 val |= WM8903_ADCVU; 1587 val |= WM8903_ADCVU;
1654 wm8903_write(codec, WM8903_ADC_DIGITAL_VOLUME_LEFT, val); 1588 snd_soc_write(codec, WM8903_ADC_DIGITAL_VOLUME_LEFT, val);
1655 wm8903_write(codec, WM8903_ADC_DIGITAL_VOLUME_RIGHT, val); 1589 snd_soc_write(codec, WM8903_ADC_DIGITAL_VOLUME_RIGHT, val);
1656 1590
1657 val = wm8903_read(codec, WM8903_DAC_DIGITAL_VOLUME_LEFT); 1591 val = snd_soc_read(codec, WM8903_DAC_DIGITAL_VOLUME_LEFT);
1658 val |= WM8903_DACVU; 1592 val |= WM8903_DACVU;
1659 wm8903_write(codec, WM8903_DAC_DIGITAL_VOLUME_LEFT, val); 1593 snd_soc_write(codec, WM8903_DAC_DIGITAL_VOLUME_LEFT, val);
1660 wm8903_write(codec, WM8903_DAC_DIGITAL_VOLUME_RIGHT, val); 1594 snd_soc_write(codec, WM8903_DAC_DIGITAL_VOLUME_RIGHT, val);
1661 1595
1662 val = wm8903_read(codec, WM8903_ANALOGUE_OUT1_LEFT); 1596 val = snd_soc_read(codec, WM8903_ANALOGUE_OUT1_LEFT);
1663 val |= WM8903_HPOUTVU; 1597 val |= WM8903_HPOUTVU;
1664 wm8903_write(codec, WM8903_ANALOGUE_OUT1_LEFT, val); 1598 snd_soc_write(codec, WM8903_ANALOGUE_OUT1_LEFT, val);
1665 wm8903_write(codec, WM8903_ANALOGUE_OUT1_RIGHT, val); 1599 snd_soc_write(codec, WM8903_ANALOGUE_OUT1_RIGHT, val);
1666 1600
1667 val = wm8903_read(codec, WM8903_ANALOGUE_OUT2_LEFT); 1601 val = snd_soc_read(codec, WM8903_ANALOGUE_OUT2_LEFT);
1668 val |= WM8903_LINEOUTVU; 1602 val |= WM8903_LINEOUTVU;
1669 wm8903_write(codec, WM8903_ANALOGUE_OUT2_LEFT, val); 1603 snd_soc_write(codec, WM8903_ANALOGUE_OUT2_LEFT, val);
1670 wm8903_write(codec, WM8903_ANALOGUE_OUT2_RIGHT, val); 1604 snd_soc_write(codec, WM8903_ANALOGUE_OUT2_RIGHT, val);
1671 1605
1672 val = wm8903_read(codec, WM8903_ANALOGUE_OUT3_LEFT); 1606 val = snd_soc_read(codec, WM8903_ANALOGUE_OUT3_LEFT);
1673 val |= WM8903_SPKVU; 1607 val |= WM8903_SPKVU;
1674 wm8903_write(codec, WM8903_ANALOGUE_OUT3_LEFT, val); 1608 snd_soc_write(codec, WM8903_ANALOGUE_OUT3_LEFT, val);
1675 wm8903_write(codec, WM8903_ANALOGUE_OUT3_RIGHT, val); 1609 snd_soc_write(codec, WM8903_ANALOGUE_OUT3_RIGHT, val);
1676 1610
1677 /* Enable DAC soft mute by default */ 1611 /* Enable DAC soft mute by default */
1678 val = wm8903_read(codec, WM8903_DAC_DIGITAL_1); 1612 val = snd_soc_read(codec, WM8903_DAC_DIGITAL_1);
1679 val |= WM8903_DAC_MUTEMODE; 1613 val |= WM8903_DAC_MUTEMODE;
1680 wm8903_write(codec, WM8903_DAC_DIGITAL_1, val); 1614 snd_soc_write(codec, WM8903_DAC_DIGITAL_1, val);
1681 1615
1682 wm8903_dai.dev = &i2c->dev; 1616 wm8903_dai.dev = &i2c->dev;
1683 wm8903_codec = codec; 1617 wm8903_codec = codec;
@@ -1721,6 +1655,21 @@ static __devexit int wm8903_i2c_remove(struct i2c_client *client)
1721 return 0; 1655 return 0;
1722} 1656}
1723 1657
1658#ifdef CONFIG_PM
1659static int wm8903_i2c_suspend(struct i2c_client *client, pm_message_t msg)
1660{
1661 return snd_soc_suspend_device(&client->dev);
1662}
1663
1664static int wm8903_i2c_resume(struct i2c_client *client)
1665{
1666 return snd_soc_resume_device(&client->dev);
1667}
1668#else
1669#define wm8903_i2c_suspend NULL
1670#define wm8903_i2c_resume NULL
1671#endif
1672
1724/* i2c codec control layer */ 1673/* i2c codec control layer */
1725static const struct i2c_device_id wm8903_i2c_id[] = { 1674static const struct i2c_device_id wm8903_i2c_id[] = {
1726 { "wm8903", 0 }, 1675 { "wm8903", 0 },
@@ -1735,6 +1684,8 @@ static struct i2c_driver wm8903_i2c_driver = {
1735 }, 1684 },
1736 .probe = wm8903_i2c_probe, 1685 .probe = wm8903_i2c_probe,
1737 .remove = __devexit_p(wm8903_i2c_remove), 1686 .remove = __devexit_p(wm8903_i2c_remove),
1687 .suspend = wm8903_i2c_suspend,
1688 .resume = wm8903_i2c_resume,
1738 .id_table = wm8903_i2c_id, 1689 .id_table = wm8903_i2c_id,
1739}; 1690};
1740 1691
diff --git a/sound/soc/codecs/wm8940.c b/sound/soc/codecs/wm8940.c
index b8e17d6bc1f7..da97aae475a2 100644
--- a/sound/soc/codecs/wm8940.c
+++ b/sound/soc/codecs/wm8940.c
@@ -106,50 +106,6 @@ static u16 wm8940_reg_defaults[] = {
106 0x0000, /* Mono Mixer Control */ 106 0x0000, /* Mono Mixer Control */
107}; 107};
108 108
109static inline unsigned int wm8940_read_reg_cache(struct snd_soc_codec *codec,
110 unsigned int reg)
111{
112 u16 *cache = codec->reg_cache;
113
114 if (reg >= ARRAY_SIZE(wm8940_reg_defaults))
115 return -1;
116
117 return cache[reg];
118}
119
120static inline int wm8940_write_reg_cache(struct snd_soc_codec *codec,
121 u16 reg, unsigned int value)
122{
123 u16 *cache = codec->reg_cache;
124
125 if (reg >= ARRAY_SIZE(wm8940_reg_defaults))
126 return -1;
127
128 cache[reg] = value;
129
130 return 0;
131}
132
133static int wm8940_write(struct snd_soc_codec *codec, unsigned int reg,
134 unsigned int value)
135{
136 int ret;
137 u8 data[3] = { reg,
138 (value & 0xff00) >> 8,
139 (value & 0x00ff)
140 };
141
142 wm8940_write_reg_cache(codec, reg, value);
143
144 ret = codec->hw_write(codec->control_data, data, 3);
145
146 if (ret < 0)
147 return ret;
148 else if (ret != 3)
149 return -EIO;
150 return 0;
151}
152
153static const char *wm8940_companding[] = { "Off", "NC", "u-law", "A-law" }; 109static const char *wm8940_companding[] = { "Off", "NC", "u-law", "A-law" };
154static const struct soc_enum wm8940_adc_companding_enum 110static const struct soc_enum wm8940_adc_companding_enum
155= SOC_ENUM_SINGLE(WM8940_COMPANDINGCTL, 1, 4, wm8940_companding); 111= SOC_ENUM_SINGLE(WM8940_COMPANDINGCTL, 1, 4, wm8940_companding);
@@ -348,14 +304,14 @@ error_ret:
348 return ret; 304 return ret;
349} 305}
350 306
351#define wm8940_reset(c) wm8940_write(c, WM8940_SOFTRESET, 0); 307#define wm8940_reset(c) snd_soc_write(c, WM8940_SOFTRESET, 0);
352 308
353static int wm8940_set_dai_fmt(struct snd_soc_dai *codec_dai, 309static int wm8940_set_dai_fmt(struct snd_soc_dai *codec_dai,
354 unsigned int fmt) 310 unsigned int fmt)
355{ 311{
356 struct snd_soc_codec *codec = codec_dai->codec; 312 struct snd_soc_codec *codec = codec_dai->codec;
357 u16 iface = wm8940_read_reg_cache(codec, WM8940_IFACE) & 0xFE67; 313 u16 iface = snd_soc_read(codec, WM8940_IFACE) & 0xFE67;
358 u16 clk = wm8940_read_reg_cache(codec, WM8940_CLOCK) & 0x1fe; 314 u16 clk = snd_soc_read(codec, WM8940_CLOCK) & 0x1fe;
359 315
360 switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { 316 switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
361 case SND_SOC_DAIFMT_CBM_CFM: 317 case SND_SOC_DAIFMT_CBM_CFM:
@@ -366,7 +322,7 @@ static int wm8940_set_dai_fmt(struct snd_soc_dai *codec_dai,
366 default: 322 default:
367 return -EINVAL; 323 return -EINVAL;
368 } 324 }
369 wm8940_write(codec, WM8940_CLOCK, clk); 325 snd_soc_write(codec, WM8940_CLOCK, clk);
370 326
371 switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { 327 switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
372 case SND_SOC_DAIFMT_I2S: 328 case SND_SOC_DAIFMT_I2S:
@@ -399,7 +355,7 @@ static int wm8940_set_dai_fmt(struct snd_soc_dai *codec_dai,
399 break; 355 break;
400 } 356 }
401 357
402 wm8940_write(codec, WM8940_IFACE, iface); 358 snd_soc_write(codec, WM8940_IFACE, iface);
403 359
404 return 0; 360 return 0;
405} 361}
@@ -411,9 +367,9 @@ static int wm8940_i2s_hw_params(struct snd_pcm_substream *substream,
411 struct snd_soc_pcm_runtime *rtd = substream->private_data; 367 struct snd_soc_pcm_runtime *rtd = substream->private_data;
412 struct snd_soc_device *socdev = rtd->socdev; 368 struct snd_soc_device *socdev = rtd->socdev;
413 struct snd_soc_codec *codec = socdev->card->codec; 369 struct snd_soc_codec *codec = socdev->card->codec;
414 u16 iface = wm8940_read_reg_cache(codec, WM8940_IFACE) & 0xFD9F; 370 u16 iface = snd_soc_read(codec, WM8940_IFACE) & 0xFD9F;
415 u16 addcntrl = wm8940_read_reg_cache(codec, WM8940_ADDCNTRL) & 0xFFF1; 371 u16 addcntrl = snd_soc_read(codec, WM8940_ADDCNTRL) & 0xFFF1;
416 u16 companding = wm8940_read_reg_cache(codec, 372 u16 companding = snd_soc_read(codec,
417 WM8940_COMPANDINGCTL) & 0xFFDF; 373 WM8940_COMPANDINGCTL) & 0xFFDF;
418 int ret; 374 int ret;
419 375
@@ -442,7 +398,7 @@ static int wm8940_i2s_hw_params(struct snd_pcm_substream *substream,
442 case SNDRV_PCM_RATE_48000: 398 case SNDRV_PCM_RATE_48000:
443 break; 399 break;
444 } 400 }
445 ret = wm8940_write(codec, WM8940_ADDCNTRL, addcntrl); 401 ret = snd_soc_write(codec, WM8940_ADDCNTRL, addcntrl);
446 if (ret) 402 if (ret)
447 goto error_ret; 403 goto error_ret;
448 404
@@ -462,10 +418,10 @@ static int wm8940_i2s_hw_params(struct snd_pcm_substream *substream,
462 iface |= (3 << 5); 418 iface |= (3 << 5);
463 break; 419 break;
464 } 420 }
465 ret = wm8940_write(codec, WM8940_COMPANDINGCTL, companding); 421 ret = snd_soc_write(codec, WM8940_COMPANDINGCTL, companding);
466 if (ret) 422 if (ret)
467 goto error_ret; 423 goto error_ret;
468 ret = wm8940_write(codec, WM8940_IFACE, iface); 424 ret = snd_soc_write(codec, WM8940_IFACE, iface);
469 425
470error_ret: 426error_ret:
471 return ret; 427 return ret;
@@ -474,19 +430,19 @@ error_ret:
474static int wm8940_mute(struct snd_soc_dai *dai, int mute) 430static int wm8940_mute(struct snd_soc_dai *dai, int mute)
475{ 431{
476 struct snd_soc_codec *codec = dai->codec; 432 struct snd_soc_codec *codec = dai->codec;
477 u16 mute_reg = wm8940_read_reg_cache(codec, WM8940_DAC) & 0xffbf; 433 u16 mute_reg = snd_soc_read(codec, WM8940_DAC) & 0xffbf;
478 434
479 if (mute) 435 if (mute)
480 mute_reg |= 0x40; 436 mute_reg |= 0x40;
481 437
482 return wm8940_write(codec, WM8940_DAC, mute_reg); 438 return snd_soc_write(codec, WM8940_DAC, mute_reg);
483} 439}
484 440
485static int wm8940_set_bias_level(struct snd_soc_codec *codec, 441static int wm8940_set_bias_level(struct snd_soc_codec *codec,
486 enum snd_soc_bias_level level) 442 enum snd_soc_bias_level level)
487{ 443{
488 u16 val; 444 u16 val;
489 u16 pwr_reg = wm8940_read_reg_cache(codec, WM8940_POWER1) & 0x1F0; 445 u16 pwr_reg = snd_soc_read(codec, WM8940_POWER1) & 0x1F0;
490 int ret = 0; 446 int ret = 0;
491 447
492 switch (level) { 448 switch (level) {
@@ -494,26 +450,26 @@ static int wm8940_set_bias_level(struct snd_soc_codec *codec,
494 /* ensure bufioen and biasen */ 450 /* ensure bufioen and biasen */
495 pwr_reg |= (1 << 2) | (1 << 3); 451 pwr_reg |= (1 << 2) | (1 << 3);
496 /* Enable thermal shutdown */ 452 /* Enable thermal shutdown */
497 val = wm8940_read_reg_cache(codec, WM8940_OUTPUTCTL); 453 val = snd_soc_read(codec, WM8940_OUTPUTCTL);
498 ret = wm8940_write(codec, WM8940_OUTPUTCTL, val | 0x2); 454 ret = snd_soc_write(codec, WM8940_OUTPUTCTL, val | 0x2);
499 if (ret) 455 if (ret)
500 break; 456 break;
501 /* set vmid to 75k */ 457 /* set vmid to 75k */
502 ret = wm8940_write(codec, WM8940_POWER1, pwr_reg | 0x1); 458 ret = snd_soc_write(codec, WM8940_POWER1, pwr_reg | 0x1);
503 break; 459 break;
504 case SND_SOC_BIAS_PREPARE: 460 case SND_SOC_BIAS_PREPARE:
505 /* ensure bufioen and biasen */ 461 /* ensure bufioen and biasen */
506 pwr_reg |= (1 << 2) | (1 << 3); 462 pwr_reg |= (1 << 2) | (1 << 3);
507 ret = wm8940_write(codec, WM8940_POWER1, pwr_reg | 0x1); 463 ret = snd_soc_write(codec, WM8940_POWER1, pwr_reg | 0x1);
508 break; 464 break;
509 case SND_SOC_BIAS_STANDBY: 465 case SND_SOC_BIAS_STANDBY:
510 /* ensure bufioen and biasen */ 466 /* ensure bufioen and biasen */
511 pwr_reg |= (1 << 2) | (1 << 3); 467 pwr_reg |= (1 << 2) | (1 << 3);
512 /* set vmid to 300k for standby */ 468 /* set vmid to 300k for standby */
513 ret = wm8940_write(codec, WM8940_POWER1, pwr_reg | 0x2); 469 ret = snd_soc_write(codec, WM8940_POWER1, pwr_reg | 0x2);
514 break; 470 break;
515 case SND_SOC_BIAS_OFF: 471 case SND_SOC_BIAS_OFF:
516 ret = wm8940_write(codec, WM8940_POWER1, pwr_reg); 472 ret = snd_soc_write(codec, WM8940_POWER1, pwr_reg);
517 break; 473 break;
518 } 474 }
519 475
@@ -587,36 +543,36 @@ static int wm8940_set_dai_pll(struct snd_soc_dai *codec_dai,
587 u16 reg; 543 u16 reg;
588 544
589 /* Turn off PLL */ 545 /* Turn off PLL */
590 reg = wm8940_read_reg_cache(codec, WM8940_POWER1); 546 reg = snd_soc_read(codec, WM8940_POWER1);
591 wm8940_write(codec, WM8940_POWER1, reg & 0x1df); 547 snd_soc_write(codec, WM8940_POWER1, reg & 0x1df);
592 548
593 if (freq_in == 0 || freq_out == 0) { 549 if (freq_in == 0 || freq_out == 0) {
594 /* Clock CODEC directly from MCLK */ 550 /* Clock CODEC directly from MCLK */
595 reg = wm8940_read_reg_cache(codec, WM8940_CLOCK); 551 reg = snd_soc_read(codec, WM8940_CLOCK);
596 wm8940_write(codec, WM8940_CLOCK, reg & 0x0ff); 552 snd_soc_write(codec, WM8940_CLOCK, reg & 0x0ff);
597 /* Pll power down */ 553 /* Pll power down */
598 wm8940_write(codec, WM8940_PLLN, (1 << 7)); 554 snd_soc_write(codec, WM8940_PLLN, (1 << 7));
599 return 0; 555 return 0;
600 } 556 }
601 557
602 /* Pll is followed by a frequency divide by 4 */ 558 /* Pll is followed by a frequency divide by 4 */
603 pll_factors(freq_out*4, freq_in); 559 pll_factors(freq_out*4, freq_in);
604 if (pll_div.k) 560 if (pll_div.k)
605 wm8940_write(codec, WM8940_PLLN, 561 snd_soc_write(codec, WM8940_PLLN,
606 (pll_div.pre_scale << 4) | pll_div.n | (1 << 6)); 562 (pll_div.pre_scale << 4) | pll_div.n | (1 << 6));
607	else /* No fractional component */		563	else /* No fractional component */
608 wm8940_write(codec, WM8940_PLLN, 564 snd_soc_write(codec, WM8940_PLLN,
609 (pll_div.pre_scale << 4) | pll_div.n); 565 (pll_div.pre_scale << 4) | pll_div.n);
610 wm8940_write(codec, WM8940_PLLK1, pll_div.k >> 18); 566 snd_soc_write(codec, WM8940_PLLK1, pll_div.k >> 18);
611 wm8940_write(codec, WM8940_PLLK2, (pll_div.k >> 9) & 0x1ff); 567 snd_soc_write(codec, WM8940_PLLK2, (pll_div.k >> 9) & 0x1ff);
612 wm8940_write(codec, WM8940_PLLK3, pll_div.k & 0x1ff); 568 snd_soc_write(codec, WM8940_PLLK3, pll_div.k & 0x1ff);
613 /* Enable the PLL */ 569 /* Enable the PLL */
614 reg = wm8940_read_reg_cache(codec, WM8940_POWER1); 570 reg = snd_soc_read(codec, WM8940_POWER1);
615 wm8940_write(codec, WM8940_POWER1, reg | 0x020); 571 snd_soc_write(codec, WM8940_POWER1, reg | 0x020);
616 572
617 /* Run CODEC from PLL instead of MCLK */ 573 /* Run CODEC from PLL instead of MCLK */
618 reg = wm8940_read_reg_cache(codec, WM8940_CLOCK); 574 reg = snd_soc_read(codec, WM8940_CLOCK);
619 wm8940_write(codec, WM8940_CLOCK, reg | 0x100); 575 snd_soc_write(codec, WM8940_CLOCK, reg | 0x100);
620 576
621 return 0; 577 return 0;
622} 578}
@@ -648,16 +604,16 @@ static int wm8940_set_dai_clkdiv(struct snd_soc_dai *codec_dai,
648 604
649 switch (div_id) { 605 switch (div_id) {
650 case WM8940_BCLKDIV: 606 case WM8940_BCLKDIV:
651 reg = wm8940_read_reg_cache(codec, WM8940_CLOCK) & 0xFFEF3; 607 reg = snd_soc_read(codec, WM8940_CLOCK) & 0xFFEF3;
652 ret = wm8940_write(codec, WM8940_CLOCK, reg | (div << 2)); 608 ret = snd_soc_write(codec, WM8940_CLOCK, reg | (div << 2));
653 break; 609 break;
654 case WM8940_MCLKDIV: 610 case WM8940_MCLKDIV:
655 reg = wm8940_read_reg_cache(codec, WM8940_CLOCK) & 0xFF1F; 611 reg = snd_soc_read(codec, WM8940_CLOCK) & 0xFF1F;
656 ret = wm8940_write(codec, WM8940_CLOCK, reg | (div << 5)); 612 ret = snd_soc_write(codec, WM8940_CLOCK, reg | (div << 5));
657 break; 613 break;
658 case WM8940_OPCLKDIV: 614 case WM8940_OPCLKDIV:
659 reg = wm8940_read_reg_cache(codec, WM8940_ADDCNTRL) & 0xFFCF; 615 reg = snd_soc_read(codec, WM8940_ADDCNTRL) & 0xFFCF;
660 ret = wm8940_write(codec, WM8940_ADDCNTRL, reg | (div << 4)); 616 ret = snd_soc_write(codec, WM8940_ADDCNTRL, reg | (div << 4));
661 break; 617 break;
662 } 618 }
663 return ret; 619 return ret;
@@ -808,7 +764,8 @@ struct snd_soc_codec_device soc_codec_dev_wm8940 = {
808}; 764};
809EXPORT_SYMBOL_GPL(soc_codec_dev_wm8940); 765EXPORT_SYMBOL_GPL(soc_codec_dev_wm8940);
810 766
811static int wm8940_register(struct wm8940_priv *wm8940) 767static int wm8940_register(struct wm8940_priv *wm8940,
768 enum snd_soc_control_type control)
812{ 769{
813 struct wm8940_setup_data *pdata = wm8940->codec.dev->platform_data; 770 struct wm8940_setup_data *pdata = wm8940->codec.dev->platform_data;
814 struct snd_soc_codec *codec = &wm8940->codec; 771 struct snd_soc_codec *codec = &wm8940->codec;
@@ -825,8 +782,6 @@ static int wm8940_register(struct wm8940_priv *wm8940)
825 codec->private_data = wm8940; 782 codec->private_data = wm8940;
826 codec->name = "WM8940"; 783 codec->name = "WM8940";
827 codec->owner = THIS_MODULE; 784 codec->owner = THIS_MODULE;
828 codec->read = wm8940_read_reg_cache;
829 codec->write = wm8940_write;
830 codec->bias_level = SND_SOC_BIAS_OFF; 785 codec->bias_level = SND_SOC_BIAS_OFF;
831 codec->set_bias_level = wm8940_set_bias_level; 786 codec->set_bias_level = wm8940_set_bias_level;
832 codec->dai = &wm8940_dai; 787 codec->dai = &wm8940_dai;
@@ -834,6 +789,12 @@ static int wm8940_register(struct wm8940_priv *wm8940)
834 codec->reg_cache_size = ARRAY_SIZE(wm8940_reg_defaults); 789 codec->reg_cache_size = ARRAY_SIZE(wm8940_reg_defaults);
835 codec->reg_cache = &wm8940->reg_cache; 790 codec->reg_cache = &wm8940->reg_cache;
836 791
792 ret = snd_soc_codec_set_cache_io(codec, 8, 16, control);
793	if (ret < 0) {
794 dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
795 return ret;
796 }
797
837 memcpy(codec->reg_cache, wm8940_reg_defaults, 798 memcpy(codec->reg_cache, wm8940_reg_defaults,
838 sizeof(wm8940_reg_defaults)); 799 sizeof(wm8940_reg_defaults));
839 800
@@ -847,15 +808,15 @@ static int wm8940_register(struct wm8940_priv *wm8940)
847 808
848 wm8940_set_bias_level(codec, SND_SOC_BIAS_STANDBY); 809 wm8940_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
849 810
850 ret = wm8940_write(codec, WM8940_POWER1, 0x180); 811 ret = snd_soc_write(codec, WM8940_POWER1, 0x180);
851 if (ret < 0) 812 if (ret < 0)
852 return ret; 813 return ret;
853 814
854 if (!pdata) 815 if (!pdata)
855 dev_warn(codec->dev, "No platform data supplied\n"); 816 dev_warn(codec->dev, "No platform data supplied\n");
856 else { 817 else {
857 reg = wm8940_read_reg_cache(codec, WM8940_OUTPUTCTL); 818 reg = snd_soc_read(codec, WM8940_OUTPUTCTL);
858 ret = wm8940_write(codec, WM8940_OUTPUTCTL, reg | pdata->vroi); 819 ret = snd_soc_write(codec, WM8940_OUTPUTCTL, reg | pdata->vroi);
859 if (ret < 0) 820 if (ret < 0)
860 return ret; 821 return ret;
861 } 822 }
@@ -904,7 +865,7 @@ static int wm8940_i2c_probe(struct i2c_client *i2c,
904 codec->control_data = i2c; 865 codec->control_data = i2c;
905 codec->dev = &i2c->dev; 866 codec->dev = &i2c->dev;
906 867
907 return wm8940_register(wm8940); 868 return wm8940_register(wm8940, SND_SOC_I2C);
908} 869}
909 870
910static int __devexit wm8940_i2c_remove(struct i2c_client *client) 871static int __devexit wm8940_i2c_remove(struct i2c_client *client)
@@ -916,6 +877,21 @@ static int __devexit wm8940_i2c_remove(struct i2c_client *client)
916 return 0; 877 return 0;
917} 878}
918 879
880#ifdef CONFIG_PM
881static int wm8940_i2c_suspend(struct i2c_client *client, pm_message_t msg)
882{
883 return snd_soc_suspend_device(&client->dev);
884}
885
886static int wm8940_i2c_resume(struct i2c_client *client)
887{
888 return snd_soc_resume_device(&client->dev);
889}
890#else
891#define wm8940_i2c_suspend NULL
892#define wm8940_i2c_resume NULL
893#endif
894
919static const struct i2c_device_id wm8940_i2c_id[] = { 895static const struct i2c_device_id wm8940_i2c_id[] = {
920 { "wm8940", 0 }, 896 { "wm8940", 0 },
921 { } 897 { }
@@ -929,6 +905,8 @@ static struct i2c_driver wm8940_i2c_driver = {
929 }, 905 },
930 .probe = wm8940_i2c_probe, 906 .probe = wm8940_i2c_probe,
931 .remove = __devexit_p(wm8940_i2c_remove), 907 .remove = __devexit_p(wm8940_i2c_remove),
908 .suspend = wm8940_i2c_suspend,
909 .resume = wm8940_i2c_resume,
932 .id_table = wm8940_i2c_id, 910 .id_table = wm8940_i2c_id,
933}; 911};
934 912
diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
index e224d8add170..f59703be61c8 100644
--- a/sound/soc/codecs/wm8960.c
+++ b/sound/soc/codecs/wm8960.c
@@ -69,61 +69,7 @@ struct wm8960_priv {
69 struct snd_soc_codec codec; 69 struct snd_soc_codec codec;
70}; 70};
71 71
72/* 72#define wm8960_reset(c) snd_soc_write(c, WM8960_RESET, 0)
73 * read wm8960 register cache
74 */
75static inline unsigned int wm8960_read_reg_cache(struct snd_soc_codec *codec,
76 unsigned int reg)
77{
78 u16 *cache = codec->reg_cache;
79 if (reg == WM8960_RESET)
80 return 0;
81 if (reg >= WM8960_CACHEREGNUM)
82 return -1;
83 return cache[reg];
84}
85
86/*
87 * write wm8960 register cache
88 */
89static inline void wm8960_write_reg_cache(struct snd_soc_codec *codec,
90 u16 reg, unsigned int value)
91{
92 u16 *cache = codec->reg_cache;
93 if (reg >= WM8960_CACHEREGNUM)
94 return;
95 cache[reg] = value;
96}
97
98static inline unsigned int wm8960_read(struct snd_soc_codec *codec,
99 unsigned int reg)
100{
101 return wm8960_read_reg_cache(codec, reg);
102}
103
104/*
105 * write to the WM8960 register space
106 */
107static int wm8960_write(struct snd_soc_codec *codec, unsigned int reg,
108 unsigned int value)
109{
110 u8 data[2];
111
112 /* data is
113 * D15..D9 WM8960 register offset
114 * D8...D0 register data
115 */
116 data[0] = (reg << 1) | ((value >> 8) & 0x0001);
117 data[1] = value & 0x00ff;
118
119 wm8960_write_reg_cache(codec, reg, value);
120 if (codec->hw_write(codec->control_data, data, 2) == 2)
121 return 0;
122 else
123 return -EIO;
124}
125
126#define wm8960_reset(c) wm8960_write(c, WM8960_RESET, 0)
127 73
128/* enumerated controls */ 74/* enumerated controls */
129static const char *wm8960_deemph[] = {"None", "32Khz", "44.1Khz", "48Khz"}; 75static const char *wm8960_deemph[] = {"None", "32Khz", "44.1Khz", "48Khz"};
@@ -420,7 +366,7 @@ static int wm8960_set_dai_fmt(struct snd_soc_dai *codec_dai,
420 } 366 }
421 367
422 /* set iface */ 368 /* set iface */
423 wm8960_write(codec, WM8960_IFACE1, iface); 369 snd_soc_write(codec, WM8960_IFACE1, iface);
424 return 0; 370 return 0;
425} 371}
426 372
@@ -431,7 +377,7 @@ static int wm8960_hw_params(struct snd_pcm_substream *substream,
431 struct snd_soc_pcm_runtime *rtd = substream->private_data; 377 struct snd_soc_pcm_runtime *rtd = substream->private_data;
432 struct snd_soc_device *socdev = rtd->socdev; 378 struct snd_soc_device *socdev = rtd->socdev;
433 struct snd_soc_codec *codec = socdev->card->codec; 379 struct snd_soc_codec *codec = socdev->card->codec;
434 u16 iface = wm8960_read(codec, WM8960_IFACE1) & 0xfff3; 380 u16 iface = snd_soc_read(codec, WM8960_IFACE1) & 0xfff3;
435 381
436 /* bit size */ 382 /* bit size */
437 switch (params_format(params)) { 383 switch (params_format(params)) {
@@ -446,19 +392,19 @@ static int wm8960_hw_params(struct snd_pcm_substream *substream,
446 } 392 }
447 393
448 /* set iface */ 394 /* set iface */
449 wm8960_write(codec, WM8960_IFACE1, iface); 395 snd_soc_write(codec, WM8960_IFACE1, iface);
450 return 0; 396 return 0;
451} 397}
452 398
453static int wm8960_mute(struct snd_soc_dai *dai, int mute) 399static int wm8960_mute(struct snd_soc_dai *dai, int mute)
454{ 400{
455 struct snd_soc_codec *codec = dai->codec; 401 struct snd_soc_codec *codec = dai->codec;
456 u16 mute_reg = wm8960_read(codec, WM8960_DACCTL1) & 0xfff7; 402 u16 mute_reg = snd_soc_read(codec, WM8960_DACCTL1) & 0xfff7;
457 403
458 if (mute) 404 if (mute)
459 wm8960_write(codec, WM8960_DACCTL1, mute_reg | 0x8); 405 snd_soc_write(codec, WM8960_DACCTL1, mute_reg | 0x8);
460 else 406 else
461 wm8960_write(codec, WM8960_DACCTL1, mute_reg); 407 snd_soc_write(codec, WM8960_DACCTL1, mute_reg);
462 return 0; 408 return 0;
463} 409}
464 410
@@ -474,16 +420,16 @@ static int wm8960_set_bias_level(struct snd_soc_codec *codec,
474 420
475 case SND_SOC_BIAS_PREPARE: 421 case SND_SOC_BIAS_PREPARE:
476 /* Set VMID to 2x50k */ 422 /* Set VMID to 2x50k */
477 reg = wm8960_read(codec, WM8960_POWER1); 423 reg = snd_soc_read(codec, WM8960_POWER1);
478 reg &= ~0x180; 424 reg &= ~0x180;
479 reg |= 0x80; 425 reg |= 0x80;
480 wm8960_write(codec, WM8960_POWER1, reg); 426 snd_soc_write(codec, WM8960_POWER1, reg);
481 break; 427 break;
482 428
483 case SND_SOC_BIAS_STANDBY: 429 case SND_SOC_BIAS_STANDBY:
484 if (codec->bias_level == SND_SOC_BIAS_OFF) { 430 if (codec->bias_level == SND_SOC_BIAS_OFF) {
485 /* Enable anti-pop features */ 431 /* Enable anti-pop features */
486 wm8960_write(codec, WM8960_APOP1, 432 snd_soc_write(codec, WM8960_APOP1,
487 WM8960_POBCTRL | WM8960_SOFT_ST | 433 WM8960_POBCTRL | WM8960_SOFT_ST |
488 WM8960_BUFDCOPEN | WM8960_BUFIOEN); 434 WM8960_BUFDCOPEN | WM8960_BUFIOEN);
489 435
@@ -491,43 +437,43 @@ static int wm8960_set_bias_level(struct snd_soc_codec *codec,
491 reg = WM8960_DISOP; 437 reg = WM8960_DISOP;
492 if (pdata) 438 if (pdata)
493 reg |= pdata->dres << 4; 439 reg |= pdata->dres << 4;
494 wm8960_write(codec, WM8960_APOP2, reg); 440 snd_soc_write(codec, WM8960_APOP2, reg);
495 441
496 msleep(400); 442 msleep(400);
497 443
498 wm8960_write(codec, WM8960_APOP2, 0); 444 snd_soc_write(codec, WM8960_APOP2, 0);
499 445
500 /* Enable & ramp VMID at 2x50k */ 446 /* Enable & ramp VMID at 2x50k */
501 reg = wm8960_read(codec, WM8960_POWER1); 447 reg = snd_soc_read(codec, WM8960_POWER1);
502 reg |= 0x80; 448 reg |= 0x80;
503 wm8960_write(codec, WM8960_POWER1, reg); 449 snd_soc_write(codec, WM8960_POWER1, reg);
504 msleep(100); 450 msleep(100);
505 451
506 /* Enable VREF */ 452 /* Enable VREF */
507 wm8960_write(codec, WM8960_POWER1, reg | WM8960_VREF); 453 snd_soc_write(codec, WM8960_POWER1, reg | WM8960_VREF);
508 454
509 /* Disable anti-pop features */ 455 /* Disable anti-pop features */
510 wm8960_write(codec, WM8960_APOP1, WM8960_BUFIOEN); 456 snd_soc_write(codec, WM8960_APOP1, WM8960_BUFIOEN);
511 } 457 }
512 458
513 /* Set VMID to 2x250k */ 459 /* Set VMID to 2x250k */
514 reg = wm8960_read(codec, WM8960_POWER1); 460 reg = snd_soc_read(codec, WM8960_POWER1);
515 reg &= ~0x180; 461 reg &= ~0x180;
516 reg |= 0x100; 462 reg |= 0x100;
517 wm8960_write(codec, WM8960_POWER1, reg); 463 snd_soc_write(codec, WM8960_POWER1, reg);
518 break; 464 break;
519 465
520 case SND_SOC_BIAS_OFF: 466 case SND_SOC_BIAS_OFF:
521 /* Enable anti-pop features */ 467 /* Enable anti-pop features */
522 wm8960_write(codec, WM8960_APOP1, 468 snd_soc_write(codec, WM8960_APOP1,
523 WM8960_POBCTRL | WM8960_SOFT_ST | 469 WM8960_POBCTRL | WM8960_SOFT_ST |
524 WM8960_BUFDCOPEN | WM8960_BUFIOEN); 470 WM8960_BUFDCOPEN | WM8960_BUFIOEN);
525 471
526 /* Disable VMID and VREF, let them discharge */ 472 /* Disable VMID and VREF, let them discharge */
527 wm8960_write(codec, WM8960_POWER1, 0); 473 snd_soc_write(codec, WM8960_POWER1, 0);
528 msleep(600); 474 msleep(600);
529 475
530 wm8960_write(codec, WM8960_APOP1, 0); 476 snd_soc_write(codec, WM8960_APOP1, 0);
531 break; 477 break;
532 } 478 }
533 479
@@ -610,33 +556,33 @@ static int wm8960_set_dai_pll(struct snd_soc_dai *codec_dai,
610 556
611 /* Disable the PLL: even if we are changing the frequency the 557 /* Disable the PLL: even if we are changing the frequency the
612 * PLL needs to be disabled while we do so. */ 558 * PLL needs to be disabled while we do so. */
613 wm8960_write(codec, WM8960_CLOCK1, 559 snd_soc_write(codec, WM8960_CLOCK1,
614 wm8960_read(codec, WM8960_CLOCK1) & ~1); 560 snd_soc_read(codec, WM8960_CLOCK1) & ~1);
615 wm8960_write(codec, WM8960_POWER2, 561 snd_soc_write(codec, WM8960_POWER2,
616 wm8960_read(codec, WM8960_POWER2) & ~1); 562 snd_soc_read(codec, WM8960_POWER2) & ~1);
617 563
618 if (!freq_in || !freq_out) 564 if (!freq_in || !freq_out)
619 return 0; 565 return 0;
620 566
621 reg = wm8960_read(codec, WM8960_PLL1) & ~0x3f; 567 reg = snd_soc_read(codec, WM8960_PLL1) & ~0x3f;
622 reg |= pll_div.pre_div << 4; 568 reg |= pll_div.pre_div << 4;
623 reg |= pll_div.n; 569 reg |= pll_div.n;
624 570
625 if (pll_div.k) { 571 if (pll_div.k) {
626 reg |= 0x20; 572 reg |= 0x20;
627 573
628 wm8960_write(codec, WM8960_PLL2, (pll_div.k >> 18) & 0x3f); 574 snd_soc_write(codec, WM8960_PLL2, (pll_div.k >> 18) & 0x3f);
629 wm8960_write(codec, WM8960_PLL3, (pll_div.k >> 9) & 0x1ff); 575 snd_soc_write(codec, WM8960_PLL3, (pll_div.k >> 9) & 0x1ff);
630 wm8960_write(codec, WM8960_PLL4, pll_div.k & 0x1ff); 576 snd_soc_write(codec, WM8960_PLL4, pll_div.k & 0x1ff);
631 } 577 }
632 wm8960_write(codec, WM8960_PLL1, reg); 578 snd_soc_write(codec, WM8960_PLL1, reg);
633 579
634 /* Turn it on */ 580 /* Turn it on */
635 wm8960_write(codec, WM8960_POWER2, 581 snd_soc_write(codec, WM8960_POWER2,
636 wm8960_read(codec, WM8960_POWER2) | 1); 582 snd_soc_read(codec, WM8960_POWER2) | 1);
637 msleep(250); 583 msleep(250);
638 wm8960_write(codec, WM8960_CLOCK1, 584 snd_soc_write(codec, WM8960_CLOCK1,
639 wm8960_read(codec, WM8960_CLOCK1) | 1); 585 snd_soc_read(codec, WM8960_CLOCK1) | 1);
640 586
641 return 0; 587 return 0;
642} 588}
@@ -649,28 +595,28 @@ static int wm8960_set_dai_clkdiv(struct snd_soc_dai *codec_dai,
649 595
650 switch (div_id) { 596 switch (div_id) {
651 case WM8960_SYSCLKSEL: 597 case WM8960_SYSCLKSEL:
652 reg = wm8960_read(codec, WM8960_CLOCK1) & 0x1fe; 598 reg = snd_soc_read(codec, WM8960_CLOCK1) & 0x1fe;
653 wm8960_write(codec, WM8960_CLOCK1, reg | div); 599 snd_soc_write(codec, WM8960_CLOCK1, reg | div);
654 break; 600 break;
655 case WM8960_SYSCLKDIV: 601 case WM8960_SYSCLKDIV:
656 reg = wm8960_read(codec, WM8960_CLOCK1) & 0x1f9; 602 reg = snd_soc_read(codec, WM8960_CLOCK1) & 0x1f9;
657 wm8960_write(codec, WM8960_CLOCK1, reg | div); 603 snd_soc_write(codec, WM8960_CLOCK1, reg | div);
658 break; 604 break;
659 case WM8960_DACDIV: 605 case WM8960_DACDIV:
660 reg = wm8960_read(codec, WM8960_CLOCK1) & 0x1c7; 606 reg = snd_soc_read(codec, WM8960_CLOCK1) & 0x1c7;
661 wm8960_write(codec, WM8960_CLOCK1, reg | div); 607 snd_soc_write(codec, WM8960_CLOCK1, reg | div);
662 break; 608 break;
663 case WM8960_OPCLKDIV: 609 case WM8960_OPCLKDIV:
664 reg = wm8960_read(codec, WM8960_PLL1) & 0x03f; 610 reg = snd_soc_read(codec, WM8960_PLL1) & 0x03f;
665 wm8960_write(codec, WM8960_PLL1, reg | div); 611 snd_soc_write(codec, WM8960_PLL1, reg | div);
666 break; 612 break;
667 case WM8960_DCLKDIV: 613 case WM8960_DCLKDIV:
668 reg = wm8960_read(codec, WM8960_CLOCK2) & 0x03f; 614 reg = snd_soc_read(codec, WM8960_CLOCK2) & 0x03f;
669 wm8960_write(codec, WM8960_CLOCK2, reg | div); 615 snd_soc_write(codec, WM8960_CLOCK2, reg | div);
670 break; 616 break;
671 case WM8960_TOCLKSEL: 617 case WM8960_TOCLKSEL:
672 reg = wm8960_read(codec, WM8960_ADDCTL1) & 0x1fd; 618 reg = snd_soc_read(codec, WM8960_ADDCTL1) & 0x1fd;
673 wm8960_write(codec, WM8960_ADDCTL1, reg | div); 619 snd_soc_write(codec, WM8960_ADDCTL1, reg | div);
674 break; 620 break;
675 default: 621 default:
676 return -EINVAL; 622 return -EINVAL;
@@ -801,7 +747,8 @@ struct snd_soc_codec_device soc_codec_dev_wm8960 = {
801}; 747};
802EXPORT_SYMBOL_GPL(soc_codec_dev_wm8960); 748EXPORT_SYMBOL_GPL(soc_codec_dev_wm8960);
803 749
804static int wm8960_register(struct wm8960_priv *wm8960) 750static int wm8960_register(struct wm8960_priv *wm8960,
751 enum snd_soc_control_type control)
805{ 752{
806 struct wm8960_data *pdata = wm8960->codec.dev->platform_data; 753 struct wm8960_data *pdata = wm8960->codec.dev->platform_data;
807 struct snd_soc_codec *codec = &wm8960->codec; 754 struct snd_soc_codec *codec = &wm8960->codec;
@@ -810,7 +757,8 @@ static int wm8960_register(struct wm8960_priv *wm8960)
810 757
811 if (wm8960_codec) { 758 if (wm8960_codec) {
812 dev_err(codec->dev, "Another WM8960 is registered\n"); 759 dev_err(codec->dev, "Another WM8960 is registered\n");
813 return -EINVAL; 760 ret = -EINVAL;
761 goto err;
814 } 762 }
815 763
816 if (!pdata) { 764 if (!pdata) {
@@ -829,8 +777,6 @@ static int wm8960_register(struct wm8960_priv *wm8960)
829 codec->private_data = wm8960; 777 codec->private_data = wm8960;
830 codec->name = "WM8960"; 778 codec->name = "WM8960";
831 codec->owner = THIS_MODULE; 779 codec->owner = THIS_MODULE;
832 codec->read = wm8960_read_reg_cache;
833 codec->write = wm8960_write;
834 codec->bias_level = SND_SOC_BIAS_OFF; 780 codec->bias_level = SND_SOC_BIAS_OFF;
835 codec->set_bias_level = wm8960_set_bias_level; 781 codec->set_bias_level = wm8960_set_bias_level;
836 codec->dai = &wm8960_dai; 782 codec->dai = &wm8960_dai;
@@ -840,10 +786,16 @@ static int wm8960_register(struct wm8960_priv *wm8960)
840 786
841 memcpy(codec->reg_cache, wm8960_reg, sizeof(wm8960_reg)); 787 memcpy(codec->reg_cache, wm8960_reg, sizeof(wm8960_reg));
842 788
789 ret = snd_soc_codec_set_cache_io(codec, 7, 9, control);
790 if (ret < 0) {
791 dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
792 goto err;
793 }
794
843 ret = wm8960_reset(codec); 795 ret = wm8960_reset(codec);
844 if (ret < 0) { 796 if (ret < 0) {
845 dev_err(codec->dev, "Failed to issue reset\n"); 797 dev_err(codec->dev, "Failed to issue reset\n");
846 return ret; 798 goto err;
847 } 799 }
848 800
849 wm8960_dai.dev = codec->dev; 801 wm8960_dai.dev = codec->dev;
@@ -851,43 +803,48 @@ static int wm8960_register(struct wm8960_priv *wm8960)
851 wm8960_set_bias_level(codec, SND_SOC_BIAS_STANDBY); 803 wm8960_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
852 804
853 /* Latch the update bits */ 805 /* Latch the update bits */
854 reg = wm8960_read(codec, WM8960_LINVOL); 806 reg = snd_soc_read(codec, WM8960_LINVOL);
855 wm8960_write(codec, WM8960_LINVOL, reg | 0x100); 807 snd_soc_write(codec, WM8960_LINVOL, reg | 0x100);
856 reg = wm8960_read(codec, WM8960_RINVOL); 808 reg = snd_soc_read(codec, WM8960_RINVOL);
857 wm8960_write(codec, WM8960_RINVOL, reg | 0x100); 809 snd_soc_write(codec, WM8960_RINVOL, reg | 0x100);
858 reg = wm8960_read(codec, WM8960_LADC); 810 reg = snd_soc_read(codec, WM8960_LADC);
859 wm8960_write(codec, WM8960_LADC, reg | 0x100); 811 snd_soc_write(codec, WM8960_LADC, reg | 0x100);
860 reg = wm8960_read(codec, WM8960_RADC); 812 reg = snd_soc_read(codec, WM8960_RADC);
861 wm8960_write(codec, WM8960_RADC, reg | 0x100); 813 snd_soc_write(codec, WM8960_RADC, reg | 0x100);
862 reg = wm8960_read(codec, WM8960_LDAC); 814 reg = snd_soc_read(codec, WM8960_LDAC);
863 wm8960_write(codec, WM8960_LDAC, reg | 0x100); 815 snd_soc_write(codec, WM8960_LDAC, reg | 0x100);
864 reg = wm8960_read(codec, WM8960_RDAC); 816 reg = snd_soc_read(codec, WM8960_RDAC);
865 wm8960_write(codec, WM8960_RDAC, reg | 0x100); 817 snd_soc_write(codec, WM8960_RDAC, reg | 0x100);
866 reg = wm8960_read(codec, WM8960_LOUT1); 818 reg = snd_soc_read(codec, WM8960_LOUT1);
867 wm8960_write(codec, WM8960_LOUT1, reg | 0x100); 819 snd_soc_write(codec, WM8960_LOUT1, reg | 0x100);
868 reg = wm8960_read(codec, WM8960_ROUT1); 820 reg = snd_soc_read(codec, WM8960_ROUT1);
869 wm8960_write(codec, WM8960_ROUT1, reg | 0x100); 821 snd_soc_write(codec, WM8960_ROUT1, reg | 0x100);
870 reg = wm8960_read(codec, WM8960_LOUT2); 822 reg = snd_soc_read(codec, WM8960_LOUT2);
871 wm8960_write(codec, WM8960_LOUT2, reg | 0x100); 823 snd_soc_write(codec, WM8960_LOUT2, reg | 0x100);
872 reg = wm8960_read(codec, WM8960_ROUT2); 824 reg = snd_soc_read(codec, WM8960_ROUT2);
873 wm8960_write(codec, WM8960_ROUT2, reg | 0x100); 825 snd_soc_write(codec, WM8960_ROUT2, reg | 0x100);
874 826
875 wm8960_codec = codec; 827 wm8960_codec = codec;
876 828
877 ret = snd_soc_register_codec(codec); 829 ret = snd_soc_register_codec(codec);
878 if (ret != 0) { 830 if (ret != 0) {
879 dev_err(codec->dev, "Failed to register codec: %d\n", ret); 831 dev_err(codec->dev, "Failed to register codec: %d\n", ret);
880 return ret; 832 goto err;
881 } 833 }
882 834
883 ret = snd_soc_register_dai(&wm8960_dai); 835 ret = snd_soc_register_dai(&wm8960_dai);
884 if (ret != 0) { 836 if (ret != 0) {
885 dev_err(codec->dev, "Failed to register DAI: %d\n", ret); 837 dev_err(codec->dev, "Failed to register DAI: %d\n", ret);
886 snd_soc_unregister_codec(codec); 838 goto err_codec;
887 return ret;
888 } 839 }
889 840
890 return 0; 841 return 0;
842
843err_codec:
844 snd_soc_unregister_codec(codec);
845err:
846 kfree(wm8960);
847 return ret;
891} 848}
892 849
893static void wm8960_unregister(struct wm8960_priv *wm8960) 850static void wm8960_unregister(struct wm8960_priv *wm8960)
@@ -910,14 +867,13 @@ static __devinit int wm8960_i2c_probe(struct i2c_client *i2c,
910 return -ENOMEM; 867 return -ENOMEM;
911 868
912 codec = &wm8960->codec; 869 codec = &wm8960->codec;
913 codec->hw_write = (hw_write_t)i2c_master_send;
914 870
915 i2c_set_clientdata(i2c, wm8960); 871 i2c_set_clientdata(i2c, wm8960);
916 codec->control_data = i2c; 872 codec->control_data = i2c;
917 873
918 codec->dev = &i2c->dev; 874 codec->dev = &i2c->dev;
919 875
920 return wm8960_register(wm8960); 876 return wm8960_register(wm8960, SND_SOC_I2C);
921} 877}
922 878
923static __devexit int wm8960_i2c_remove(struct i2c_client *client) 879static __devexit int wm8960_i2c_remove(struct i2c_client *client)
@@ -927,6 +883,21 @@ static __devexit int wm8960_i2c_remove(struct i2c_client *client)
927 return 0; 883 return 0;
928} 884}
929 885
886#ifdef CONFIG_PM
887static int wm8960_i2c_suspend(struct i2c_client *client, pm_message_t msg)
888{
889 return snd_soc_suspend_device(&client->dev);
890}
891
892static int wm8960_i2c_resume(struct i2c_client *client)
893{
894 return snd_soc_resume_device(&client->dev);
895}
896#else
897#define wm8960_i2c_suspend NULL
898#define wm8960_i2c_resume NULL
899#endif
900
930static const struct i2c_device_id wm8960_i2c_id[] = { 901static const struct i2c_device_id wm8960_i2c_id[] = {
931 { "wm8960", 0 }, 902 { "wm8960", 0 },
932 { } 903 { }
@@ -940,6 +911,8 @@ static struct i2c_driver wm8960_i2c_driver = {
940 }, 911 },
941 .probe = wm8960_i2c_probe, 912 .probe = wm8960_i2c_probe,
942 .remove = __devexit_p(wm8960_i2c_remove), 913 .remove = __devexit_p(wm8960_i2c_remove),
914 .suspend = wm8960_i2c_suspend,
915 .resume = wm8960_i2c_resume,
943 .id_table = wm8960_i2c_id, 916 .id_table = wm8960_i2c_id,
944}; 917};
945 918
diff --git a/sound/soc/codecs/wm8961.c b/sound/soc/codecs/wm8961.c
new file mode 100644
index 000000000000..503032085899
--- /dev/null
+++ b/sound/soc/codecs/wm8961.c
@@ -0,0 +1,1265 @@
1/*
2 * wm8961.c -- WM8961 ALSA SoC Audio driver
3 *
4 * Author: Mark Brown
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * Currently unimplemented features:
11 * - ALC
12 */
13
14#include <linux/module.h>
15#include <linux/moduleparam.h>
16#include <linux/init.h>
17#include <linux/delay.h>
18#include <linux/pm.h>
19#include <linux/i2c.h>
20#include <linux/platform_device.h>
21#include <sound/core.h>
22#include <sound/pcm.h>
23#include <sound/pcm_params.h>
24#include <sound/soc.h>
25#include <sound/soc-dapm.h>
26#include <sound/initval.h>
27#include <sound/tlv.h>
28
29#include "wm8961.h"
30
31#define WM8961_MAX_REGISTER 0xFC
32
33static u16 wm8961_reg_defaults[] = {
34 0x009F, /* R0 - Left Input volume */
35 0x009F, /* R1 - Right Input volume */
36 0x0000, /* R2 - LOUT1 volume */
37 0x0000, /* R3 - ROUT1 volume */
38 0x0020, /* R4 - Clocking1 */
39 0x0008, /* R5 - ADC & DAC Control 1 */
40 0x0000, /* R6 - ADC & DAC Control 2 */
41 0x000A, /* R7 - Audio Interface 0 */
42 0x01F4, /* R8 - Clocking2 */
43 0x0000, /* R9 - Audio Interface 1 */
44 0x00FF, /* R10 - Left DAC volume */
45 0x00FF, /* R11 - Right DAC volume */
46 0x0000, /* R12 */
47 0x0000, /* R13 */
48 0x0040, /* R14 - Audio Interface 2 */
49 0x0000, /* R15 - Software Reset */
50 0x0000, /* R16 */
51 0x007B, /* R17 - ALC1 */
52 0x0000, /* R18 - ALC2 */
53 0x0032, /* R19 - ALC3 */
54 0x0000, /* R20 - Noise Gate */
55 0x00C0, /* R21 - Left ADC volume */
56 0x00C0, /* R22 - Right ADC volume */
57 0x0120, /* R23 - Additional control(1) */
58 0x0000, /* R24 - Additional control(2) */
59 0x0000, /* R25 - Pwr Mgmt (1) */
60 0x0000, /* R26 - Pwr Mgmt (2) */
61 0x0000, /* R27 - Additional Control (3) */
62 0x0000, /* R28 - Anti-pop */
63 0x0000, /* R29 */
64 0x005F, /* R30 - Clocking 3 */
65 0x0000, /* R31 */
66 0x0000, /* R32 - ADCL signal path */
67 0x0000, /* R33 - ADCR signal path */
68 0x0000, /* R34 */
69 0x0000, /* R35 */
70 0x0000, /* R36 */
71 0x0000, /* R37 */
72 0x0000, /* R38 */
73 0x0000, /* R39 */
74 0x0000, /* R40 - LOUT2 volume */
75 0x0000, /* R41 - ROUT2 volume */
76 0x0000, /* R42 */
77 0x0000, /* R43 */
78 0x0000, /* R44 */
79 0x0000, /* R45 */
80 0x0000, /* R46 */
81 0x0000, /* R47 - Pwr Mgmt (3) */
82 0x0023, /* R48 - Additional Control (4) */
83 0x0000, /* R49 - Class D Control 1 */
84 0x0000, /* R50 */
85 0x0003, /* R51 - Class D Control 2 */
86 0x0000, /* R52 */
87 0x0000, /* R53 */
88 0x0000, /* R54 */
89 0x0000, /* R55 */
90 0x0106, /* R56 - Clocking 4 */
91 0x0000, /* R57 - DSP Sidetone 0 */
92 0x0000, /* R58 - DSP Sidetone 1 */
93 0x0000, /* R59 */
94 0x0000, /* R60 - DC Servo 0 */
95 0x0000, /* R61 - DC Servo 1 */
96 0x0000, /* R62 */
97 0x015E, /* R63 - DC Servo 3 */
98 0x0010, /* R64 */
99 0x0010, /* R65 - DC Servo 5 */
100 0x0000, /* R66 */
101 0x0001, /* R67 */
102 0x0003, /* R68 - Analogue PGA Bias */
103 0x0000, /* R69 - Analogue HP 0 */
104 0x0060, /* R70 */
105 0x01FB, /* R71 - Analogue HP 2 */
106 0x0000, /* R72 - Charge Pump 1 */
107 0x0065, /* R73 */
108 0x005F, /* R74 */
109 0x0059, /* R75 */
110 0x006B, /* R76 */
111 0x0038, /* R77 */
112 0x000C, /* R78 */
113 0x000A, /* R79 */
114 0x006B, /* R80 */
115 0x0000, /* R81 */
116 0x0000, /* R82 - Charge Pump B */
117 0x0087, /* R83 */
118 0x0000, /* R84 */
119 0x005C, /* R85 */
120 0x0000, /* R86 */
121 0x0000, /* R87 - Write Sequencer 1 */
122 0x0000, /* R88 - Write Sequencer 2 */
123 0x0000, /* R89 - Write Sequencer 3 */
124 0x0000, /* R90 - Write Sequencer 4 */
125 0x0000, /* R91 - Write Sequencer 5 */
126 0x0000, /* R92 - Write Sequencer 6 */
127 0x0000, /* R93 - Write Sequencer 7 */
128 0x0000, /* R94 */
129 0x0000, /* R95 */
130 0x0000, /* R96 */
131 0x0000, /* R97 */
132 0x0000, /* R98 */
133 0x0000, /* R99 */
134 0x0000, /* R100 */
135 0x0000, /* R101 */
136 0x0000, /* R102 */
137 0x0000, /* R103 */
138 0x0000, /* R104 */
139 0x0000, /* R105 */
140 0x0000, /* R106 */
141 0x0000, /* R107 */
142 0x0000, /* R108 */
143 0x0000, /* R109 */
144 0x0000, /* R110 */
145 0x0000, /* R111 */
146 0x0000, /* R112 */
147 0x0000, /* R113 */
148 0x0000, /* R114 */
149 0x0000, /* R115 */
150 0x0000, /* R116 */
151 0x0000, /* R117 */
152 0x0000, /* R118 */
153 0x0000, /* R119 */
154 0x0000, /* R120 */
155 0x0000, /* R121 */
156 0x0000, /* R122 */
157 0x0000, /* R123 */
158 0x0000, /* R124 */
159 0x0000, /* R125 */
160 0x0000, /* R126 */
161 0x0000, /* R127 */
162 0x0000, /* R128 */
163 0x0000, /* R129 */
164 0x0000, /* R130 */
165 0x0000, /* R131 */
166 0x0000, /* R132 */
167 0x0000, /* R133 */
168 0x0000, /* R134 */
169 0x0000, /* R135 */
170 0x0000, /* R136 */
171 0x0000, /* R137 */
172 0x0000, /* R138 */
173 0x0000, /* R139 */
174 0x0000, /* R140 */
175 0x0000, /* R141 */
176 0x0000, /* R142 */
177 0x0000, /* R143 */
178 0x0000, /* R144 */
179 0x0000, /* R145 */
180 0x0000, /* R146 */
181 0x0000, /* R147 */
182 0x0000, /* R148 */
183 0x0000, /* R149 */
184 0x0000, /* R150 */
185 0x0000, /* R151 */
186 0x0000, /* R152 */
187 0x0000, /* R153 */
188 0x0000, /* R154 */
189 0x0000, /* R155 */
190 0x0000, /* R156 */
191 0x0000, /* R157 */
192 0x0000, /* R158 */
193 0x0000, /* R159 */
194 0x0000, /* R160 */
195 0x0000, /* R161 */
196 0x0000, /* R162 */
197 0x0000, /* R163 */
198 0x0000, /* R164 */
199 0x0000, /* R165 */
200 0x0000, /* R166 */
201 0x0000, /* R167 */
202 0x0000, /* R168 */
203 0x0000, /* R169 */
204 0x0000, /* R170 */
205 0x0000, /* R171 */
206 0x0000, /* R172 */
207 0x0000, /* R173 */
208 0x0000, /* R174 */
209 0x0000, /* R175 */
210 0x0000, /* R176 */
211 0x0000, /* R177 */
212 0x0000, /* R178 */
213 0x0000, /* R179 */
214 0x0000, /* R180 */
215 0x0000, /* R181 */
216 0x0000, /* R182 */
217 0x0000, /* R183 */
218 0x0000, /* R184 */
219 0x0000, /* R185 */
220 0x0000, /* R186 */
221 0x0000, /* R187 */
222 0x0000, /* R188 */
223 0x0000, /* R189 */
224 0x0000, /* R190 */
225 0x0000, /* R191 */
226 0x0000, /* R192 */
227 0x0000, /* R193 */
228 0x0000, /* R194 */
229 0x0000, /* R195 */
230 0x0030, /* R196 */
231 0x0006, /* R197 */
232 0x0000, /* R198 */
233 0x0060, /* R199 */
234 0x0000, /* R200 */
235 0x003F, /* R201 */
236 0x0000, /* R202 */
237 0x0000, /* R203 */
238 0x0000, /* R204 */
239 0x0001, /* R205 */
240 0x0000, /* R206 */
241 0x0181, /* R207 */
242 0x0005, /* R208 */
243 0x0008, /* R209 */
244 0x0008, /* R210 */
245 0x0000, /* R211 */
246 0x013B, /* R212 */
247 0x0000, /* R213 */
248 0x0000, /* R214 */
249 0x0000, /* R215 */
250 0x0000, /* R216 */
251 0x0070, /* R217 */
252 0x0000, /* R218 */
253 0x0000, /* R219 */
254 0x0000, /* R220 */
255 0x0000, /* R221 */
256 0x0000, /* R222 */
257 0x0003, /* R223 */
258 0x0000, /* R224 */
259 0x0000, /* R225 */
260 0x0001, /* R226 */
261 0x0008, /* R227 */
262 0x0000, /* R228 */
263 0x0000, /* R229 */
264 0x0000, /* R230 */
265 0x0000, /* R231 */
266 0x0004, /* R232 */
267 0x0000, /* R233 */
268 0x0000, /* R234 */
269 0x0000, /* R235 */
270 0x0000, /* R236 */
271 0x0000, /* R237 */
272 0x0080, /* R238 */
273 0x0000, /* R239 */
274 0x0000, /* R240 */
275 0x0000, /* R241 */
276 0x0000, /* R242 */
277 0x0000, /* R243 */
278 0x0000, /* R244 */
279 0x0052, /* R245 */
280 0x0110, /* R246 */
281 0x0040, /* R247 */
282 0x0000, /* R248 */
283 0x0030, /* R249 */
284 0x0000, /* R250 */
285 0x0000, /* R251 */
286 0x0001, /* R252 - General test 1 */
287};
288
289struct wm8961_priv {
290 struct snd_soc_codec codec;
291 int sysclk;
292	u16 reg_cache[WM8961_MAX_REGISTER + 1];
293};
294
295static int wm8961_volatile_register(unsigned int reg)
296{
297 switch (reg) {
298 case WM8961_SOFTWARE_RESET:
299 case WM8961_WRITE_SEQUENCER_7:
300 case WM8961_DC_SERVO_1:
301 return 1;
302
303 default:
304 return 0;
305 }
306}
307
308static int wm8961_reset(struct snd_soc_codec *codec)
309{
310 return snd_soc_write(codec, WM8961_SOFTWARE_RESET, 0);
311}
312
313/*
314 * The headphone output supports special anti-pop sequences giving
315 * silent power up and power down.
316 */
317static int wm8961_hp_event(struct snd_soc_dapm_widget *w,
318 struct snd_kcontrol *kcontrol, int event)
319{
320 struct snd_soc_codec *codec = w->codec;
321 u16 hp_reg = snd_soc_read(codec, WM8961_ANALOGUE_HP_0);
322 u16 cp_reg = snd_soc_read(codec, WM8961_CHARGE_PUMP_1);
323 u16 pwr_reg = snd_soc_read(codec, WM8961_PWR_MGMT_2);
324 u16 dcs_reg = snd_soc_read(codec, WM8961_DC_SERVO_1);
325 int timeout = 500;
326
327 if (event & SND_SOC_DAPM_POST_PMU) {
328 /* Make sure the output is shorted */
329 hp_reg &= ~(WM8961_HPR_RMV_SHORT | WM8961_HPL_RMV_SHORT);
330 snd_soc_write(codec, WM8961_ANALOGUE_HP_0, hp_reg);
331
332 /* Enable the charge pump */
333 cp_reg |= WM8961_CP_ENA;
334 snd_soc_write(codec, WM8961_CHARGE_PUMP_1, cp_reg);
335 mdelay(5);
336
337 /* Enable the PGA */
338 pwr_reg |= WM8961_LOUT1_PGA | WM8961_ROUT1_PGA;
339 snd_soc_write(codec, WM8961_PWR_MGMT_2, pwr_reg);
340
341 /* Enable the amplifier */
342 hp_reg |= WM8961_HPR_ENA | WM8961_HPL_ENA;
343 snd_soc_write(codec, WM8961_ANALOGUE_HP_0, hp_reg);
344
345 /* Second stage enable */
346 hp_reg |= WM8961_HPR_ENA_DLY | WM8961_HPL_ENA_DLY;
347 snd_soc_write(codec, WM8961_ANALOGUE_HP_0, hp_reg);
348
349 /* Enable the DC servo & trigger startup */
350 dcs_reg |=
351 WM8961_DCS_ENA_CHAN_HPR | WM8961_DCS_TRIG_STARTUP_HPR |
352 WM8961_DCS_ENA_CHAN_HPL | WM8961_DCS_TRIG_STARTUP_HPL;
353 dev_dbg(codec->dev, "Enabling DC servo\n");
354
355 snd_soc_write(codec, WM8961_DC_SERVO_1, dcs_reg);
356 do {
357 msleep(1);
358 dcs_reg = snd_soc_read(codec, WM8961_DC_SERVO_1);
359 } while (--timeout &&
360 dcs_reg & (WM8961_DCS_TRIG_STARTUP_HPR |
361 WM8961_DCS_TRIG_STARTUP_HPL));
362 if (dcs_reg & (WM8961_DCS_TRIG_STARTUP_HPR |
363 WM8961_DCS_TRIG_STARTUP_HPL))
364 dev_err(codec->dev, "DC servo timed out\n");
365 else
366 dev_dbg(codec->dev, "DC servo startup complete\n");
367
368 /* Enable the output stage */
369 hp_reg |= WM8961_HPR_ENA_OUTP | WM8961_HPL_ENA_OUTP;
370 snd_soc_write(codec, WM8961_ANALOGUE_HP_0, hp_reg);
371
372 /* Remove the short on the output stage */
373 hp_reg |= WM8961_HPR_RMV_SHORT | WM8961_HPL_RMV_SHORT;
374 snd_soc_write(codec, WM8961_ANALOGUE_HP_0, hp_reg);
375 }
376
377 if (event & SND_SOC_DAPM_PRE_PMD) {
378 /* Short the output */
379 hp_reg &= ~(WM8961_HPR_RMV_SHORT | WM8961_HPL_RMV_SHORT);
380 snd_soc_write(codec, WM8961_ANALOGUE_HP_0, hp_reg);
381
382 /* Disable the output stage */
383 hp_reg &= ~(WM8961_HPR_ENA_OUTP | WM8961_HPL_ENA_OUTP);
384 snd_soc_write(codec, WM8961_ANALOGUE_HP_0, hp_reg);
385
386 /* Disable DC offset cancellation */
387 dcs_reg &= ~(WM8961_DCS_ENA_CHAN_HPR |
388 WM8961_DCS_ENA_CHAN_HPL);
389 snd_soc_write(codec, WM8961_DC_SERVO_1, dcs_reg);
390
391 /* Finish up */
392 hp_reg &= ~(WM8961_HPR_ENA_DLY | WM8961_HPR_ENA |
393 WM8961_HPL_ENA_DLY | WM8961_HPL_ENA);
394 snd_soc_write(codec, WM8961_ANALOGUE_HP_0, hp_reg);
395
396 /* Disable the PGA */
397 pwr_reg &= ~(WM8961_LOUT1_PGA | WM8961_ROUT1_PGA);
398 snd_soc_write(codec, WM8961_PWR_MGMT_2, pwr_reg);
399
400 /* Disable the charge pump */
401 dev_dbg(codec->dev, "Disabling charge pump\n");
402 snd_soc_write(codec, WM8961_CHARGE_PUMP_1,
403 cp_reg & ~WM8961_CP_ENA);
404 }
405
406 return 0;
407}
408
409static int wm8961_spk_event(struct snd_soc_dapm_widget *w,
410 struct snd_kcontrol *kcontrol, int event)
411{
412 struct snd_soc_codec *codec = w->codec;
413 u16 pwr_reg = snd_soc_read(codec, WM8961_PWR_MGMT_2);
414 u16 spk_reg = snd_soc_read(codec, WM8961_CLASS_D_CONTROL_1);
415
416 if (event & SND_SOC_DAPM_POST_PMU) {
417 /* Enable the PGA */
418 pwr_reg |= WM8961_SPKL_PGA | WM8961_SPKR_PGA;
419 snd_soc_write(codec, WM8961_PWR_MGMT_2, pwr_reg);
420
421 /* Enable the amplifier */
422 spk_reg |= WM8961_SPKL_ENA | WM8961_SPKR_ENA;
423 snd_soc_write(codec, WM8961_CLASS_D_CONTROL_1, spk_reg);
424 }
425
426 if (event & SND_SOC_DAPM_PRE_PMD) {
427		/* Disable the amplifier */
428 spk_reg &= ~(WM8961_SPKL_ENA | WM8961_SPKR_ENA);
429 snd_soc_write(codec, WM8961_CLASS_D_CONTROL_1, spk_reg);
430
431		/* Disable the PGA */
432 pwr_reg &= ~(WM8961_SPKL_PGA | WM8961_SPKR_PGA);
433 snd_soc_write(codec, WM8961_PWR_MGMT_2, pwr_reg);
434 }
435
436 return 0;
437}
438
439static const char *adc_hpf_text[] = {
440 "Hi-fi", "Voice 1", "Voice 2", "Voice 3",
441};
442
443static const struct soc_enum adc_hpf =
444 SOC_ENUM_SINGLE(WM8961_ADC_DAC_CONTROL_2, 7, 4, adc_hpf_text);
445
446static const char *dac_deemph_text[] = {
447 "None", "32kHz", "44.1kHz", "48kHz",
448};
449
450static const struct soc_enum dac_deemph =
451 SOC_ENUM_SINGLE(WM8961_ADC_DAC_CONTROL_1, 1, 4, dac_deemph_text);
452
453static const DECLARE_TLV_DB_SCALE(out_tlv, -12100, 100, 1);
454static const DECLARE_TLV_DB_SCALE(hp_sec_tlv, -700, 100, 0);
455static const DECLARE_TLV_DB_SCALE(adc_tlv, -7200, 75, 1);
456static const DECLARE_TLV_DB_SCALE(sidetone_tlv, -3600, 300, 0);
457static unsigned int boost_tlv[] = {
458 TLV_DB_RANGE_HEAD(4),
459 0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
460 1, 1, TLV_DB_SCALE_ITEM(13, 0, 0),
461 2, 2, TLV_DB_SCALE_ITEM(20, 0, 0),
462 3, 3, TLV_DB_SCALE_ITEM(29, 0, 0),
463};
464static const DECLARE_TLV_DB_SCALE(pga_tlv, -2325, 75, 0);
465
466static const struct snd_kcontrol_new wm8961_snd_controls[] = {
467SOC_DOUBLE_R_TLV("Headphone Volume", WM8961_LOUT1_VOLUME, WM8961_ROUT1_VOLUME,
468 0, 127, 0, out_tlv),
469SOC_DOUBLE_TLV("Headphone Secondary Volume", WM8961_ANALOGUE_HP_2,
470 6, 3, 7, 0, hp_sec_tlv),
471SOC_DOUBLE_R("Headphone ZC Switch", WM8961_LOUT1_VOLUME, WM8961_ROUT1_VOLUME,
472 7, 1, 0),
473
474SOC_DOUBLE_R_TLV("Speaker Volume", WM8961_LOUT2_VOLUME, WM8961_ROUT2_VOLUME,
475 0, 127, 0, out_tlv),
476SOC_DOUBLE_R("Speaker ZC Switch", WM8961_LOUT2_VOLUME, WM8961_ROUT2_VOLUME,
477 7, 1, 0),
478SOC_SINGLE("Speaker AC Gain", WM8961_CLASS_D_CONTROL_2, 0, 7, 0),
479
480SOC_SINGLE("DAC x128 OSR Switch", WM8961_ADC_DAC_CONTROL_2, 0, 1, 0),
481SOC_ENUM("DAC Deemphasis", dac_deemph),
482SOC_SINGLE("DAC Soft Mute Switch", WM8961_ADC_DAC_CONTROL_2, 3, 1, 0),
483
484SOC_DOUBLE_R_TLV("Sidetone Volume", WM8961_DSP_SIDETONE_0,
485 WM8961_DSP_SIDETONE_1, 4, 12, 0, sidetone_tlv),
486
487SOC_SINGLE("ADC High Pass Filter Switch", WM8961_ADC_DAC_CONTROL_1, 0, 1, 0),
488SOC_ENUM("ADC High Pass Filter Mode", adc_hpf),
489
490SOC_DOUBLE_R_TLV("Capture Volume",
491 WM8961_LEFT_ADC_VOLUME, WM8961_RIGHT_ADC_VOLUME,
492 1, 119, 0, adc_tlv),
493SOC_DOUBLE_R_TLV("Capture Boost Volume",
494 WM8961_ADCL_SIGNAL_PATH, WM8961_ADCR_SIGNAL_PATH,
495 4, 3, 0, boost_tlv),
496SOC_DOUBLE_R_TLV("Capture PGA Volume",
497 WM8961_LEFT_INPUT_VOLUME, WM8961_RIGHT_INPUT_VOLUME,
498 0, 62, 0, pga_tlv),
499SOC_DOUBLE_R("Capture PGA ZC Switch",
500 WM8961_LEFT_INPUT_VOLUME, WM8961_RIGHT_INPUT_VOLUME,
501 6, 1, 1),
502SOC_DOUBLE_R("Capture PGA Switch",
503 WM8961_LEFT_INPUT_VOLUME, WM8961_RIGHT_INPUT_VOLUME,
504 7, 1, 1),
505};
506
507static const char *sidetone_text[] = {
508 "None", "Left", "Right"
509};
510
511static const struct soc_enum dacl_sidetone =
512 SOC_ENUM_SINGLE(WM8961_DSP_SIDETONE_0, 2, 3, sidetone_text);
513
514static const struct soc_enum dacr_sidetone =
515 SOC_ENUM_SINGLE(WM8961_DSP_SIDETONE_1, 2, 3, sidetone_text);
516
517static const struct snd_kcontrol_new dacl_mux =
518 SOC_DAPM_ENUM("DACL Sidetone", dacl_sidetone);
519
520static const struct snd_kcontrol_new dacr_mux =
521 SOC_DAPM_ENUM("DACR Sidetone", dacr_sidetone);
522
523static const struct snd_soc_dapm_widget wm8961_dapm_widgets[] = {
524SND_SOC_DAPM_INPUT("LINPUT"),
525SND_SOC_DAPM_INPUT("RINPUT"),
526
527SND_SOC_DAPM_SUPPLY("CLK_DSP", WM8961_CLOCKING2, 4, 0, NULL, 0),
528
529SND_SOC_DAPM_PGA("Left Input", WM8961_PWR_MGMT_1, 5, 0, NULL, 0),
530SND_SOC_DAPM_PGA("Right Input", WM8961_PWR_MGMT_1, 4, 0, NULL, 0),
531
532SND_SOC_DAPM_ADC("ADCL", "HiFi Capture", WM8961_PWR_MGMT_1, 3, 0),
533SND_SOC_DAPM_ADC("ADCR", "HiFi Capture", WM8961_PWR_MGMT_1, 2, 0),
534
535SND_SOC_DAPM_MICBIAS("MICBIAS", WM8961_PWR_MGMT_1, 1, 0),
536
537SND_SOC_DAPM_MUX("DACL Sidetone", SND_SOC_NOPM, 0, 0, &dacl_mux),
538SND_SOC_DAPM_MUX("DACR Sidetone", SND_SOC_NOPM, 0, 0, &dacr_mux),
539
540SND_SOC_DAPM_DAC("DACL", "HiFi Playback", WM8961_PWR_MGMT_2, 8, 0),
541SND_SOC_DAPM_DAC("DACR", "HiFi Playback", WM8961_PWR_MGMT_2, 7, 0),
542
543/* Handle as a mono path for DCS */
544SND_SOC_DAPM_PGA_E("Headphone Output", SND_SOC_NOPM,
545 4, 0, NULL, 0, wm8961_hp_event,
546 SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
547SND_SOC_DAPM_PGA_E("Speaker Output", SND_SOC_NOPM,
548 4, 0, NULL, 0, wm8961_spk_event,
549 SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
550
551SND_SOC_DAPM_OUTPUT("HP_L"),
552SND_SOC_DAPM_OUTPUT("HP_R"),
553SND_SOC_DAPM_OUTPUT("SPK_LN"),
554SND_SOC_DAPM_OUTPUT("SPK_LP"),
555SND_SOC_DAPM_OUTPUT("SPK_RN"),
556SND_SOC_DAPM_OUTPUT("SPK_RP"),
557};
558
559
560static const struct snd_soc_dapm_route audio_paths[] = {
561 { "DACL", NULL, "CLK_DSP" },
562 { "DACL", NULL, "DACL Sidetone" },
563 { "DACR", NULL, "CLK_DSP" },
564 { "DACR", NULL, "DACR Sidetone" },
565
566 { "DACL Sidetone", "Left", "ADCL" },
567 { "DACL Sidetone", "Right", "ADCR" },
568
569 { "DACR Sidetone", "Left", "ADCL" },
570 { "DACR Sidetone", "Right", "ADCR" },
571
572 { "HP_L", NULL, "Headphone Output" },
573 { "HP_R", NULL, "Headphone Output" },
574 { "Headphone Output", NULL, "DACL" },
575 { "Headphone Output", NULL, "DACR" },
576
577 { "SPK_LN", NULL, "Speaker Output" },
578 { "SPK_LP", NULL, "Speaker Output" },
579 { "SPK_RN", NULL, "Speaker Output" },
580 { "SPK_RP", NULL, "Speaker Output" },
581
582 { "Speaker Output", NULL, "DACL" },
583 { "Speaker Output", NULL, "DACR" },
584
585 { "ADCL", NULL, "Left Input" },
586 { "ADCL", NULL, "CLK_DSP" },
587 { "ADCR", NULL, "Right Input" },
588 { "ADCR", NULL, "CLK_DSP" },
589
590 { "Left Input", NULL, "LINPUT" },
591 { "Right Input", NULL, "RINPUT" },
592
593};
594
595/* Values for CLK_SYS_RATE */
596static struct {
597 int ratio;
598 u16 val;
599} wm8961_clk_sys_ratio[] = {
600 { 64, 0 },
601 { 128, 1 },
602 { 192, 2 },
603 { 256, 3 },
604 { 384, 4 },
605 { 512, 5 },
606 { 768, 6 },
607 { 1024, 7 },
608 { 1408, 8 },
609 { 1536, 9 },
610};
611
612/* Values for SAMPLE_RATE */
613static struct {
614 int rate;
615 u16 val;
616} wm8961_srate[] = {
617 { 48000, 0 },
618 { 44100, 0 },
619 { 32000, 1 },
620 { 22050, 2 },
621 { 24000, 2 },
622 { 16000, 3 },
623 { 11250, 4 },
624 { 12000, 4 },
625 { 8000, 5 },
626};
627
628static int wm8961_hw_params(struct snd_pcm_substream *substream,
629 struct snd_pcm_hw_params *params,
630 struct snd_soc_dai *dai)
631{
632 struct snd_soc_codec *codec = dai->codec;
633 struct wm8961_priv *wm8961 = codec->private_data;
634 int i, best, target, fs;
635 u16 reg;
636
637 fs = params_rate(params);
638
639 if (!wm8961->sysclk) {
640 dev_err(codec->dev, "MCLK has not been specified\n");
641 return -EINVAL;
642 }
643
644 /* Find the closest sample rate for the filters */
645 best = 0;
646 for (i = 0; i < ARRAY_SIZE(wm8961_srate); i++) {
647 if (abs(wm8961_srate[i].rate - fs) <
648 abs(wm8961_srate[best].rate - fs))
649 best = i;
650 }
651 reg = snd_soc_read(codec, WM8961_ADDITIONAL_CONTROL_3);
652 reg &= ~WM8961_SAMPLE_RATE_MASK;
653 reg |= wm8961_srate[best].val;
654 snd_soc_write(codec, WM8961_ADDITIONAL_CONTROL_3, reg);
655 dev_dbg(codec->dev, "Selected SRATE %dHz for %dHz\n",
656 wm8961_srate[best].rate, fs);
657
658 /* Select a CLK_SYS/fs ratio equal to or higher than required */
659 target = wm8961->sysclk / fs;
660
661 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK && target < 64) {
662 dev_err(codec->dev,
663 "SYSCLK must be at least 64*fs for DAC\n");
664 return -EINVAL;
665 }
666 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE && target < 256) {
667 dev_err(codec->dev,
668 "SYSCLK must be at least 256*fs for ADC\n");
669 return -EINVAL;
670 }
671
672 for (i = 0; i < ARRAY_SIZE(wm8961_clk_sys_ratio); i++) {
673 if (wm8961_clk_sys_ratio[i].ratio >= target)
674 break;
675 }
676 if (i == ARRAY_SIZE(wm8961_clk_sys_ratio)) {
677 dev_err(codec->dev, "Unable to generate CLK_SYS_RATE\n");
678 return -EINVAL;
679 }
680 dev_dbg(codec->dev, "Selected CLK_SYS_RATE of %d for %d/%d=%d\n",
681 wm8961_clk_sys_ratio[i].ratio, wm8961->sysclk, fs,
682 wm8961->sysclk / fs);
683
684 reg = snd_soc_read(codec, WM8961_CLOCKING_4);
685 reg &= ~WM8961_CLK_SYS_RATE_MASK;
686 reg |= wm8961_clk_sys_ratio[i].val << WM8961_CLK_SYS_RATE_SHIFT;
687 snd_soc_write(codec, WM8961_CLOCKING_4, reg);
688
689 reg = snd_soc_read(codec, WM8961_AUDIO_INTERFACE_0);
690 reg &= ~WM8961_WL_MASK;
691 switch (params_format(params)) {
692 case SNDRV_PCM_FORMAT_S16_LE:
693 break;
694 case SNDRV_PCM_FORMAT_S20_3LE:
695 reg |= 1 << WM8961_WL_SHIFT;
696 break;
697 case SNDRV_PCM_FORMAT_S24_LE:
698 reg |= 2 << WM8961_WL_SHIFT;
699 break;
700 case SNDRV_PCM_FORMAT_S32_LE:
701 reg |= 3 << WM8961_WL_SHIFT;
702 break;
703 default:
704 return -EINVAL;
705 }
706 snd_soc_write(codec, WM8961_AUDIO_INTERFACE_0, reg);
707
708 /* Sloping stop-band filter is recommended for <= 24kHz */
709 reg = snd_soc_read(codec, WM8961_ADC_DAC_CONTROL_2);
710 if (fs <= 24000)
711 reg |= WM8961_DACSLOPE;
712 else
713		reg &= ~WM8961_DACSLOPE;
714 snd_soc_write(codec, WM8961_ADC_DAC_CONTROL_2, reg);
715
716 return 0;
717}
718
719static int wm8961_set_sysclk(struct snd_soc_dai *dai, int clk_id,
720 unsigned int freq,
721 int dir)
722{
723 struct snd_soc_codec *codec = dai->codec;
724 struct wm8961_priv *wm8961 = codec->private_data;
725 u16 reg = snd_soc_read(codec, WM8961_CLOCKING1);
726
727 if (freq > 33000000) {
728 dev_err(codec->dev, "MCLK must be <33MHz\n");
729 return -EINVAL;
730 }
731
732 if (freq > 16500000) {
733 dev_dbg(codec->dev, "Using MCLK/2 for %dHz MCLK\n", freq);
734 reg |= WM8961_MCLKDIV;
735 freq /= 2;
736 } else {
737 dev_dbg(codec->dev, "Using MCLK/1 for %dHz MCLK\n", freq);
738		reg &= ~WM8961_MCLKDIV;
739 }
740
741 snd_soc_write(codec, WM8961_CLOCKING1, reg);
742
743 wm8961->sysclk = freq;
744
745 return 0;
746}
747
748static int wm8961_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
749{
750 struct snd_soc_codec *codec = dai->codec;
751 u16 aif = snd_soc_read(codec, WM8961_AUDIO_INTERFACE_0);
752
753 aif &= ~(WM8961_BCLKINV | WM8961_LRP |
754 WM8961_MS | WM8961_FORMAT_MASK);
755
756 switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
757 case SND_SOC_DAIFMT_CBM_CFM:
758 aif |= WM8961_MS;
759 break;
760 case SND_SOC_DAIFMT_CBS_CFS:
761 break;
762 default:
763 return -EINVAL;
764 }
765
766 switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
767 case SND_SOC_DAIFMT_RIGHT_J:
768 break;
769
770 case SND_SOC_DAIFMT_LEFT_J:
771 aif |= 1;
772 break;
773
774 case SND_SOC_DAIFMT_I2S:
775 aif |= 2;
776 break;
777
778 case SND_SOC_DAIFMT_DSP_B:
779 aif |= WM8961_LRP;
780 case SND_SOC_DAIFMT_DSP_A:
781 aif |= 3;
782 switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
783 case SND_SOC_DAIFMT_NB_NF:
784 case SND_SOC_DAIFMT_IB_NF:
785 break;
786 default:
787 return -EINVAL;
788 }
789 break;
790
791 default:
792 return -EINVAL;
793 }
794
795 switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
796 case SND_SOC_DAIFMT_NB_NF:
797 break;
798 case SND_SOC_DAIFMT_NB_IF:
799 aif |= WM8961_LRP;
800 break;
801 case SND_SOC_DAIFMT_IB_NF:
802 aif |= WM8961_BCLKINV;
803 break;
804 case SND_SOC_DAIFMT_IB_IF:
805 aif |= WM8961_BCLKINV | WM8961_LRP;
806 break;
807 default:
808 return -EINVAL;
809 }
810
811 return snd_soc_write(codec, WM8961_AUDIO_INTERFACE_0, aif);
812}
813
814static int wm8961_set_tristate(struct snd_soc_dai *dai, int tristate)
815{
816 struct snd_soc_codec *codec = dai->codec;
817 u16 reg = snd_soc_read(codec, WM8961_ADDITIONAL_CONTROL_2);
818
819 if (tristate)
820 reg |= WM8961_TRIS;
821 else
822 reg &= ~WM8961_TRIS;
823
824 return snd_soc_write(codec, WM8961_ADDITIONAL_CONTROL_2, reg);
825}
826
827static int wm8961_digital_mute(struct snd_soc_dai *dai, int mute)
828{
829 struct snd_soc_codec *codec = dai->codec;
830 u16 reg = snd_soc_read(codec, WM8961_ADC_DAC_CONTROL_1);
831
832 if (mute)
833 reg |= WM8961_DACMU;
834 else
835 reg &= ~WM8961_DACMU;
836
837 msleep(17);
838
839 return snd_soc_write(codec, WM8961_ADC_DAC_CONTROL_1, reg);
840}
841
842static int wm8961_set_clkdiv(struct snd_soc_dai *dai, int div_id, int div)
843{
844 struct snd_soc_codec *codec = dai->codec;
845 u16 reg;
846
847 switch (div_id) {
848 case WM8961_BCLK:
849 reg = snd_soc_read(codec, WM8961_CLOCKING2);
850 reg &= ~WM8961_BCLKDIV_MASK;
851 reg |= div;
852 snd_soc_write(codec, WM8961_CLOCKING2, reg);
853 break;
854
855 case WM8961_LRCLK:
856 reg = snd_soc_read(codec, WM8961_AUDIO_INTERFACE_2);
857 reg &= ~WM8961_LRCLK_RATE_MASK;
858 reg |= div;
859 snd_soc_write(codec, WM8961_AUDIO_INTERFACE_2, reg);
860 break;
861
862 default:
863 return -EINVAL;
864 }
865
866 return 0;
867}
868
869static int wm8961_set_bias_level(struct snd_soc_codec *codec,
870 enum snd_soc_bias_level level)
871{
872 u16 reg;
873
874 /* This is all slightly unusual since we have no bypass paths
875 * and the output amplifier structure means we can just slam
876 * the biases straight up rather than having to ramp them
877 * slowly.
878 */
879 switch (level) {
880 case SND_SOC_BIAS_ON:
881 break;
882
883 case SND_SOC_BIAS_PREPARE:
884 if (codec->bias_level == SND_SOC_BIAS_STANDBY) {
885 /* Enable bias generation */
886 reg = snd_soc_read(codec, WM8961_ANTI_POP);
887 reg |= WM8961_BUFIOEN | WM8961_BUFDCOPEN;
888 snd_soc_write(codec, WM8961_ANTI_POP, reg);
889
890 /* VMID=2*50k, VREF */
891 reg = snd_soc_read(codec, WM8961_PWR_MGMT_1);
892 reg &= ~WM8961_VMIDSEL_MASK;
893 reg |= (1 << WM8961_VMIDSEL_SHIFT) | WM8961_VREF;
894 snd_soc_write(codec, WM8961_PWR_MGMT_1, reg);
895 }
896 break;
897
898 case SND_SOC_BIAS_STANDBY:
899 if (codec->bias_level == SND_SOC_BIAS_PREPARE) {
900 /* VREF off */
901 reg = snd_soc_read(codec, WM8961_PWR_MGMT_1);
902 reg &= ~WM8961_VREF;
903 snd_soc_write(codec, WM8961_PWR_MGMT_1, reg);
904
905 /* Bias generation off */
906 reg = snd_soc_read(codec, WM8961_ANTI_POP);
907 reg &= ~(WM8961_BUFIOEN | WM8961_BUFDCOPEN);
908 snd_soc_write(codec, WM8961_ANTI_POP, reg);
909
910 /* VMID off */
911 reg = snd_soc_read(codec, WM8961_PWR_MGMT_1);
912 reg &= ~WM8961_VMIDSEL_MASK;
913 snd_soc_write(codec, WM8961_PWR_MGMT_1, reg);
914 }
915 break;
916
917 case SND_SOC_BIAS_OFF:
918 break;
919 }
920
921 codec->bias_level = level;
922
923 return 0;
924}
925
926
927#define WM8961_RATES SNDRV_PCM_RATE_8000_48000
928
929#define WM8961_FORMATS \
930 (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
931 SNDRV_PCM_FMTBIT_S24_LE)
932
933static struct snd_soc_dai_ops wm8961_dai_ops = {
934 .hw_params = wm8961_hw_params,
935 .set_sysclk = wm8961_set_sysclk,
936 .set_fmt = wm8961_set_fmt,
937 .digital_mute = wm8961_digital_mute,
938 .set_tristate = wm8961_set_tristate,
939 .set_clkdiv = wm8961_set_clkdiv,
940};
941
942struct snd_soc_dai wm8961_dai = {
943 .name = "WM8961",
944 .playback = {
945 .stream_name = "HiFi Playback",
946 .channels_min = 1,
947 .channels_max = 2,
948 .rates = WM8961_RATES,
949 .formats = WM8961_FORMATS,},
950 .capture = {
951 .stream_name = "HiFi Capture",
952 .channels_min = 1,
953 .channels_max = 2,
954 .rates = WM8961_RATES,
955 .formats = WM8961_FORMATS,},
956 .ops = &wm8961_dai_ops,
957};
958EXPORT_SYMBOL_GPL(wm8961_dai);
959
960
961static struct snd_soc_codec *wm8961_codec;
962
963static int wm8961_probe(struct platform_device *pdev)
964{
965 struct snd_soc_device *socdev = platform_get_drvdata(pdev);
966 struct snd_soc_codec *codec;
967 int ret = 0;
968
969 if (wm8961_codec == NULL) {
970 dev_err(&pdev->dev, "Codec device not registered\n");
971 return -ENODEV;
972 }
973
974 socdev->card->codec = wm8961_codec;
975 codec = wm8961_codec;
976
977 /* register pcms */
978 ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
979 if (ret < 0) {
980 dev_err(codec->dev, "failed to create pcms: %d\n", ret);
981 goto pcm_err;
982 }
983
984 snd_soc_add_controls(codec, wm8961_snd_controls,
985 ARRAY_SIZE(wm8961_snd_controls));
986 snd_soc_dapm_new_controls(codec, wm8961_dapm_widgets,
987 ARRAY_SIZE(wm8961_dapm_widgets));
988 snd_soc_dapm_add_routes(codec, audio_paths, ARRAY_SIZE(audio_paths));
989 snd_soc_dapm_new_widgets(codec);
990
991 ret = snd_soc_init_card(socdev);
992 if (ret < 0) {
993 dev_err(codec->dev, "failed to register card: %d\n", ret);
994 goto card_err;
995 }
996
997 return ret;
998
999card_err:
1000 snd_soc_free_pcms(socdev);
1001 snd_soc_dapm_free(socdev);
1002pcm_err:
1003 return ret;
1004}
1005
1006static int wm8961_remove(struct platform_device *pdev)
1007{
1008 struct snd_soc_device *socdev = platform_get_drvdata(pdev);
1009
1010 snd_soc_free_pcms(socdev);
1011 snd_soc_dapm_free(socdev);
1012
1013 return 0;
1014}
1015
1016#ifdef CONFIG_PM
1017static int wm8961_suspend(struct platform_device *pdev, pm_message_t state)
1018{
1019 struct snd_soc_device *socdev = platform_get_drvdata(pdev);
1020 struct snd_soc_codec *codec = socdev->card->codec;
1021
1022 wm8961_set_bias_level(codec, SND_SOC_BIAS_OFF);
1023
1024 return 0;
1025}
1026
1027static int wm8961_resume(struct platform_device *pdev)
1028{
1029 struct snd_soc_device *socdev = platform_get_drvdata(pdev);
1030 struct snd_soc_codec *codec = socdev->card->codec;
1031 u16 *reg_cache = codec->reg_cache;
1032 int i;
1033
1034 for (i = 0; i < codec->reg_cache_size; i++) {
1035 if (i == WM8961_SOFTWARE_RESET)
1036 continue;
1037
1038 snd_soc_write(codec, i, reg_cache[i]);
1039 }
1040
1041 wm8961_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
1042
1043 return 0;
1044}
1045#else
1046#define wm8961_suspend NULL
1047#define wm8961_resume NULL
1048#endif
1049
1050struct snd_soc_codec_device soc_codec_dev_wm8961 = {
1051 .probe = wm8961_probe,
1052 .remove = wm8961_remove,
1053 .suspend = wm8961_suspend,
1054 .resume = wm8961_resume,
1055};
1056EXPORT_SYMBOL_GPL(soc_codec_dev_wm8961);
1057
1058static int wm8961_register(struct wm8961_priv *wm8961)
1059{
1060 struct snd_soc_codec *codec = &wm8961->codec;
1061 int ret;
1062 u16 reg;
1063
1064 if (wm8961_codec) {
1065 dev_err(codec->dev, "Another WM8961 is registered\n");
1066 ret = -EINVAL;
1067 goto err;
1068 }
1069
1070 mutex_init(&codec->mutex);
1071 INIT_LIST_HEAD(&codec->dapm_widgets);
1072 INIT_LIST_HEAD(&codec->dapm_paths);
1073
1074 codec->private_data = wm8961;
1075 codec->name = "WM8961";
1076 codec->owner = THIS_MODULE;
1077 codec->dai = &wm8961_dai;
1078 codec->num_dai = 1;
1079 codec->reg_cache_size = ARRAY_SIZE(wm8961->reg_cache);
1080 codec->reg_cache = &wm8961->reg_cache;
1081 codec->bias_level = SND_SOC_BIAS_OFF;
1082 codec->set_bias_level = wm8961_set_bias_level;
1083 codec->volatile_register = wm8961_volatile_register;
1084
1085 memcpy(codec->reg_cache, wm8961_reg_defaults,
1086 sizeof(wm8961_reg_defaults));
1087
1088 ret = snd_soc_codec_set_cache_io(codec, 8, 16, SND_SOC_I2C);
1089 if (ret != 0) {
1090 dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
1091 goto err;
1092 }
1093
1094 reg = snd_soc_read(codec, WM8961_SOFTWARE_RESET);
1095 if (reg != 0x1801) {
1096 dev_err(codec->dev, "Device is not a WM8961: ID=0x%x\n", reg);
1097 ret = -EINVAL;
1098 goto err;
1099 }
1100
1101 /* This isn't volatile - readback doesn't correspond to write */
1102 reg = codec->hw_read(codec, WM8961_RIGHT_INPUT_VOLUME);
1103 dev_info(codec->dev, "WM8961 family %d revision %c\n",
1104 (reg & WM8961_DEVICE_ID_MASK) >> WM8961_DEVICE_ID_SHIFT,
1105 ((reg & WM8961_CHIP_REV_MASK) >> WM8961_CHIP_REV_SHIFT)
1106 + 'A');
1107
1108 ret = wm8961_reset(codec);
1109 if (ret < 0) {
1110 dev_err(codec->dev, "Failed to issue reset\n");
1111		goto err;
1112 }
1113
1114 /* Enable class W */
1115 reg = snd_soc_read(codec, WM8961_CHARGE_PUMP_B);
1116 reg |= WM8961_CP_DYN_PWR_MASK;
1117 snd_soc_write(codec, WM8961_CHARGE_PUMP_B, reg);
1118
1119 /* Latch volume update bits (right channel only, we always
1120 * write both out) and default ZC on. */
1121 reg = snd_soc_read(codec, WM8961_ROUT1_VOLUME);
1122 snd_soc_write(codec, WM8961_ROUT1_VOLUME,
1123 reg | WM8961_LO1ZC | WM8961_OUT1VU);
1124 snd_soc_write(codec, WM8961_LOUT1_VOLUME, reg | WM8961_LO1ZC);
1125 reg = snd_soc_read(codec, WM8961_ROUT2_VOLUME);
1126 snd_soc_write(codec, WM8961_ROUT2_VOLUME,
1127 reg | WM8961_SPKRZC | WM8961_SPKVU);
1128 snd_soc_write(codec, WM8961_LOUT2_VOLUME, reg | WM8961_SPKLZC);
1129
1130 reg = snd_soc_read(codec, WM8961_RIGHT_ADC_VOLUME);
1131 snd_soc_write(codec, WM8961_RIGHT_ADC_VOLUME, reg | WM8961_ADCVU);
1132 reg = snd_soc_read(codec, WM8961_RIGHT_INPUT_VOLUME);
1133 snd_soc_write(codec, WM8961_RIGHT_INPUT_VOLUME, reg | WM8961_IPVU);
1134
1135 /* Use soft mute by default */
1136 reg = snd_soc_read(codec, WM8961_ADC_DAC_CONTROL_2);
1137 reg |= WM8961_DACSMM;
1138 snd_soc_write(codec, WM8961_ADC_DAC_CONTROL_2, reg);
1139
1140 /* Use automatic clocking mode by default; for now this is all
1141 * we support.
1142 */
1143 reg = snd_soc_read(codec, WM8961_CLOCKING_3);
1144 reg &= ~WM8961_MANUAL_MODE;
1145 snd_soc_write(codec, WM8961_CLOCKING_3, reg);
1146
1147 wm8961_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
1148
1149 wm8961_dai.dev = codec->dev;
1150
1151 wm8961_codec = codec;
1152
1153 ret = snd_soc_register_codec(codec);
1154 if (ret != 0) {
1155 dev_err(codec->dev, "Failed to register codec: %d\n", ret);
1156 return ret;
1157 }
1158
1159 ret = snd_soc_register_dai(&wm8961_dai);
1160 if (ret != 0) {
1161 dev_err(codec->dev, "Failed to register DAI: %d\n", ret);
1162 snd_soc_unregister_codec(codec);
1163 return ret;
1164 }
1165
1166 return 0;
1167
1168err:
1169 kfree(wm8961);
1170 return ret;
1171}
1172
1173static void wm8961_unregister(struct wm8961_priv *wm8961)
1174{
1175 wm8961_set_bias_level(&wm8961->codec, SND_SOC_BIAS_OFF);
1176 snd_soc_unregister_dai(&wm8961_dai);
1177 snd_soc_unregister_codec(&wm8961->codec);
1178 kfree(wm8961);
1179 wm8961_codec = NULL;
1180}
1181
1182static __devinit int wm8961_i2c_probe(struct i2c_client *i2c,
1183 const struct i2c_device_id *id)
1184{
1185 struct wm8961_priv *wm8961;
1186 struct snd_soc_codec *codec;
1187
1188 wm8961 = kzalloc(sizeof(struct wm8961_priv), GFP_KERNEL);
1189 if (wm8961 == NULL)
1190 return -ENOMEM;
1191
1192 codec = &wm8961->codec;
1193
1194 i2c_set_clientdata(i2c, wm8961);
1195 codec->control_data = i2c;
1196
1197 codec->dev = &i2c->dev;
1198
1199 return wm8961_register(wm8961);
1200}
1201
1202static __devexit int wm8961_i2c_remove(struct i2c_client *client)
1203{
1204 struct wm8961_priv *wm8961 = i2c_get_clientdata(client);
1205 wm8961_unregister(wm8961);
1206 return 0;
1207}
1208
1209#ifdef CONFIG_PM
1210static int wm8961_i2c_suspend(struct i2c_client *client, pm_message_t state)
1211{
1212 return snd_soc_suspend_device(&client->dev);
1213}
1214
1215static int wm8961_i2c_resume(struct i2c_client *client)
1216{
1217 return snd_soc_resume_device(&client->dev);
1218}
1219#else
1220#define wm8961_i2c_suspend NULL
1221#define wm8961_i2c_resume NULL
1222#endif
1223
1224static const struct i2c_device_id wm8961_i2c_id[] = {
1225 { "wm8961", 0 },
1226 { }
1227};
1228MODULE_DEVICE_TABLE(i2c, wm8961_i2c_id);
1229
1230static struct i2c_driver wm8961_i2c_driver = {
1231 .driver = {
1232 .name = "wm8961",
1233 .owner = THIS_MODULE,
1234 },
1235 .probe = wm8961_i2c_probe,
1236 .remove = __devexit_p(wm8961_i2c_remove),
1237 .suspend = wm8961_i2c_suspend,
1238 .resume = wm8961_i2c_resume,
1239 .id_table = wm8961_i2c_id,
1240};
1241
1242static int __init wm8961_modinit(void)
1243{
1244 int ret;
1245
1246 ret = i2c_add_driver(&wm8961_i2c_driver);
1247 if (ret != 0) {
1248 printk(KERN_ERR "Failed to register WM8961 I2C driver: %d\n",
1249 ret);
1250 }
1251
1252 return ret;
1253}
1254module_init(wm8961_modinit);
1255
1256static void __exit wm8961_exit(void)
1257{
1258 i2c_del_driver(&wm8961_i2c_driver);
1259}
1260module_exit(wm8961_exit);
1261
1262
1263MODULE_DESCRIPTION("ASoC WM8961 driver");
1264MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
1265MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/wm8961.h b/sound/soc/codecs/wm8961.h
new file mode 100644
index 000000000000..5513bfd720d6
--- /dev/null
+++ b/sound/soc/codecs/wm8961.h
@@ -0,0 +1,866 @@
1/*
2 * wm8961.h -- WM8961 SoC Audio driver
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef _WM8961_H
10#define _WM8961_H
11
12#include <sound/soc.h>
13
14extern struct snd_soc_codec_device soc_codec_dev_wm8961;
15extern struct snd_soc_dai wm8961_dai;
16
17#define WM8961_BCLK 1
18#define WM8961_LRCLK 2
19
20#define WM8961_BCLK_DIV_1 0
21#define WM8961_BCLK_DIV_1_5 1
22#define WM8961_BCLK_DIV_2 2
23#define WM8961_BCLK_DIV_3 3
24#define WM8961_BCLK_DIV_4 4
25#define WM8961_BCLK_DIV_5_5 5
26#define WM8961_BCLK_DIV_6 6
27#define WM8961_BCLK_DIV_8 7
28#define WM8961_BCLK_DIV_11 8
29#define WM8961_BCLK_DIV_12 9
30#define WM8961_BCLK_DIV_16 10
31#define WM8961_BCLK_DIV_24 11
32#define WM8961_BCLK_DIV_32 13
33
34
35/*
36 * Register values.
37 */
38#define WM8961_LEFT_INPUT_VOLUME 0x00
39#define WM8961_RIGHT_INPUT_VOLUME 0x01
40#define WM8961_LOUT1_VOLUME 0x02
41#define WM8961_ROUT1_VOLUME 0x03
42#define WM8961_CLOCKING1 0x04
43#define WM8961_ADC_DAC_CONTROL_1 0x05
44#define WM8961_ADC_DAC_CONTROL_2 0x06
45#define WM8961_AUDIO_INTERFACE_0 0x07
46#define WM8961_CLOCKING2 0x08
47#define WM8961_AUDIO_INTERFACE_1 0x09
48#define WM8961_LEFT_DAC_VOLUME 0x0A
49#define WM8961_RIGHT_DAC_VOLUME 0x0B
50#define WM8961_AUDIO_INTERFACE_2 0x0E
51#define WM8961_SOFTWARE_RESET 0x0F
52#define WM8961_ALC1 0x11
53#define WM8961_ALC2 0x12
54#define WM8961_ALC3 0x13
55#define WM8961_NOISE_GATE 0x14
56#define WM8961_LEFT_ADC_VOLUME 0x15
57#define WM8961_RIGHT_ADC_VOLUME 0x16
58#define WM8961_ADDITIONAL_CONTROL_1 0x17
59#define WM8961_ADDITIONAL_CONTROL_2 0x18
60#define WM8961_PWR_MGMT_1 0x19
61#define WM8961_PWR_MGMT_2 0x1A
62#define WM8961_ADDITIONAL_CONTROL_3 0x1B
63#define WM8961_ANTI_POP 0x1C
64#define WM8961_CLOCKING_3 0x1E
65#define WM8961_ADCL_SIGNAL_PATH 0x20
66#define WM8961_ADCR_SIGNAL_PATH 0x21
67#define WM8961_LOUT2_VOLUME 0x28
68#define WM8961_ROUT2_VOLUME 0x29
69#define WM8961_PWR_MGMT_3 0x2F
70#define WM8961_ADDITIONAL_CONTROL_4 0x30
71#define WM8961_CLASS_D_CONTROL_1 0x31
72#define WM8961_CLASS_D_CONTROL_2 0x33
73#define WM8961_CLOCKING_4 0x38
74#define WM8961_DSP_SIDETONE_0 0x39
75#define WM8961_DSP_SIDETONE_1 0x3A
76#define WM8961_DC_SERVO_0 0x3C
77#define WM8961_DC_SERVO_1 0x3D
78#define WM8961_DC_SERVO_3 0x3F
79#define WM8961_DC_SERVO_5 0x41
80#define WM8961_ANALOGUE_PGA_BIAS 0x44
81#define WM8961_ANALOGUE_HP_0 0x45
82#define WM8961_ANALOGUE_HP_2 0x47
83#define WM8961_CHARGE_PUMP_1 0x48
84#define WM8961_CHARGE_PUMP_B 0x52
85#define WM8961_WRITE_SEQUENCER_1 0x57
86#define WM8961_WRITE_SEQUENCER_2 0x58
87#define WM8961_WRITE_SEQUENCER_3 0x59
88#define WM8961_WRITE_SEQUENCER_4 0x5A
89#define WM8961_WRITE_SEQUENCER_5 0x5B
90#define WM8961_WRITE_SEQUENCER_6 0x5C
91#define WM8961_WRITE_SEQUENCER_7 0x5D
92#define WM8961_GENERAL_TEST_1 0xFC
93
94
95/*
96 * Field Definitions.
97 */
98
99/*
100 * R0 (0x00) - Left Input volume
101 */
102#define WM8961_IPVU 0x0100 /* IPVU */
103#define WM8961_IPVU_MASK 0x0100 /* IPVU */
104#define WM8961_IPVU_SHIFT 8 /* IPVU */
105#define WM8961_IPVU_WIDTH 1 /* IPVU */
106#define WM8961_LINMUTE 0x0080 /* LINMUTE */
107#define WM8961_LINMUTE_MASK 0x0080 /* LINMUTE */
108#define WM8961_LINMUTE_SHIFT 7 /* LINMUTE */
109#define WM8961_LINMUTE_WIDTH 1 /* LINMUTE */
110#define WM8961_LIZC 0x0040 /* LIZC */
111#define WM8961_LIZC_MASK 0x0040 /* LIZC */
112#define WM8961_LIZC_SHIFT 6 /* LIZC */
113#define WM8961_LIZC_WIDTH 1 /* LIZC */
114#define WM8961_LINVOL_MASK 0x003F /* LINVOL - [5:0] */
115#define WM8961_LINVOL_SHIFT 0 /* LINVOL - [5:0] */
116#define WM8961_LINVOL_WIDTH 6 /* LINVOL - [5:0] */
117
118/*
119 * R1 (0x01) - Right Input volume
120 */
121#define WM8961_DEVICE_ID_MASK 0xF000 /* DEVICE_ID - [15:12] */
122#define WM8961_DEVICE_ID_SHIFT 12 /* DEVICE_ID - [15:12] */
123#define WM8961_DEVICE_ID_WIDTH 4 /* DEVICE_ID - [15:12] */
124#define WM8961_CHIP_REV_MASK 0x0E00 /* CHIP_REV - [11:9] */
125#define WM8961_CHIP_REV_SHIFT 9 /* CHIP_REV - [11:9] */
126#define WM8961_CHIP_REV_WIDTH 3 /* CHIP_REV - [11:9] */
127#define WM8961_IPVU 0x0100 /* IPVU */
128#define WM8961_IPVU_MASK 0x0100 /* IPVU */
129#define WM8961_IPVU_SHIFT 8 /* IPVU */
130#define WM8961_IPVU_WIDTH 1 /* IPVU */
131#define WM8961_RINMUTE 0x0080 /* RINMUTE */
132#define WM8961_RINMUTE_MASK 0x0080 /* RINMUTE */
133#define WM8961_RINMUTE_SHIFT 7 /* RINMUTE */
134#define WM8961_RINMUTE_WIDTH 1 /* RINMUTE */
135#define WM8961_RIZC 0x0040 /* RIZC */
136#define WM8961_RIZC_MASK 0x0040 /* RIZC */
137#define WM8961_RIZC_SHIFT 6 /* RIZC */
138#define WM8961_RIZC_WIDTH 1 /* RIZC */
139#define WM8961_RINVOL_MASK 0x003F /* RINVOL - [5:0] */
140#define WM8961_RINVOL_SHIFT 0 /* RINVOL - [5:0] */
141#define WM8961_RINVOL_WIDTH 6 /* RINVOL - [5:0] */
142
143/*
144 * R2 (0x02) - LOUT1 volume
145 */
146#define WM8961_OUT1VU 0x0100 /* OUT1VU */
147#define WM8961_OUT1VU_MASK 0x0100 /* OUT1VU */
148#define WM8961_OUT1VU_SHIFT 8 /* OUT1VU */
149#define WM8961_OUT1VU_WIDTH 1 /* OUT1VU */
150#define WM8961_LO1ZC 0x0080 /* LO1ZC */
151#define WM8961_LO1ZC_MASK 0x0080 /* LO1ZC */
152#define WM8961_LO1ZC_SHIFT 7 /* LO1ZC */
153#define WM8961_LO1ZC_WIDTH 1 /* LO1ZC */
154#define WM8961_LOUT1VOL_MASK 0x007F /* LOUT1VOL - [6:0] */
155#define WM8961_LOUT1VOL_SHIFT 0 /* LOUT1VOL - [6:0] */
156#define WM8961_LOUT1VOL_WIDTH 7 /* LOUT1VOL - [6:0] */
157
158/*
159 * R3 (0x03) - ROUT1 volume
160 */
161#define WM8961_OUT1VU 0x0100 /* OUT1VU */
162#define WM8961_OUT1VU_MASK 0x0100 /* OUT1VU */
163#define WM8961_OUT1VU_SHIFT 8 /* OUT1VU */
164#define WM8961_OUT1VU_WIDTH 1 /* OUT1VU */
165#define WM8961_RO1ZC 0x0080 /* RO1ZC */
166#define WM8961_RO1ZC_MASK 0x0080 /* RO1ZC */
167#define WM8961_RO1ZC_SHIFT 7 /* RO1ZC */
168#define WM8961_RO1ZC_WIDTH 1 /* RO1ZC */
169#define WM8961_ROUT1VOL_MASK 0x007F /* ROUT1VOL - [6:0] */
170#define WM8961_ROUT1VOL_SHIFT 0 /* ROUT1VOL - [6:0] */
171#define WM8961_ROUT1VOL_WIDTH 7 /* ROUT1VOL - [6:0] */
172
173/*
174 * R4 (0x04) - Clocking1
175 */
176#define WM8961_ADCDIV_MASK 0x01C0 /* ADCDIV - [8:6] */
177#define WM8961_ADCDIV_SHIFT 6 /* ADCDIV - [8:6] */
178#define WM8961_ADCDIV_WIDTH 3 /* ADCDIV - [8:6] */
179#define WM8961_DACDIV_MASK 0x0038 /* DACDIV - [5:3] */
180#define WM8961_DACDIV_SHIFT 3 /* DACDIV - [5:3] */
181#define WM8961_DACDIV_WIDTH 3 /* DACDIV - [5:3] */
182#define WM8961_MCLKDIV 0x0004 /* MCLKDIV */
183#define WM8961_MCLKDIV_MASK 0x0004 /* MCLKDIV */
184#define WM8961_MCLKDIV_SHIFT 2 /* MCLKDIV */
185#define WM8961_MCLKDIV_WIDTH 1 /* MCLKDIV */
186
187/*
188 * R5 (0x05) - ADC & DAC Control 1
189 */
190#define WM8961_ADCPOL_MASK 0x0060 /* ADCPOL - [6:5] */
191#define WM8961_ADCPOL_SHIFT 5 /* ADCPOL - [6:5] */
192#define WM8961_ADCPOL_WIDTH 2 /* ADCPOL - [6:5] */
193#define WM8961_DACMU 0x0008 /* DACMU */
194#define WM8961_DACMU_MASK 0x0008 /* DACMU */
195#define WM8961_DACMU_SHIFT 3 /* DACMU */
196#define WM8961_DACMU_WIDTH 1 /* DACMU */
197#define WM8961_DEEMPH_MASK 0x0006 /* DEEMPH - [2:1] */
198#define WM8961_DEEMPH_SHIFT 1 /* DEEMPH - [2:1] */
199#define WM8961_DEEMPH_WIDTH 2 /* DEEMPH - [2:1] */
200#define WM8961_ADCHPD 0x0001 /* ADCHPD */
201#define WM8961_ADCHPD_MASK 0x0001 /* ADCHPD */
202#define WM8961_ADCHPD_SHIFT 0 /* ADCHPD */
203#define WM8961_ADCHPD_WIDTH 1 /* ADCHPD */
204
205/*
206 * R6 (0x06) - ADC & DAC Control 2
207 */
208#define WM8961_ADC_HPF_CUT_MASK 0x0180 /* ADC_HPF_CUT - [8:7] */
209#define WM8961_ADC_HPF_CUT_SHIFT 7 /* ADC_HPF_CUT - [8:7] */
210#define WM8961_ADC_HPF_CUT_WIDTH 2 /* ADC_HPF_CUT - [8:7] */
211#define WM8961_DACPOL_MASK 0x0060 /* DACPOL - [6:5] */
212#define WM8961_DACPOL_SHIFT 5 /* DACPOL - [6:5] */
213#define WM8961_DACPOL_WIDTH 2 /* DACPOL - [6:5] */
214#define WM8961_DACSMM 0x0008 /* DACSMM */
215#define WM8961_DACSMM_MASK 0x0008 /* DACSMM */
216#define WM8961_DACSMM_SHIFT 3 /* DACSMM */
217#define WM8961_DACSMM_WIDTH 1 /* DACSMM */
218#define WM8961_DACMR 0x0004 /* DACMR */
219#define WM8961_DACMR_MASK 0x0004 /* DACMR */
220#define WM8961_DACMR_SHIFT 2 /* DACMR */
221#define WM8961_DACMR_WIDTH 1 /* DACMR */
222#define WM8961_DACSLOPE 0x0002 /* DACSLOPE */
223#define WM8961_DACSLOPE_MASK 0x0002 /* DACSLOPE */
224#define WM8961_DACSLOPE_SHIFT 1 /* DACSLOPE */
225#define WM8961_DACSLOPE_WIDTH 1 /* DACSLOPE */
226#define WM8961_DAC_OSR128 0x0001 /* DAC_OSR128 */
227#define WM8961_DAC_OSR128_MASK 0x0001 /* DAC_OSR128 */
228#define WM8961_DAC_OSR128_SHIFT 0 /* DAC_OSR128 */
229#define WM8961_DAC_OSR128_WIDTH 1 /* DAC_OSR128 */
230
231/*
232 * R7 (0x07) - Audio Interface 0
233 */
234#define WM8961_ALRSWAP 0x0100 /* ALRSWAP */
235#define WM8961_ALRSWAP_MASK 0x0100 /* ALRSWAP */
236#define WM8961_ALRSWAP_SHIFT 8 /* ALRSWAP */
237#define WM8961_ALRSWAP_WIDTH 1 /* ALRSWAP */
238#define WM8961_BCLKINV 0x0080 /* BCLKINV */
239#define WM8961_BCLKINV_MASK 0x0080 /* BCLKINV */
240#define WM8961_BCLKINV_SHIFT 7 /* BCLKINV */
241#define WM8961_BCLKINV_WIDTH 1 /* BCLKINV */
242#define WM8961_MS 0x0040 /* MS */
243#define WM8961_MS_MASK 0x0040 /* MS */
244#define WM8961_MS_SHIFT 6 /* MS */
245#define WM8961_MS_WIDTH 1 /* MS */
246#define WM8961_DLRSWAP 0x0020 /* DLRSWAP */
247#define WM8961_DLRSWAP_MASK 0x0020 /* DLRSWAP */
248#define WM8961_DLRSWAP_SHIFT 5 /* DLRSWAP */
249#define WM8961_DLRSWAP_WIDTH 1 /* DLRSWAP */
250#define WM8961_LRP 0x0010 /* LRP */
251#define WM8961_LRP_MASK 0x0010 /* LRP */
252#define WM8961_LRP_SHIFT 4 /* LRP */
253#define WM8961_LRP_WIDTH 1 /* LRP */
254#define WM8961_WL_MASK 0x000C /* WL - [3:2] */
255#define WM8961_WL_SHIFT 2 /* WL - [3:2] */
256#define WM8961_WL_WIDTH 2 /* WL - [3:2] */
257#define WM8961_FORMAT_MASK 0x0003 /* FORMAT - [1:0] */
258#define WM8961_FORMAT_SHIFT 0 /* FORMAT - [1:0] */
259#define WM8961_FORMAT_WIDTH 2 /* FORMAT - [1:0] */
260
261/*
262 * R8 (0x08) - Clocking2
263 */
264#define WM8961_DCLKDIV_MASK 0x01C0 /* DCLKDIV - [8:6] */
265#define WM8961_DCLKDIV_SHIFT 6 /* DCLKDIV - [8:6] */
266#define WM8961_DCLKDIV_WIDTH 3 /* DCLKDIV - [8:6] */
267#define WM8961_CLK_SYS_ENA 0x0020 /* CLK_SYS_ENA */
268#define WM8961_CLK_SYS_ENA_MASK 0x0020 /* CLK_SYS_ENA */
269#define WM8961_CLK_SYS_ENA_SHIFT 5 /* CLK_SYS_ENA */
270#define WM8961_CLK_SYS_ENA_WIDTH 1 /* CLK_SYS_ENA */
271#define WM8961_CLK_DSP_ENA 0x0010 /* CLK_DSP_ENA */
272#define WM8961_CLK_DSP_ENA_MASK 0x0010 /* CLK_DSP_ENA */
273#define WM8961_CLK_DSP_ENA_SHIFT 4 /* CLK_DSP_ENA */
274#define WM8961_CLK_DSP_ENA_WIDTH 1 /* CLK_DSP_ENA */
275#define WM8961_BCLKDIV_MASK 0x000F /* BCLKDIV - [3:0] */
276#define WM8961_BCLKDIV_SHIFT 0 /* BCLKDIV - [3:0] */
277#define WM8961_BCLKDIV_WIDTH 4 /* BCLKDIV - [3:0] */
278
279/*
280 * R9 (0x09) - Audio Interface 1
281 */
282#define WM8961_DACCOMP_MASK 0x0018 /* DACCOMP - [4:3] */
283#define WM8961_DACCOMP_SHIFT 3 /* DACCOMP - [4:3] */
284#define WM8961_DACCOMP_WIDTH 2 /* DACCOMP - [4:3] */
285#define WM8961_ADCCOMP_MASK 0x0006 /* ADCCOMP - [2:1] */
286#define WM8961_ADCCOMP_SHIFT 1 /* ADCCOMP - [2:1] */
287#define WM8961_ADCCOMP_WIDTH 2 /* ADCCOMP - [2:1] */
288#define WM8961_LOOPBACK 0x0001 /* LOOPBACK */
289#define WM8961_LOOPBACK_MASK 0x0001 /* LOOPBACK */
290#define WM8961_LOOPBACK_SHIFT 0 /* LOOPBACK */
291#define WM8961_LOOPBACK_WIDTH 1 /* LOOPBACK */
292
293/*
294 * R10 (0x0A) - Left DAC volume
295 */
296#define WM8961_DACVU 0x0100 /* DACVU */
297#define WM8961_DACVU_MASK 0x0100 /* DACVU */
298#define WM8961_DACVU_SHIFT 8 /* DACVU */
299#define WM8961_DACVU_WIDTH 1 /* DACVU */
300#define WM8961_LDACVOL_MASK 0x00FF /* LDACVOL - [7:0] */
301#define WM8961_LDACVOL_SHIFT 0 /* LDACVOL - [7:0] */
302#define WM8961_LDACVOL_WIDTH 8 /* LDACVOL - [7:0] */
303
304/*
305 * R11 (0x0B) - Right DAC volume
306 */
307#define WM8961_DACVU 0x0100 /* DACVU */
308#define WM8961_DACVU_MASK 0x0100 /* DACVU */
309#define WM8961_DACVU_SHIFT 8 /* DACVU */
310#define WM8961_DACVU_WIDTH 1 /* DACVU */
311#define WM8961_RDACVOL_MASK 0x00FF /* RDACVOL - [7:0] */
312#define WM8961_RDACVOL_SHIFT 0 /* RDACVOL - [7:0] */
313#define WM8961_RDACVOL_WIDTH 8 /* RDACVOL - [7:0] */
314
315/*
316 * R14 (0x0E) - Audio Interface 2
317 */
318#define WM8961_LRCLK_RATE_MASK 0x01FF /* LRCLK_RATE - [8:0] */
319#define WM8961_LRCLK_RATE_SHIFT 0 /* LRCLK_RATE - [8:0] */
320#define WM8961_LRCLK_RATE_WIDTH 9 /* LRCLK_RATE - [8:0] */
321
322/*
323 * R15 (0x0F) - Software Reset
324 */
325#define WM8961_SW_RST_DEV_ID1_MASK 0xFFFF /* SW_RST_DEV_ID1 - [15:0] */
326#define WM8961_SW_RST_DEV_ID1_SHIFT 0 /* SW_RST_DEV_ID1 - [15:0] */
327#define WM8961_SW_RST_DEV_ID1_WIDTH 16 /* SW_RST_DEV_ID1 - [15:0] */
328
329/*
330 * R17 (0x11) - ALC1
331 */
332#define WM8961_ALCSEL_MASK 0x0180 /* ALCSEL - [8:7] */
333#define WM8961_ALCSEL_SHIFT 7 /* ALCSEL - [8:7] */
334#define WM8961_ALCSEL_WIDTH 2 /* ALCSEL - [8:7] */
335#define WM8961_MAXGAIN_MASK 0x0070 /* MAXGAIN - [6:4] */
336#define WM8961_MAXGAIN_SHIFT 4 /* MAXGAIN - [6:4] */
337#define WM8961_MAXGAIN_WIDTH 3 /* MAXGAIN - [6:4] */
338#define WM8961_ALCL_MASK 0x000F /* ALCL - [3:0] */
339#define WM8961_ALCL_SHIFT 0 /* ALCL - [3:0] */
340#define WM8961_ALCL_WIDTH 4 /* ALCL - [3:0] */
341
342/*
343 * R18 (0x12) - ALC2
344 */
345#define WM8961_ALCZC 0x0080 /* ALCZC */
346#define WM8961_ALCZC_MASK 0x0080 /* ALCZC */
347#define WM8961_ALCZC_SHIFT 7 /* ALCZC */
348#define WM8961_ALCZC_WIDTH 1 /* ALCZC */
349#define WM8961_MINGAIN_MASK 0x0070 /* MINGAIN - [6:4] */
350#define WM8961_MINGAIN_SHIFT 4 /* MINGAIN - [6:4] */
351#define WM8961_MINGAIN_WIDTH 3 /* MINGAIN - [6:4] */
352#define WM8961_HLD_MASK 0x000F /* HLD - [3:0] */
353#define WM8961_HLD_SHIFT 0 /* HLD - [3:0] */
354#define WM8961_HLD_WIDTH 4 /* HLD - [3:0] */
355
356/*
357 * R19 (0x13) - ALC3
358 */
359#define WM8961_ALCMODE 0x0100 /* ALCMODE */
360#define WM8961_ALCMODE_MASK 0x0100 /* ALCMODE */
361#define WM8961_ALCMODE_SHIFT 8 /* ALCMODE */
362#define WM8961_ALCMODE_WIDTH 1 /* ALCMODE */
363#define WM8961_DCY_MASK 0x00F0 /* DCY - [7:4] */
364#define WM8961_DCY_SHIFT 4 /* DCY - [7:4] */
365#define WM8961_DCY_WIDTH 4 /* DCY - [7:4] */
366#define WM8961_ATK_MASK 0x000F /* ATK - [3:0] */
367#define WM8961_ATK_SHIFT 0 /* ATK - [3:0] */
368#define WM8961_ATK_WIDTH 4 /* ATK - [3:0] */
369
370/*
371 * R20 (0x14) - Noise Gate
372 */
373#define WM8961_NGTH_MASK 0x00F8 /* NGTH - [7:3] */
374#define WM8961_NGTH_SHIFT 3 /* NGTH - [7:3] */
375#define WM8961_NGTH_WIDTH 5 /* NGTH - [7:3] */
376#define WM8961_NGG 0x0002 /* NGG */
377#define WM8961_NGG_MASK 0x0002 /* NGG */
378#define WM8961_NGG_SHIFT 1 /* NGG */
379#define WM8961_NGG_WIDTH 1 /* NGG */
380#define WM8961_NGAT 0x0001 /* NGAT */
381#define WM8961_NGAT_MASK 0x0001 /* NGAT */
382#define WM8961_NGAT_SHIFT 0 /* NGAT */
383#define WM8961_NGAT_WIDTH 1 /* NGAT */
384
385/*
386 * R21 (0x15) - Left ADC volume
387 */
388#define WM8961_ADCVU 0x0100 /* ADCVU */
389#define WM8961_ADCVU_MASK 0x0100 /* ADCVU */
390#define WM8961_ADCVU_SHIFT 8 /* ADCVU */
391#define WM8961_ADCVU_WIDTH 1 /* ADCVU */
392#define WM8961_LADCVOL_MASK 0x00FF /* LADCVOL - [7:0] */
393#define WM8961_LADCVOL_SHIFT 0 /* LADCVOL - [7:0] */
394#define WM8961_LADCVOL_WIDTH 8 /* LADCVOL - [7:0] */
395
396/*
397 * R22 (0x16) - Right ADC volume
398 */
399#define WM8961_ADCVU 0x0100 /* ADCVU */
400#define WM8961_ADCVU_MASK 0x0100 /* ADCVU */
401#define WM8961_ADCVU_SHIFT 8 /* ADCVU */
402#define WM8961_ADCVU_WIDTH 1 /* ADCVU */
403#define WM8961_RADCVOL_MASK 0x00FF /* RADCVOL - [7:0] */
404#define WM8961_RADCVOL_SHIFT 0 /* RADCVOL - [7:0] */
405#define WM8961_RADCVOL_WIDTH 8 /* RADCVOL - [7:0] */
406
407/*
408 * R23 (0x17) - Additional control(1)
409 */
410#define WM8961_TSDEN 0x0100 /* TSDEN */
411#define WM8961_TSDEN_MASK 0x0100 /* TSDEN */
412#define WM8961_TSDEN_SHIFT 8 /* TSDEN */
413#define WM8961_TSDEN_WIDTH 1 /* TSDEN */
414#define WM8961_DMONOMIX 0x0010 /* DMONOMIX */
415#define WM8961_DMONOMIX_MASK 0x0010 /* DMONOMIX */
416#define WM8961_DMONOMIX_SHIFT 4 /* DMONOMIX */
417#define WM8961_DMONOMIX_WIDTH 1 /* DMONOMIX */
418#define WM8961_TOEN 0x0001 /* TOEN */
419#define WM8961_TOEN_MASK 0x0001 /* TOEN */
420#define WM8961_TOEN_SHIFT 0 /* TOEN */
421#define WM8961_TOEN_WIDTH 1 /* TOEN */
422
423/*
424 * R24 (0x18) - Additional control(2)
425 */
426#define WM8961_TRIS 0x0008 /* TRIS */
427#define WM8961_TRIS_MASK 0x0008 /* TRIS */
428#define WM8961_TRIS_SHIFT 3 /* TRIS */
429#define WM8961_TRIS_WIDTH 1 /* TRIS */
430
431/*
432 * R25 (0x19) - Pwr Mgmt (1)
433 */
434#define WM8961_VMIDSEL_MASK 0x0180 /* VMIDSEL - [8:7] */
435#define WM8961_VMIDSEL_SHIFT 7 /* VMIDSEL - [8:7] */
436#define WM8961_VMIDSEL_WIDTH 2 /* VMIDSEL - [8:7] */
437#define WM8961_VREF 0x0040 /* VREF */
438#define WM8961_VREF_MASK 0x0040 /* VREF */
439#define WM8961_VREF_SHIFT 6 /* VREF */
440#define WM8961_VREF_WIDTH 1 /* VREF */
441#define WM8961_AINL 0x0020 /* AINL */
442#define WM8961_AINL_MASK 0x0020 /* AINL */
443#define WM8961_AINL_SHIFT 5 /* AINL */
444#define WM8961_AINL_WIDTH 1 /* AINL */
445#define WM8961_AINR 0x0010 /* AINR */
446#define WM8961_AINR_MASK 0x0010 /* AINR */
447#define WM8961_AINR_SHIFT 4 /* AINR */
448#define WM8961_AINR_WIDTH 1 /* AINR */
449#define WM8961_ADCL 0x0008 /* ADCL */
450#define WM8961_ADCL_MASK 0x0008 /* ADCL */
451#define WM8961_ADCL_SHIFT 3 /* ADCL */
452#define WM8961_ADCL_WIDTH 1 /* ADCL */
453#define WM8961_ADCR 0x0004 /* ADCR */
454#define WM8961_ADCR_MASK 0x0004 /* ADCR */
455#define WM8961_ADCR_SHIFT 2 /* ADCR */
456#define WM8961_ADCR_WIDTH 1 /* ADCR */
457#define WM8961_MICB 0x0002 /* MICB */
458#define WM8961_MICB_MASK 0x0002 /* MICB */
459#define WM8961_MICB_SHIFT 1 /* MICB */
460#define WM8961_MICB_WIDTH 1 /* MICB */
461
462/*
463 * R26 (0x1A) - Pwr Mgmt (2)
464 */
465#define WM8961_DACL 0x0100 /* DACL */
466#define WM8961_DACL_MASK 0x0100 /* DACL */
467#define WM8961_DACL_SHIFT 8 /* DACL */
468#define WM8961_DACL_WIDTH 1 /* DACL */
469#define WM8961_DACR 0x0080 /* DACR */
470#define WM8961_DACR_MASK 0x0080 /* DACR */
471#define WM8961_DACR_SHIFT 7 /* DACR */
472#define WM8961_DACR_WIDTH 1 /* DACR */
473#define WM8961_LOUT1_PGA 0x0040 /* LOUT1_PGA */
474#define WM8961_LOUT1_PGA_MASK 0x0040 /* LOUT1_PGA */
475#define WM8961_LOUT1_PGA_SHIFT 6 /* LOUT1_PGA */
476#define WM8961_LOUT1_PGA_WIDTH 1 /* LOUT1_PGA */
477#define WM8961_ROUT1_PGA 0x0020 /* ROUT1_PGA */
478#define WM8961_ROUT1_PGA_MASK 0x0020 /* ROUT1_PGA */
479#define WM8961_ROUT1_PGA_SHIFT 5 /* ROUT1_PGA */
480#define WM8961_ROUT1_PGA_WIDTH 1 /* ROUT1_PGA */
481#define WM8961_SPKL_PGA 0x0010 /* SPKL_PGA */
482#define WM8961_SPKL_PGA_MASK 0x0010 /* SPKL_PGA */
483#define WM8961_SPKL_PGA_SHIFT 4 /* SPKL_PGA */
484#define WM8961_SPKL_PGA_WIDTH 1 /* SPKL_PGA */
485#define WM8961_SPKR_PGA 0x0008 /* SPKR_PGA */
486#define WM8961_SPKR_PGA_MASK 0x0008 /* SPKR_PGA */
487#define WM8961_SPKR_PGA_SHIFT 3 /* SPKR_PGA */
488#define WM8961_SPKR_PGA_WIDTH 1 /* SPKR_PGA */
489
490/*
491 * R27 (0x1B) - Additional Control (3)
492 */
493#define WM8961_SAMPLE_RATE_MASK 0x0007 /* SAMPLE_RATE - [2:0] */
494#define WM8961_SAMPLE_RATE_SHIFT 0 /* SAMPLE_RATE - [2:0] */
495#define WM8961_SAMPLE_RATE_WIDTH 3 /* SAMPLE_RATE - [2:0] */
496
497/*
498 * R28 (0x1C) - Anti-pop
499 */
500#define WM8961_BUFDCOPEN 0x0010 /* BUFDCOPEN */
501#define WM8961_BUFDCOPEN_MASK 0x0010 /* BUFDCOPEN */
502#define WM8961_BUFDCOPEN_SHIFT 4 /* BUFDCOPEN */
503#define WM8961_BUFDCOPEN_WIDTH 1 /* BUFDCOPEN */
504#define WM8961_BUFIOEN 0x0008 /* BUFIOEN */
505#define WM8961_BUFIOEN_MASK 0x0008 /* BUFIOEN */
506#define WM8961_BUFIOEN_SHIFT 3 /* BUFIOEN */
507#define WM8961_BUFIOEN_WIDTH 1 /* BUFIOEN */
508#define WM8961_SOFT_ST 0x0004 /* SOFT_ST */
509#define WM8961_SOFT_ST_MASK 0x0004 /* SOFT_ST */
510#define WM8961_SOFT_ST_SHIFT 2 /* SOFT_ST */
511#define WM8961_SOFT_ST_WIDTH 1 /* SOFT_ST */
512
513/*
514 * R30 (0x1E) - Clocking 3
515 */
516#define WM8961_CLK_TO_DIV_MASK 0x0180 /* CLK_TO_DIV - [8:7] */
517#define WM8961_CLK_TO_DIV_SHIFT 7 /* CLK_TO_DIV - [8:7] */
518#define WM8961_CLK_TO_DIV_WIDTH 2 /* CLK_TO_DIV - [8:7] */
519#define WM8961_CLK_256K_DIV_MASK 0x007E /* CLK_256K_DIV - [6:1] */
520#define WM8961_CLK_256K_DIV_SHIFT 1 /* CLK_256K_DIV - [6:1] */
521#define WM8961_CLK_256K_DIV_WIDTH 6 /* CLK_256K_DIV - [6:1] */
522#define WM8961_MANUAL_MODE 0x0001 /* MANUAL_MODE */
523#define WM8961_MANUAL_MODE_MASK 0x0001 /* MANUAL_MODE */
524#define WM8961_MANUAL_MODE_SHIFT 0 /* MANUAL_MODE */
525#define WM8961_MANUAL_MODE_WIDTH 1 /* MANUAL_MODE */
526
527/*
528 * R32 (0x20) - ADCL signal path
529 */
530#define WM8961_LMICBOOST_MASK 0x0030 /* LMICBOOST - [5:4] */
531#define WM8961_LMICBOOST_SHIFT 4 /* LMICBOOST - [5:4] */
532#define WM8961_LMICBOOST_WIDTH 2 /* LMICBOOST - [5:4] */
533
534/*
535 * R33 (0x21) - ADCR signal path
536 */
537#define WM8961_RMICBOOST_MASK 0x0030 /* RMICBOOST - [5:4] */
538#define WM8961_RMICBOOST_SHIFT 4 /* RMICBOOST - [5:4] */
539#define WM8961_RMICBOOST_WIDTH 2 /* RMICBOOST - [5:4] */
540
541/*
542 * R40 (0x28) - LOUT2 volume
543 */
544#define WM8961_SPKVU 0x0100 /* SPKVU */
545#define WM8961_SPKVU_MASK 0x0100 /* SPKVU */
546#define WM8961_SPKVU_SHIFT 8 /* SPKVU */
547#define WM8961_SPKVU_WIDTH 1 /* SPKVU */
548#define WM8961_SPKLZC 0x0080 /* SPKLZC */
549#define WM8961_SPKLZC_MASK 0x0080 /* SPKLZC */
550#define WM8961_SPKLZC_SHIFT 7 /* SPKLZC */
551#define WM8961_SPKLZC_WIDTH 1 /* SPKLZC */
552#define WM8961_SPKLVOL_MASK 0x007F /* SPKLVOL - [6:0] */
553#define WM8961_SPKLVOL_SHIFT 0 /* SPKLVOL - [6:0] */
554#define WM8961_SPKLVOL_WIDTH 7 /* SPKLVOL - [6:0] */
555
556/*
557 * R41 (0x29) - ROUT2 volume
558 */
559#define WM8961_SPKVU 0x0100 /* SPKVU */
560#define WM8961_SPKVU_MASK 0x0100 /* SPKVU */
561#define WM8961_SPKVU_SHIFT 8 /* SPKVU */
562#define WM8961_SPKVU_WIDTH 1 /* SPKVU */
563#define WM8961_SPKRZC 0x0080 /* SPKRZC */
564#define WM8961_SPKRZC_MASK 0x0080 /* SPKRZC */
565#define WM8961_SPKRZC_SHIFT 7 /* SPKRZC */
566#define WM8961_SPKRZC_WIDTH 1 /* SPKRZC */
567#define WM8961_SPKRVOL_MASK 0x007F /* SPKRVOL - [6:0] */
568#define WM8961_SPKRVOL_SHIFT 0 /* SPKRVOL - [6:0] */
569#define WM8961_SPKRVOL_WIDTH 7 /* SPKRVOL - [6:0] */
570
571/*
572 * R47 (0x2F) - Pwr Mgmt (3)
573 */
574#define WM8961_TEMP_SHUT 0x0002 /* TEMP_SHUT */
575#define WM8961_TEMP_SHUT_MASK 0x0002 /* TEMP_SHUT */
576#define WM8961_TEMP_SHUT_SHIFT 1 /* TEMP_SHUT */
577#define WM8961_TEMP_SHUT_WIDTH 1 /* TEMP_SHUT */
578#define WM8961_TEMP_WARN 0x0001 /* TEMP_WARN */
579#define WM8961_TEMP_WARN_MASK 0x0001 /* TEMP_WARN */
580#define WM8961_TEMP_WARN_SHIFT 0 /* TEMP_WARN */
581#define WM8961_TEMP_WARN_WIDTH 1 /* TEMP_WARN */
582
583/*
584 * R48 (0x30) - Additional Control (4)
585 */
586#define WM8961_TSENSEN 0x0002 /* TSENSEN */
587#define WM8961_TSENSEN_MASK 0x0002 /* TSENSEN */
588#define WM8961_TSENSEN_SHIFT 1 /* TSENSEN */
589#define WM8961_TSENSEN_WIDTH 1 /* TSENSEN */
590#define WM8961_MBSEL 0x0001 /* MBSEL */
591#define WM8961_MBSEL_MASK 0x0001 /* MBSEL */
592#define WM8961_MBSEL_SHIFT 0 /* MBSEL */
593#define WM8961_MBSEL_WIDTH 1 /* MBSEL */
594
595/*
596 * R49 (0x31) - Class D Control 1
597 */
598#define WM8961_SPKR_ENA 0x0080 /* SPKR_ENA */
599#define WM8961_SPKR_ENA_MASK 0x0080 /* SPKR_ENA */
600#define WM8961_SPKR_ENA_SHIFT 7 /* SPKR_ENA */
601#define WM8961_SPKR_ENA_WIDTH 1 /* SPKR_ENA */
602#define WM8961_SPKL_ENA 0x0040 /* SPKL_ENA */
603#define WM8961_SPKL_ENA_MASK 0x0040 /* SPKL_ENA */
604#define WM8961_SPKL_ENA_SHIFT 6 /* SPKL_ENA */
605#define WM8961_SPKL_ENA_WIDTH 1 /* SPKL_ENA */
606
607/*
608 * R51 (0x33) - Class D Control 2
609 */
610#define WM8961_CLASSD_ACGAIN_MASK 0x0007 /* CLASSD_ACGAIN - [2:0] */
611#define WM8961_CLASSD_ACGAIN_SHIFT 0 /* CLASSD_ACGAIN - [2:0] */
612#define WM8961_CLASSD_ACGAIN_WIDTH 3 /* CLASSD_ACGAIN - [2:0] */
613
614/*
615 * R56 (0x38) - Clocking 4
616 */
617#define WM8961_CLK_DCS_DIV_MASK 0x01E0 /* CLK_DCS_DIV - [8:5] */
618#define WM8961_CLK_DCS_DIV_SHIFT 5 /* CLK_DCS_DIV - [8:5] */
619#define WM8961_CLK_DCS_DIV_WIDTH 4 /* CLK_DCS_DIV - [8:5] */
620#define WM8961_CLK_SYS_RATE_MASK 0x001E /* CLK_SYS_RATE - [4:1] */
621#define WM8961_CLK_SYS_RATE_SHIFT 1 /* CLK_SYS_RATE - [4:1] */
622#define WM8961_CLK_SYS_RATE_WIDTH 4 /* CLK_SYS_RATE - [4:1] */
623
624/*
625 * R57 (0x39) - DSP Sidetone 0
626 */
627#define WM8961_ADCR_DAC_SVOL_MASK 0x00F0 /* ADCR_DAC_SVOL - [7:4] */
628#define WM8961_ADCR_DAC_SVOL_SHIFT 4 /* ADCR_DAC_SVOL - [7:4] */
629#define WM8961_ADCR_DAC_SVOL_WIDTH 4 /* ADCR_DAC_SVOL - [7:4] */
630#define WM8961_ADC_TO_DACR_MASK 0x000C /* ADC_TO_DACR - [3:2] */
631#define WM8961_ADC_TO_DACR_SHIFT 2 /* ADC_TO_DACR - [3:2] */
632#define WM8961_ADC_TO_DACR_WIDTH 2 /* ADC_TO_DACR - [3:2] */
633
634/*
635 * R58 (0x3A) - DSP Sidetone 1
636 */
637#define WM8961_ADCL_DAC_SVOL_MASK 0x00F0 /* ADCL_DAC_SVOL - [7:4] */
638#define WM8961_ADCL_DAC_SVOL_SHIFT 4 /* ADCL_DAC_SVOL - [7:4] */
639#define WM8961_ADCL_DAC_SVOL_WIDTH 4 /* ADCL_DAC_SVOL - [7:4] */
640#define WM8961_ADC_TO_DACL_MASK 0x000C /* ADC_TO_DACL - [3:2] */
641#define WM8961_ADC_TO_DACL_SHIFT 2 /* ADC_TO_DACL - [3:2] */
642#define WM8961_ADC_TO_DACL_WIDTH 2 /* ADC_TO_DACL - [3:2] */
643
644/*
645 * R60 (0x3C) - DC Servo 0
646 */
647#define WM8961_DCS_ENA_CHAN_INL 0x0080 /* DCS_ENA_CHAN_INL */
648#define WM8961_DCS_ENA_CHAN_INL_MASK 0x0080 /* DCS_ENA_CHAN_INL */
649#define WM8961_DCS_ENA_CHAN_INL_SHIFT 7 /* DCS_ENA_CHAN_INL */
650#define WM8961_DCS_ENA_CHAN_INL_WIDTH 1 /* DCS_ENA_CHAN_INL */
651#define WM8961_DCS_TRIG_STARTUP_INL 0x0040 /* DCS_TRIG_STARTUP_INL */
652#define WM8961_DCS_TRIG_STARTUP_INL_MASK 0x0040 /* DCS_TRIG_STARTUP_INL */
653#define WM8961_DCS_TRIG_STARTUP_INL_SHIFT 6 /* DCS_TRIG_STARTUP_INL */
654#define WM8961_DCS_TRIG_STARTUP_INL_WIDTH 1 /* DCS_TRIG_STARTUP_INL */
655#define WM8961_DCS_TRIG_SERIES_INL 0x0010 /* DCS_TRIG_SERIES_INL */
656#define WM8961_DCS_TRIG_SERIES_INL_MASK 0x0010 /* DCS_TRIG_SERIES_INL */
657#define WM8961_DCS_TRIG_SERIES_INL_SHIFT 4 /* DCS_TRIG_SERIES_INL */
658#define WM8961_DCS_TRIG_SERIES_INL_WIDTH 1 /* DCS_TRIG_SERIES_INL */
659#define WM8961_DCS_ENA_CHAN_INR 0x0008 /* DCS_ENA_CHAN_INR */
660#define WM8961_DCS_ENA_CHAN_INR_MASK 0x0008 /* DCS_ENA_CHAN_INR */
661#define WM8961_DCS_ENA_CHAN_INR_SHIFT 3 /* DCS_ENA_CHAN_INR */
662#define WM8961_DCS_ENA_CHAN_INR_WIDTH 1 /* DCS_ENA_CHAN_INR */
663#define WM8961_DCS_TRIG_STARTUP_INR 0x0004 /* DCS_TRIG_STARTUP_INR */
664#define WM8961_DCS_TRIG_STARTUP_INR_MASK 0x0004 /* DCS_TRIG_STARTUP_INR */
665#define WM8961_DCS_TRIG_STARTUP_INR_SHIFT 2 /* DCS_TRIG_STARTUP_INR */
666#define WM8961_DCS_TRIG_STARTUP_INR_WIDTH 1 /* DCS_TRIG_STARTUP_INR */
667#define WM8961_DCS_TRIG_SERIES_INR 0x0001 /* DCS_TRIG_SERIES_INR */
668#define WM8961_DCS_TRIG_SERIES_INR_MASK 0x0001 /* DCS_TRIG_SERIES_INR */
669#define WM8961_DCS_TRIG_SERIES_INR_SHIFT 0 /* DCS_TRIG_SERIES_INR */
670#define WM8961_DCS_TRIG_SERIES_INR_WIDTH 1 /* DCS_TRIG_SERIES_INR */
671
672/*
673 * R61 (0x3D) - DC Servo 1
674 */
675#define WM8961_DCS_ENA_CHAN_HPL 0x0080 /* DCS_ENA_CHAN_HPL */
676#define WM8961_DCS_ENA_CHAN_HPL_MASK 0x0080 /* DCS_ENA_CHAN_HPL */
677#define WM8961_DCS_ENA_CHAN_HPL_SHIFT 7 /* DCS_ENA_CHAN_HPL */
678#define WM8961_DCS_ENA_CHAN_HPL_WIDTH 1 /* DCS_ENA_CHAN_HPL */
679#define WM8961_DCS_TRIG_STARTUP_HPL 0x0040 /* DCS_TRIG_STARTUP_HPL */
680#define WM8961_DCS_TRIG_STARTUP_HPL_MASK 0x0040 /* DCS_TRIG_STARTUP_HPL */
681#define WM8961_DCS_TRIG_STARTUP_HPL_SHIFT 6 /* DCS_TRIG_STARTUP_HPL */
682#define WM8961_DCS_TRIG_STARTUP_HPL_WIDTH 1 /* DCS_TRIG_STARTUP_HPL */
683#define WM8961_DCS_TRIG_SERIES_HPL 0x0010 /* DCS_TRIG_SERIES_HPL */
684#define WM8961_DCS_TRIG_SERIES_HPL_MASK 0x0010 /* DCS_TRIG_SERIES_HPL */
685#define WM8961_DCS_TRIG_SERIES_HPL_SHIFT 4 /* DCS_TRIG_SERIES_HPL */
686#define WM8961_DCS_TRIG_SERIES_HPL_WIDTH 1 /* DCS_TRIG_SERIES_HPL */
687#define WM8961_DCS_ENA_CHAN_HPR 0x0008 /* DCS_ENA_CHAN_HPR */
688#define WM8961_DCS_ENA_CHAN_HPR_MASK 0x0008 /* DCS_ENA_CHAN_HPR */
689#define WM8961_DCS_ENA_CHAN_HPR_SHIFT 3 /* DCS_ENA_CHAN_HPR */
690#define WM8961_DCS_ENA_CHAN_HPR_WIDTH 1 /* DCS_ENA_CHAN_HPR */
691#define WM8961_DCS_TRIG_STARTUP_HPR 0x0004 /* DCS_TRIG_STARTUP_HPR */
692#define WM8961_DCS_TRIG_STARTUP_HPR_MASK 0x0004 /* DCS_TRIG_STARTUP_HPR */
693#define WM8961_DCS_TRIG_STARTUP_HPR_SHIFT 2 /* DCS_TRIG_STARTUP_HPR */
694#define WM8961_DCS_TRIG_STARTUP_HPR_WIDTH 1 /* DCS_TRIG_STARTUP_HPR */
695#define WM8961_DCS_TRIG_SERIES_HPR 0x0001 /* DCS_TRIG_SERIES_HPR */
696#define WM8961_DCS_TRIG_SERIES_HPR_MASK 0x0001 /* DCS_TRIG_SERIES_HPR */
697#define WM8961_DCS_TRIG_SERIES_HPR_SHIFT 0 /* DCS_TRIG_SERIES_HPR */
698#define WM8961_DCS_TRIG_SERIES_HPR_WIDTH 1 /* DCS_TRIG_SERIES_HPR */
699
700/*
701 * R63 (0x3F) - DC Servo 3
702 */
703#define WM8961_DCS_FILT_BW_SERIES_MASK 0x0030 /* DCS_FILT_BW_SERIES - [5:4] */
704#define WM8961_DCS_FILT_BW_SERIES_SHIFT 4 /* DCS_FILT_BW_SERIES - [5:4] */
705#define WM8961_DCS_FILT_BW_SERIES_WIDTH 2 /* DCS_FILT_BW_SERIES - [5:4] */
706
707/*
708 * R65 (0x41) - DC Servo 5
709 */
710#define WM8961_DCS_SERIES_NO_HP_MASK 0x007F /* DCS_SERIES_NO_HP - [6:0] */
711#define WM8961_DCS_SERIES_NO_HP_SHIFT 0 /* DCS_SERIES_NO_HP - [6:0] */
712#define WM8961_DCS_SERIES_NO_HP_WIDTH 7 /* DCS_SERIES_NO_HP - [6:0] */
713
714/*
715 * R68 (0x44) - Analogue PGA Bias
716 */
717#define WM8961_HP_PGAS_BIAS_MASK 0x0007 /* HP_PGAS_BIAS - [2:0] */
718#define WM8961_HP_PGAS_BIAS_SHIFT 0 /* HP_PGAS_BIAS - [2:0] */
719#define WM8961_HP_PGAS_BIAS_WIDTH 3 /* HP_PGAS_BIAS - [2:0] */
720
721/*
722 * R69 (0x45) - Analogue HP 0
723 */
724#define WM8961_HPL_RMV_SHORT 0x0080 /* HPL_RMV_SHORT */
725#define WM8961_HPL_RMV_SHORT_MASK 0x0080 /* HPL_RMV_SHORT */
726#define WM8961_HPL_RMV_SHORT_SHIFT 7 /* HPL_RMV_SHORT */
727#define WM8961_HPL_RMV_SHORT_WIDTH 1 /* HPL_RMV_SHORT */
728#define WM8961_HPL_ENA_OUTP 0x0040 /* HPL_ENA_OUTP */
729#define WM8961_HPL_ENA_OUTP_MASK 0x0040 /* HPL_ENA_OUTP */
730#define WM8961_HPL_ENA_OUTP_SHIFT 6 /* HPL_ENA_OUTP */
731#define WM8961_HPL_ENA_OUTP_WIDTH 1 /* HPL_ENA_OUTP */
732#define WM8961_HPL_ENA_DLY 0x0020 /* HPL_ENA_DLY */
733#define WM8961_HPL_ENA_DLY_MASK 0x0020 /* HPL_ENA_DLY */
734#define WM8961_HPL_ENA_DLY_SHIFT 5 /* HPL_ENA_DLY */
735#define WM8961_HPL_ENA_DLY_WIDTH 1 /* HPL_ENA_DLY */
736#define WM8961_HPL_ENA 0x0010 /* HPL_ENA */
737#define WM8961_HPL_ENA_MASK 0x0010 /* HPL_ENA */
738#define WM8961_HPL_ENA_SHIFT 4 /* HPL_ENA */
739#define WM8961_HPL_ENA_WIDTH 1 /* HPL_ENA */
740#define WM8961_HPR_RMV_SHORT 0x0008 /* HPR_RMV_SHORT */
741#define WM8961_HPR_RMV_SHORT_MASK 0x0008 /* HPR_RMV_SHORT */
742#define WM8961_HPR_RMV_SHORT_SHIFT 3 /* HPR_RMV_SHORT */
743#define WM8961_HPR_RMV_SHORT_WIDTH 1 /* HPR_RMV_SHORT */
744#define WM8961_HPR_ENA_OUTP 0x0004 /* HPR_ENA_OUTP */
745#define WM8961_HPR_ENA_OUTP_MASK 0x0004 /* HPR_ENA_OUTP */
746#define WM8961_HPR_ENA_OUTP_SHIFT 2 /* HPR_ENA_OUTP */
747#define WM8961_HPR_ENA_OUTP_WIDTH 1 /* HPR_ENA_OUTP */
748#define WM8961_HPR_ENA_DLY 0x0002 /* HPR_ENA_DLY */
749#define WM8961_HPR_ENA_DLY_MASK 0x0002 /* HPR_ENA_DLY */
750#define WM8961_HPR_ENA_DLY_SHIFT 1 /* HPR_ENA_DLY */
751#define WM8961_HPR_ENA_DLY_WIDTH 1 /* HPR_ENA_DLY */
752#define WM8961_HPR_ENA 0x0001 /* HPR_ENA */
753#define WM8961_HPR_ENA_MASK 0x0001 /* HPR_ENA */
754#define WM8961_HPR_ENA_SHIFT 0 /* HPR_ENA */
755#define WM8961_HPR_ENA_WIDTH 1 /* HPR_ENA */
756
757/*
758 * R71 (0x47) - Analogue HP 2
759 */
760#define WM8961_HPL_VOL_MASK 0x01C0 /* HPL_VOL - [8:6] */
761#define WM8961_HPL_VOL_SHIFT 6 /* HPL_VOL - [8:6] */
762#define WM8961_HPL_VOL_WIDTH 3 /* HPL_VOL - [8:6] */
763#define WM8961_HPR_VOL_MASK 0x0038 /* HPR_VOL - [5:3] */
764#define WM8961_HPR_VOL_SHIFT 3 /* HPR_VOL - [5:3] */
765#define WM8961_HPR_VOL_WIDTH 3 /* HPR_VOL - [5:3] */
766#define WM8961_HP_BIAS_BOOST_MASK 0x0007 /* HP_BIAS_BOOST - [2:0] */
767#define WM8961_HP_BIAS_BOOST_SHIFT 0 /* HP_BIAS_BOOST - [2:0] */
768#define WM8961_HP_BIAS_BOOST_WIDTH 3 /* HP_BIAS_BOOST - [2:0] */
769
770/*
771 * R72 (0x48) - Charge Pump 1
772 */
773#define WM8961_CP_ENA 0x0001 /* CP_ENA */
774#define WM8961_CP_ENA_MASK 0x0001 /* CP_ENA */
775#define WM8961_CP_ENA_SHIFT 0 /* CP_ENA */
776#define WM8961_CP_ENA_WIDTH 1 /* CP_ENA */
777
778/*
779 * R82 (0x52) - Charge Pump B
780 */
781#define WM8961_CP_DYN_PWR_MASK 0x0003 /* CP_DYN_PWR - [1:0] */
782#define WM8961_CP_DYN_PWR_SHIFT 0 /* CP_DYN_PWR - [1:0] */
783#define WM8961_CP_DYN_PWR_WIDTH 2 /* CP_DYN_PWR - [1:0] */
784
785/*
786 * R87 (0x57) - Write Sequencer 1
787 */
788#define WM8961_WSEQ_ENA 0x0020 /* WSEQ_ENA */
789#define WM8961_WSEQ_ENA_MASK 0x0020 /* WSEQ_ENA */
790#define WM8961_WSEQ_ENA_SHIFT 5 /* WSEQ_ENA */
791#define WM8961_WSEQ_ENA_WIDTH 1 /* WSEQ_ENA */
792#define WM8961_WSEQ_WRITE_INDEX_MASK 0x001F /* WSEQ_WRITE_INDEX - [4:0] */
793#define WM8961_WSEQ_WRITE_INDEX_SHIFT 0 /* WSEQ_WRITE_INDEX - [4:0] */
794#define WM8961_WSEQ_WRITE_INDEX_WIDTH 5 /* WSEQ_WRITE_INDEX - [4:0] */
795
796/*
797 * R88 (0x58) - Write Sequencer 2
798 */
799#define WM8961_WSEQ_EOS 0x0100 /* WSEQ_EOS */
800#define WM8961_WSEQ_EOS_MASK 0x0100 /* WSEQ_EOS */
801#define WM8961_WSEQ_EOS_SHIFT 8 /* WSEQ_EOS */
802#define WM8961_WSEQ_EOS_WIDTH 1 /* WSEQ_EOS */
803#define WM8961_WSEQ_ADDR_MASK 0x00FF /* WSEQ_ADDR - [7:0] */
804#define WM8961_WSEQ_ADDR_SHIFT 0 /* WSEQ_ADDR - [7:0] */
805#define WM8961_WSEQ_ADDR_WIDTH 8 /* WSEQ_ADDR - [7:0] */
806
807/*
808 * R89 (0x59) - Write Sequencer 3
809 */
810#define WM8961_WSEQ_DATA_MASK 0x00FF /* WSEQ_DATA - [7:0] */
811#define WM8961_WSEQ_DATA_SHIFT 0 /* WSEQ_DATA - [7:0] */
812#define WM8961_WSEQ_DATA_WIDTH 8 /* WSEQ_DATA - [7:0] */
813
814/*
815 * R90 (0x5A) - Write Sequencer 4
816 */
817#define WM8961_WSEQ_ABORT 0x0100 /* WSEQ_ABORT */
818#define WM8961_WSEQ_ABORT_MASK 0x0100 /* WSEQ_ABORT */
819#define WM8961_WSEQ_ABORT_SHIFT 8 /* WSEQ_ABORT */
820#define WM8961_WSEQ_ABORT_WIDTH 1 /* WSEQ_ABORT */
821#define WM8961_WSEQ_START 0x0080 /* WSEQ_START */
822#define WM8961_WSEQ_START_MASK 0x0080 /* WSEQ_START */
823#define WM8961_WSEQ_START_SHIFT 7 /* WSEQ_START */
824#define WM8961_WSEQ_START_WIDTH 1 /* WSEQ_START */
825#define WM8961_WSEQ_START_INDEX_MASK 0x003F /* WSEQ_START_INDEX - [5:0] */
826#define WM8961_WSEQ_START_INDEX_SHIFT 0 /* WSEQ_START_INDEX - [5:0] */
827#define WM8961_WSEQ_START_INDEX_WIDTH 6 /* WSEQ_START_INDEX - [5:0] */
828
829/*
830 * R91 (0x5B) - Write Sequencer 5
831 */
832#define WM8961_WSEQ_DATA_WIDTH_MASK 0x0070 /* WSEQ_DATA_WIDTH - [6:4] */
833#define WM8961_WSEQ_DATA_WIDTH_SHIFT 4 /* WSEQ_DATA_WIDTH - [6:4] */
834#define WM8961_WSEQ_DATA_WIDTH_WIDTH 3 /* WSEQ_DATA_WIDTH - [6:4] */
835#define WM8961_WSEQ_DATA_START_MASK 0x000F /* WSEQ_DATA_START - [3:0] */
836#define WM8961_WSEQ_DATA_START_SHIFT 0 /* WSEQ_DATA_START - [3:0] */
837#define WM8961_WSEQ_DATA_START_WIDTH 4 /* WSEQ_DATA_START - [3:0] */
838
839/*
840 * R92 (0x5C) - Write Sequencer 6
841 */
842#define WM8961_WSEQ_DELAY_MASK 0x000F /* WSEQ_DELAY - [3:0] */
843#define WM8961_WSEQ_DELAY_SHIFT 0 /* WSEQ_DELAY - [3:0] */
844#define WM8961_WSEQ_DELAY_WIDTH 4 /* WSEQ_DELAY - [3:0] */
845
846/*
847 * R93 (0x5D) - Write Sequencer 7
848 */
849#define WM8961_WSEQ_BUSY 0x0001 /* WSEQ_BUSY */
850#define WM8961_WSEQ_BUSY_MASK 0x0001 /* WSEQ_BUSY */
851#define WM8961_WSEQ_BUSY_SHIFT 0 /* WSEQ_BUSY */
852#define WM8961_WSEQ_BUSY_WIDTH 1 /* WSEQ_BUSY */
853
854/*
855 * R252 (0xFC) - General test 1
856 */
857#define WM8961_ARA_ENA 0x0002 /* ARA_ENA */
858#define WM8961_ARA_ENA_MASK 0x0002 /* ARA_ENA */
859#define WM8961_ARA_ENA_SHIFT 1 /* ARA_ENA */
860#define WM8961_ARA_ENA_WIDTH 1 /* ARA_ENA */
861#define WM8961_AUTO_INC 0x0001 /* AUTO_INC */
862#define WM8961_AUTO_INC_MASK 0x0001 /* AUTO_INC */
863#define WM8961_AUTO_INC_SHIFT 0 /* AUTO_INC */
864#define WM8961_AUTO_INC_WIDTH 1 /* AUTO_INC */
865
866#endif
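
For context only, and not part of this patch: a machine (board) driver of this ASoC generation would wire the exported wm8961_dai and soc_codec_dev_wm8961 up roughly as sketched below. The CPU DAI, platform driver and card names are placeholders, not taken from any real board.

/* Illustrative sketch only -- board_* symbols below are placeholders. */
static struct snd_soc_dai_link board_wm8961_dai_link = {
	.name = "WM8961",
	.stream_name = "WM8961 HiFi",
	.cpu_dai = &board_cpu_dai,		/* placeholder: the SoC's I2S/SSP DAI */
	.codec_dai = &wm8961_dai,
};

static struct snd_soc_card board_card = {
	.name = "board-wm8961",
	.platform = &board_soc_platform,	/* placeholder: the SoC's PCM/DMA driver */
	.dai_link = &board_wm8961_dai_link,
	.num_links = 1,
};

static struct snd_soc_device board_snd_devdata = {
	.card = &board_card,
	.codec_dev = &soc_codec_dev_wm8961,
};

Clocking would then typically be set up from the link's hw_params via snd_soc_dai_set_sysclk() and snd_soc_dai_set_clkdiv(codec_dai, WM8961_BCLK, WM8961_BCLK_DIV_8), using the WM8961_BCLK_DIV_* constants defined above; the particular divider shown here is only an example, not a requirement of the driver.
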
diff --git a/sound/soc/codecs/wm8971.c b/sound/soc/codecs/wm8971.c
index 032dca22dbd3..d66efb0546ea 100644
--- a/sound/soc/codecs/wm8971.c
+++ b/sound/soc/codecs/wm8971.c
@@ -59,44 +59,7 @@ static const u16 wm8971_reg[] = {
59 0x0079, 0x0079, 0x0079, /* 40 */ 59 0x0079, 0x0079, 0x0079, /* 40 */
60}; 60};
61 61
62static inline unsigned int wm8971_read_reg_cache(struct snd_soc_codec *codec, 62#define wm8971_reset(c) snd_soc_write(c, WM8971_RESET, 0)
63 unsigned int reg)
64{
65 u16 *cache = codec->reg_cache;
66 if (reg < WM8971_REG_COUNT)
67 return cache[reg];
68
69 return -1;
70}
71
72static inline void wm8971_write_reg_cache(struct snd_soc_codec *codec,
73 unsigned int reg, unsigned int value)
74{
75 u16 *cache = codec->reg_cache;
76 if (reg < WM8971_REG_COUNT)
77 cache[reg] = value;
78}
79
80static int wm8971_write(struct snd_soc_codec *codec, unsigned int reg,
81 unsigned int value)
82{
83 u8 data[2];
84
85 /* data is
86 * D15..D9 WM8753 register offset
87 * D8...D0 register data
88 */
89 data[0] = (reg << 1) | ((value >> 8) & 0x0001);
90 data[1] = value & 0x00ff;
91
92 wm8971_write_reg_cache (codec, reg, value);
93 if (codec->hw_write(codec->control_data, data, 2) == 2)
94 return 0;
95 else
96 return -EIO;
97}
98
99#define wm8971_reset(c) wm8971_write(c, WM8971_RESET, 0)
100 63
101/* WM8971 Controls */ 64/* WM8971 Controls */
102static const char *wm8971_bass[] = { "Linear Control", "Adaptive Boost" }; 65static const char *wm8971_bass[] = { "Linear Control", "Adaptive Boost" };
@@ -521,7 +484,7 @@ static int wm8971_set_dai_fmt(struct snd_soc_dai *codec_dai,
521 return -EINVAL; 484 return -EINVAL;
522 } 485 }
523 486
524 wm8971_write(codec, WM8971_IFACE, iface); 487 snd_soc_write(codec, WM8971_IFACE, iface);
525 return 0; 488 return 0;
526} 489}
527 490
@@ -533,8 +496,8 @@ static int wm8971_pcm_hw_params(struct snd_pcm_substream *substream,
533 struct snd_soc_device *socdev = rtd->socdev; 496 struct snd_soc_device *socdev = rtd->socdev;
534 struct snd_soc_codec *codec = socdev->card->codec; 497 struct snd_soc_codec *codec = socdev->card->codec;
535 struct wm8971_priv *wm8971 = codec->private_data; 498 struct wm8971_priv *wm8971 = codec->private_data;
536 u16 iface = wm8971_read_reg_cache(codec, WM8971_IFACE) & 0x1f3; 499 u16 iface = snd_soc_read(codec, WM8971_IFACE) & 0x1f3;
537 u16 srate = wm8971_read_reg_cache(codec, WM8971_SRATE) & 0x1c0; 500 u16 srate = snd_soc_read(codec, WM8971_SRATE) & 0x1c0;
538 int coeff = get_coeff(wm8971->sysclk, params_rate(params)); 501 int coeff = get_coeff(wm8971->sysclk, params_rate(params));
539 502
540 /* bit size */ 503 /* bit size */
@@ -553,9 +516,9 @@ static int wm8971_pcm_hw_params(struct snd_pcm_substream *substream,
553 } 516 }
554 517
555 /* set iface & srate */ 518 /* set iface & srate */
556 wm8971_write(codec, WM8971_IFACE, iface); 519 snd_soc_write(codec, WM8971_IFACE, iface);
557 if (coeff >= 0) 520 if (coeff >= 0)
558 wm8971_write(codec, WM8971_SRATE, srate | 521 snd_soc_write(codec, WM8971_SRATE, srate |
559 (coeff_div[coeff].sr << 1) | coeff_div[coeff].usb); 522 (coeff_div[coeff].sr << 1) | coeff_div[coeff].usb);
560 523
561 return 0; 524 return 0;
@@ -564,33 +527,33 @@ static int wm8971_pcm_hw_params(struct snd_pcm_substream *substream,
564static int wm8971_mute(struct snd_soc_dai *dai, int mute) 527static int wm8971_mute(struct snd_soc_dai *dai, int mute)
565{ 528{
566 struct snd_soc_codec *codec = dai->codec; 529 struct snd_soc_codec *codec = dai->codec;
567 u16 mute_reg = wm8971_read_reg_cache(codec, WM8971_ADCDAC) & 0xfff7; 530 u16 mute_reg = snd_soc_read(codec, WM8971_ADCDAC) & 0xfff7;
568 531
569 if (mute) 532 if (mute)
570 wm8971_write(codec, WM8971_ADCDAC, mute_reg | 0x8); 533 snd_soc_write(codec, WM8971_ADCDAC, mute_reg | 0x8);
571 else 534 else
572 wm8971_write(codec, WM8971_ADCDAC, mute_reg); 535 snd_soc_write(codec, WM8971_ADCDAC, mute_reg);
573 return 0; 536 return 0;
574} 537}
575 538
576static int wm8971_set_bias_level(struct snd_soc_codec *codec, 539static int wm8971_set_bias_level(struct snd_soc_codec *codec,
577 enum snd_soc_bias_level level) 540 enum snd_soc_bias_level level)
578{ 541{
579 u16 pwr_reg = wm8971_read_reg_cache(codec, WM8971_PWR1) & 0xfe3e; 542 u16 pwr_reg = snd_soc_read(codec, WM8971_PWR1) & 0xfe3e;
580 543
581 switch (level) { 544 switch (level) {
582 case SND_SOC_BIAS_ON: 545 case SND_SOC_BIAS_ON:
583 /* set vmid to 50k and unmute dac */ 546 /* set vmid to 50k and unmute dac */
584 wm8971_write(codec, WM8971_PWR1, pwr_reg | 0x00c1); 547 snd_soc_write(codec, WM8971_PWR1, pwr_reg | 0x00c1);
585 break; 548 break;
586 case SND_SOC_BIAS_PREPARE: 549 case SND_SOC_BIAS_PREPARE:
587 break; 550 break;
588 case SND_SOC_BIAS_STANDBY: 551 case SND_SOC_BIAS_STANDBY:
589 /* mute dac and set vmid to 500k, enable VREF */ 552 /* mute dac and set vmid to 500k, enable VREF */
590 wm8971_write(codec, WM8971_PWR1, pwr_reg | 0x0140); 553 snd_soc_write(codec, WM8971_PWR1, pwr_reg | 0x0140);
591 break; 554 break;
592 case SND_SOC_BIAS_OFF: 555 case SND_SOC_BIAS_OFF:
593 wm8971_write(codec, WM8971_PWR1, 0x0001); 556 snd_soc_write(codec, WM8971_PWR1, 0x0001);
594 break; 557 break;
595 } 558 }
596 codec->bias_level = level; 559 codec->bias_level = level;
@@ -667,8 +630,8 @@ static int wm8971_resume(struct platform_device *pdev)
667 630
668 /* charge wm8971 caps */ 631 /* charge wm8971 caps */
669 if (codec->suspend_bias_level == SND_SOC_BIAS_ON) { 632 if (codec->suspend_bias_level == SND_SOC_BIAS_ON) {
670 reg = wm8971_read_reg_cache(codec, WM8971_PWR1) & 0xfe3e; 633 reg = snd_soc_read(codec, WM8971_PWR1) & 0xfe3e;
671 wm8971_write(codec, WM8971_PWR1, reg | 0x01c0); 634 snd_soc_write(codec, WM8971_PWR1, reg | 0x01c0);
672 codec->bias_level = SND_SOC_BIAS_ON; 635 codec->bias_level = SND_SOC_BIAS_ON;
673 queue_delayed_work(wm8971_workq, &codec->delayed_work, 636 queue_delayed_work(wm8971_workq, &codec->delayed_work,
674 msecs_to_jiffies(1000)); 637 msecs_to_jiffies(1000));
@@ -677,15 +640,14 @@ static int wm8971_resume(struct platform_device *pdev)
677 return 0; 640 return 0;
678} 641}
679 642
680static int wm8971_init(struct snd_soc_device *socdev) 643static int wm8971_init(struct snd_soc_device *socdev,
644 enum snd_soc_control_type control)
681{ 645{
682 struct snd_soc_codec *codec = socdev->card->codec; 646 struct snd_soc_codec *codec = socdev->card->codec;
683 int reg, ret = 0; 647 int reg, ret = 0;
684 648
685 codec->name = "WM8971"; 649 codec->name = "WM8971";
686 codec->owner = THIS_MODULE; 650 codec->owner = THIS_MODULE;
687 codec->read = wm8971_read_reg_cache;
688 codec->write = wm8971_write;
689 codec->set_bias_level = wm8971_set_bias_level; 651 codec->set_bias_level = wm8971_set_bias_level;
690 codec->dai = &wm8971_dai; 652 codec->dai = &wm8971_dai;
691 codec->reg_cache_size = ARRAY_SIZE(wm8971_reg); 653 codec->reg_cache_size = ARRAY_SIZE(wm8971_reg);
@@ -695,42 +657,48 @@ static int wm8971_init(struct snd_soc_device *socdev)
695 if (codec->reg_cache == NULL) 657 if (codec->reg_cache == NULL)
696 return -ENOMEM; 658 return -ENOMEM;
697 659
660 ret = snd_soc_codec_set_cache_io(codec, 7, 9, control);
661 if (ret < 0) {
662 printk(KERN_ERR "wm8971: failed to set cache I/O: %d\n", ret);
663 goto err;
664 }
665
698 wm8971_reset(codec); 666 wm8971_reset(codec);
699 667
700 /* register pcms */ 668 /* register pcms */
701 ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1); 669 ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
702 if (ret < 0) { 670 if (ret < 0) {
703 printk(KERN_ERR "wm8971: failed to create pcms\n"); 671 printk(KERN_ERR "wm8971: failed to create pcms\n");
704 goto pcm_err; 672 goto err;
705 } 673 }
706 674
707 /* charge output caps - set vmid to 5k for quick power up */ 675 /* charge output caps - set vmid to 5k for quick power up */
708 reg = wm8971_read_reg_cache(codec, WM8971_PWR1) & 0xfe3e; 676 reg = snd_soc_read(codec, WM8971_PWR1) & 0xfe3e;
709 wm8971_write(codec, WM8971_PWR1, reg | 0x01c0); 677 snd_soc_write(codec, WM8971_PWR1, reg | 0x01c0);
710 codec->bias_level = SND_SOC_BIAS_STANDBY; 678 codec->bias_level = SND_SOC_BIAS_STANDBY;
711 queue_delayed_work(wm8971_workq, &codec->delayed_work, 679 queue_delayed_work(wm8971_workq, &codec->delayed_work,
712 msecs_to_jiffies(1000)); 680 msecs_to_jiffies(1000));
713 681
714 /* set the update bits */ 682 /* set the update bits */
715 reg = wm8971_read_reg_cache(codec, WM8971_LDAC); 683 reg = snd_soc_read(codec, WM8971_LDAC);
716 wm8971_write(codec, WM8971_LDAC, reg | 0x0100); 684 snd_soc_write(codec, WM8971_LDAC, reg | 0x0100);
717 reg = wm8971_read_reg_cache(codec, WM8971_RDAC); 685 reg = snd_soc_read(codec, WM8971_RDAC);
718 wm8971_write(codec, WM8971_RDAC, reg | 0x0100); 686 snd_soc_write(codec, WM8971_RDAC, reg | 0x0100);
719 687
720 reg = wm8971_read_reg_cache(codec, WM8971_LOUT1V); 688 reg = snd_soc_read(codec, WM8971_LOUT1V);
721 wm8971_write(codec, WM8971_LOUT1V, reg | 0x0100); 689 snd_soc_write(codec, WM8971_LOUT1V, reg | 0x0100);
722 reg = wm8971_read_reg_cache(codec, WM8971_ROUT1V); 690 reg = snd_soc_read(codec, WM8971_ROUT1V);
723 wm8971_write(codec, WM8971_ROUT1V, reg | 0x0100); 691 snd_soc_write(codec, WM8971_ROUT1V, reg | 0x0100);
724 692
725 reg = wm8971_read_reg_cache(codec, WM8971_LOUT2V); 693 reg = snd_soc_read(codec, WM8971_LOUT2V);
726 wm8971_write(codec, WM8971_LOUT2V, reg | 0x0100); 694 snd_soc_write(codec, WM8971_LOUT2V, reg | 0x0100);
727 reg = wm8971_read_reg_cache(codec, WM8971_ROUT2V); 695 reg = snd_soc_read(codec, WM8971_ROUT2V);
728 wm8971_write(codec, WM8971_ROUT2V, reg | 0x0100); 696 snd_soc_write(codec, WM8971_ROUT2V, reg | 0x0100);
729 697
730 reg = wm8971_read_reg_cache(codec, WM8971_LINVOL); 698 reg = snd_soc_read(codec, WM8971_LINVOL);
731 wm8971_write(codec, WM8971_LINVOL, reg | 0x0100); 699 snd_soc_write(codec, WM8971_LINVOL, reg | 0x0100);
732 reg = wm8971_read_reg_cache(codec, WM8971_RINVOL); 700 reg = snd_soc_read(codec, WM8971_RINVOL);
733 wm8971_write(codec, WM8971_RINVOL, reg | 0x0100); 701 snd_soc_write(codec, WM8971_RINVOL, reg | 0x0100);
734 702
735 snd_soc_add_controls(codec, wm8971_snd_controls, 703 snd_soc_add_controls(codec, wm8971_snd_controls,
736 ARRAY_SIZE(wm8971_snd_controls)); 704 ARRAY_SIZE(wm8971_snd_controls));
@@ -745,7 +713,7 @@ static int wm8971_init(struct snd_soc_device *socdev)
745card_err: 713card_err:
746 snd_soc_free_pcms(socdev); 714 snd_soc_free_pcms(socdev);
747 snd_soc_dapm_free(socdev); 715 snd_soc_dapm_free(socdev);
748pcm_err: 716err:
749 kfree(codec->reg_cache); 717 kfree(codec->reg_cache);
750 return ret; 718 return ret;
751} 719}
@@ -767,7 +735,7 @@ static int wm8971_i2c_probe(struct i2c_client *i2c,
767 735
768 codec->control_data = i2c; 736 codec->control_data = i2c;
769 737
770 ret = wm8971_init(socdev); 738 ret = wm8971_init(socdev, SND_SOC_I2C);
771 if (ret < 0) 739 if (ret < 0)
772 pr_err("failed to initialise WM8971\n"); 740 pr_err("failed to initialise WM8971\n");
773 741
@@ -877,7 +845,6 @@ static int wm8971_probe(struct platform_device *pdev)
877 845
878#if defined (CONFIG_I2C) || defined (CONFIG_I2C_MODULE) 846#if defined (CONFIG_I2C) || defined (CONFIG_I2C_MODULE)
879 if (setup->i2c_address) { 847 if (setup->i2c_address) {
880 codec->hw_write = (hw_write_t)i2c_master_send;
881 ret = wm8971_add_i2c_device(pdev, setup); 848 ret = wm8971_add_i2c_device(pdev, setup);
882 } 849 }
883#endif 850#endif
diff --git a/sound/soc/codecs/wm8974.c b/sound/soc/codecs/wm8974.c
new file mode 100644
index 000000000000..d8a013ab3177
--- /dev/null
+++ b/sound/soc/codecs/wm8974.c
@@ -0,0 +1,808 @@
1/*
2 * wm8974.c -- WM8974 ALSA SoC Audio driver
3 *
4 * Copyright 2006-2009 Wolfson Microelectronics PLC.
5 *
6 * Author: Liam Girdwood <linux@wolfsonmicro.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15#include <linux/version.h>
16#include <linux/kernel.h>
17#include <linux/init.h>
18#include <linux/delay.h>
19#include <linux/pm.h>
20#include <linux/i2c.h>
21#include <linux/platform_device.h>
22#include <sound/core.h>
23#include <sound/pcm.h>
24#include <sound/pcm_params.h>
25#include <sound/soc.h>
26#include <sound/soc-dapm.h>
27#include <sound/initval.h>
28#include <sound/tlv.h>
29
30#include "wm8974.h"
31
32static const u16 wm8974_reg[WM8974_CACHEREGNUM] = {
33 0x0000, 0x0000, 0x0000, 0x0000,
34 0x0050, 0x0000, 0x0140, 0x0000,
35 0x0000, 0x0000, 0x0000, 0x00ff,
36 0x0000, 0x0000, 0x0100, 0x00ff,
37 0x0000, 0x0000, 0x012c, 0x002c,
38 0x002c, 0x002c, 0x002c, 0x0000,
39 0x0032, 0x0000, 0x0000, 0x0000,
40 0x0000, 0x0000, 0x0000, 0x0000,
41 0x0038, 0x000b, 0x0032, 0x0000,
42 0x0008, 0x000c, 0x0093, 0x00e9,
43 0x0000, 0x0000, 0x0000, 0x0000,
44 0x0003, 0x0010, 0x0000, 0x0000,
45 0x0000, 0x0002, 0x0000, 0x0000,
46 0x0000, 0x0000, 0x0039, 0x0000,
47 0x0000,
48};
49
50#define WM8974_POWER1_BIASEN 0x08
51#define WM8974_POWER1_BUFIOEN 0x10
52
53struct wm8974_priv {
54 struct snd_soc_codec codec;
55 u16 reg_cache[WM8974_CACHEREGNUM];
56};
57
58static struct snd_soc_codec *wm8974_codec;
59
60#define wm8974_reset(c) snd_soc_write(c, WM8974_RESET, 0)
61
62static const char *wm8974_companding[] = {"Off", "NC", "u-law", "A-law" };
63static const char *wm8974_deemp[] = {"None", "32kHz", "44.1kHz", "48kHz" };
64static const char *wm8974_eqmode[] = {"Capture", "Playback" };
65static const char *wm8974_bw[] = {"Narrow", "Wide" };
66static const char *wm8974_eq1[] = {"80Hz", "105Hz", "135Hz", "175Hz" };
67static const char *wm8974_eq2[] = {"230Hz", "300Hz", "385Hz", "500Hz" };
68static const char *wm8974_eq3[] = {"650Hz", "850Hz", "1.1kHz", "1.4kHz" };
69static const char *wm8974_eq4[] = {"1.8kHz", "2.4kHz", "3.2kHz", "4.1kHz" };
70static const char *wm8974_eq5[] = {"5.3kHz", "6.9kHz", "9kHz", "11.7kHz" };
71static const char *wm8974_alc[] = {"ALC", "Limiter" };
72
73static const struct soc_enum wm8974_enum[] = {
74 SOC_ENUM_SINGLE(WM8974_COMP, 1, 4, wm8974_companding), /* adc */
75 SOC_ENUM_SINGLE(WM8974_COMP, 3, 4, wm8974_companding), /* dac */
76 SOC_ENUM_SINGLE(WM8974_DAC, 4, 4, wm8974_deemp),
77 SOC_ENUM_SINGLE(WM8974_EQ1, 8, 2, wm8974_eqmode),
78
79 SOC_ENUM_SINGLE(WM8974_EQ1, 5, 4, wm8974_eq1),
80 SOC_ENUM_SINGLE(WM8974_EQ2, 8, 2, wm8974_bw),
81 SOC_ENUM_SINGLE(WM8974_EQ2, 5, 4, wm8974_eq2),
82 SOC_ENUM_SINGLE(WM8974_EQ3, 8, 2, wm8974_bw),
83
84 SOC_ENUM_SINGLE(WM8974_EQ3, 5, 4, wm8974_eq3),
85 SOC_ENUM_SINGLE(WM8974_EQ4, 8, 2, wm8974_bw),
86 SOC_ENUM_SINGLE(WM8974_EQ4, 5, 4, wm8974_eq4),
87 SOC_ENUM_SINGLE(WM8974_EQ5, 8, 2, wm8974_bw),
88
89 SOC_ENUM_SINGLE(WM8974_EQ5, 5, 4, wm8974_eq5),
90 SOC_ENUM_SINGLE(WM8974_ALC3, 8, 2, wm8974_alc),
91};
92
93static const char *wm8974_auxmode_text[] = { "Buffer", "Mixer" };
94
95static const struct soc_enum wm8974_auxmode =
96 SOC_ENUM_SINGLE(WM8974_INPUT, 3, 2, wm8974_auxmode_text);
97
98static const DECLARE_TLV_DB_SCALE(digital_tlv, -12750, 50, 1);
99static const DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0);
100static const DECLARE_TLV_DB_SCALE(inpga_tlv, -1200, 75, 0);
101static const DECLARE_TLV_DB_SCALE(spk_tlv, -5700, 100, 0);
102
103static const struct snd_kcontrol_new wm8974_snd_controls[] = {
104
105SOC_SINGLE("Digital Loopback Switch", WM8974_COMP, 0, 1, 0),
106
107SOC_ENUM("DAC Companding", wm8974_enum[1]),
108SOC_ENUM("ADC Companding", wm8974_enum[0]),
109
110SOC_ENUM("Playback De-emphasis", wm8974_enum[2]),
111SOC_SINGLE("DAC Inversion Switch", WM8974_DAC, 0, 1, 0),
112
113SOC_SINGLE_TLV("PCM Volume", WM8974_DACVOL, 0, 255, 0, digital_tlv),
114
115SOC_SINGLE("High Pass Filter Switch", WM8974_ADC, 8, 1, 0),
116SOC_SINGLE("High Pass Cut Off", WM8974_ADC, 4, 7, 0),
117SOC_SINGLE("ADC Inversion Switch", WM8974_ADC, 0, 1, 0),
118
119SOC_SINGLE_TLV("Capture Volume", WM8974_ADCVOL, 0, 255, 0, digital_tlv),
120
121SOC_ENUM("Equaliser Function", wm8974_enum[3]),
122SOC_ENUM("EQ1 Cut Off", wm8974_enum[4]),
123SOC_SINGLE_TLV("EQ1 Volume", WM8974_EQ1, 0, 24, 1, eq_tlv),
124
125SOC_ENUM("Equaliser EQ2 Bandwith", wm8974_enum[5]),
126SOC_ENUM("EQ2 Cut Off", wm8974_enum[6]),
127SOC_SINGLE_TLV("EQ2 Volume", WM8974_EQ2, 0, 24, 1, eq_tlv),
128
129SOC_ENUM("Equaliser EQ3 Bandwith", wm8974_enum[7]),
130SOC_ENUM("EQ3 Cut Off", wm8974_enum[8]),
131SOC_SINGLE_TLV("EQ3 Volume", WM8974_EQ3, 0, 24, 1, eq_tlv),
132
133SOC_ENUM("Equaliser EQ4 Bandwith", wm8974_enum[9]),
134SOC_ENUM("EQ4 Cut Off", wm8974_enum[10]),
135SOC_SINGLE_TLV("EQ4 Volume", WM8974_EQ4, 0, 24, 1, eq_tlv),
136
137SOC_ENUM("Equaliser EQ5 Bandwith", wm8974_enum[11]),
138SOC_ENUM("EQ5 Cut Off", wm8974_enum[12]),
139SOC_SINGLE_TLV("EQ5 Volume", WM8974_EQ5, 0, 24, 1, eq_tlv),
140
141SOC_SINGLE("DAC Playback Limiter Switch", WM8974_DACLIM1, 8, 1, 0),
142SOC_SINGLE("DAC Playback Limiter Decay", WM8974_DACLIM1, 4, 15, 0),
143SOC_SINGLE("DAC Playback Limiter Attack", WM8974_DACLIM1, 0, 15, 0),
144
145SOC_SINGLE("DAC Playback Limiter Threshold", WM8974_DACLIM2, 4, 7, 0),
146SOC_SINGLE("DAC Playback Limiter Boost", WM8974_DACLIM2, 0, 15, 0),
147
148SOC_SINGLE("ALC Enable Switch", WM8974_ALC1, 8, 1, 0),
149SOC_SINGLE("ALC Capture Max Gain", WM8974_ALC1, 3, 7, 0),
150SOC_SINGLE("ALC Capture Min Gain", WM8974_ALC1, 0, 7, 0),
151
152SOC_SINGLE("ALC Capture ZC Switch", WM8974_ALC2, 8, 1, 0),
153SOC_SINGLE("ALC Capture Hold", WM8974_ALC2, 4, 7, 0),
154SOC_SINGLE("ALC Capture Target", WM8974_ALC2, 0, 15, 0),
155
156SOC_ENUM("ALC Capture Mode", wm8974_enum[13]),
157SOC_SINGLE("ALC Capture Decay", WM8974_ALC3, 4, 15, 0),
158SOC_SINGLE("ALC Capture Attack", WM8974_ALC3, 0, 15, 0),
159
160SOC_SINGLE("ALC Capture Noise Gate Switch", WM8974_NGATE, 3, 1, 0),
161SOC_SINGLE("ALC Capture Noise Gate Threshold", WM8974_NGATE, 0, 7, 0),
162
163SOC_SINGLE("Capture PGA ZC Switch", WM8974_INPPGA, 7, 1, 0),
164SOC_SINGLE_TLV("Capture PGA Volume", WM8974_INPPGA, 0, 63, 0, inpga_tlv),
165
166SOC_SINGLE("Speaker Playback ZC Switch", WM8974_SPKVOL, 7, 1, 0),
167SOC_SINGLE("Speaker Playback Switch", WM8974_SPKVOL, 6, 1, 1),
168SOC_SINGLE_TLV("Speaker Playback Volume", WM8974_SPKVOL, 0, 63, 0, spk_tlv),
169
170SOC_ENUM("Aux Mode", wm8974_auxmode),
171
172SOC_SINGLE("Capture Boost(+20dB)", WM8974_ADCBOOST, 8, 1, 0),
173SOC_SINGLE("Mono Playback Switch", WM8974_MONOMIX, 6, 1, 1),
174};
175
176/* Speaker Output Mixer */
177static const struct snd_kcontrol_new wm8974_speaker_mixer_controls[] = {
178SOC_DAPM_SINGLE("Line Bypass Switch", WM8974_SPKMIX, 1, 1, 0),
179SOC_DAPM_SINGLE("Aux Playback Switch", WM8974_SPKMIX, 5, 1, 0),
180SOC_DAPM_SINGLE("PCM Playback Switch", WM8974_SPKMIX, 0, 1, 1),
181};
182
183/* Mono Output Mixer */
184static const struct snd_kcontrol_new wm8974_mono_mixer_controls[] = {
185SOC_DAPM_SINGLE("Line Bypass Switch", WM8974_MONOMIX, 1, 1, 0),
186SOC_DAPM_SINGLE("Aux Playback Switch", WM8974_MONOMIX, 2, 1, 0),
187SOC_DAPM_SINGLE("PCM Playback Switch", WM8974_MONOMIX, 0, 1, 0),
188};
189
190/* Boost mixer */
191static const struct snd_kcontrol_new wm8974_boost_mixer[] = {
192SOC_DAPM_SINGLE("Aux Switch", WM8974_INPPGA, 6, 1, 0),
193};
194
195/* Input PGA */
196static const struct snd_kcontrol_new wm8974_inpga[] = {
197SOC_DAPM_SINGLE("Aux Switch", WM8974_INPUT, 2, 1, 0),
198SOC_DAPM_SINGLE("MicN Switch", WM8974_INPUT, 1, 1, 0),
199SOC_DAPM_SINGLE("MicP Switch", WM8974_INPUT, 0, 1, 0),
200};
201
202/* AUX Input boost vol */
203static const struct snd_kcontrol_new wm8974_aux_boost_controls =
204SOC_DAPM_SINGLE("Aux Volume", WM8974_ADCBOOST, 0, 7, 0);
205
206/* Mic Input boost vol */
207static const struct snd_kcontrol_new wm8974_mic_boost_controls =
208SOC_DAPM_SINGLE("Mic Volume", WM8974_ADCBOOST, 4, 7, 0);
209
210static const struct snd_soc_dapm_widget wm8974_dapm_widgets[] = {
211SND_SOC_DAPM_MIXER("Speaker Mixer", WM8974_POWER3, 2, 0,
212 &wm8974_speaker_mixer_controls[0],
213 ARRAY_SIZE(wm8974_speaker_mixer_controls)),
214SND_SOC_DAPM_MIXER("Mono Mixer", WM8974_POWER3, 3, 0,
215 &wm8974_mono_mixer_controls[0],
216 ARRAY_SIZE(wm8974_mono_mixer_controls)),
217SND_SOC_DAPM_DAC("DAC", "HiFi Playback", WM8974_POWER3, 0, 0),
218SND_SOC_DAPM_ADC("ADC", "HiFi Capture", WM8974_POWER2, 0, 0),
219SND_SOC_DAPM_PGA("Aux Input", WM8974_POWER1, 6, 0, NULL, 0),
220SND_SOC_DAPM_PGA("SpkN Out", WM8974_POWER3, 5, 0, NULL, 0),
221SND_SOC_DAPM_PGA("SpkP Out", WM8974_POWER3, 6, 0, NULL, 0),
222SND_SOC_DAPM_PGA("Mono Out", WM8974_POWER3, 7, 0, NULL, 0),
223
224SND_SOC_DAPM_MIXER("Input PGA", WM8974_POWER2, 2, 0, wm8974_inpga,
225 ARRAY_SIZE(wm8974_inpga)),
226SND_SOC_DAPM_MIXER("Boost Mixer", WM8974_POWER2, 4, 0,
227 wm8974_boost_mixer, ARRAY_SIZE(wm8974_boost_mixer)),
228
229SND_SOC_DAPM_MICBIAS("Mic Bias", WM8974_POWER1, 4, 0),
230
231SND_SOC_DAPM_INPUT("MICN"),
232SND_SOC_DAPM_INPUT("MICP"),
233SND_SOC_DAPM_INPUT("AUX"),
234SND_SOC_DAPM_OUTPUT("MONOOUT"),
235SND_SOC_DAPM_OUTPUT("SPKOUTP"),
236SND_SOC_DAPM_OUTPUT("SPKOUTN"),
237};
238
239static const struct snd_soc_dapm_route audio_map[] = {
240 /* Mono output mixer */
241 {"Mono Mixer", "PCM Playback Switch", "DAC"},
242 {"Mono Mixer", "Aux Playback Switch", "Aux Input"},
243 {"Mono Mixer", "Line Bypass Switch", "Boost Mixer"},
244
245 /* Speaker output mixer */
246 {"Speaker Mixer", "PCM Playback Switch", "DAC"},
247 {"Speaker Mixer", "Aux Playback Switch", "Aux Input"},
248 {"Speaker Mixer", "Line Bypass Switch", "Boost Mixer"},
249
250 /* Outputs */
251 {"Mono Out", NULL, "Mono Mixer"},
252 {"MONOOUT", NULL, "Mono Out"},
253 {"SpkN Out", NULL, "Speaker Mixer"},
254 {"SpkP Out", NULL, "Speaker Mixer"},
255 {"SPKOUTN", NULL, "SpkN Out"},
256 {"SPKOUTP", NULL, "SpkP Out"},
257
258 /* Boost Mixer */
259 {"ADC", NULL, "Boost Mixer"},
260 {"Boost Mixer", "Aux Switch", "Aux Input"},
261 {"Boost Mixer", NULL, "Input PGA"},
262 {"Boost Mixer", NULL, "MICP"},
263
264 /* Input PGA */
265 {"Input PGA", "Aux Switch", "Aux Input"},
266 {"Input PGA", "MicN Switch", "MICN"},
267 {"Input PGA", "MicP Switch", "MICP"},
268
269 /* Inputs */
270 {"Aux Input", NULL, "AUX"},
271};
272
273static int wm8974_add_widgets(struct snd_soc_codec *codec)
274{
275 snd_soc_dapm_new_controls(codec, wm8974_dapm_widgets,
276 ARRAY_SIZE(wm8974_dapm_widgets));
277
278 snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
279
280 snd_soc_dapm_new_widgets(codec);
281 return 0;
282}
283
284struct pll_ {
285 unsigned int pre_div:4; /* prescale - 1 */
286 unsigned int n:4;
287 unsigned int k;
288};
289
290static struct pll_ pll_div;
291
292/* Scale of the 24-bit fractional PLL divider (K), multiplied by 10
293 * to allow rounding later */
294#define FIXED_PLL_SIZE ((1 << 24) * 10)
295
296static void pll_factors(unsigned int target, unsigned int source)
297{
298 unsigned long long Kpart;
299 unsigned int K, Ndiv, Nmod;
300
301 Ndiv = target / source;
302 if (Ndiv < 6) {
303 source >>= 1;
304 pll_div.pre_div = 1;
305 Ndiv = target / source;
306 } else
307 pll_div.pre_div = 0;
308
309 if ((Ndiv < 6) || (Ndiv > 12))
310 printk(KERN_WARNING
311 "WM8974 N value %u outwith recommended range!\n",
312 Ndiv);
313
314 pll_div.n = Ndiv;
315 Nmod = target % source;
316 Kpart = FIXED_PLL_SIZE * (long long)Nmod;
317
318 do_div(Kpart, source);
319
320 K = Kpart & 0xFFFFFFFF;
321
322 /* Check if we need to round */
323 if ((K % 10) >= 5)
324 K += 5;
325
326 /* Move down to proper range now rounding is done */
327 K /= 10;
328
329 pll_div.k = K;
330}
331
332static int wm8974_set_dai_pll(struct snd_soc_dai *codec_dai,
333 int pll_id, unsigned int freq_in, unsigned int freq_out)
334{
335 struct snd_soc_codec *codec = codec_dai->codec;
336 u16 reg;
337
338 if (freq_in == 0 || freq_out == 0) {
339 /* Clock CODEC directly from MCLK */
340 reg = snd_soc_read(codec, WM8974_CLOCK);
341 snd_soc_write(codec, WM8974_CLOCK, reg & 0x0ff);
342
343 /* Turn off PLL */
344 reg = snd_soc_read(codec, WM8974_POWER1);
345 snd_soc_write(codec, WM8974_POWER1, reg & 0x1df);
346 return 0;
347 }
348
349 pll_factors(freq_out*4, freq_in);
350
351 snd_soc_write(codec, WM8974_PLLN, (pll_div.pre_div << 4) | pll_div.n);
352 snd_soc_write(codec, WM8974_PLLK1, pll_div.k >> 18);
353 snd_soc_write(codec, WM8974_PLLK2, (pll_div.k >> 9) & 0x1ff);
354 snd_soc_write(codec, WM8974_PLLK3, pll_div.k & 0x1ff);
355 reg = snd_soc_read(codec, WM8974_POWER1);
356 snd_soc_write(codec, WM8974_POWER1, reg | 0x020);
357
358 /* Run CODEC from PLL instead of MCLK */
359 reg = snd_soc_read(codec, WM8974_CLOCK);
360 snd_soc_write(codec, WM8974_CLOCK, reg | 0x100);
361
362 return 0;
363}
364
365/*
366 * Configure WM8974 clock dividers.
367 */
368static int wm8974_set_dai_clkdiv(struct snd_soc_dai *codec_dai,
369 int div_id, int div)
370{
371 struct snd_soc_codec *codec = codec_dai->codec;
372 u16 reg;
373
374 switch (div_id) {
375 case WM8974_OPCLKDIV:
376 reg = snd_soc_read(codec, WM8974_GPIO) & 0x1cf;
377 snd_soc_write(codec, WM8974_GPIO, reg | div);
378 break;
379 case WM8974_MCLKDIV:
380 reg = snd_soc_read(codec, WM8974_CLOCK) & 0x11f;
381 snd_soc_write(codec, WM8974_CLOCK, reg | div);
382 break;
383 case WM8974_ADCCLK:
384 reg = snd_soc_read(codec, WM8974_ADC) & 0x1f7;
385 snd_soc_write(codec, WM8974_ADC, reg | div);
386 break;
387 case WM8974_DACCLK:
388 reg = snd_soc_read(codec, WM8974_DAC) & 0x1f7;
389 snd_soc_write(codec, WM8974_DAC, reg | div);
390 break;
391 case WM8974_BCLKDIV:
392 reg = snd_soc_read(codec, WM8974_CLOCK) & 0x1e3;
393 snd_soc_write(codec, WM8974_CLOCK, reg | div);
394 break;
395 default:
396 return -EINVAL;
397 }
398
399 return 0;
400}
401
402static int wm8974_set_dai_fmt(struct snd_soc_dai *codec_dai,
403 unsigned int fmt)
404{
405 struct snd_soc_codec *codec = codec_dai->codec;
406 u16 iface = 0;
407 u16 clk = snd_soc_read(codec, WM8974_CLOCK) & 0x1fe;
408
409 /* set master/slave audio interface */
410 switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
411 case SND_SOC_DAIFMT_CBM_CFM:
412 clk |= 0x0001;
413 break;
414 case SND_SOC_DAIFMT_CBS_CFS:
415 break;
416 default:
417 return -EINVAL;
418 }
419
420 /* interface format */
421 switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
422 case SND_SOC_DAIFMT_I2S:
423 iface |= 0x0010;
424 break;
425 case SND_SOC_DAIFMT_RIGHT_J:
426 break;
427 case SND_SOC_DAIFMT_LEFT_J:
428 iface |= 0x0008;
429 break;
430 case SND_SOC_DAIFMT_DSP_A:
431 iface |= 0x00018;
432 break;
433 default:
434 return -EINVAL;
435 }
436
437 /* clock inversion */
438 switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
439 case SND_SOC_DAIFMT_NB_NF:
440 break;
441 case SND_SOC_DAIFMT_IB_IF:
442 iface |= 0x0180;
443 break;
444 case SND_SOC_DAIFMT_IB_NF:
445 iface |= 0x0100;
446 break;
447 case SND_SOC_DAIFMT_NB_IF:
448 iface |= 0x0080;
449 break;
450 default:
451 return -EINVAL;
452 }
453
454 snd_soc_write(codec, WM8974_IFACE, iface);
455 snd_soc_write(codec, WM8974_CLOCK, clk);
456 return 0;
457}
458
459static int wm8974_pcm_hw_params(struct snd_pcm_substream *substream,
460 struct snd_pcm_hw_params *params,
461 struct snd_soc_dai *dai)
462{
463 struct snd_soc_codec *codec = dai->codec;
464 u16 iface = snd_soc_read(codec, WM8974_IFACE) & 0x19f;
465 u16 adn = snd_soc_read(codec, WM8974_ADD) & 0x1f1;
466
467 /* bit size */
468 switch (params_format(params)) {
469 case SNDRV_PCM_FORMAT_S16_LE:
470 break;
471 case SNDRV_PCM_FORMAT_S20_3LE:
472 iface |= 0x0020;
473 break;
474 case SNDRV_PCM_FORMAT_S24_LE:
475 iface |= 0x0040;
476 break;
477 case SNDRV_PCM_FORMAT_S32_LE:
478 iface |= 0x0060;
479 break;
480 }
481
482 /* filter coefficient */
483 switch (params_rate(params)) {
484 case SNDRV_PCM_RATE_8000:
485 adn |= 0x5 << 1;
486 break;
487 case SNDRV_PCM_RATE_11025:
488 adn |= 0x4 << 1;
489 break;
490 case SNDRV_PCM_RATE_16000:
491 adn |= 0x3 << 1;
492 break;
493 case SNDRV_PCM_RATE_22050:
494 adn |= 0x2 << 1;
495 break;
496 case SNDRV_PCM_RATE_32000:
497 adn |= 0x1 << 1;
498 break;
499 case SNDRV_PCM_RATE_44100:
500 case SNDRV_PCM_RATE_48000:
501 break;
502 }
503
504 snd_soc_write(codec, WM8974_IFACE, iface);
505 snd_soc_write(codec, WM8974_ADD, adn);
506 return 0;
507}
508
509static int wm8974_mute(struct snd_soc_dai *dai, int mute)
510{
511 struct snd_soc_codec *codec = dai->codec;
512 u16 mute_reg = snd_soc_read(codec, WM8974_DAC) & 0xffbf;
513
514 if (mute)
515 snd_soc_write(codec, WM8974_DAC, mute_reg | 0x40);
516 else
517 snd_soc_write(codec, WM8974_DAC, mute_reg);
518 return 0;
519}
520
521/* TODO(Liam): make this lower power with DAPM */
522static int wm8974_set_bias_level(struct snd_soc_codec *codec,
523 enum snd_soc_bias_level level)
524{
525 u16 power1 = snd_soc_read(codec, WM8974_POWER1) & ~0x3;
526
527 switch (level) {
528 case SND_SOC_BIAS_ON:
529 case SND_SOC_BIAS_PREPARE:
530 power1 |= 0x1; /* VMID 50k */
531 snd_soc_write(codec, WM8974_POWER1, power1);
532 break;
533
534 case SND_SOC_BIAS_STANDBY:
535 power1 |= WM8974_POWER1_BIASEN | WM8974_POWER1_BUFIOEN;
536
537 if (codec->bias_level == SND_SOC_BIAS_OFF) {
538 /* Initial cap charge at VMID 5k */
539 snd_soc_write(codec, WM8974_POWER1, power1 | 0x3);
540 mdelay(100);
541 }
542
543 power1 |= 0x2; /* VMID 500k */
544 snd_soc_write(codec, WM8974_POWER1, power1);
545 break;
546
547 case SND_SOC_BIAS_OFF:
548 snd_soc_write(codec, WM8974_POWER1, 0);
549 snd_soc_write(codec, WM8974_POWER2, 0);
550 snd_soc_write(codec, WM8974_POWER3, 0);
551 break;
552 }
553
554 codec->bias_level = level;
555 return 0;
556}
557
558#define WM8974_RATES (SNDRV_PCM_RATE_8000_48000)
559
560#define WM8974_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
561 SNDRV_PCM_FMTBIT_S24_LE)
562
563static struct snd_soc_dai_ops wm8974_ops = {
564 .hw_params = wm8974_pcm_hw_params,
565 .digital_mute = wm8974_mute,
566 .set_fmt = wm8974_set_dai_fmt,
567 .set_clkdiv = wm8974_set_dai_clkdiv,
568 .set_pll = wm8974_set_dai_pll,
569};
570
571struct snd_soc_dai wm8974_dai = {
572 .name = "WM8974 HiFi",
573 .playback = {
574 .stream_name = "Playback",
575 .channels_min = 1,
576 .channels_max = 2, /* mono codec: only one channel carries real data */
577 .rates = WM8974_RATES,
578 .formats = WM8974_FORMATS,},
579 .capture = {
580 .stream_name = "Capture",
581 .channels_min = 1,
582 .channels_max = 2, /* mono codec: only one channel carries real data */
583 .rates = WM8974_RATES,
584 .formats = WM8974_FORMATS,},
585 .ops = &wm8974_ops,
586 .symmetric_rates = 1,
587};
588EXPORT_SYMBOL_GPL(wm8974_dai);
589
590static int wm8974_suspend(struct platform_device *pdev, pm_message_t state)
591{
592 struct snd_soc_device *socdev = platform_get_drvdata(pdev);
593 struct snd_soc_codec *codec = socdev->card->codec;
594
595 wm8974_set_bias_level(codec, SND_SOC_BIAS_OFF);
596 return 0;
597}
598
599static int wm8974_resume(struct platform_device *pdev)
600{
601 struct snd_soc_device *socdev = platform_get_drvdata(pdev);
602 struct snd_soc_codec *codec = socdev->card->codec;
603 int i;
604 u8 data[2];
605 u16 *cache = codec->reg_cache;
606
607 /* Sync reg_cache with the hardware */
608 for (i = 0; i < ARRAY_SIZE(wm8974_reg); i++) {
609 data[0] = (i << 1) | ((cache[i] >> 8) & 0x0001);
610 data[1] = cache[i] & 0x00ff;
611 codec->hw_write(codec->control_data, data, 2);
612 }
613 wm8974_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
614 wm8974_set_bias_level(codec, codec->suspend_bias_level);
615 return 0;
616}
617
618static int wm8974_probe(struct platform_device *pdev)
619{
620 struct snd_soc_device *socdev = platform_get_drvdata(pdev);
621 struct snd_soc_codec *codec;
622 int ret = 0;
623
624 if (wm8974_codec == NULL) {
625 dev_err(&pdev->dev, "Codec device not registered\n");
626 return -ENODEV;
627 }
628
629 socdev->card->codec = wm8974_codec;
630 codec = wm8974_codec;
631
632 /* register pcms */
633 ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
634 if (ret < 0) {
635 dev_err(codec->dev, "failed to create pcms: %d\n", ret);
636 goto pcm_err;
637 }
638
639 snd_soc_add_controls(codec, wm8974_snd_controls,
640 ARRAY_SIZE(wm8974_snd_controls));
641 wm8974_add_widgets(codec);
642 ret = snd_soc_init_card(socdev);
643 if (ret < 0) {
644 dev_err(codec->dev, "failed to register card: %d\n", ret);
645 goto card_err;
646 }
647
648 return ret;
649
650card_err:
651 snd_soc_free_pcms(socdev);
652 snd_soc_dapm_free(socdev);
653pcm_err:
654 return ret;
655}
656
657/* power down chip */
658static int wm8974_remove(struct platform_device *pdev)
659{
660 struct snd_soc_device *socdev = platform_get_drvdata(pdev);
661
662 snd_soc_free_pcms(socdev);
663 snd_soc_dapm_free(socdev);
664
665 return 0;
666}
667
668struct snd_soc_codec_device soc_codec_dev_wm8974 = {
669 .probe = wm8974_probe,
670 .remove = wm8974_remove,
671 .suspend = wm8974_suspend,
672 .resume = wm8974_resume,
673};
674EXPORT_SYMBOL_GPL(soc_codec_dev_wm8974);
675
676static __devinit int wm8974_register(struct wm8974_priv *wm8974)
677{
678 int ret;
679 struct snd_soc_codec *codec = &wm8974->codec;
680
681 if (wm8974_codec) {
682 dev_err(codec->dev, "Another WM8974 is registered\n");
683 return -EINVAL;
684 }
685
686 mutex_init(&codec->mutex);
687 INIT_LIST_HEAD(&codec->dapm_widgets);
688 INIT_LIST_HEAD(&codec->dapm_paths);
689
690 codec->private_data = wm8974;
691 codec->name = "WM8974";
692 codec->owner = THIS_MODULE;
693 codec->bias_level = SND_SOC_BIAS_OFF;
694 codec->set_bias_level = wm8974_set_bias_level;
695 codec->dai = &wm8974_dai;
696 codec->num_dai = 1;
697 codec->reg_cache_size = WM8974_CACHEREGNUM;
698 codec->reg_cache = &wm8974->reg_cache;
699
700 ret = snd_soc_codec_set_cache_io(codec, 7, 9, SND_SOC_I2C);
701 if (ret < 0) {
702 dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
703 goto err;
704 }
705
706 memcpy(codec->reg_cache, wm8974_reg, sizeof(wm8974_reg));
707
708 ret = wm8974_reset(codec);
709 if (ret < 0) {
710 dev_err(codec->dev, "Failed to issue reset\n");
711 goto err;
712 }
713
714 wm8974_dai.dev = codec->dev;
715
716 wm8974_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
717
718 wm8974_codec = codec;
719
720 ret = snd_soc_register_codec(codec);
721 if (ret != 0) {
722 dev_err(codec->dev, "Failed to register codec: %d\n", ret);
723 goto err;
724 }
725
726 ret = snd_soc_register_dai(&wm8974_dai);
727 if (ret != 0) {
728 dev_err(codec->dev, "Failed to register DAI: %d\n", ret);
729 goto err_codec;
730 }
731
732 return 0;
733
734err_codec:
735 snd_soc_unregister_codec(codec);
736err:
737 kfree(wm8974);
738 return ret;
739}
740
741static __devexit void wm8974_unregister(struct wm8974_priv *wm8974)
742{
743 wm8974_set_bias_level(&wm8974->codec, SND_SOC_BIAS_OFF);
744 snd_soc_unregister_dai(&wm8974_dai);
745 snd_soc_unregister_codec(&wm8974->codec);
746 kfree(wm8974);
747 wm8974_codec = NULL;
748}
749
750static __devinit int wm8974_i2c_probe(struct i2c_client *i2c,
751 const struct i2c_device_id *id)
752{
753 struct wm8974_priv *wm8974;
754 struct snd_soc_codec *codec;
755
756 wm8974 = kzalloc(sizeof(struct wm8974_priv), GFP_KERNEL);
757 if (wm8974 == NULL)
758 return -ENOMEM;
759
760 codec = &wm8974->codec;
761 codec->hw_write = (hw_write_t)i2c_master_send;
762
763 i2c_set_clientdata(i2c, wm8974);
764 codec->control_data = i2c;
765
766 codec->dev = &i2c->dev;
767
768 return wm8974_register(wm8974);
769}
770
771static __devexit int wm8974_i2c_remove(struct i2c_client *client)
772{
773 struct wm8974_priv *wm8974 = i2c_get_clientdata(client);
774 wm8974_unregister(wm8974);
775 return 0;
776}
777
778static const struct i2c_device_id wm8974_i2c_id[] = {
779 { "wm8974", 0 },
780 { }
781};
782MODULE_DEVICE_TABLE(i2c, wm8974_i2c_id);
783
784static struct i2c_driver wm8974_i2c_driver = {
785 .driver = {
786 .name = "WM8974",
787 .owner = THIS_MODULE,
788 },
789 .probe = wm8974_i2c_probe,
790 .remove = __devexit_p(wm8974_i2c_remove),
791 .id_table = wm8974_i2c_id,
792};
793
794static int __init wm8974_modinit(void)
795{
796 return i2c_add_driver(&wm8974_i2c_driver);
797}
798module_init(wm8974_modinit);
799
800static void __exit wm8974_exit(void)
801{
802 i2c_del_driver(&wm8974_i2c_driver);
803}
804module_exit(wm8974_exit);
805
806MODULE_DESCRIPTION("ASoC WM8974 driver");
807MODULE_AUTHOR("Liam Girdwood");
808MODULE_LICENSE("GPL");
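
For reference, the arithmetic in pll_factors() above can be checked with plain integer math: the PLL multiplies the (possibly prescaled) input clock by N.K, where N is the integer quotient and K is the remainder scaled to 24 bits, and wm8974_set_dai_pll() asks for four times the requested output rate. The figures below are an assumed example (12 MHz reference, prescaled to 6 MHz, multiplied up to 49.152 MHz); the snippet rounds by adding half the divisor instead of the driver's x10 scheme, but the resulting K happens to match the PLLK1-3 reset defaults in the register table near the top of the file (0x000c, 0x0093, 0x00e9):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t source = 12000000 / 2;    /* N would be < 6, so prescale MCLK by 2 */
		uint64_t target = 4 * 12288000;    /* PLL output: 4 x 12.288 MHz = 49.152 MHz */
		uint64_t n = target / source;      /* integer part of the multiplier */
		/* fractional part scaled to 24 bits, rounded to nearest */
		uint64_t k = (((uint64_t)1 << 24) * (target % source) + source / 2) / source;

		/* Expect N=8, K=0x3126e9, i.e. PLLK1/2/3 = 0x0c, 0x93, 0xe9 */
		printf("N=%llu K=0x%06llx\n",
		       (unsigned long long)n, (unsigned long long)k);
		return 0;
	}
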
diff --git a/sound/soc/codecs/wm8974.h b/sound/soc/codecs/wm8974.h
new file mode 100644
index 000000000000..98de9562d4d2
--- /dev/null
+++ b/sound/soc/codecs/wm8974.h
@@ -0,0 +1,99 @@
1/*
2 * wm8974.h -- WM8974 Soc Audio driver
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef _WM8974_H
10#define _WM8974_H
11
12/* WM8974 register space */
13
14#define WM8974_RESET 0x0
15#define WM8974_POWER1 0x1
16#define WM8974_POWER2 0x2
17#define WM8974_POWER3 0x3
18#define WM8974_IFACE 0x4
19#define WM8974_COMP 0x5
20#define WM8974_CLOCK 0x6
21#define WM8974_ADD 0x7
22#define WM8974_GPIO 0x8
23#define WM8974_DAC 0xa
24#define WM8974_DACVOL 0xb
25#define WM8974_ADC 0xe
26#define WM8974_ADCVOL 0xf
27#define WM8974_EQ1 0x12
28#define WM8974_EQ2 0x13
29#define WM8974_EQ3 0x14
30#define WM8974_EQ4 0x15
31#define WM8974_EQ5 0x16
32#define WM8974_DACLIM1 0x18
33#define WM8974_DACLIM2 0x19
34#define WM8974_NOTCH1 0x1b
35#define WM8974_NOTCH2 0x1c
36#define WM8974_NOTCH3 0x1d
37#define WM8974_NOTCH4 0x1e
38#define WM8974_ALC1 0x20
39#define WM8974_ALC2 0x21
40#define WM8974_ALC3 0x22
41#define WM8974_NGATE 0x23
42#define WM8974_PLLN 0x24
43#define WM8974_PLLK1 0x25
44#define WM8974_PLLK2 0x26
45#define WM8974_PLLK3 0x27
46#define WM8974_ATTEN 0x28
47#define WM8974_INPUT 0x2c
48#define WM8974_INPPGA 0x2d
49#define WM8974_ADCBOOST 0x2f
50#define WM8974_OUTPUT 0x31
51#define WM8974_SPKMIX 0x32
52#define WM8974_SPKVOL 0x36
53#define WM8974_MONOMIX 0x38
54
55#define WM8974_CACHEREGNUM 57
56
57/* Clock divider Id's */
58#define WM8974_OPCLKDIV 0
59#define WM8974_MCLKDIV 1
60#define WM8974_ADCCLK 2
61#define WM8974_DACCLK 3
62#define WM8974_BCLKDIV 4
63
64/* DAC clock dividers */
65#define WM8974_DACCLK_F2 (1 << 3)
66#define WM8974_DACCLK_F4 (0 << 3)
67
68/* ADC clock dividers */
69#define WM8974_ADCCLK_F2 (1 << 3)
70#define WM8974_ADCCLK_F4 (0 << 3)
71
72/* PLL Out dividers */
73#define WM8974_OPCLKDIV_1 (0 << 4)
74#define WM8974_OPCLKDIV_2 (1 << 4)
75#define WM8974_OPCLKDIV_3 (2 << 4)
76#define WM8974_OPCLKDIV_4 (3 << 4)
77
78/* BCLK clock dividers */
79#define WM8974_BCLKDIV_1 (0 << 2)
80#define WM8974_BCLKDIV_2 (1 << 2)
81#define WM8974_BCLKDIV_4 (2 << 2)
82#define WM8974_BCLKDIV_8 (3 << 2)
83#define WM8974_BCLKDIV_16 (4 << 2)
84#define WM8974_BCLKDIV_32 (5 << 2)
85
86/* MCLK clock dividers */
87#define WM8974_MCLKDIV_1 (0 << 5)
88#define WM8974_MCLKDIV_1_5 (1 << 5)
89#define WM8974_MCLKDIV_2 (2 << 5)
90#define WM8974_MCLKDIV_3 (3 << 5)
91#define WM8974_MCLKDIV_4 (4 << 5)
92#define WM8974_MCLKDIV_6 (5 << 5)
93#define WM8974_MCLKDIV_8 (6 << 5)
94#define WM8974_MCLKDIV_12 (7 << 5)
95
96extern struct snd_soc_dai wm8974_dai;
97extern struct snd_soc_codec_device soc_codec_dev_wm8974;
98
99#endif
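
The divider IDs and pre-shifted divider values above are intended to be passed straight to wm8974_set_dai_clkdiv(), which ORs them into the matching register field. A hypothetical machine-driver fragment might use them roughly as follows; the example_ helper name and the clock figures are assumptions, not part of the patch:

	#include <sound/soc.h>
	#include <sound/soc-dai.h>
	#include "wm8974.h"

	/* Sketch: divide the codec clocks for a 48 kHz, 16-bit stereo stream,
	 * assuming SYSCLK is 12.288 MHz (BCLK = 12.288 MHz / 8 = 1.536 MHz). */
	static int example_set_codec_clocks(struct snd_soc_dai *codec_dai)
	{
		int ret;

		ret = snd_soc_dai_set_clkdiv(codec_dai, WM8974_MCLKDIV,
					     WM8974_MCLKDIV_1);
		if (ret < 0)
			return ret;

		return snd_soc_dai_set_clkdiv(codec_dai, WM8974_BCLKDIV,
					      WM8974_BCLKDIV_8);
	}
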
diff --git a/sound/soc/codecs/wm8988.c b/sound/soc/codecs/wm8988.c
index 8c0fdf84aac3..3f530f8a972a 100644
--- a/sound/soc/codecs/wm8988.c
+++ b/sound/soc/codecs/wm8988.c
@@ -57,50 +57,7 @@ struct wm8988_priv {
57}; 57};
58 58
59 59
60/* 60#define wm8988_reset(c) snd_soc_write(c, WM8988_RESET, 0)
61 * read wm8988 register cache
62 */
63static inline unsigned int wm8988_read_reg_cache(struct snd_soc_codec *codec,
64 unsigned int reg)
65{
66 u16 *cache = codec->reg_cache;
67 if (reg > WM8988_NUM_REG)
68 return -1;
69 return cache[reg];
70}
71
72/*
73 * write wm8988 register cache
74 */
75static inline void wm8988_write_reg_cache(struct snd_soc_codec *codec,
76 unsigned int reg, unsigned int value)
77{
78 u16 *cache = codec->reg_cache;
79 if (reg > WM8988_NUM_REG)
80 return;
81 cache[reg] = value;
82}
83
84static int wm8988_write(struct snd_soc_codec *codec, unsigned int reg,
85 unsigned int value)
86{
87 u8 data[2];
88
89 /* data is
90 * D15..D9 WM8753 register offset
91 * D8...D0 register data
92 */
93 data[0] = (reg << 1) | ((value >> 8) & 0x0001);
94 data[1] = value & 0x00ff;
95
96 wm8988_write_reg_cache(codec, reg, value);
97 if (codec->hw_write(codec->control_data, data, 2) == 2)
98 return 0;
99 else
100 return -EIO;
101}
102
103#define wm8988_reset(c) wm8988_write(c, WM8988_RESET, 0)
104 61
105/* 62/*
106 * WM8988 Controls 63 * WM8988 Controls
@@ -226,15 +183,15 @@ static int wm8988_lrc_control(struct snd_soc_dapm_widget *w,
226 struct snd_kcontrol *kcontrol, int event) 183 struct snd_kcontrol *kcontrol, int event)
227{ 184{
228 struct snd_soc_codec *codec = w->codec; 185 struct snd_soc_codec *codec = w->codec;
229 u16 adctl2 = wm8988_read_reg_cache(codec, WM8988_ADCTL2); 186 u16 adctl2 = snd_soc_read(codec, WM8988_ADCTL2);
230 187
231 /* Use the DAC to gate LRC if active, otherwise use ADC */ 188 /* Use the DAC to gate LRC if active, otherwise use ADC */
232 if (wm8988_read_reg_cache(codec, WM8988_PWR2) & 0x180) 189 if (snd_soc_read(codec, WM8988_PWR2) & 0x180)
233 adctl2 &= ~0x4; 190 adctl2 &= ~0x4;
234 else 191 else
235 adctl2 |= 0x4; 192 adctl2 |= 0x4;
236 193
237 return wm8988_write(codec, WM8988_ADCTL2, adctl2); 194 return snd_soc_write(codec, WM8988_ADCTL2, adctl2);
238} 195}
239 196
240static const char *wm8988_line_texts[] = { 197static const char *wm8988_line_texts[] = {
@@ -619,7 +576,7 @@ static int wm8988_set_dai_fmt(struct snd_soc_dai *codec_dai,
619 return -EINVAL; 576 return -EINVAL;
620 } 577 }
621 578
622 wm8988_write(codec, WM8988_IFACE, iface); 579 snd_soc_write(codec, WM8988_IFACE, iface);
623 return 0; 580 return 0;
624} 581}
625 582
@@ -653,8 +610,8 @@ static int wm8988_pcm_hw_params(struct snd_pcm_substream *substream,
653 struct snd_soc_device *socdev = rtd->socdev; 610 struct snd_soc_device *socdev = rtd->socdev;
654 struct snd_soc_codec *codec = socdev->card->codec; 611 struct snd_soc_codec *codec = socdev->card->codec;
655 struct wm8988_priv *wm8988 = codec->private_data; 612 struct wm8988_priv *wm8988 = codec->private_data;
656 u16 iface = wm8988_read_reg_cache(codec, WM8988_IFACE) & 0x1f3; 613 u16 iface = snd_soc_read(codec, WM8988_IFACE) & 0x1f3;
657 u16 srate = wm8988_read_reg_cache(codec, WM8988_SRATE) & 0x180; 614 u16 srate = snd_soc_read(codec, WM8988_SRATE) & 0x180;
658 int coeff; 615 int coeff;
659 616
660 coeff = get_coeff(wm8988->sysclk, params_rate(params)); 617 coeff = get_coeff(wm8988->sysclk, params_rate(params));
@@ -685,9 +642,9 @@ static int wm8988_pcm_hw_params(struct snd_pcm_substream *substream,
685 } 642 }
686 643
687 /* set iface & srate */ 644 /* set iface & srate */
688 wm8988_write(codec, WM8988_IFACE, iface); 645 snd_soc_write(codec, WM8988_IFACE, iface);
689 if (coeff >= 0) 646 if (coeff >= 0)
690 wm8988_write(codec, WM8988_SRATE, srate | 647 snd_soc_write(codec, WM8988_SRATE, srate |
691 (coeff_div[coeff].sr << 1) | coeff_div[coeff].usb); 648 (coeff_div[coeff].sr << 1) | coeff_div[coeff].usb);
692 649
693 return 0; 650 return 0;
@@ -696,19 +653,19 @@ static int wm8988_pcm_hw_params(struct snd_pcm_substream *substream,
696static int wm8988_mute(struct snd_soc_dai *dai, int mute) 653static int wm8988_mute(struct snd_soc_dai *dai, int mute)
697{ 654{
698 struct snd_soc_codec *codec = dai->codec; 655 struct snd_soc_codec *codec = dai->codec;
699 u16 mute_reg = wm8988_read_reg_cache(codec, WM8988_ADCDAC) & 0xfff7; 656 u16 mute_reg = snd_soc_read(codec, WM8988_ADCDAC) & 0xfff7;
700 657
701 if (mute) 658 if (mute)
702 wm8988_write(codec, WM8988_ADCDAC, mute_reg | 0x8); 659 snd_soc_write(codec, WM8988_ADCDAC, mute_reg | 0x8);
703 else 660 else
704 wm8988_write(codec, WM8988_ADCDAC, mute_reg); 661 snd_soc_write(codec, WM8988_ADCDAC, mute_reg);
705 return 0; 662 return 0;
706} 663}
707 664
708static int wm8988_set_bias_level(struct snd_soc_codec *codec, 665static int wm8988_set_bias_level(struct snd_soc_codec *codec,
709 enum snd_soc_bias_level level) 666 enum snd_soc_bias_level level)
710{ 667{
711 u16 pwr_reg = wm8988_read_reg_cache(codec, WM8988_PWR1) & ~0x1c1; 668 u16 pwr_reg = snd_soc_read(codec, WM8988_PWR1) & ~0x1c1;
712 669
713 switch (level) { 670 switch (level) {
714 case SND_SOC_BIAS_ON: 671 case SND_SOC_BIAS_ON:
@@ -716,24 +673,24 @@ static int wm8988_set_bias_level(struct snd_soc_codec *codec,
716 673
717 case SND_SOC_BIAS_PREPARE: 674 case SND_SOC_BIAS_PREPARE:
718 /* VREF, VMID=2x50k, digital enabled */ 675 /* VREF, VMID=2x50k, digital enabled */
719 wm8988_write(codec, WM8988_PWR1, pwr_reg | 0x00c0); 676 snd_soc_write(codec, WM8988_PWR1, pwr_reg | 0x00c0);
720 break; 677 break;
721 678
722 case SND_SOC_BIAS_STANDBY: 679 case SND_SOC_BIAS_STANDBY:
723 if (codec->bias_level == SND_SOC_BIAS_OFF) { 680 if (codec->bias_level == SND_SOC_BIAS_OFF) {
724 /* VREF, VMID=2x5k */ 681 /* VREF, VMID=2x5k */
725 wm8988_write(codec, WM8988_PWR1, pwr_reg | 0x1c1); 682 snd_soc_write(codec, WM8988_PWR1, pwr_reg | 0x1c1);
726 683
727 /* Charge caps */ 684 /* Charge caps */
728 msleep(100); 685 msleep(100);
729 } 686 }
730 687
731 /* VREF, VMID=2*500k, digital stopped */ 688 /* VREF, VMID=2*500k, digital stopped */
732 wm8988_write(codec, WM8988_PWR1, pwr_reg | 0x0141); 689 snd_soc_write(codec, WM8988_PWR1, pwr_reg | 0x0141);
733 break; 690 break;
734 691
735 case SND_SOC_BIAS_OFF: 692 case SND_SOC_BIAS_OFF:
736 wm8988_write(codec, WM8988_PWR1, 0x0000); 693 snd_soc_write(codec, WM8988_PWR1, 0x0000);
737 break; 694 break;
738 } 695 }
739 codec->bias_level = level; 696 codec->bias_level = level;
@@ -868,7 +825,8 @@ struct snd_soc_codec_device soc_codec_dev_wm8988 = {
868}; 825};
869EXPORT_SYMBOL_GPL(soc_codec_dev_wm8988); 826EXPORT_SYMBOL_GPL(soc_codec_dev_wm8988);
870 827
871static int wm8988_register(struct wm8988_priv *wm8988) 828static int wm8988_register(struct wm8988_priv *wm8988,
829 enum snd_soc_control_type control)
872{ 830{
873 struct snd_soc_codec *codec = &wm8988->codec; 831 struct snd_soc_codec *codec = &wm8988->codec;
874 int ret; 832 int ret;
@@ -887,8 +845,6 @@ static int wm8988_register(struct wm8988_priv *wm8988)
887 codec->private_data = wm8988; 845 codec->private_data = wm8988;
888 codec->name = "WM8988"; 846 codec->name = "WM8988";
889 codec->owner = THIS_MODULE; 847 codec->owner = THIS_MODULE;
890 codec->read = wm8988_read_reg_cache;
891 codec->write = wm8988_write;
892 codec->dai = &wm8988_dai; 848 codec->dai = &wm8988_dai;
893 codec->num_dai = 1; 849 codec->num_dai = 1;
894 codec->reg_cache_size = ARRAY_SIZE(wm8988->reg_cache); 850 codec->reg_cache_size = ARRAY_SIZE(wm8988->reg_cache);
@@ -899,23 +855,29 @@ static int wm8988_register(struct wm8988_priv *wm8988)
899 memcpy(codec->reg_cache, wm8988_reg, 855 memcpy(codec->reg_cache, wm8988_reg,
900 sizeof(wm8988_reg)); 856 sizeof(wm8988_reg));
901 857
858 ret = snd_soc_codec_set_cache_io(codec, 7, 9, control);
859 if (ret < 0) {
860 dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
861 goto err;
862 }
863
902 ret = wm8988_reset(codec); 864 ret = wm8988_reset(codec);
903 if (ret < 0) { 865 if (ret < 0) {
904 dev_err(codec->dev, "Failed to issue reset\n"); 866 dev_err(codec->dev, "Failed to issue reset\n");
905 return ret; 867 goto err;
906 } 868 }
907 869
908 /* set the update bits (we always update left then right) */ 870 /* set the update bits (we always update left then right) */
909 reg = wm8988_read_reg_cache(codec, WM8988_RADC); 871 reg = snd_soc_read(codec, WM8988_RADC);
910 wm8988_write(codec, WM8988_RADC, reg | 0x100); 872 snd_soc_write(codec, WM8988_RADC, reg | 0x100);
911 reg = wm8988_read_reg_cache(codec, WM8988_RDAC); 873 reg = snd_soc_read(codec, WM8988_RDAC);
912 wm8988_write(codec, WM8988_RDAC, reg | 0x0100); 874 snd_soc_write(codec, WM8988_RDAC, reg | 0x0100);
913 reg = wm8988_read_reg_cache(codec, WM8988_ROUT1V); 875 reg = snd_soc_read(codec, WM8988_ROUT1V);
914 wm8988_write(codec, WM8988_ROUT1V, reg | 0x0100); 876 snd_soc_write(codec, WM8988_ROUT1V, reg | 0x0100);
915 reg = wm8988_read_reg_cache(codec, WM8988_ROUT2V); 877 reg = snd_soc_read(codec, WM8988_ROUT2V);
916 wm8988_write(codec, WM8988_ROUT2V, reg | 0x0100); 878 snd_soc_write(codec, WM8988_ROUT2V, reg | 0x0100);
917 reg = wm8988_read_reg_cache(codec, WM8988_RINVOL); 879 reg = snd_soc_read(codec, WM8988_RINVOL);
918 wm8988_write(codec, WM8988_RINVOL, reg | 0x0100); 880 snd_soc_write(codec, WM8988_RINVOL, reg | 0x0100);
919 881
920 wm8988_set_bias_level(&wm8988->codec, SND_SOC_BIAS_STANDBY); 882 wm8988_set_bias_level(&wm8988->codec, SND_SOC_BIAS_STANDBY);
921 883
@@ -926,18 +888,20 @@ static int wm8988_register(struct wm8988_priv *wm8988)
926 ret = snd_soc_register_codec(codec); 888 ret = snd_soc_register_codec(codec);
927 if (ret != 0) { 889 if (ret != 0) {
928 dev_err(codec->dev, "Failed to register codec: %d\n", ret); 890 dev_err(codec->dev, "Failed to register codec: %d\n", ret);
929 return ret; 891 goto err;
930 } 892 }
931 893
932 ret = snd_soc_register_dai(&wm8988_dai); 894 ret = snd_soc_register_dai(&wm8988_dai);
933 if (ret != 0) { 895 if (ret != 0) {
934 dev_err(codec->dev, "Failed to register DAI: %d\n", ret); 896 dev_err(codec->dev, "Failed to register DAI: %d\n", ret);
935 snd_soc_unregister_codec(codec); 897 snd_soc_unregister_codec(codec);
936 return ret; 898 goto err_codec;
937 } 899 }
938 900
939 return 0; 901 return 0;
940 902
903err_codec:
904 snd_soc_unregister_codec(codec);
941err: 905err:
942 kfree(wm8988); 906 kfree(wm8988);
943 return ret; 907 return ret;
@@ -964,14 +928,13 @@ static int wm8988_i2c_probe(struct i2c_client *i2c,
964 return -ENOMEM; 928 return -ENOMEM;
965 929
966 codec = &wm8988->codec; 930 codec = &wm8988->codec;
967 codec->hw_write = (hw_write_t)i2c_master_send;
968 931
969 i2c_set_clientdata(i2c, wm8988); 932 i2c_set_clientdata(i2c, wm8988);
970 codec->control_data = i2c; 933 codec->control_data = i2c;
971 934
972 codec->dev = &i2c->dev; 935 codec->dev = &i2c->dev;
973 936
974 return wm8988_register(wm8988); 937 return wm8988_register(wm8988, SND_SOC_I2C);
975} 938}
976 939
977static int wm8988_i2c_remove(struct i2c_client *client) 940static int wm8988_i2c_remove(struct i2c_client *client)
@@ -981,6 +944,21 @@ static int wm8988_i2c_remove(struct i2c_client *client)
981 return 0; 944 return 0;
982} 945}
983 946
947#ifdef CONFIG_PM
948static int wm8988_i2c_suspend(struct i2c_client *client, pm_message_t msg)
949{
950 return snd_soc_suspend_device(&client->dev);
951}
952
953static int wm8988_i2c_resume(struct i2c_client *client)
954{
955 return snd_soc_resume_device(&client->dev);
956}
957#else
958#define wm8988_i2c_suspend NULL
959#define wm8988_i2c_resume NULL
960#endif
961
984static const struct i2c_device_id wm8988_i2c_id[] = { 962static const struct i2c_device_id wm8988_i2c_id[] = {
985 { "wm8988", 0 }, 963 { "wm8988", 0 },
986 { } 964 { }
@@ -994,35 +972,13 @@ static struct i2c_driver wm8988_i2c_driver = {
994 }, 972 },
995 .probe = wm8988_i2c_probe, 973 .probe = wm8988_i2c_probe,
996 .remove = wm8988_i2c_remove, 974 .remove = wm8988_i2c_remove,
975 .suspend = wm8988_i2c_suspend,
976 .resume = wm8988_i2c_resume,
997 .id_table = wm8988_i2c_id, 977 .id_table = wm8988_i2c_id,
998}; 978};
999#endif 979#endif
1000 980
1001#if defined(CONFIG_SPI_MASTER) 981#if defined(CONFIG_SPI_MASTER)
1002static int wm8988_spi_write(struct spi_device *spi, const char *data, int len)
1003{
1004 struct spi_transfer t;
1005 struct spi_message m;
1006 u8 msg[2];
1007
1008 if (len <= 0)
1009 return 0;
1010
1011 msg[0] = data[0];
1012 msg[1] = data[1];
1013
1014 spi_message_init(&m);
1015 memset(&t, 0, (sizeof t));
1016
1017 t.tx_buf = &msg[0];
1018 t.len = len;
1019
1020 spi_message_add_tail(&t, &m);
1021 spi_sync(spi, &m);
1022
1023 return len;
1024}
1025
1026static int __devinit wm8988_spi_probe(struct spi_device *spi) 982static int __devinit wm8988_spi_probe(struct spi_device *spi)
1027{ 983{
1028 struct wm8988_priv *wm8988; 984 struct wm8988_priv *wm8988;
@@ -1033,13 +989,12 @@ static int __devinit wm8988_spi_probe(struct spi_device *spi)
1033 return -ENOMEM; 989 return -ENOMEM;
1034 990
1035 codec = &wm8988->codec; 991 codec = &wm8988->codec;
1036 codec->hw_write = (hw_write_t)wm8988_spi_write;
1037 codec->control_data = spi; 992 codec->control_data = spi;
1038 codec->dev = &spi->dev; 993 codec->dev = &spi->dev;
1039 994
1040 dev_set_drvdata(&spi->dev, wm8988); 995 dev_set_drvdata(&spi->dev, wm8988);
1041 996
1042 return wm8988_register(wm8988); 997 return wm8988_register(wm8988, SND_SOC_SPI);
1043} 998}
1044 999
1045static int __devexit wm8988_spi_remove(struct spi_device *spi) 1000static int __devexit wm8988_spi_remove(struct spi_device *spi)
@@ -1051,6 +1006,21 @@ static int __devexit wm8988_spi_remove(struct spi_device *spi)
1051 return 0; 1006 return 0;
1052} 1007}
1053 1008
1009#ifdef CONFIG_PM
1010static int wm8988_spi_suspend(struct spi_device *spi, pm_message_t msg)
1011{
1012 return snd_soc_suspend_device(&spi->dev);
1013}
1014
1015static int wm8988_spi_resume(struct spi_device *spi)
1016{
1017 return snd_soc_resume_device(&spi->dev);
1018}
1019#else
1020#define wm8988_spi_suspend NULL
1021#define wm8988_spi_resume NULL
1022#endif
1023
1054static struct spi_driver wm8988_spi_driver = { 1024static struct spi_driver wm8988_spi_driver = {
1055 .driver = { 1025 .driver = {
1056 .name = "wm8988", 1026 .name = "wm8988",
@@ -1059,6 +1029,8 @@ static struct spi_driver wm8988_spi_driver = {
1059 }, 1029 },
1060 .probe = wm8988_spi_probe, 1030 .probe = wm8988_spi_probe,
1061 .remove = __devexit_p(wm8988_spi_remove), 1031 .remove = __devexit_p(wm8988_spi_remove),
1032 .suspend = wm8988_spi_suspend,
1033 .resume = wm8988_spi_resume,
1062}; 1034};
1063#endif 1035#endif
1064 1036
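
One detail worth keeping in mind while reading these conversions: snd_soc_codec_set_cache_io(codec, 7, 9, ...) describes exactly the wire format that the deleted wm8988_write() (and the cache-sync loop in wm8974_resume()) built by hand, a 7-bit register address followed by a 9-bit value packed into two bytes. A standalone, illustrative check of that packing:

	#include <stdio.h>
	#include <stdint.h>

	/* Pack a 7-bit register address and 9-bit value into the two-byte frame
	 * used by these codecs: D15..D9 = address, D8..D0 = value. */
	static void pack_7_9(unsigned int reg, unsigned int value, uint8_t data[2])
	{
		data[0] = (uint8_t)((reg << 1) | ((value >> 8) & 0x01));
		data[1] = (uint8_t)(value & 0xff);
	}

	int main(void)
	{
		uint8_t data[2];

		/* e.g. set the update bit (bit 8) in a register holding 0x79 */
		pack_7_9(0x0a, 0x79 | 0x100, data);
		printf("%02x %02x\n", data[0], data[1]);	/* prints "15 79" */
		return 0;
	}
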
diff --git a/sound/soc/codecs/wm8990.c b/sound/soc/codecs/wm8990.c
index d029818350e9..2d702db4131d 100644
--- a/sound/soc/codecs/wm8990.c
+++ b/sound/soc/codecs/wm8990.c
@@ -108,53 +108,7 @@ static const u16 wm8990_reg[] = {
108 0x0000, /* R63 - Driver internal */ 108 0x0000, /* R63 - Driver internal */
109}; 109};
110 110
111/* 111#define wm8990_reset(c) snd_soc_write(c, WM8990_RESET, 0)
112 * read wm8990 register cache
113 */
114static inline unsigned int wm8990_read_reg_cache(struct snd_soc_codec *codec,
115 unsigned int reg)
116{
117 u16 *cache = codec->reg_cache;
118 BUG_ON(reg >= ARRAY_SIZE(wm8990_reg));
119 return cache[reg];
120}
121
122/*
123 * write wm8990 register cache
124 */
125static inline void wm8990_write_reg_cache(struct snd_soc_codec *codec,
126 unsigned int reg, unsigned int value)
127{
128 u16 *cache = codec->reg_cache;
129
130 /* Reset register and reserved registers are uncached */
131 if (reg == 0 || reg >= ARRAY_SIZE(wm8990_reg))
132 return;
133
134 cache[reg] = value;
135}
136
137/*
138 * write to the wm8990 register space
139 */
140static int wm8990_write(struct snd_soc_codec *codec, unsigned int reg,
141 unsigned int value)
142{
143 u8 data[3];
144
145 data[0] = reg & 0xFF;
146 data[1] = (value >> 8) & 0xFF;
147 data[2] = value & 0xFF;
148
149 wm8990_write_reg_cache(codec, reg, value);
150
151 if (codec->hw_write(codec->control_data, data, 3) == 2)
152 return 0;
153 else
154 return -EIO;
155}
156
157#define wm8990_reset(c) wm8990_write(c, WM8990_RESET, 0)
158 112
159static const DECLARE_TLV_DB_LINEAR(rec_mix_tlv, -1500, 600); 113static const DECLARE_TLV_DB_LINEAR(rec_mix_tlv, -1500, 600);
160 114
@@ -187,8 +141,8 @@ static int wm899x_outpga_put_volsw_vu(struct snd_kcontrol *kcontrol,
187 return ret; 141 return ret;
188 142
189 /* now hit the volume update bits (always bit 8) */ 143 /* now hit the volume update bits (always bit 8) */
190 val = wm8990_read_reg_cache(codec, reg); 144 val = snd_soc_read(codec, reg);
191 return wm8990_write(codec, reg, val | 0x0100); 145 return snd_soc_write(codec, reg, val | 0x0100);
192} 146}
193 147
194#define SOC_WM899X_OUTPGA_SINGLE_R_TLV(xname, reg, shift, max, invert,\ 148#define SOC_WM899X_OUTPGA_SINGLE_R_TLV(xname, reg, shift, max, invert,\
@@ -427,8 +381,8 @@ static int inmixer_event(struct snd_soc_dapm_widget *w,
427{ 381{
428 u16 reg, fakepower; 382 u16 reg, fakepower;
429 383
430 reg = wm8990_read_reg_cache(w->codec, WM8990_POWER_MANAGEMENT_2); 384 reg = snd_soc_read(w->codec, WM8990_POWER_MANAGEMENT_2);
431 fakepower = wm8990_read_reg_cache(w->codec, WM8990_INTDRIVBITS); 385 fakepower = snd_soc_read(w->codec, WM8990_INTDRIVBITS);
432 386
433 if (fakepower & ((1 << WM8990_INMIXL_PWR_BIT) | 387 if (fakepower & ((1 << WM8990_INMIXL_PWR_BIT) |
434 (1 << WM8990_AINLMUX_PWR_BIT))) { 388 (1 << WM8990_AINLMUX_PWR_BIT))) {
@@ -443,7 +397,7 @@ static int inmixer_event(struct snd_soc_dapm_widget *w,
443 } else { 397 } else {
444 reg &= ~WM8990_AINL_ENA; 398 reg &= ~WM8990_AINL_ENA;
445 } 399 }
446 wm8990_write(w->codec, WM8990_POWER_MANAGEMENT_2, reg); 400 snd_soc_write(w->codec, WM8990_POWER_MANAGEMENT_2, reg);
447 401
448 return 0; 402 return 0;
449} 403}
@@ -457,7 +411,7 @@ static int outmixer_event(struct snd_soc_dapm_widget *w,
457 411
458 switch (reg_shift) { 412 switch (reg_shift) {
459 case WM8990_SPEAKER_MIXER | (WM8990_LDSPK_BIT << 8) : 413 case WM8990_SPEAKER_MIXER | (WM8990_LDSPK_BIT << 8) :
460 reg = wm8990_read_reg_cache(w->codec, WM8990_OUTPUT_MIXER1); 414 reg = snd_soc_read(w->codec, WM8990_OUTPUT_MIXER1);
461 if (reg & WM8990_LDLO) { 415 if (reg & WM8990_LDLO) {
462 printk(KERN_WARNING 416 printk(KERN_WARNING
463 "Cannot set as Output Mixer 1 LDLO Set\n"); 417 "Cannot set as Output Mixer 1 LDLO Set\n");
@@ -465,7 +419,7 @@ static int outmixer_event(struct snd_soc_dapm_widget *w,
465 } 419 }
466 break; 420 break;
467 case WM8990_SPEAKER_MIXER | (WM8990_RDSPK_BIT << 8): 421 case WM8990_SPEAKER_MIXER | (WM8990_RDSPK_BIT << 8):
468 reg = wm8990_read_reg_cache(w->codec, WM8990_OUTPUT_MIXER2); 422 reg = snd_soc_read(w->codec, WM8990_OUTPUT_MIXER2);
469 if (reg & WM8990_RDRO) { 423 if (reg & WM8990_RDRO) {
470 printk(KERN_WARNING 424 printk(KERN_WARNING
471 "Cannot set as Output Mixer 2 RDRO Set\n"); 425 "Cannot set as Output Mixer 2 RDRO Set\n");
@@ -473,7 +427,7 @@ static int outmixer_event(struct snd_soc_dapm_widget *w,
473 } 427 }
474 break; 428 break;
475 case WM8990_OUTPUT_MIXER1 | (WM8990_LDLO_BIT << 8): 429 case WM8990_OUTPUT_MIXER1 | (WM8990_LDLO_BIT << 8):
476 reg = wm8990_read_reg_cache(w->codec, WM8990_SPEAKER_MIXER); 430 reg = snd_soc_read(w->codec, WM8990_SPEAKER_MIXER);
477 if (reg & WM8990_LDSPK) { 431 if (reg & WM8990_LDSPK) {
478 printk(KERN_WARNING 432 printk(KERN_WARNING
479 "Cannot set as Speaker Mixer LDSPK Set\n"); 433 "Cannot set as Speaker Mixer LDSPK Set\n");
@@ -481,7 +435,7 @@ static int outmixer_event(struct snd_soc_dapm_widget *w,
481 } 435 }
482 break; 436 break;
483 case WM8990_OUTPUT_MIXER2 | (WM8990_RDRO_BIT << 8): 437 case WM8990_OUTPUT_MIXER2 | (WM8990_RDRO_BIT << 8):
484 reg = wm8990_read_reg_cache(w->codec, WM8990_SPEAKER_MIXER); 438 reg = snd_soc_read(w->codec, WM8990_SPEAKER_MIXER);
485 if (reg & WM8990_RDSPK) { 439 if (reg & WM8990_RDSPK) {
486 printk(KERN_WARNING 440 printk(KERN_WARNING
487 "Cannot set as Speaker Mixer RDSPK Set\n"); 441 "Cannot set as Speaker Mixer RDSPK Set\n");
@@ -1029,24 +983,24 @@ static int wm8990_set_dai_pll(struct snd_soc_dai *codec_dai,
1029 pll_factors(&pll_div, freq_out * 4, freq_in); 983 pll_factors(&pll_div, freq_out * 4, freq_in);
1030 984
1031 /* Turn on PLL */ 985 /* Turn on PLL */
1032 reg = wm8990_read_reg_cache(codec, WM8990_POWER_MANAGEMENT_2); 986 reg = snd_soc_read(codec, WM8990_POWER_MANAGEMENT_2);
1033 reg |= WM8990_PLL_ENA; 987 reg |= WM8990_PLL_ENA;
1034 wm8990_write(codec, WM8990_POWER_MANAGEMENT_2, reg); 988 snd_soc_write(codec, WM8990_POWER_MANAGEMENT_2, reg);
1035 989
1036 /* sysclk comes from PLL */ 990 /* sysclk comes from PLL */
1037 reg = wm8990_read_reg_cache(codec, WM8990_CLOCKING_2); 991 reg = snd_soc_read(codec, WM8990_CLOCKING_2);
1038 wm8990_write(codec, WM8990_CLOCKING_2, reg | WM8990_SYSCLK_SRC); 992 snd_soc_write(codec, WM8990_CLOCKING_2, reg | WM8990_SYSCLK_SRC);
1039 993
1040 /* set up N , fractional mode and pre-divisor if neccessary */ 994 /* set up N , fractional mode and pre-divisor if neccessary */
1041 wm8990_write(codec, WM8990_PLL1, pll_div.n | WM8990_SDM | 995 snd_soc_write(codec, WM8990_PLL1, pll_div.n | WM8990_SDM |
1042 (pll_div.div2?WM8990_PRESCALE:0)); 996 (pll_div.div2?WM8990_PRESCALE:0));
1043 wm8990_write(codec, WM8990_PLL2, (u8)(pll_div.k>>8)); 997 snd_soc_write(codec, WM8990_PLL2, (u8)(pll_div.k>>8));
1044 wm8990_write(codec, WM8990_PLL3, (u8)(pll_div.k & 0xFF)); 998 snd_soc_write(codec, WM8990_PLL3, (u8)(pll_div.k & 0xFF));
1045 } else { 999 } else {
1046 /* Turn on PLL */ 1000 /* Turn on PLL */
1047 reg = wm8990_read_reg_cache(codec, WM8990_POWER_MANAGEMENT_2); 1001 reg = snd_soc_read(codec, WM8990_POWER_MANAGEMENT_2);
1048 reg &= ~WM8990_PLL_ENA; 1002 reg &= ~WM8990_PLL_ENA;
1049 wm8990_write(codec, WM8990_POWER_MANAGEMENT_2, reg); 1003 snd_soc_write(codec, WM8990_POWER_MANAGEMENT_2, reg);
1050 } 1004 }
1051 return 0; 1005 return 0;
1052} 1006}
@@ -1073,8 +1027,8 @@ static int wm8990_set_dai_fmt(struct snd_soc_dai *codec_dai,
1073 struct snd_soc_codec *codec = codec_dai->codec; 1027 struct snd_soc_codec *codec = codec_dai->codec;
1074 u16 audio1, audio3; 1028 u16 audio1, audio3;
1075 1029
1076 audio1 = wm8990_read_reg_cache(codec, WM8990_AUDIO_INTERFACE_1); 1030 audio1 = snd_soc_read(codec, WM8990_AUDIO_INTERFACE_1);
1077 audio3 = wm8990_read_reg_cache(codec, WM8990_AUDIO_INTERFACE_3); 1031 audio3 = snd_soc_read(codec, WM8990_AUDIO_INTERFACE_3);
1078 1032
1079 /* set master/slave audio interface */ 1033 /* set master/slave audio interface */
1080 switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { 1034 switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
@@ -1115,8 +1069,8 @@ static int wm8990_set_dai_fmt(struct snd_soc_dai *codec_dai,
1115 return -EINVAL; 1069 return -EINVAL;
1116 } 1070 }
1117 1071
1118 wm8990_write(codec, WM8990_AUDIO_INTERFACE_1, audio1); 1072 snd_soc_write(codec, WM8990_AUDIO_INTERFACE_1, audio1);
1119 wm8990_write(codec, WM8990_AUDIO_INTERFACE_3, audio3); 1073 snd_soc_write(codec, WM8990_AUDIO_INTERFACE_3, audio3);
1120 return 0; 1074 return 0;
1121} 1075}
1122 1076
@@ -1128,24 +1082,24 @@ static int wm8990_set_dai_clkdiv(struct snd_soc_dai *codec_dai,
1128 1082
1129 switch (div_id) { 1083 switch (div_id) {
1130 case WM8990_MCLK_DIV: 1084 case WM8990_MCLK_DIV:
1131 reg = wm8990_read_reg_cache(codec, WM8990_CLOCKING_2) & 1085 reg = snd_soc_read(codec, WM8990_CLOCKING_2) &
1132 ~WM8990_MCLK_DIV_MASK; 1086 ~WM8990_MCLK_DIV_MASK;
1133 wm8990_write(codec, WM8990_CLOCKING_2, reg | div); 1087 snd_soc_write(codec, WM8990_CLOCKING_2, reg | div);
1134 break; 1088 break;
1135 case WM8990_DACCLK_DIV: 1089 case WM8990_DACCLK_DIV:
1136 reg = wm8990_read_reg_cache(codec, WM8990_CLOCKING_2) & 1090 reg = snd_soc_read(codec, WM8990_CLOCKING_2) &
1137 ~WM8990_DAC_CLKDIV_MASK; 1091 ~WM8990_DAC_CLKDIV_MASK;
1138 wm8990_write(codec, WM8990_CLOCKING_2, reg | div); 1092 snd_soc_write(codec, WM8990_CLOCKING_2, reg | div);
1139 break; 1093 break;
1140 case WM8990_ADCCLK_DIV: 1094 case WM8990_ADCCLK_DIV:
1141 reg = wm8990_read_reg_cache(codec, WM8990_CLOCKING_2) & 1095 reg = snd_soc_read(codec, WM8990_CLOCKING_2) &
1142 ~WM8990_ADC_CLKDIV_MASK; 1096 ~WM8990_ADC_CLKDIV_MASK;
1143 wm8990_write(codec, WM8990_CLOCKING_2, reg | div); 1097 snd_soc_write(codec, WM8990_CLOCKING_2, reg | div);
1144 break; 1098 break;
1145 case WM8990_BCLK_DIV: 1099 case WM8990_BCLK_DIV:
1146 reg = wm8990_read_reg_cache(codec, WM8990_CLOCKING_1) & 1100 reg = snd_soc_read(codec, WM8990_CLOCKING_1) &
1147 ~WM8990_BCLK_DIV_MASK; 1101 ~WM8990_BCLK_DIV_MASK;
1148 wm8990_write(codec, WM8990_CLOCKING_1, reg | div); 1102 snd_soc_write(codec, WM8990_CLOCKING_1, reg | div);
1149 break; 1103 break;
1150 default: 1104 default:
1151 return -EINVAL; 1105 return -EINVAL;
@@ -1164,7 +1118,7 @@ static int wm8990_hw_params(struct snd_pcm_substream *substream,
1164 struct snd_soc_pcm_runtime *rtd = substream->private_data; 1118 struct snd_soc_pcm_runtime *rtd = substream->private_data;
1165 struct snd_soc_device *socdev = rtd->socdev; 1119 struct snd_soc_device *socdev = rtd->socdev;
1166 struct snd_soc_codec *codec = socdev->card->codec; 1120 struct snd_soc_codec *codec = socdev->card->codec;
1167 u16 audio1 = wm8990_read_reg_cache(codec, WM8990_AUDIO_INTERFACE_1); 1121 u16 audio1 = snd_soc_read(codec, WM8990_AUDIO_INTERFACE_1);
1168 1122
1169 audio1 &= ~WM8990_AIF_WL_MASK; 1123 audio1 &= ~WM8990_AIF_WL_MASK;
1170 /* bit size */ 1124 /* bit size */
@@ -1182,7 +1136,7 @@ static int wm8990_hw_params(struct snd_pcm_substream *substream,
1182 break; 1136 break;
1183 } 1137 }
1184 1138
1185 wm8990_write(codec, WM8990_AUDIO_INTERFACE_1, audio1); 1139 snd_soc_write(codec, WM8990_AUDIO_INTERFACE_1, audio1);
1186 return 0; 1140 return 0;
1187} 1141}
1188 1142
@@ -1191,12 +1145,12 @@ static int wm8990_mute(struct snd_soc_dai *dai, int mute)
1191 struct snd_soc_codec *codec = dai->codec; 1145 struct snd_soc_codec *codec = dai->codec;
1192 u16 val; 1146 u16 val;
1193 1147
1194 val = wm8990_read_reg_cache(codec, WM8990_DAC_CTRL) & ~WM8990_DAC_MUTE; 1148 val = snd_soc_read(codec, WM8990_DAC_CTRL) & ~WM8990_DAC_MUTE;
1195 1149
1196 if (mute) 1150 if (mute)
1197 wm8990_write(codec, WM8990_DAC_CTRL, val | WM8990_DAC_MUTE); 1151 snd_soc_write(codec, WM8990_DAC_CTRL, val | WM8990_DAC_MUTE);
1198 else 1152 else
1199 wm8990_write(codec, WM8990_DAC_CTRL, val); 1153 snd_soc_write(codec, WM8990_DAC_CTRL, val);
1200 1154
1201 return 0; 1155 return 0;
1202} 1156}
@@ -1212,21 +1166,21 @@ static int wm8990_set_bias_level(struct snd_soc_codec *codec,
1212 1166
1213 case SND_SOC_BIAS_PREPARE: 1167 case SND_SOC_BIAS_PREPARE:
1214 /* VMID=2*50k */ 1168 /* VMID=2*50k */
1215 val = wm8990_read_reg_cache(codec, WM8990_POWER_MANAGEMENT_1) & 1169 val = snd_soc_read(codec, WM8990_POWER_MANAGEMENT_1) &
1216 ~WM8990_VMID_MODE_MASK; 1170 ~WM8990_VMID_MODE_MASK;
1217 wm8990_write(codec, WM8990_POWER_MANAGEMENT_1, val | 0x2); 1171 snd_soc_write(codec, WM8990_POWER_MANAGEMENT_1, val | 0x2);
1218 break; 1172 break;
1219 1173
1220 case SND_SOC_BIAS_STANDBY: 1174 case SND_SOC_BIAS_STANDBY:
1221 if (codec->bias_level == SND_SOC_BIAS_OFF) { 1175 if (codec->bias_level == SND_SOC_BIAS_OFF) {
1222 /* Enable all output discharge bits */ 1176 /* Enable all output discharge bits */
1223 wm8990_write(codec, WM8990_ANTIPOP1, WM8990_DIS_LLINE | 1177 snd_soc_write(codec, WM8990_ANTIPOP1, WM8990_DIS_LLINE |
1224 WM8990_DIS_RLINE | WM8990_DIS_OUT3 | 1178 WM8990_DIS_RLINE | WM8990_DIS_OUT3 |
1225 WM8990_DIS_OUT4 | WM8990_DIS_LOUT | 1179 WM8990_DIS_OUT4 | WM8990_DIS_LOUT |
1226 WM8990_DIS_ROUT); 1180 WM8990_DIS_ROUT);
1227 1181
1228 /* Enable POBCTRL, SOFT_ST, VMIDTOG and BUFDCOPEN */ 1182 /* Enable POBCTRL, SOFT_ST, VMIDTOG and BUFDCOPEN */
1229 wm8990_write(codec, WM8990_ANTIPOP2, WM8990_SOFTST | 1183 snd_soc_write(codec, WM8990_ANTIPOP2, WM8990_SOFTST |
1230 WM8990_BUFDCOPEN | WM8990_POBCTRL | 1184 WM8990_BUFDCOPEN | WM8990_POBCTRL |
1231 WM8990_VMIDTOG); 1185 WM8990_VMIDTOG);
1232 1186
@@ -1234,83 +1188,83 @@ static int wm8990_set_bias_level(struct snd_soc_codec *codec,
1234 msleep(msecs_to_jiffies(300)); 1188 msleep(msecs_to_jiffies(300));
1235 1189
1236 /* Disable VMIDTOG */ 1190 /* Disable VMIDTOG */
1237 wm8990_write(codec, WM8990_ANTIPOP2, WM8990_SOFTST | 1191 snd_soc_write(codec, WM8990_ANTIPOP2, WM8990_SOFTST |
1238 WM8990_BUFDCOPEN | WM8990_POBCTRL); 1192 WM8990_BUFDCOPEN | WM8990_POBCTRL);
1239 1193
1240 /* disable all output discharge bits */ 1194 /* disable all output discharge bits */
1241 wm8990_write(codec, WM8990_ANTIPOP1, 0); 1195 snd_soc_write(codec, WM8990_ANTIPOP1, 0);
1242 1196
1243 /* Enable outputs */ 1197 /* Enable outputs */
1244 wm8990_write(codec, WM8990_POWER_MANAGEMENT_1, 0x1b00); 1198 snd_soc_write(codec, WM8990_POWER_MANAGEMENT_1, 0x1b00);
1245 1199
1246 msleep(msecs_to_jiffies(50)); 1200 msleep(msecs_to_jiffies(50));
1247 1201
1248 /* Enable VMID at 2x50k */ 1202 /* Enable VMID at 2x50k */
1249 wm8990_write(codec, WM8990_POWER_MANAGEMENT_1, 0x1f02); 1203 snd_soc_write(codec, WM8990_POWER_MANAGEMENT_1, 0x1f02);
1250 1204
1251 msleep(msecs_to_jiffies(100)); 1205 msleep(msecs_to_jiffies(100));
1252 1206
1253 /* Enable VREF */ 1207 /* Enable VREF */
1254 wm8990_write(codec, WM8990_POWER_MANAGEMENT_1, 0x1f03); 1208 snd_soc_write(codec, WM8990_POWER_MANAGEMENT_1, 0x1f03);
1255 1209
1256 msleep(msecs_to_jiffies(600)); 1210 msleep(msecs_to_jiffies(600));
1257 1211
1258 /* Enable BUFIOEN */ 1212 /* Enable BUFIOEN */
1259 wm8990_write(codec, WM8990_ANTIPOP2, WM8990_SOFTST | 1213 snd_soc_write(codec, WM8990_ANTIPOP2, WM8990_SOFTST |
1260 WM8990_BUFDCOPEN | WM8990_POBCTRL | 1214 WM8990_BUFDCOPEN | WM8990_POBCTRL |
1261 WM8990_BUFIOEN); 1215 WM8990_BUFIOEN);
1262 1216
1263 /* Disable outputs */ 1217 /* Disable outputs */
1264 wm8990_write(codec, WM8990_POWER_MANAGEMENT_1, 0x3); 1218 snd_soc_write(codec, WM8990_POWER_MANAGEMENT_1, 0x3);
1265 1219
1266 /* disable POBCTRL, SOFT_ST and BUFDCOPEN */ 1220 /* disable POBCTRL, SOFT_ST and BUFDCOPEN */
1267 wm8990_write(codec, WM8990_ANTIPOP2, WM8990_BUFIOEN); 1221 snd_soc_write(codec, WM8990_ANTIPOP2, WM8990_BUFIOEN);
1268 1222
1269 /* Enable workaround for ADC clocking issue. */ 1223 /* Enable workaround for ADC clocking issue. */
1270 wm8990_write(codec, WM8990_EXT_ACCESS_ENA, 0x2); 1224 snd_soc_write(codec, WM8990_EXT_ACCESS_ENA, 0x2);
1271 wm8990_write(codec, WM8990_EXT_CTL1, 0xa003); 1225 snd_soc_write(codec, WM8990_EXT_CTL1, 0xa003);
1272 wm8990_write(codec, WM8990_EXT_ACCESS_ENA, 0); 1226 snd_soc_write(codec, WM8990_EXT_ACCESS_ENA, 0);
1273 } 1227 }
1274 1228
1275 /* VMID=2*250k */ 1229 /* VMID=2*250k */
1276 val = wm8990_read_reg_cache(codec, WM8990_POWER_MANAGEMENT_1) & 1230 val = snd_soc_read(codec, WM8990_POWER_MANAGEMENT_1) &
1277 ~WM8990_VMID_MODE_MASK; 1231 ~WM8990_VMID_MODE_MASK;
1278 wm8990_write(codec, WM8990_POWER_MANAGEMENT_1, val | 0x4); 1232 snd_soc_write(codec, WM8990_POWER_MANAGEMENT_1, val | 0x4);
1279 break; 1233 break;
1280 1234
1281 case SND_SOC_BIAS_OFF: 1235 case SND_SOC_BIAS_OFF:
1282 /* Enable POBCTRL and SOFT_ST */ 1236 /* Enable POBCTRL and SOFT_ST */
1283 wm8990_write(codec, WM8990_ANTIPOP2, WM8990_SOFTST | 1237 snd_soc_write(codec, WM8990_ANTIPOP2, WM8990_SOFTST |
1284 WM8990_POBCTRL | WM8990_BUFIOEN); 1238 WM8990_POBCTRL | WM8990_BUFIOEN);
1285 1239
1286 /* Enable POBCTRL, SOFT_ST and BUFDCOPEN */ 1240 /* Enable POBCTRL, SOFT_ST and BUFDCOPEN */
1287 wm8990_write(codec, WM8990_ANTIPOP2, WM8990_SOFTST | 1241 snd_soc_write(codec, WM8990_ANTIPOP2, WM8990_SOFTST |
1288 WM8990_BUFDCOPEN | WM8990_POBCTRL | 1242 WM8990_BUFDCOPEN | WM8990_POBCTRL |
1289 WM8990_BUFIOEN); 1243 WM8990_BUFIOEN);
1290 1244
1291 /* mute DAC */ 1245 /* mute DAC */
1292 val = wm8990_read_reg_cache(codec, WM8990_DAC_CTRL); 1246 val = snd_soc_read(codec, WM8990_DAC_CTRL);
1293 wm8990_write(codec, WM8990_DAC_CTRL, val | WM8990_DAC_MUTE); 1247 snd_soc_write(codec, WM8990_DAC_CTRL, val | WM8990_DAC_MUTE);
1294 1248
1295 /* Enable any disabled outputs */ 1249 /* Enable any disabled outputs */
1296 wm8990_write(codec, WM8990_POWER_MANAGEMENT_1, 0x1f03); 1250 snd_soc_write(codec, WM8990_POWER_MANAGEMENT_1, 0x1f03);
1297 1251
1298 /* Disable VMID */ 1252 /* Disable VMID */
1299 wm8990_write(codec, WM8990_POWER_MANAGEMENT_1, 0x1f01); 1253 snd_soc_write(codec, WM8990_POWER_MANAGEMENT_1, 0x1f01);
1300 1254
1301 msleep(msecs_to_jiffies(300)); 1255 msleep(msecs_to_jiffies(300));
1302 1256
1303 /* Enable all output discharge bits */ 1257 /* Enable all output discharge bits */
1304 wm8990_write(codec, WM8990_ANTIPOP1, WM8990_DIS_LLINE | 1258 snd_soc_write(codec, WM8990_ANTIPOP1, WM8990_DIS_LLINE |
1305 WM8990_DIS_RLINE | WM8990_DIS_OUT3 | 1259 WM8990_DIS_RLINE | WM8990_DIS_OUT3 |
1306 WM8990_DIS_OUT4 | WM8990_DIS_LOUT | 1260 WM8990_DIS_OUT4 | WM8990_DIS_LOUT |
1307 WM8990_DIS_ROUT); 1261 WM8990_DIS_ROUT);
1308 1262
1309 /* Disable VREF */ 1263 /* Disable VREF */
1310 wm8990_write(codec, WM8990_POWER_MANAGEMENT_1, 0x0); 1264 snd_soc_write(codec, WM8990_POWER_MANAGEMENT_1, 0x0);
1311 1265
1312 /* disable POBCTRL, SOFT_ST and BUFDCOPEN */ 1266 /* disable POBCTRL, SOFT_ST and BUFDCOPEN */
1313 wm8990_write(codec, WM8990_ANTIPOP2, 0x0); 1267 snd_soc_write(codec, WM8990_ANTIPOP2, 0x0);
1314 break; 1268 break;
1315 } 1269 }
1316 1270
@@ -1411,8 +1365,6 @@ static int wm8990_init(struct snd_soc_device *socdev)
1411 1365
1412 codec->name = "WM8990"; 1366 codec->name = "WM8990";
1413 codec->owner = THIS_MODULE; 1367 codec->owner = THIS_MODULE;
1414 codec->read = wm8990_read_reg_cache;
1415 codec->write = wm8990_write;
1416 codec->set_bias_level = wm8990_set_bias_level; 1368 codec->set_bias_level = wm8990_set_bias_level;
1417 codec->dai = &wm8990_dai; 1369 codec->dai = &wm8990_dai;
1418 codec->num_dai = 2; 1370 codec->num_dai = 2;
@@ -1422,6 +1374,12 @@ static int wm8990_init(struct snd_soc_device *socdev)
1422 if (codec->reg_cache == NULL) 1374 if (codec->reg_cache == NULL)
1423 return -ENOMEM; 1375 return -ENOMEM;
1424 1376
1377 ret = snd_soc_codec_set_cache_io(codec, 8, 16, SND_SOC_I2C);
1378 if (ret < 0) {
1379 printk(KERN_ERR "wm8990: failed to set cache I/O: %d\n", ret);
1380 goto pcm_err;
1381 }
1382
1425 wm8990_reset(codec); 1383 wm8990_reset(codec);
1426 1384
1427 /* register pcms */ 1385 /* register pcms */
@@ -1435,18 +1393,18 @@ static int wm8990_init(struct snd_soc_device *socdev)
1435 codec->bias_level = SND_SOC_BIAS_OFF; 1393 codec->bias_level = SND_SOC_BIAS_OFF;
1436 wm8990_set_bias_level(codec, SND_SOC_BIAS_STANDBY); 1394 wm8990_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
1437 1395
1438 reg = wm8990_read_reg_cache(codec, WM8990_AUDIO_INTERFACE_4); 1396 reg = snd_soc_read(codec, WM8990_AUDIO_INTERFACE_4);
1439 wm8990_write(codec, WM8990_AUDIO_INTERFACE_4, reg | WM8990_ALRCGPIO1); 1397 snd_soc_write(codec, WM8990_AUDIO_INTERFACE_4, reg | WM8990_ALRCGPIO1);
1440 1398
1441 reg = wm8990_read_reg_cache(codec, WM8990_GPIO1_GPIO2) & 1399 reg = snd_soc_read(codec, WM8990_GPIO1_GPIO2) &
1442 ~WM8990_GPIO1_SEL_MASK; 1400 ~WM8990_GPIO1_SEL_MASK;
1443 wm8990_write(codec, WM8990_GPIO1_GPIO2, reg | 1); 1401 snd_soc_write(codec, WM8990_GPIO1_GPIO2, reg | 1);
1444 1402
1445 reg = wm8990_read_reg_cache(codec, WM8990_POWER_MANAGEMENT_2); 1403 reg = snd_soc_read(codec, WM8990_POWER_MANAGEMENT_2);
1446 wm8990_write(codec, WM8990_POWER_MANAGEMENT_2, reg | WM8990_OPCLK_ENA); 1404 snd_soc_write(codec, WM8990_POWER_MANAGEMENT_2, reg | WM8990_OPCLK_ENA);
1447 1405
1448 wm8990_write(codec, WM8990_LEFT_OUTPUT_VOLUME, 0x50 | (1<<8)); 1406 snd_soc_write(codec, WM8990_LEFT_OUTPUT_VOLUME, 0x50 | (1<<8));
1449 wm8990_write(codec, WM8990_RIGHT_OUTPUT_VOLUME, 0x50 | (1<<8)); 1407 snd_soc_write(codec, WM8990_RIGHT_OUTPUT_VOLUME, 0x50 | (1<<8));
1450 1408
1451 snd_soc_add_controls(codec, wm8990_snd_controls, 1409 snd_soc_add_controls(codec, wm8990_snd_controls,
1452 ARRAY_SIZE(wm8990_snd_controls)); 1410 ARRAY_SIZE(wm8990_snd_controls));
diff --git a/sound/soc/codecs/wm8993.c b/sound/soc/codecs/wm8993.c
new file mode 100644
index 000000000000..d9987999e92c
--- /dev/null
+++ b/sound/soc/codecs/wm8993.c
@@ -0,0 +1,1675 @@
1/*
2 * wm8993.c -- WM8993 ALSA SoC audio driver
3 *
4 * Copyright 2009 Wolfson Microelectronics plc
5 *
6 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15#include <linux/init.h>
16#include <linux/delay.h>
17#include <linux/pm.h>
18#include <linux/i2c.h>
19#include <linux/spi/spi.h>
20#include <sound/core.h>
21#include <sound/pcm.h>
22#include <sound/pcm_params.h>
23#include <sound/tlv.h>
24#include <sound/soc.h>
25#include <sound/soc-dapm.h>
26#include <sound/initval.h>
27#include <sound/wm8993.h>
28
29#include "wm8993.h"
30#include "wm_hubs.h"
31
32static u16 wm8993_reg_defaults[WM8993_REGISTER_COUNT] = {
33 0x8993, /* R0 - Software Reset */
34 0x0000, /* R1 - Power Management (1) */
35 0x6000, /* R2 - Power Management (2) */
36 0x0000, /* R3 - Power Management (3) */
37 0x4050, /* R4 - Audio Interface (1) */
38 0x4000, /* R5 - Audio Interface (2) */
39 0x01C8, /* R6 - Clocking 1 */
40 0x0000, /* R7 - Clocking 2 */
41 0x0000, /* R8 - Audio Interface (3) */
42 0x0040, /* R9 - Audio Interface (4) */
43 0x0004, /* R10 - DAC CTRL */
44 0x00C0, /* R11 - Left DAC Digital Volume */
45 0x00C0, /* R12 - Right DAC Digital Volume */
46 0x0000, /* R13 - Digital Side Tone */
47 0x0300, /* R14 - ADC CTRL */
48 0x00C0, /* R15 - Left ADC Digital Volume */
49 0x00C0, /* R16 - Right ADC Digital Volume */
50 0x0000, /* R17 */
51 0x0000, /* R18 - GPIO CTRL 1 */
52 0x0010, /* R19 - GPIO1 */
53 0x0000, /* R20 - IRQ_DEBOUNCE */
54 0x0000, /* R21 */
55 0x8000, /* R22 - GPIOCTRL 2 */
56 0x0800, /* R23 - GPIO_POL */
57 0x008B, /* R24 - Left Line Input 1&2 Volume */
58 0x008B, /* R25 - Left Line Input 3&4 Volume */
59 0x008B, /* R26 - Right Line Input 1&2 Volume */
60 0x008B, /* R27 - Right Line Input 3&4 Volume */
61 0x006D, /* R28 - Left Output Volume */
62 0x006D, /* R29 - Right Output Volume */
63 0x0066, /* R30 - Line Outputs Volume */
64 0x0020, /* R31 - HPOUT2 Volume */
65 0x0079, /* R32 - Left OPGA Volume */
66 0x0079, /* R33 - Right OPGA Volume */
67 0x0003, /* R34 - SPKMIXL Attenuation */
68 0x0003, /* R35 - SPKMIXR Attenuation */
69 0x0011, /* R36 - SPKOUT Mixers */
70 0x0100, /* R37 - SPKOUT Boost */
71 0x0079, /* R38 - Speaker Volume Left */
72 0x0079, /* R39 - Speaker Volume Right */
73 0x0000, /* R40 - Input Mixer2 */
74 0x0000, /* R41 - Input Mixer3 */
75 0x0000, /* R42 - Input Mixer4 */
76 0x0000, /* R43 - Input Mixer5 */
77 0x0000, /* R44 - Input Mixer6 */
78 0x0000, /* R45 - Output Mixer1 */
79 0x0000, /* R46 - Output Mixer2 */
80 0x0000, /* R47 - Output Mixer3 */
81 0x0000, /* R48 - Output Mixer4 */
82 0x0000, /* R49 - Output Mixer5 */
83 0x0000, /* R50 - Output Mixer6 */
84 0x0000, /* R51 - HPOUT2 Mixer */
85 0x0000, /* R52 - Line Mixer1 */
86 0x0000, /* R53 - Line Mixer2 */
87 0x0000, /* R54 - Speaker Mixer */
88 0x0000, /* R55 - Additional Control */
89 0x0000, /* R56 - AntiPOP1 */
90 0x0000, /* R57 - AntiPOP2 */
91 0x0000, /* R58 - MICBIAS */
92 0x0000, /* R59 */
93 0x0000, /* R60 - FLL Control 1 */
94 0x0000, /* R61 - FLL Control 2 */
95 0x0000, /* R62 - FLL Control 3 */
96 0x2EE0, /* R63 - FLL Control 4 */
97 0x0002, /* R64 - FLL Control 5 */
98 0x2287, /* R65 - Clocking 3 */
99 0x025F, /* R66 - Clocking 4 */
100 0x0000, /* R67 - MW Slave Control */
101 0x0000, /* R68 */
102 0x0002, /* R69 - Bus Control 1 */
103 0x0000, /* R70 - Write Sequencer 0 */
104 0x0000, /* R71 - Write Sequencer 1 */
105 0x0000, /* R72 - Write Sequencer 2 */
106 0x0000, /* R73 - Write Sequencer 3 */
107 0x0000, /* R74 - Write Sequencer 4 */
108 0x0000, /* R75 - Write Sequencer 5 */
109 0x1F25, /* R76 - Charge Pump 1 */
110 0x0000, /* R77 */
111 0x0000, /* R78 */
112 0x0000, /* R79 */
113 0x0000, /* R80 */
114 0x0000, /* R81 - Class W 0 */
115 0x0000, /* R82 */
116 0x0000, /* R83 */
117 0x0000, /* R84 - DC Servo 0 */
118 0x054A, /* R85 - DC Servo 1 */
119 0x0000, /* R86 */
120 0x0000, /* R87 - DC Servo 3 */
121 0x0000, /* R88 - DC Servo Readback 0 */
122 0x0000, /* R89 - DC Servo Readback 1 */
123 0x0000, /* R90 - DC Servo Readback 2 */
124 0x0000, /* R91 */
125 0x0000, /* R92 */
126 0x0000, /* R93 */
127 0x0000, /* R94 */
128 0x0000, /* R95 */
129 0x0100, /* R96 - Analogue HP 0 */
130 0x0000, /* R97 */
131 0x0000, /* R98 - EQ1 */
132 0x000C, /* R99 - EQ2 */
133 0x000C, /* R100 - EQ3 */
134 0x000C, /* R101 - EQ4 */
135 0x000C, /* R102 - EQ5 */
136 0x000C, /* R103 - EQ6 */
137 0x0FCA, /* R104 - EQ7 */
138 0x0400, /* R105 - EQ8 */
139 0x00D8, /* R106 - EQ9 */
140 0x1EB5, /* R107 - EQ10 */
141 0xF145, /* R108 - EQ11 */
142 0x0B75, /* R109 - EQ12 */
143 0x01C5, /* R110 - EQ13 */
144 0x1C58, /* R111 - EQ14 */
145 0xF373, /* R112 - EQ15 */
146 0x0A54, /* R113 - EQ16 */
147 0x0558, /* R114 - EQ17 */
148 0x168E, /* R115 - EQ18 */
149 0xF829, /* R116 - EQ19 */
150 0x07AD, /* R117 - EQ20 */
151 0x1103, /* R118 - EQ21 */
152 0x0564, /* R119 - EQ22 */
153 0x0559, /* R120 - EQ23 */
154 0x4000, /* R121 - EQ24 */
155 0x0000, /* R122 - Digital Pulls */
156 0x0F08, /* R123 - DRC Control 1 */
157 0x0000, /* R124 - DRC Control 2 */
158 0x0080, /* R125 - DRC Control 3 */
159 0x0000, /* R126 - DRC Control 4 */
160};
161
162static struct {
163 int ratio;
164 int clk_sys_rate;
165} clk_sys_rates[] = {
166 { 64, 0 },
167 { 128, 1 },
168 { 192, 2 },
169 { 256, 3 },
170 { 384, 4 },
171 { 512, 5 },
172 { 768, 6 },
173 { 1024, 7 },
174 { 1408, 8 },
175 { 1536, 9 },
176};
177
178static struct {
179 int rate;
180 int sample_rate;
181} sample_rates[] = {
182 { 8000, 0 },
183 { 11025, 1 },
184 { 12000, 1 },
185 { 16000, 2 },
186 { 22050, 3 },
187 { 24000, 3 },
188 { 32000, 4 },
189 { 44100, 5 },
190 { 48000, 5 },
191};
192
193static struct {
194 int div; /* divisor * 10, to allow for the .5 divisor steps */
195 int bclk_div;
196} bclk_divs[] = {
197 { 10, 0 },
198 { 15, 1 },
199 { 20, 2 },
200 { 30, 3 },
201 { 40, 4 },
202 { 55, 5 },
203 { 60, 6 },
204 { 80, 7 },
205 { 110, 8 },
206 { 120, 9 },
207 { 160, 10 },
208 { 220, 11 },
209 { 240, 12 },
210 { 320, 13 },
211 { 440, 14 },
212 { 480, 15 },
213};
214
215struct wm8993_priv {
216 u16 reg_cache[WM8993_REGISTER_COUNT];
217 struct wm8993_platform_data pdata;
218 struct snd_soc_codec codec;
219 int master;
220 int sysclk_source;
221 int tdm_slots;
222 int tdm_width;
223 unsigned int mclk_rate;
224 unsigned int sysclk_rate;
225 unsigned int fs;
226 unsigned int bclk;
227 int class_w_users;
228 unsigned int fll_fref;
229 unsigned int fll_fout;
230};
231
232static unsigned int wm8993_read_hw(struct snd_soc_codec *codec, u8 reg)
233{
234 struct i2c_msg xfer[2];
235 u16 data;
236 int ret;
237 struct i2c_client *i2c = codec->control_data;
238
239 /* Write register */
240 xfer[0].addr = i2c->addr;
241 xfer[0].flags = 0;
242 xfer[0].len = 1;
243 xfer[0].buf = &reg;
244
245 /* Read data */
246 xfer[1].addr = i2c->addr;
247 xfer[1].flags = I2C_M_RD;
248 xfer[1].len = 2;
249 xfer[1].buf = (u8 *)&data;
250
251 ret = i2c_transfer(i2c->adapter, xfer, 2);
252 if (ret != 2) {
253 dev_err(codec->dev, "Failed to read 0x%x: %d\n", reg, ret);
254 return 0;
255 }
256
257 return (data >> 8) | ((data & 0xff) << 8);
258}
259
260static int wm8993_volatile(unsigned int reg)
261{
262 switch (reg) {
263 case WM8993_SOFTWARE_RESET:
264 case WM8993_DC_SERVO_0:
265 case WM8993_DC_SERVO_READBACK_0:
266 case WM8993_DC_SERVO_READBACK_1:
267 case WM8993_DC_SERVO_READBACK_2:
268 return 1;
269 default:
270 return 0;
271 }
272}
273
274static unsigned int wm8993_read(struct snd_soc_codec *codec,
275 unsigned int reg)
276{
277 u16 *reg_cache = codec->reg_cache;
278
279 BUG_ON(reg > WM8993_MAX_REGISTER);
280
281 if (wm8993_volatile(reg))
282 return wm8993_read_hw(codec, reg);
283 else
284 return reg_cache[reg];
285}
286
287static int wm8993_write(struct snd_soc_codec *codec, unsigned int reg,
288 unsigned int value)
289{
290 u16 *reg_cache = codec->reg_cache;
291 u8 data[3];
292 int ret;
293
294 BUG_ON(reg > WM8993_MAX_REGISTER);
295
296 /* data is
297 * D23..D16 WM8993 register address
298 * D15..D0 register data, MSB first
299 */
300 data[0] = reg;
301 data[1] = value >> 8;
302 data[2] = value & 0x00ff;
303
304 if (!wm8993_volatile(reg))
305 reg_cache[reg] = value;
306
307 ret = codec->hw_write(codec->control_data, data, 3);
308
309 if (ret == 3)
310 return 0;
311 if (ret < 0)
312 return ret;
313 return -EIO;
314}
315
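For illustration, a minimal sketch of the bytes wm8993_write() above puts on the wire; the register and value are chosen arbitrarily, not taken from this patch:

	/* wm8993_write(codec, WM8993_POWER_MANAGEMENT_1, 0x1f03) sends: */
	u8 wire[3] = {
		0x01,	/* 8 bit register address (WM8993_POWER_MANAGEMENT_1) */
		0x1f,	/* value bits [15:8] */
		0x03,	/* value bits [7:0] */
	};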
316struct _fll_div {
317 u16 fll_fratio;
318 u16 fll_outdiv;
319 u16 fll_clk_ref_div;
320 u16 n;
321 u16 k;
322};
323
324/* The fractional scaling of the FLL divider (2^16), multiplied by 10
325 * so that K can be rounded to the nearest integer later */
326#define FIXED_FLL_SIZE ((1 << 16) * 10)
327
328static struct {
329 unsigned int min;
330 unsigned int max;
331 u16 fll_fratio;
332 int ratio;
333} fll_fratios[] = {
334 { 0, 64000, 4, 16 },
335 { 64000, 128000, 3, 8 },
336 { 128000, 256000, 2, 4 },
337 { 256000, 1000000, 1, 2 },
338 { 1000000, 13500000, 0, 1 },
339};
340
341static int fll_factors(struct _fll_div *fll_div, unsigned int Fref,
342 unsigned int Fout)
343{
344 u64 Kpart;
345 unsigned int K, Ndiv, Nmod, target;
346 unsigned int div;
347 int i;
348
349 /* Fref must be <=13.5MHz */
350 div = 1;
351 fll_div->fll_clk_ref_div = 0;
352 while ((Fref / div) > 13500000) {
353 div *= 2;
354 fll_div->fll_clk_ref_div++;
355
356 if (div > 8) {
357 pr_err("Can't scale %uHz input down to <=13.5MHz\n",
358 Fref);
359 return -EINVAL;
360 }
361 }
362
363 pr_debug("Fref=%u Fout=%u\n", Fref, Fout);
364
365 /* Apply the division for our remaining calculations */
366 Fref /= div;
367
368 /* Fvco should be 90-100MHz; don't check the upper bound */
369 div = 0;
370 target = Fout * 2;
371 while (target < 90000000) {
372 div++;
373 target *= 2;
374 if (div > 7) {
375 pr_err("Unable to find FLL_OUTDIV for Fout=%uHz\n",
376 Fout);
377 return -EINVAL;
378 }
379 }
380 fll_div->fll_outdiv = div;
381
382 pr_debug("Fvco=%dHz\n", target);
383
384 /* Find an appropriate FLL_FRATIO and factor it out of the target */
385 for (i = 0; i < ARRAY_SIZE(fll_fratios); i++) {
386 if (fll_fratios[i].min <= Fref && Fref <= fll_fratios[i].max) {
387 fll_div->fll_fratio = fll_fratios[i].fll_fratio;
388 target /= fll_fratios[i].ratio;
389 break;
390 }
391 }
392 if (i == ARRAY_SIZE(fll_fratios)) {
393 pr_err("Unable to find FLL_FRATIO for Fref=%uHz\n", Fref);
394 return -EINVAL;
395 }
396
397 /* Now, calculate N.K */
398 Ndiv = target / Fref;
399
400 fll_div->n = Ndiv;
401 Nmod = target % Fref;
402 pr_debug("Nmod=%d\n", Nmod);
403
404 /* Calculate fractional part - scale up so we can round. */
405 Kpart = FIXED_FLL_SIZE * (long long)Nmod;
406
407 do_div(Kpart, Fref);
408
409 K = Kpart & 0xFFFFFFFF;
410
411 if ((K % 10) >= 5)
412 K += 5;
413
414 /* Move down to proper range now rounding is done */
415 fll_div->k = K / 10;
416
417 pr_debug("N=%x K=%x FLL_FRATIO=%x FLL_OUTDIV=%x FLL_CLK_REF_DIV=%x\n",
418 fll_div->n, fll_div->k,
419 fll_div->fll_fratio, fll_div->fll_outdiv,
420 fll_div->fll_clk_ref_div);
421
422 return 0;
423}
424
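A minimal standalone sketch (not part of this patch) that walks the same search fll_factors() above performs, for Fref = 32768Hz and Fout = 12.288MHz; the clock-reference divider and the final rounding of K are skipped since they do not apply to these inputs:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int Fref = 32768, Fout = 12288000;
	unsigned int target = Fout * 2;		/* Fvco must reach 90MHz */
	unsigned int outdiv = 0;
	uint64_t kpart;

	while (target < 90000000) {
		outdiv++;
		target *= 2;
	}
	printf("FLL_OUTDIV=%u Fvco=%uHz\n", outdiv, target);

	target /= 16;	/* Fref < 64kHz, so the FLL_FRATIO table gives ratio 16 */

	printf("N=%u\n", target / Fref);
	kpart = (uint64_t)(target % Fref) * (1 << 16) * 10;
	printf("K=0x%x\n", (unsigned int)(kpart / Fref / 10));

	/* Prints: FLL_OUTDIV=2 Fvco=98304000Hz, N=187, K=0x8000 */
	return 0;
}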
425static int wm8993_set_fll(struct snd_soc_dai *dai, int fll_id,
426 unsigned int Fref, unsigned int Fout)
427{
428 struct snd_soc_codec *codec = dai->codec;
429 struct wm8993_priv *wm8993 = codec->private_data;
430 u16 reg1, reg4, reg5;
431 struct _fll_div fll_div;
432 int ret;
433
434 /* Any change? */
435 if (Fref == wm8993->fll_fref && Fout == wm8993->fll_fout)
436 return 0;
437
438 /* Disable the FLL */
439 if (Fout == 0) {
440 dev_dbg(codec->dev, "FLL disabled\n");
441 wm8993->fll_fref = 0;
442 wm8993->fll_fout = 0;
443
444 reg1 = wm8993_read(codec, WM8993_FLL_CONTROL_1);
445 reg1 &= ~WM8993_FLL_ENA;
446 wm8993_write(codec, WM8993_FLL_CONTROL_1, reg1);
447
448 return 0;
449 }
450
451 ret = fll_factors(&fll_div, Fref, Fout);
452 if (ret != 0)
453 return ret;
454
455 reg5 = wm8993_read(codec, WM8993_FLL_CONTROL_5);
456 reg5 &= ~WM8993_FLL_CLK_SRC_MASK;
457
458 switch (fll_id) {
459 case WM8993_FLL_MCLK:
460 break;
461
462 case WM8993_FLL_LRCLK:
463 reg5 |= 1;
464 break;
465
466 case WM8993_FLL_BCLK:
467 reg5 |= 2;
468 break;
469
470 default:
471 dev_err(codec->dev, "Unknown FLL ID %d\n", fll_id);
472 return -EINVAL;
473 }
474
475 /* Any FLL configuration change requires that the FLL be
476 * disabled first. */
477 reg1 = wm8993_read(codec, WM8993_FLL_CONTROL_1);
478 reg1 &= ~WM8993_FLL_ENA;
479 wm8993_write(codec, WM8993_FLL_CONTROL_1, reg1);
480
481 /* Apply the configuration */
482 if (fll_div.k)
483 reg1 |= WM8993_FLL_FRAC_MASK;
484 else
485 reg1 &= ~WM8993_FLL_FRAC_MASK;
486 wm8993_write(codec, WM8993_FLL_CONTROL_1, reg1);
487
488 wm8993_write(codec, WM8993_FLL_CONTROL_2,
489 (fll_div.fll_outdiv << WM8993_FLL_OUTDIV_SHIFT) |
490 (fll_div.fll_fratio << WM8993_FLL_FRATIO_SHIFT));
491 wm8993_write(codec, WM8993_FLL_CONTROL_3, fll_div.k);
492
493 reg4 = wm8993_read(codec, WM8993_FLL_CONTROL_4);
494 reg4 &= ~WM8993_FLL_N_MASK;
495 reg4 |= fll_div.n << WM8993_FLL_N_SHIFT;
496 wm8993_write(codec, WM8993_FLL_CONTROL_4, reg4);
497
498 reg5 &= ~WM8993_FLL_CLK_REF_DIV_MASK;
499 reg5 |= fll_div.fll_clk_ref_div << WM8993_FLL_CLK_REF_DIV_SHIFT;
500 wm8993_write(codec, WM8993_FLL_CONTROL_5, reg5);
501
502 /* Enable the FLL */
503 wm8993_write(codec, WM8993_FLL_CONTROL_1, reg1 | WM8993_FLL_ENA);
504
505 dev_dbg(codec->dev, "FLL enabled at %dHz->%dHz\n", Fref, Fout);
506
507 wm8993->fll_fref = Fref;
508 wm8993->fll_fout = Fout;
509
510 return 0;
511}
512
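A hypothetical machine-driver sequence (clock rates and the codec_dai variable are illustrative, assuming the four-argument snd_soc_dai_set_pll() of this kernel) that starts the FLL from a 32.768kHz reference and then selects it as SYSCLK:

	ret = snd_soc_dai_set_pll(codec_dai, WM8993_FLL_MCLK, 32768, 12288000);
	if (ret == 0)
		ret = snd_soc_dai_set_sysclk(codec_dai, WM8993_SYSCLK_FLL,
					     12288000, SND_SOC_CLOCK_IN);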
513static int configure_clock(struct snd_soc_codec *codec)
514{
515 struct wm8993_priv *wm8993 = codec->private_data;
516 unsigned int reg;
517
518 /* This should be done on init() for bypass paths */
519 switch (wm8993->sysclk_source) {
520 case WM8993_SYSCLK_MCLK:
521 dev_dbg(codec->dev, "Using %dHz MCLK\n", wm8993->mclk_rate);
522
523 reg = wm8993_read(codec, WM8993_CLOCKING_2);
524 reg &= ~(WM8993_MCLK_DIV | WM8993_SYSCLK_SRC);
525 if (wm8993->mclk_rate > 13500000) {
526 reg |= WM8993_MCLK_DIV;
527 wm8993->sysclk_rate = wm8993->mclk_rate / 2;
528 } else {
529 reg &= ~WM8993_MCLK_DIV;
530 wm8993->sysclk_rate = wm8993->mclk_rate;
531 }
532 wm8993_write(codec, WM8993_CLOCKING_2, reg);
533 break;
534
535 case WM8993_SYSCLK_FLL:
536 dev_dbg(codec->dev, "Using %dHz FLL clock\n",
537 wm8993->fll_fout);
538
539 reg = wm8993_read(codec, WM8993_CLOCKING_2);
540 reg |= WM8993_SYSCLK_SRC;
541 if (wm8993->fll_fout > 13500000) {
542 reg |= WM8993_MCLK_DIV;
543 wm8993->sysclk_rate = wm8993->fll_fout / 2;
544 } else {
545 reg &= ~WM8993_MCLK_DIV;
546 wm8993->sysclk_rate = wm8993->fll_fout;
547 }
548 wm8993_write(codec, WM8993_CLOCKING_2, reg);
549 break;
550
551 default:
552 dev_err(codec->dev, "System clock not configured\n");
553 return -EINVAL;
554 }
555
556 dev_dbg(codec->dev, "CLK_SYS is %dHz\n", wm8993->sysclk_rate);
557
558 return 0;
559}
560
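To make the divider behaviour above concrete (rates illustrative): any source above 13.5MHz is halved by MCLK_DIV before becoming CLK_SYS, anything at or below that is used directly.

	24576000Hz MCLK -> MCLK_DIV set   -> CLK_SYS = 24576000 / 2 = 12288000Hz
	12288000Hz MCLK -> MCLK_DIV clear -> CLK_SYS = 12288000Hz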
561static const DECLARE_TLV_DB_SCALE(sidetone_tlv, -3600, 300, 0);
562static const DECLARE_TLV_DB_SCALE(drc_comp_threash, -4500, 75, 0);
563static const DECLARE_TLV_DB_SCALE(drc_comp_amp, -2250, 75, 0);
564static const DECLARE_TLV_DB_SCALE(drc_min_tlv, -1800, 600, 0);
565static const unsigned int drc_max_tlv[] = {
566 TLV_DB_RANGE_HEAD(4),
567 0, 2, TLV_DB_SCALE_ITEM(1200, 600, 0),
568 3, 3, TLV_DB_SCALE_ITEM(3600, 0, 0),
569};
570static const DECLARE_TLV_DB_SCALE(drc_qr_tlv, 1200, 600, 0);
571static const DECLARE_TLV_DB_SCALE(drc_startup_tlv, -1800, 300, 0);
572static const DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0);
573static const DECLARE_TLV_DB_SCALE(digital_tlv, -7200, 75, 1);
574static const DECLARE_TLV_DB_SCALE(dac_boost_tlv, 0, 600, 0);
575
576static const char *dac_deemph_text[] = {
577 "None",
578 "32kHz",
579 "44.1kHz",
580 "48kHz",
581};
582
583static const struct soc_enum dac_deemph =
584 SOC_ENUM_SINGLE(WM8993_DAC_CTRL, 4, 4, dac_deemph_text);
585
586static const char *adc_hpf_text[] = {
587 "Hi-Fi",
588 "Voice 1",
589 "Voice 2",
590 "Voice 3",
591};
592
593static const struct soc_enum adc_hpf =
594 SOC_ENUM_SINGLE(WM8993_ADC_CTRL, 5, 4, adc_hpf_text);
595
596static const char *drc_path_text[] = {
597 "ADC",
598 "DAC"
599};
600
601static const struct soc_enum drc_path =
602 SOC_ENUM_SINGLE(WM8993_DRC_CONTROL_1, 14, 2, drc_path_text);
603
604static const char *drc_r0_text[] = {
605 "1",
606 "1/2",
607 "1/4",
608 "1/8",
609 "1/16",
610 "0",
611};
612
613static const struct soc_enum drc_r0 =
614 SOC_ENUM_SINGLE(WM8993_DRC_CONTROL_3, 8, 6, drc_r0_text);
615
616static const char *drc_r1_text[] = {
617 "1",
618 "1/2",
619 "1/4",
620 "1/8",
621 "0",
622};
623
624static const struct soc_enum drc_r1 =
625 SOC_ENUM_SINGLE(WM8993_DRC_CONTROL_4, 13, 5, drc_r1_text);
626
627static const char *drc_attack_text[] = {
628 "Reserved",
629 "181us",
630 "363us",
631 "726us",
632 "1.45ms",
633 "2.9ms",
634 "5.8ms",
635 "11.6ms",
636 "23.2ms",
637 "46.4ms",
638 "92.8ms",
639 "185.6ms",
640};
641
642static const struct soc_enum drc_attack =
643 SOC_ENUM_SINGLE(WM8993_DRC_CONTROL_2, 12, 12, drc_attack_text);
644
645static const char *drc_decay_text[] = {
646 "186ms",
647 "372ms",
648 "743ms",
649 "1.49s",
650 "2.97ms",
651 "5.94ms",
652 "11.89ms",
653 "23.78ms",
654 "47.56ms",
655};
656
657static const struct soc_enum drc_decay =
658 SOC_ENUM_SINGLE(WM8993_DRC_CONTROL_2, 8, 9, drc_decay_text);
659
660static const char *drc_ff_text[] = {
661 "5 samples",
662 "9 samples",
663};
664
665static const struct soc_enum drc_ff =
666 SOC_ENUM_SINGLE(WM8993_DRC_CONTROL_3, 7, 2, drc_ff_text);
667
668static const char *drc_qr_rate_text[] = {
669 "0.725ms",
670 "1.45ms",
671 "5.8ms",
672};
673
674static const struct soc_enum drc_qr_rate =
675 SOC_ENUM_SINGLE(WM8993_DRC_CONTROL_3, 0, 3, drc_qr_rate_text);
676
677static const char *drc_smooth_text[] = {
678 "Low",
679 "Medium",
680 "High",
681};
682
683static const struct soc_enum drc_smooth =
684 SOC_ENUM_SINGLE(WM8993_DRC_CONTROL_1, 4, 3, drc_smooth_text);
685
686static const struct snd_kcontrol_new wm8993_snd_controls[] = {
687SOC_DOUBLE_TLV("Digital Sidetone Volume", WM8993_DIGITAL_SIDE_TONE,
688 5, 9, 12, 0, sidetone_tlv),
689
690SOC_SINGLE("DRC Switch", WM8993_DRC_CONTROL_1, 15, 1, 0),
691SOC_ENUM("DRC Path", drc_path),
692SOC_SINGLE_TLV("DRC Compressor Threashold Volume", WM8993_DRC_CONTROL_2,
693 2, 60, 1, drc_comp_threash),
694SOC_SINGLE_TLV("DRC Compressor Amplitude Volume", WM8993_DRC_CONTROL_3,
695 11, 30, 1, drc_comp_amp),
696SOC_ENUM("DRC R0", drc_r0),
697SOC_ENUM("DRC R1", drc_r1),
698SOC_SINGLE_TLV("DRC Minimum Volume", WM8993_DRC_CONTROL_1, 2, 3, 1,
699 drc_min_tlv),
700SOC_SINGLE_TLV("DRC Maximum Volume", WM8993_DRC_CONTROL_1, 0, 3, 0,
701 drc_max_tlv),
702SOC_ENUM("DRC Attack Rate", drc_attack),
703SOC_ENUM("DRC Decay Rate", drc_decay),
704SOC_ENUM("DRC FF Delay", drc_ff),
705SOC_SINGLE("DRC Anti-clip Switch", WM8993_DRC_CONTROL_1, 9, 1, 0),
706SOC_SINGLE("DRC Quick Release Switch", WM8993_DRC_CONTROL_1, 10, 1, 0),
707SOC_SINGLE_TLV("DRC Quick Release Volume", WM8993_DRC_CONTROL_3, 2, 3, 0,
708 drc_qr_tlv),
709SOC_ENUM("DRC Quick Release Rate", drc_qr_rate),
710SOC_SINGLE("DRC Smoothing Switch", WM8993_DRC_CONTROL_1, 11, 1, 0),
711SOC_SINGLE("DRC Smoothing Hysteresis Switch", WM8993_DRC_CONTROL_1, 8, 1, 0),
712SOC_ENUM("DRC Smoothing Hysteresis Threashold", drc_smooth),
713SOC_SINGLE_TLV("DRC Startup Volume", WM8993_DRC_CONTROL_4, 8, 18, 0,
714 drc_startup_tlv),
715
716SOC_SINGLE("EQ Switch", WM8993_EQ1, 0, 1, 0),
717
718SOC_DOUBLE_R_TLV("Capture Volume", WM8993_LEFT_ADC_DIGITAL_VOLUME,
719 WM8993_RIGHT_ADC_DIGITAL_VOLUME, 1, 96, 0, digital_tlv),
720SOC_SINGLE("ADC High Pass Filter Switch", WM8993_ADC_CTRL, 8, 1, 0),
721SOC_ENUM("ADC High Pass Filter Mode", adc_hpf),
722
723SOC_DOUBLE_R_TLV("Playback Volume", WM8993_LEFT_DAC_DIGITAL_VOLUME,
724 WM8993_RIGHT_DAC_DIGITAL_VOLUME, 1, 96, 0, digital_tlv),
725SOC_SINGLE_TLV("Playback Boost Volume", WM8993_AUDIO_INTERFACE_2, 10, 3, 0,
726 dac_boost_tlv),
727SOC_ENUM("DAC Deemphasis", dac_deemph),
728
729SOC_SINGLE_TLV("SPKL DAC Volume", WM8993_SPKMIXL_ATTENUATION,
730 2, 1, 1, wm_hubs_spkmix_tlv),
731
732SOC_SINGLE_TLV("SPKR DAC Volume", WM8993_SPKMIXR_ATTENUATION,
733 2, 1, 1, wm_hubs_spkmix_tlv),
734};
735
736static const struct snd_kcontrol_new wm8993_eq_controls[] = {
737SOC_SINGLE_TLV("EQ1 Volume", WM8993_EQ2, 0, 24, 0, eq_tlv),
738SOC_SINGLE_TLV("EQ2 Volume", WM8993_EQ3, 0, 24, 0, eq_tlv),
739SOC_SINGLE_TLV("EQ3 Volume", WM8993_EQ4, 0, 24, 0, eq_tlv),
740SOC_SINGLE_TLV("EQ4 Volume", WM8993_EQ5, 0, 24, 0, eq_tlv),
741SOC_SINGLE_TLV("EQ5 Volume", WM8993_EQ6, 0, 24, 0, eq_tlv),
742};
743
744static int clk_sys_event(struct snd_soc_dapm_widget *w,
745 struct snd_kcontrol *kcontrol, int event)
746{
747 struct snd_soc_codec *codec = w->codec;
748
749 switch (event) {
750 case SND_SOC_DAPM_PRE_PMU:
751 return configure_clock(codec);
752
753 case SND_SOC_DAPM_POST_PMD:
754 break;
755 }
756
757 return 0;
758}
759
760/*
761 * When used with DAC outputs only the WM8993 charge pump supports
762 * operation in class W mode, providing very low power consumption
763 * when used with digital sources. Enable and disable this mode
764 * automatically depending on the mixer configuration.
765 *
766 * Currently the only supported paths are the direct DAC->headphone
767 * paths (which provide minimum power consumption anyway).
768 */
769static int class_w_put(struct snd_kcontrol *kcontrol,
770 struct snd_ctl_elem_value *ucontrol)
771{
772 struct snd_soc_dapm_widget *widget = snd_kcontrol_chip(kcontrol);
773 struct snd_soc_codec *codec = widget->codec;
774 struct wm8993_priv *wm8993 = codec->private_data;
775 int ret;
776
777 /* Turn it off if we're using the main output mixer */
778 if (ucontrol->value.integer.value[0] == 0) {
779 if (wm8993->class_w_users == 0) {
780 dev_dbg(codec->dev, "Disabling Class W\n");
781 snd_soc_update_bits(codec, WM8993_CLASS_W_0,
782 WM8993_CP_DYN_FREQ |
783 WM8993_CP_DYN_V,
784 0);
785 }
786 wm8993->class_w_users++;
787 }
788
789 /* Implement the change */
790 ret = snd_soc_dapm_put_enum_double(kcontrol, ucontrol);
791
792 /* Enable it if we're using the direct DAC path */
793 if (ucontrol->value.integer.value[0] == 1) {
794 if (wm8993->class_w_users == 1) {
795 dev_dbg(codec->dev, "Enabling Class W\n");
796 snd_soc_update_bits(codec, WM8993_CLASS_W_0,
797 WM8993_CP_DYN_FREQ |
798 WM8993_CP_DYN_V,
799 WM8993_CP_DYN_FREQ |
800 WM8993_CP_DYN_V);
801 }
802 wm8993->class_w_users--;
803 }
804
805 dev_dbg(codec->dev, "Indirect DAC use count now %d\n",
806 wm8993->class_w_users);
807
808 return ret;
809}
810
811#define SOC_DAPM_ENUM_W(xname, xenum) \
812{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
813 .info = snd_soc_info_enum_double, \
814 .get = snd_soc_dapm_get_enum_double, \
815 .put = class_w_put, \
816 .private_value = (unsigned long)&xenum }
817
818static const char *hp_mux_text[] = {
819 "Mixer",
820 "DAC",
821};
822
823static const struct soc_enum hpl_enum =
824 SOC_ENUM_SINGLE(WM8993_OUTPUT_MIXER1, 8, 2, hp_mux_text);
825
826static const struct snd_kcontrol_new hpl_mux =
827 SOC_DAPM_ENUM_W("Left Headphone Mux", hpl_enum);
828
829static const struct soc_enum hpr_enum =
830 SOC_ENUM_SINGLE(WM8993_OUTPUT_MIXER2, 8, 2, hp_mux_text);
831
832static const struct snd_kcontrol_new hpr_mux =
833 SOC_DAPM_ENUM_W("Right Headphone Mux", hpr_enum);
834
835static const struct snd_kcontrol_new left_speaker_mixer[] = {
836SOC_DAPM_SINGLE("Input Switch", WM8993_SPEAKER_MIXER, 7, 1, 0),
837SOC_DAPM_SINGLE("IN1LP Switch", WM8993_SPEAKER_MIXER, 5, 1, 0),
838SOC_DAPM_SINGLE("Output Switch", WM8993_SPEAKER_MIXER, 3, 1, 0),
839SOC_DAPM_SINGLE("DAC Switch", WM8993_SPEAKER_MIXER, 6, 1, 0),
840};
841
842static const struct snd_kcontrol_new right_speaker_mixer[] = {
843SOC_DAPM_SINGLE("Input Switch", WM8993_SPEAKER_MIXER, 6, 1, 0),
844SOC_DAPM_SINGLE("IN1RP Switch", WM8993_SPEAKER_MIXER, 4, 1, 0),
845SOC_DAPM_SINGLE("Output Switch", WM8993_SPEAKER_MIXER, 2, 1, 0),
846SOC_DAPM_SINGLE("DAC Switch", WM8993_SPEAKER_MIXER, 0, 1, 0),
847};
848
849static const char *aif_text[] = {
850 "Left", "Right"
851};
852
853static const struct soc_enum aifoutl_enum =
854 SOC_ENUM_SINGLE(WM8993_AUDIO_INTERFACE_1, 15, 2, aif_text);
855
856static const struct snd_kcontrol_new aifoutl_mux =
857 SOC_DAPM_ENUM("AIFOUTL Mux", aifoutl_enum);
858
859static const struct soc_enum aifoutr_enum =
860 SOC_ENUM_SINGLE(WM8993_AUDIO_INTERFACE_1, 14, 2, aif_text);
861
862static const struct snd_kcontrol_new aifoutr_mux =
863 SOC_DAPM_ENUM("AIFOUTR Mux", aifoutr_enum);
864
865static const struct soc_enum aifinl_enum =
866 SOC_ENUM_SINGLE(WM8993_AUDIO_INTERFACE_2, 15, 2, aif_text);
867
868static const struct snd_kcontrol_new aifinl_mux =
869 SOC_DAPM_ENUM("AIFINL Mux", aifinl_enum);
870
871static const struct soc_enum aifinr_enum =
872 SOC_ENUM_SINGLE(WM8993_AUDIO_INTERFACE_2, 14, 2, aif_text);
873
874static const struct snd_kcontrol_new aifinr_mux =
875 SOC_DAPM_ENUM("AIFINR Mux", aifinr_enum);
876
877static const char *sidetone_text[] = {
878 "None", "Left", "Right"
879};
880
881static const struct soc_enum sidetonel_enum =
882 SOC_ENUM_SINGLE(WM8993_DIGITAL_SIDE_TONE, 2, 3, sidetone_text);
883
884static const struct snd_kcontrol_new sidetonel_mux =
885 SOC_DAPM_ENUM("Left Sidetone", sidetonel_enum);
886
887static const struct soc_enum sidetoner_enum =
888 SOC_ENUM_SINGLE(WM8993_DIGITAL_SIDE_TONE, 0, 3, sidetone_text);
889
890static const struct snd_kcontrol_new sidetoner_mux =
891 SOC_DAPM_ENUM("Right Sidetone", sidetoner_enum);
892
893static const struct snd_soc_dapm_widget wm8993_dapm_widgets[] = {
894SND_SOC_DAPM_SUPPLY("CLK_SYS", WM8993_BUS_CONTROL_1, 1, 0, clk_sys_event,
895 SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
896SND_SOC_DAPM_SUPPLY("TOCLK", WM8993_CLOCKING_1, 14, 0, NULL, 0),
897SND_SOC_DAPM_SUPPLY("CLK_DSP", WM8993_CLOCKING_3, 0, 0, NULL, 0),
898
899SND_SOC_DAPM_ADC("ADCL", NULL, WM8993_POWER_MANAGEMENT_2, 1, 0),
900SND_SOC_DAPM_ADC("ADCR", NULL, WM8993_POWER_MANAGEMENT_2, 0, 0),
901
902SND_SOC_DAPM_MUX("AIFOUTL Mux", SND_SOC_NOPM, 0, 0, &aifoutl_mux),
903SND_SOC_DAPM_MUX("AIFOUTR Mux", SND_SOC_NOPM, 0, 0, &aifoutr_mux),
904
905SND_SOC_DAPM_AIF_OUT("AIFOUTL", "Capture", 0, SND_SOC_NOPM, 0, 0),
906SND_SOC_DAPM_AIF_OUT("AIFOUTR", "Capture", 1, SND_SOC_NOPM, 0, 0),
907
908SND_SOC_DAPM_AIF_IN("AIFINL", "Playback", 0, SND_SOC_NOPM, 0, 0),
909SND_SOC_DAPM_AIF_IN("AIFINR", "Playback", 1, SND_SOC_NOPM, 0, 0),
910
911SND_SOC_DAPM_MUX("DACL Mux", SND_SOC_NOPM, 0, 0, &aifinl_mux),
912SND_SOC_DAPM_MUX("DACR Mux", SND_SOC_NOPM, 0, 0, &aifinr_mux),
913
914SND_SOC_DAPM_MUX("DACL Sidetone", SND_SOC_NOPM, 0, 0, &sidetonel_mux),
915SND_SOC_DAPM_MUX("DACR Sidetone", SND_SOC_NOPM, 0, 0, &sidetoner_mux),
916
917SND_SOC_DAPM_DAC("DACL", NULL, WM8993_POWER_MANAGEMENT_3, 1, 0),
918SND_SOC_DAPM_DAC("DACR", NULL, WM8993_POWER_MANAGEMENT_3, 0, 0),
919
920SND_SOC_DAPM_MUX("Left Headphone Mux", SND_SOC_NOPM, 0, 0, &hpl_mux),
921SND_SOC_DAPM_MUX("Right Headphone Mux", SND_SOC_NOPM, 0, 0, &hpr_mux),
922
923SND_SOC_DAPM_MIXER("SPKL", WM8993_POWER_MANAGEMENT_3, 8, 0,
924 left_speaker_mixer, ARRAY_SIZE(left_speaker_mixer)),
925SND_SOC_DAPM_MIXER("SPKR", WM8993_POWER_MANAGEMENT_3, 9, 0,
926 right_speaker_mixer, ARRAY_SIZE(right_speaker_mixer)),
927
928};
929
930static const struct snd_soc_dapm_route routes[] = {
931 { "ADCL", NULL, "CLK_SYS" },
932 { "ADCL", NULL, "CLK_DSP" },
933 { "ADCR", NULL, "CLK_SYS" },
934 { "ADCR", NULL, "CLK_DSP" },
935
936 { "AIFOUTL Mux", "Left", "ADCL" },
937 { "AIFOUTL Mux", "Right", "ADCR" },
938 { "AIFOUTR Mux", "Left", "ADCL" },
939 { "AIFOUTR Mux", "Right", "ADCR" },
940
941 { "AIFOUTL", NULL, "AIFOUTL Mux" },
942 { "AIFOUTR", NULL, "AIFOUTR Mux" },
943
944 { "DACL Mux", "Left", "AIFINL" },
945 { "DACL Mux", "Right", "AIFINR" },
946 { "DACR Mux", "Left", "AIFINL" },
947 { "DACR Mux", "Right", "AIFINR" },
948
949 { "DACL Sidetone", "Left", "ADCL" },
950 { "DACL Sidetone", "Right", "ADCR" },
951 { "DACR Sidetone", "Left", "ADCL" },
952 { "DACR Sidetone", "Right", "ADCR" },
953
954 { "DACL", NULL, "CLK_SYS" },
955 { "DACL", NULL, "CLK_DSP" },
956 { "DACL", NULL, "DACL Mux" },
957 { "DACL", NULL, "DACL Sidetone" },
958 { "DACR", NULL, "CLK_SYS" },
959 { "DACR", NULL, "CLK_DSP" },
960 { "DACR", NULL, "DACR Mux" },
961 { "DACR", NULL, "DACR Sidetone" },
962
963 { "Left Output Mixer", "DAC Switch", "DACL" },
964
965 { "Right Output Mixer", "DAC Switch", "DACR" },
966
967 { "Left Output PGA", NULL, "CLK_SYS" },
968
969 { "Right Output PGA", NULL, "CLK_SYS" },
970
971 { "SPKL", "DAC Switch", "DACL" },
972 { "SPKL", NULL, "CLK_SYS" },
973
974 { "SPKR", "DAC Switch", "DACR" },
975 { "SPKR", NULL, "CLK_SYS" },
976
977 { "Left Headphone Mux", "DAC", "DACL" },
978 { "Right Headphone Mux", "DAC", "DACR" },
979};
980
981static int wm8993_set_bias_level(struct snd_soc_codec *codec,
982 enum snd_soc_bias_level level)
983{
984 struct wm8993_priv *wm8993 = codec->private_data;
985
986 switch (level) {
987 case SND_SOC_BIAS_ON:
988 case SND_SOC_BIAS_PREPARE:
989 /* VMID=2*40k */
990 snd_soc_update_bits(codec, WM8993_POWER_MANAGEMENT_1,
991 WM8993_VMID_SEL_MASK, 0x2);
992 snd_soc_update_bits(codec, WM8993_POWER_MANAGEMENT_2,
993 WM8993_TSHUT_ENA, WM8993_TSHUT_ENA);
994 break;
995
996 case SND_SOC_BIAS_STANDBY:
997 if (codec->bias_level == SND_SOC_BIAS_OFF) {
998 /* Bring up VMID with fast soft start */
999 snd_soc_update_bits(codec, WM8993_ANTIPOP2,
1000 WM8993_STARTUP_BIAS_ENA |
1001 WM8993_VMID_BUF_ENA |
1002 WM8993_VMID_RAMP_MASK |
1003 WM8993_BIAS_SRC,
1004 WM8993_STARTUP_BIAS_ENA |
1005 WM8993_VMID_BUF_ENA |
1006 WM8993_VMID_RAMP_MASK |
1007 WM8993_BIAS_SRC);
1008
1009 /* If either line output is single ended we
1010 * need the VMID buffer */
1011 if (!wm8993->pdata.lineout1_diff ||
1012 !wm8993->pdata.lineout2_diff)
1013 snd_soc_update_bits(codec, WM8993_ANTIPOP1,
1014 WM8993_LINEOUT_VMID_BUF_ENA,
1015 WM8993_LINEOUT_VMID_BUF_ENA);
1016
1017 /* VMID=2*40k */
1018 snd_soc_update_bits(codec, WM8993_POWER_MANAGEMENT_1,
1019 WM8993_VMID_SEL_MASK |
1020 WM8993_BIAS_ENA,
1021 WM8993_BIAS_ENA | 0x2);
1022 msleep(32);
1023
1024 /* Switch to normal bias */
1025 snd_soc_update_bits(codec, WM8993_ANTIPOP2,
1026 WM8993_BIAS_SRC |
1027 WM8993_STARTUP_BIAS_ENA, 0);
1028 }
1029
1030 /* VMID=2*240k */
1031 snd_soc_update_bits(codec, WM8993_POWER_MANAGEMENT_1,
1032 WM8993_VMID_SEL_MASK, 0x4);
1033
1034 snd_soc_update_bits(codec, WM8993_POWER_MANAGEMENT_2,
1035 WM8993_TSHUT_ENA, 0);
1036 break;
1037
1038 case SND_SOC_BIAS_OFF:
1039 snd_soc_update_bits(codec, WM8993_ANTIPOP1,
1040 WM8993_LINEOUT_VMID_BUF_ENA, 0);
1041
1042 snd_soc_update_bits(codec, WM8993_POWER_MANAGEMENT_1,
1043 WM8993_VMID_SEL_MASK | WM8993_BIAS_ENA,
1044 0);
1045 break;
1046 }
1047
1048 codec->bias_level = level;
1049
1050 return 0;
1051}
1052
1053static int wm8993_set_sysclk(struct snd_soc_dai *codec_dai,
1054 int clk_id, unsigned int freq, int dir)
1055{
1056 struct snd_soc_codec *codec = codec_dai->codec;
1057 struct wm8993_priv *wm8993 = codec->private_data;
1058
1059 switch (clk_id) {
1060 case WM8993_SYSCLK_MCLK:
1061 wm8993->mclk_rate = freq;
1062 case WM8993_SYSCLK_FLL:
1063 wm8993->sysclk_source = clk_id;
1064 break;
1065
1066 default:
1067 return -EINVAL;
1068 }
1069
1070 return 0;
1071}
1072
1073static int wm8993_set_dai_fmt(struct snd_soc_dai *dai,
1074 unsigned int fmt)
1075{
1076 struct snd_soc_codec *codec = dai->codec;
1077 struct wm8993_priv *wm8993 = codec->private_data;
1078 unsigned int aif1 = wm8993_read(codec, WM8993_AUDIO_INTERFACE_1);
1079 unsigned int aif4 = wm8993_read(codec, WM8993_AUDIO_INTERFACE_4);
1080
1081 aif1 &= ~(WM8993_BCLK_DIR | WM8993_AIF_BCLK_INV |
1082 WM8993_AIF_LRCLK_INV | WM8993_AIF_FMT_MASK);
1083 aif4 &= ~WM8993_LRCLK_DIR;
1084
1085 switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
1086 case SND_SOC_DAIFMT_CBS_CFS:
1087 wm8993->master = 0;
1088 break;
1089 case SND_SOC_DAIFMT_CBS_CFM:
1090 aif4 |= WM8993_LRCLK_DIR;
1091 wm8993->master = 1;
1092 break;
1093 case SND_SOC_DAIFMT_CBM_CFS:
1094 aif1 |= WM8993_BCLK_DIR;
1095 wm8993->master = 1;
1096 break;
1097 case SND_SOC_DAIFMT_CBM_CFM:
1098 aif1 |= WM8993_BCLK_DIR;
1099 aif4 |= WM8993_LRCLK_DIR;
1100 wm8993->master = 1;
1101 break;
1102 default:
1103 return -EINVAL;
1104 }
1105
1106 switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
1107 case SND_SOC_DAIFMT_DSP_B:
1108 aif1 |= WM8993_AIF_LRCLK_INV;
1109 case SND_SOC_DAIFMT_DSP_A:
1110 aif1 |= 0x18;
1111 break;
1112 case SND_SOC_DAIFMT_I2S:
1113 aif1 |= 0x10;
1114 break;
1115 case SND_SOC_DAIFMT_RIGHT_J:
1116 break;
1117 case SND_SOC_DAIFMT_LEFT_J:
1118 aif1 |= 0x8;
1119 break;
1120 default:
1121 return -EINVAL;
1122 }
1123
1124 switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
1125 case SND_SOC_DAIFMT_DSP_A:
1126 case SND_SOC_DAIFMT_DSP_B:
1127 /* frame inversion not valid for DSP modes */
1128 switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
1129 case SND_SOC_DAIFMT_NB_NF:
1130 break;
1131 case SND_SOC_DAIFMT_IB_NF:
1132 aif1 |= WM8993_AIF_BCLK_INV;
1133 break;
1134 default:
1135 return -EINVAL;
1136 }
1137 break;
1138
1139 case SND_SOC_DAIFMT_I2S:
1140 case SND_SOC_DAIFMT_RIGHT_J:
1141 case SND_SOC_DAIFMT_LEFT_J:
1142 switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
1143 case SND_SOC_DAIFMT_NB_NF:
1144 break;
1145 case SND_SOC_DAIFMT_IB_IF:
1146 aif1 |= WM8993_AIF_BCLK_INV | WM8993_AIF_LRCLK_INV;
1147 break;
1148 case SND_SOC_DAIFMT_IB_NF:
1149 aif1 |= WM8993_AIF_BCLK_INV;
1150 break;
1151 case SND_SOC_DAIFMT_NB_IF:
1152 aif1 |= WM8993_AIF_LRCLK_INV;
1153 break;
1154 default:
1155 return -EINVAL;
1156 }
1157 break;
1158 default:
1159 return -EINVAL;
1160 }
1161
1162 wm8993_write(codec, WM8993_AUDIO_INTERFACE_1, aif1);
1163 wm8993_write(codec, WM8993_AUDIO_INTERFACE_4, aif4);
1164
1165 return 0;
1166}
1167
1168static int wm8993_hw_params(struct snd_pcm_substream *substream,
1169 struct snd_pcm_hw_params *params,
1170 struct snd_soc_dai *dai)
1171{
1172 struct snd_soc_codec *codec = dai->codec;
1173 struct wm8993_priv *wm8993 = codec->private_data;
1174 int ret, i, best, best_val, cur_val;
1175 unsigned int clocking1, clocking3, aif1, aif4;
1176
1177 clocking1 = wm8993_read(codec, WM8993_CLOCKING_1);
1178 clocking1 &= ~WM8993_BCLK_DIV_MASK;
1179
1180 clocking3 = wm8993_read(codec, WM8993_CLOCKING_3);
1181 clocking3 &= ~(WM8993_CLK_SYS_RATE_MASK | WM8993_SAMPLE_RATE_MASK);
1182
1183 aif1 = wm8993_read(codec, WM8993_AUDIO_INTERFACE_1);
1184 aif1 &= ~WM8993_AIF_WL_MASK;
1185
1186 aif4 = wm8993_read(codec, WM8993_AUDIO_INTERFACE_4);
1187 aif4 &= ~WM8993_LRCLK_RATE_MASK;
1188
1189 /* What BCLK do we need? */
1190 wm8993->fs = params_rate(params);
1191 wm8993->bclk = 2 * wm8993->fs;
1192 if (wm8993->tdm_slots) {
1193 dev_dbg(codec->dev, "Configuring for %d %d bit TDM slots\n",
1194 wm8993->tdm_slots, wm8993->tdm_width);
1195 wm8993->bclk *= wm8993->tdm_width * wm8993->tdm_slots;
1196 } else {
1197 switch (params_format(params)) {
1198 case SNDRV_PCM_FORMAT_S16_LE:
1199 wm8993->bclk *= 16;
1200 break;
1201 case SNDRV_PCM_FORMAT_S20_3LE:
1202 wm8993->bclk *= 20;
1203 aif1 |= 0x8;
1204 break;
1205 case SNDRV_PCM_FORMAT_S24_LE:
1206 wm8993->bclk *= 24;
1207 aif1 |= 0x10;
1208 break;
1209 case SNDRV_PCM_FORMAT_S32_LE:
1210 wm8993->bclk *= 32;
1211 aif1 |= 0x18;
1212 break;
1213 default:
1214 return -EINVAL;
1215 }
1216 }
1217
1218 dev_dbg(codec->dev, "Target BCLK is %dHz\n", wm8993->bclk);
1219
1220 ret = configure_clock(codec);
1221 if (ret != 0)
1222 return ret;
1223
1224 /* Select nearest CLK_SYS_RATE */
1225 best = 0;
1226 best_val = abs((wm8993->sysclk_rate / clk_sys_rates[0].ratio)
1227 - wm8993->fs);
1228 for (i = 1; i < ARRAY_SIZE(clk_sys_rates); i++) {
1229 cur_val = abs((wm8993->sysclk_rate /
1230 clk_sys_rates[i].ratio) - wm8993->fs);
1231 if (cur_val < best_val) {
1232 best = i;
1233 best_val = cur_val;
1234 }
1235 }
1236 dev_dbg(codec->dev, "Selected CLK_SYS_RATIO of %d\n",
1237 clk_sys_rates[best].ratio);
1238 clocking3 |= (clk_sys_rates[best].clk_sys_rate
1239 << WM8993_CLK_SYS_RATE_SHIFT);
1240
1241 /* SAMPLE_RATE */
1242 best = 0;
1243 best_val = abs(wm8993->fs - sample_rates[0].rate);
1244 for (i = 1; i < ARRAY_SIZE(sample_rates); i++) {
1245 /* Closest match */
1246 cur_val = abs(wm8993->fs - sample_rates[i].rate);
1247 if (cur_val < best_val) {
1248 best = i;
1249 best_val = cur_val;
1250 }
1251 }
1252 dev_dbg(codec->dev, "Selected SAMPLE_RATE of %dHz\n",
1253 sample_rates[best].rate);
1254 clocking3 |= (sample_rates[best].sample_rate
1255 << WM8993_SAMPLE_RATE_SHIFT);
1256
1257 /* BCLK_DIV */
1258 best = 0;
1259 best_val = INT_MAX;
1260 for (i = 0; i < ARRAY_SIZE(bclk_divs); i++) {
1261 cur_val = ((wm8993->sysclk_rate * 10) / bclk_divs[i].div)
1262 - wm8993->bclk;
1263 if (cur_val < 0) /* Table is sorted */
1264 break;
1265 if (cur_val < best_val) {
1266 best = i;
1267 best_val = cur_val;
1268 }
1269 }
1270 wm8993->bclk = (wm8993->sysclk_rate * 10) / bclk_divs[best].div;
1271 dev_dbg(codec->dev, "Selected BCLK_DIV of %d for %dHz BCLK\n",
1272 bclk_divs[best].div, wm8993->bclk);
1273 clocking1 |= bclk_divs[best].bclk_div << WM8993_BCLK_DIV_SHIFT;
1274
1275 /* LRCLK is a simple fraction of BCLK */
1276 dev_dbg(codec->dev, "LRCLK_RATE is %d\n", wm8993->bclk / wm8993->fs);
1277 aif4 |= wm8993->bclk / wm8993->fs;
1278
1279 wm8993_write(codec, WM8993_CLOCKING_1, clocking1);
1280 wm8993_write(codec, WM8993_CLOCKING_3, clocking3);
1281 wm8993_write(codec, WM8993_AUDIO_INTERFACE_1, aif1);
1282 wm8993_write(codec, WM8993_AUDIO_INTERFACE_4, aif4);
1283
1284 /* ReTune Mobile? */
1285 if (wm8993->pdata.num_retune_configs) {
1286 u16 eq1 = wm8993_read(codec, WM8993_EQ1);
1287 struct wm8993_retune_mobile_setting *s;
1288
1289 best = 0;
1290 best_val = abs(wm8993->pdata.retune_configs[0].rate
1291 - wm8993->fs);
1292 for (i = 0; i < wm8993->pdata.num_retune_configs; i++) {
1293 cur_val = abs(wm8993->pdata.retune_configs[i].rate
1294 - wm8993->fs);
1295 if (cur_val < best_val) {
1296 best_val = cur_val;
1297 best = i;
1298 }
1299 }
1300 s = &wm8993->pdata.retune_configs[best];
1301
1302 dev_dbg(codec->dev, "ReTune Mobile %s tuned for %dHz\n",
1303 s->name, s->rate);
1304
1305 /* Disable EQ while we reconfigure */
1306 snd_soc_update_bits(codec, WM8993_EQ1, WM8993_EQ_ENA, 0);
1307
1308 for (i = 1; i < ARRAY_SIZE(s->config); i++)
1309 wm8993_write(codec, WM8993_EQ1 + i, s->config[i]);
1310
1311 snd_soc_update_bits(codec, WM8993_EQ1, WM8993_EQ_ENA, eq1);
1312 }
1313
1314 return 0;
1315}
1316
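A worked pass through the selection above, assuming a 12.288MHz CLK_SYS and a 48kHz, 16 bit stereo (non-TDM) stream; the figures are illustrative, not taken from this patch:

	target BCLK  = 2 * 48000 * 16        = 1536000Hz
	CLK_SYS_RATE: 12288000 / 256         = 48000Hz exactly -> ratio 256 (field value 3)
	SAMPLE_RATE:  nearest table entry    = 48000           (field value 5)
	BCLK_DIV:     12288000 * 10 / 80     = 1536000Hz       -> divide by 8.0 (field value 7)
	LRCLK_RATE:   1536000 / 48000        = 32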
1317static int wm8993_digital_mute(struct snd_soc_dai *codec_dai, int mute)
1318{
1319 struct snd_soc_codec *codec = codec_dai->codec;
1320 unsigned int reg;
1321
1322 reg = wm8993_read(codec, WM8993_DAC_CTRL);
1323
1324 if (mute)
1325 reg |= WM8993_DAC_MUTE;
1326 else
1327 reg &= ~WM8993_DAC_MUTE;
1328
1329 wm8993_write(codec, WM8993_DAC_CTRL, reg);
1330
1331 return 0;
1332}
1333
1334static int wm8993_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
1335 unsigned int rx_mask, int slots, int slot_width)
1336{
1337 struct snd_soc_codec *codec = dai->codec;
1338 struct wm8993_priv *wm8993 = codec->private_data;
1339 int aif1 = 0;
1340 int aif2 = 0;
1341
1342 /* Don't need to validate anything if we're turning off TDM */
1343 if (slots == 0) {
1344 wm8993->tdm_slots = 0;
1345 goto out;
1346 }
1347
1348 /* Note that we allow configurations we can't handle ourselves -
1349 * for example, we can generate clocks for slots 2 and up even if
1350 * we can't use those slots ourselves.
1351 */
1352 aif1 |= WM8993_AIFADC_TDM;
1353 aif2 |= WM8993_AIFDAC_TDM;
1354
1355 switch (rx_mask) {
1356 case 3:
1357 break;
1358 case 0xc:
1359 aif1 |= WM8993_AIFADC_TDM_CHAN;
1360 break;
1361 default:
1362 return -EINVAL;
1363 }
1364
1365
1366 switch (tx_mask) {
1367 case 3:
1368 break;
1369 case 0xc:
1370 aif2 |= WM8993_AIFDAC_TDM_CHAN;
1371 break;
1372 default:
1373 return -EINVAL;
1374 }
1375
1376out:
1377 wm8993->tdm_width = slot_width;
1378 wm8993->tdm_slots = slots / 2;
1379
1380 snd_soc_update_bits(codec, WM8993_AUDIO_INTERFACE_1,
1381 WM8993_AIFADC_TDM | WM8993_AIFADC_TDM_CHAN, aif1);
1382 snd_soc_update_bits(codec, WM8993_AUDIO_INTERFACE_2,
1383 WM8993_AIFDAC_TDM | WM8993_AIFDAC_TDM_CHAN, aif2);
1384
1385 return 0;
1386}
1387
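A hypothetical machine-driver call (mask and slot values illustrative, assuming the five-argument snd_soc_dai_set_tdm_slot() form that matches this driver's .set_tdm_slot op) placing the CODEC on slots 0 and 1 of a four-slot, 32 bit TDM bus:

	ret = snd_soc_dai_set_tdm_slot(codec_dai, 0x3, 0x3, 4, 32);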
1388static struct snd_soc_dai_ops wm8993_ops = {
1389 .set_sysclk = wm8993_set_sysclk,
1390 .set_fmt = wm8993_set_dai_fmt,
1391 .hw_params = wm8993_hw_params,
1392 .digital_mute = wm8993_digital_mute,
1393 .set_pll = wm8993_set_fll,
1394 .set_tdm_slot = wm8993_set_tdm_slot,
1395};
1396
1397#define WM8993_RATES SNDRV_PCM_RATE_8000_48000
1398
1399#define WM8993_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
1400 SNDRV_PCM_FMTBIT_S20_3LE |\
1401 SNDRV_PCM_FMTBIT_S24_LE |\
1402 SNDRV_PCM_FMTBIT_S32_LE)
1403
1404struct snd_soc_dai wm8993_dai = {
1405 .name = "WM8993",
1406 .playback = {
1407 .stream_name = "Playback",
1408 .channels_min = 1,
1409 .channels_max = 2,
1410 .rates = WM8993_RATES,
1411 .formats = WM8993_FORMATS,
1412 },
1413 .capture = {
1414 .stream_name = "Capture",
1415 .channels_min = 1,
1416 .channels_max = 2,
1417 .rates = WM8993_RATES,
1418 .formats = WM8993_FORMATS,
1419 },
1420 .ops = &wm8993_ops,
1421 .symmetric_rates = 1,
1422};
1423EXPORT_SYMBOL_GPL(wm8993_dai);
1424
1425static struct snd_soc_codec *wm8993_codec;
1426
1427static int wm8993_probe(struct platform_device *pdev)
1428{
1429 struct snd_soc_device *socdev = platform_get_drvdata(pdev);
1430 struct snd_soc_codec *codec;
1431 struct wm8993_priv *wm8993;
1432 int ret = 0;
1433
1434 if (!wm8993_codec) {
1435 dev_err(&pdev->dev, "I2C device not yet probed\n");
1436 goto err;
1437 }
1438
1439 socdev->card->codec = wm8993_codec;
1440 codec = wm8993_codec;
1441 wm8993 = codec->private_data;
1442
1443 ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
1444 if (ret < 0) {
1445 dev_err(codec->dev, "failed to create pcms\n");
1446 goto err;
1447 }
1448
1449 snd_soc_add_controls(codec, wm8993_snd_controls,
1450 ARRAY_SIZE(wm8993_snd_controls));
1451 if (wm8993->pdata.num_retune_configs != 0) {
1452 dev_dbg(codec->dev, "Using ReTune Mobile\n");
1453 } else {
1454 dev_dbg(codec->dev, "No ReTune Mobile, using normal EQ\n");
1455 snd_soc_add_controls(codec, wm8993_eq_controls,
1456 ARRAY_SIZE(wm8993_eq_controls));
1457 }
1458
1459 snd_soc_dapm_new_controls(codec, wm8993_dapm_widgets,
1460 ARRAY_SIZE(wm8993_dapm_widgets));
1461 wm_hubs_add_analogue_controls(codec);
1462
1463 snd_soc_dapm_add_routes(codec, routes, ARRAY_SIZE(routes));
1464 wm_hubs_add_analogue_routes(codec, wm8993->pdata.lineout1_diff,
1465 wm8993->pdata.lineout2_diff);
1466
1467 snd_soc_dapm_new_widgets(codec);
1468
1469 ret = snd_soc_init_card(socdev);
1470 if (ret < 0) {
1471 dev_err(codec->dev, "failed to register card\n");
1472 goto card_err;
1473 }
1474
1475 return ret;
1476
1477card_err:
1478 snd_soc_free_pcms(socdev);
1479 snd_soc_dapm_free(socdev);
1480err:
1481 return ret;
1482}
1483
1484static int wm8993_remove(struct platform_device *pdev)
1485{
1486 struct snd_soc_device *socdev = platform_get_drvdata(pdev);
1487
1488 snd_soc_free_pcms(socdev);
1489 snd_soc_dapm_free(socdev);
1490
1491 return 0;
1492}
1493
1494struct snd_soc_codec_device soc_codec_dev_wm8993 = {
1495 .probe = wm8993_probe,
1496 .remove = wm8993_remove,
1497};
1498EXPORT_SYMBOL_GPL(soc_codec_dev_wm8993);
1499
1500static int wm8993_i2c_probe(struct i2c_client *i2c,
1501 const struct i2c_device_id *id)
1502{
1503 struct wm8993_priv *wm8993;
1504 struct snd_soc_codec *codec;
1505 unsigned int val;
1506 int ret;
1507
1508 if (wm8993_codec) {
1509 dev_err(&i2c->dev, "A WM8993 is already registered\n");
1510 return -EINVAL;
1511 }
1512
1513 wm8993 = kzalloc(sizeof(struct wm8993_priv), GFP_KERNEL);
1514 if (wm8993 == NULL)
1515 return -ENOMEM;
1516
1517 codec = &wm8993->codec;
1518 if (i2c->dev.platform_data)
1519 memcpy(&wm8993->pdata, i2c->dev.platform_data,
1520 sizeof(wm8993->pdata));
1521
1522 mutex_init(&codec->mutex);
1523 INIT_LIST_HEAD(&codec->dapm_widgets);
1524 INIT_LIST_HEAD(&codec->dapm_paths);
1525
1526 codec->name = "WM8993";
1527 codec->read = wm8993_read;
1528 codec->write = wm8993_write;
1529 codec->hw_write = (hw_write_t)i2c_master_send;
1530 codec->reg_cache = wm8993->reg_cache;
1531 codec->reg_cache_size = ARRAY_SIZE(wm8993->reg_cache);
1532 codec->bias_level = SND_SOC_BIAS_OFF;
1533 codec->set_bias_level = wm8993_set_bias_level;
1534 codec->dai = &wm8993_dai;
1535 codec->num_dai = 1;
1536 codec->private_data = wm8993;
1537
1538 memcpy(wm8993->reg_cache, wm8993_reg_defaults,
1539 sizeof(wm8993->reg_cache));
1540
1541 i2c_set_clientdata(i2c, wm8993);
1542 codec->control_data = i2c;
1543 wm8993_codec = codec;
1544
1545 codec->dev = &i2c->dev;
1546
1547 val = wm8993_read_hw(codec, WM8993_SOFTWARE_RESET);
1548 if (val != wm8993_reg_defaults[WM8993_SOFTWARE_RESET]) {
1549 dev_err(codec->dev, "Invalid ID register value %x\n", val);
1550 ret = -EINVAL;
1551 goto err;
1552 }
1553
1554 ret = wm8993_write(codec, WM8993_SOFTWARE_RESET, 0xffff);
1555 if (ret != 0)
1556 goto err;
1557
1558 /* By default we're using the output mixers */
1559 wm8993->class_w_users = 2;
1560
1561 /* Latch volume update bits and default ZC on */
1562 snd_soc_update_bits(codec, WM8993_RIGHT_DAC_DIGITAL_VOLUME,
1563 WM8993_DAC_VU, WM8993_DAC_VU);
1564 snd_soc_update_bits(codec, WM8993_RIGHT_ADC_DIGITAL_VOLUME,
1565 WM8993_ADC_VU, WM8993_ADC_VU);
1566
1567 /* Manually manage the HPOUT sequencing for independent stereo
1568 * control. */
1569 snd_soc_update_bits(codec, WM8993_ANALOGUE_HP_0,
1570 WM8993_HPOUT1_AUTO_PU, 0);
1571
1572 /* Use automatic clock configuration */
1573 snd_soc_update_bits(codec, WM8993_CLOCKING_4, WM8993_SR_MODE, 0);
1574
1575 if (!wm8993->pdata.lineout1_diff)
1576 snd_soc_update_bits(codec, WM8993_LINE_MIXER1,
1577 WM8993_LINEOUT1_MODE,
1578 WM8993_LINEOUT1_MODE);
1579 if (!wm8993->pdata.lineout2_diff)
1580 snd_soc_update_bits(codec, WM8993_LINE_MIXER2,
1581 WM8993_LINEOUT2_MODE,
1582 WM8993_LINEOUT2_MODE);
1583
1584 if (wm8993->pdata.lineout1fb)
1585 snd_soc_update_bits(codec, WM8993_ADDITIONAL_CONTROL,
1586 WM8993_LINEOUT1_FB, WM8993_LINEOUT1_FB);
1587
1588 if (wm8993->pdata.lineout2fb)
1589 snd_soc_update_bits(codec, WM8993_ADDITIONAL_CONTROL,
1590 WM8993_LINEOUT2_FB, WM8993_LINEOUT2_FB);
1591
1592 /* Apply the microphone bias/detection configuration - the
1593 * platform data is directly applicable to the register. */
1594 snd_soc_update_bits(codec, WM8993_MICBIAS,
1595 WM8993_JD_SCTHR_MASK | WM8993_JD_THR_MASK |
1596 WM8993_MICB1_LVL | WM8993_MICB2_LVL,
1597 wm8993->pdata.jd_scthr << WM8993_JD_SCTHR_SHIFT |
1598 wm8993->pdata.jd_thr << WM8993_JD_THR_SHIFT |
1599 wm8993->pdata.micbias1_lvl |
1600 wm8993->pdata.micbias2_lvl << 1);
1601
1602 ret = wm8993_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
1603 if (ret != 0)
1604 goto err;
1605
1606 wm8993_dai.dev = codec->dev;
1607
1608 ret = snd_soc_register_dai(&wm8993_dai);
1609 if (ret != 0)
1610 goto err_bias;
1611
1612 ret = snd_soc_register_codec(codec);
1613
1614 return 0;
1615
1616err_bias:
1617 wm8993_set_bias_level(codec, SND_SOC_BIAS_OFF);
1618err:
1619 wm8993_codec = NULL;
1620 kfree(wm8993);
1621 return ret;
1622}
1623
1624static int wm8993_i2c_remove(struct i2c_client *client)
1625{
1626 struct wm8993_priv *wm8993 = i2c_get_clientdata(client);
1627
1628 snd_soc_unregister_codec(&wm8993->codec);
1629 snd_soc_unregister_dai(&wm8993_dai);
1630
1631 wm8993_set_bias_level(&wm8993->codec, SND_SOC_BIAS_OFF);
1632 kfree(wm8993);
1633
1634 return 0;
1635}
1636
1637static const struct i2c_device_id wm8993_i2c_id[] = {
1638 { "wm8993", 0 },
1639 { }
1640};
1641MODULE_DEVICE_TABLE(i2c, wm8993_i2c_id);
1642
1643static struct i2c_driver wm8993_i2c_driver = {
1644 .driver = {
1645 .name = "WM8993",
1646 .owner = THIS_MODULE,
1647 },
1648 .probe = wm8993_i2c_probe,
1649 .remove = wm8993_i2c_remove,
1650 .id_table = wm8993_i2c_id,
1651};
1652
1653
1654static int __init wm8993_modinit(void)
1655{
1656 int ret;
1657
1658 ret = i2c_add_driver(&wm8993_i2c_driver);
1659 if (ret != 0)
1660 pr_err("WM8993: Unable to register I2C driver: %d\n", ret);
1661
1662 return ret;
1663}
1664module_init(wm8993_modinit);
1665
1666static void __exit wm8993_exit(void)
1667{
1668 i2c_del_driver(&wm8993_i2c_driver);
1669}
1670module_exit(wm8993_exit);
1671
1672
1673MODULE_DESCRIPTION("ASoC WM8993 driver");
1674MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
1675MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/wm8993.h b/sound/soc/codecs/wm8993.h
new file mode 100644
index 000000000000..30e71ca88dad
--- /dev/null
+++ b/sound/soc/codecs/wm8993.h
@@ -0,0 +1,2132 @@
1#ifndef WM8993_H
2#define WM8993_H
3
4extern struct snd_soc_dai wm8993_dai;
5extern struct snd_soc_codec_device soc_codec_dev_wm8993;
6
7#define WM8993_SYSCLK_MCLK 1
8#define WM8993_SYSCLK_FLL 2
9
10#define WM8993_FLL_MCLK 1
11#define WM8993_FLL_BCLK 2
12#define WM8993_FLL_LRCLK 3
13
14/*
15 * Register values.
16 */
17#define WM8993_SOFTWARE_RESET 0x00
18#define WM8993_POWER_MANAGEMENT_1 0x01
19#define WM8993_POWER_MANAGEMENT_2 0x02
20#define WM8993_POWER_MANAGEMENT_3 0x03
21#define WM8993_AUDIO_INTERFACE_1 0x04
22#define WM8993_AUDIO_INTERFACE_2 0x05
23#define WM8993_CLOCKING_1 0x06
24#define WM8993_CLOCKING_2 0x07
25#define WM8993_AUDIO_INTERFACE_3 0x08
26#define WM8993_AUDIO_INTERFACE_4 0x09
27#define WM8993_DAC_CTRL 0x0A
28#define WM8993_LEFT_DAC_DIGITAL_VOLUME 0x0B
29#define WM8993_RIGHT_DAC_DIGITAL_VOLUME 0x0C
30#define WM8993_DIGITAL_SIDE_TONE 0x0D
31#define WM8993_ADC_CTRL 0x0E
32#define WM8993_LEFT_ADC_DIGITAL_VOLUME 0x0F
33#define WM8993_RIGHT_ADC_DIGITAL_VOLUME 0x10
34#define WM8993_GPIO_CTRL_1 0x12
35#define WM8993_GPIO1 0x13
36#define WM8993_IRQ_DEBOUNCE 0x14
37#define WM8993_GPIOCTRL_2 0x16
38#define WM8993_GPIO_POL 0x17
39#define WM8993_LEFT_LINE_INPUT_1_2_VOLUME 0x18
40#define WM8993_LEFT_LINE_INPUT_3_4_VOLUME 0x19
41#define WM8993_RIGHT_LINE_INPUT_1_2_VOLUME 0x1A
42#define WM8993_RIGHT_LINE_INPUT_3_4_VOLUME 0x1B
43#define WM8993_LEFT_OUTPUT_VOLUME 0x1C
44#define WM8993_RIGHT_OUTPUT_VOLUME 0x1D
45#define WM8993_LINE_OUTPUTS_VOLUME 0x1E
46#define WM8993_HPOUT2_VOLUME 0x1F
47#define WM8993_LEFT_OPGA_VOLUME 0x20
48#define WM8993_RIGHT_OPGA_VOLUME 0x21
49#define WM8993_SPKMIXL_ATTENUATION 0x22
50#define WM8993_SPKMIXR_ATTENUATION 0x23
51#define WM8993_SPKOUT_MIXERS 0x24
52#define WM8993_SPKOUT_BOOST 0x25
53#define WM8993_SPEAKER_VOLUME_LEFT 0x26
54#define WM8993_SPEAKER_VOLUME_RIGHT 0x27
55#define WM8993_INPUT_MIXER2 0x28
56#define WM8993_INPUT_MIXER3 0x29
57#define WM8993_INPUT_MIXER4 0x2A
58#define WM8993_INPUT_MIXER5 0x2B
59#define WM8993_INPUT_MIXER6 0x2C
60#define WM8993_OUTPUT_MIXER1 0x2D
61#define WM8993_OUTPUT_MIXER2 0x2E
62#define WM8993_OUTPUT_MIXER3 0x2F
63#define WM8993_OUTPUT_MIXER4 0x30
64#define WM8993_OUTPUT_MIXER5 0x31
65#define WM8993_OUTPUT_MIXER6 0x32
66#define WM8993_HPOUT2_MIXER 0x33
67#define WM8993_LINE_MIXER1 0x34
68#define WM8993_LINE_MIXER2 0x35
69#define WM8993_SPEAKER_MIXER 0x36
70#define WM8993_ADDITIONAL_CONTROL 0x37
71#define WM8993_ANTIPOP1 0x38
72#define WM8993_ANTIPOP2 0x39
73#define WM8993_MICBIAS 0x3A
74#define WM8993_FLL_CONTROL_1 0x3C
75#define WM8993_FLL_CONTROL_2 0x3D
76#define WM8993_FLL_CONTROL_3 0x3E
77#define WM8993_FLL_CONTROL_4 0x3F
78#define WM8993_FLL_CONTROL_5 0x40
79#define WM8993_CLOCKING_3 0x41
80#define WM8993_CLOCKING_4 0x42
81#define WM8993_MW_SLAVE_CONTROL 0x43
82#define WM8993_BUS_CONTROL_1 0x45
83#define WM8993_WRITE_SEQUENCER_0 0x46
84#define WM8993_WRITE_SEQUENCER_1 0x47
85#define WM8993_WRITE_SEQUENCER_2 0x48
86#define WM8993_WRITE_SEQUENCER_3 0x49
87#define WM8993_WRITE_SEQUENCER_4 0x4A
88#define WM8993_WRITE_SEQUENCER_5 0x4B
89#define WM8993_CHARGE_PUMP_1 0x4C
90#define WM8993_CLASS_W_0 0x51
91#define WM8993_DC_SERVO_0 0x54
92#define WM8993_DC_SERVO_1 0x55
93#define WM8993_DC_SERVO_3 0x57
94#define WM8993_DC_SERVO_READBACK_0 0x58
95#define WM8993_DC_SERVO_READBACK_1 0x59
96#define WM8993_DC_SERVO_READBACK_2 0x5A
97#define WM8993_ANALOGUE_HP_0 0x60
98#define WM8993_EQ1 0x62
99#define WM8993_EQ2 0x63
100#define WM8993_EQ3 0x64
101#define WM8993_EQ4 0x65
102#define WM8993_EQ5 0x66
103#define WM8993_EQ6 0x67
104#define WM8993_EQ7 0x68
105#define WM8993_EQ8 0x69
106#define WM8993_EQ9 0x6A
107#define WM8993_EQ10 0x6B
108#define WM8993_EQ11 0x6C
109#define WM8993_EQ12 0x6D
110#define WM8993_EQ13 0x6E
111#define WM8993_EQ14 0x6F
112#define WM8993_EQ15 0x70
113#define WM8993_EQ16 0x71
114#define WM8993_EQ17 0x72
115#define WM8993_EQ18 0x73
116#define WM8993_EQ19 0x74
117#define WM8993_EQ20 0x75
118#define WM8993_EQ21 0x76
119#define WM8993_EQ22 0x77
120#define WM8993_EQ23 0x78
121#define WM8993_EQ24 0x79
122#define WM8993_DIGITAL_PULLS 0x7A
123#define WM8993_DRC_CONTROL_1 0x7B
124#define WM8993_DRC_CONTROL_2 0x7C
125#define WM8993_DRC_CONTROL_3 0x7D
126#define WM8993_DRC_CONTROL_4 0x7E
127
128#define WM8993_REGISTER_COUNT 0x7F
129#define WM8993_MAX_REGISTER 0x7E
130
131/*
132 * Field Definitions.
133 */
134
135/*
136 * R0 (0x00) - Software Reset
137 */
138#define WM8993_SW_RESET_MASK 0xFFFF /* SW_RESET - [15:0] */
139#define WM8993_SW_RESET_SHIFT 0 /* SW_RESET - [15:0] */
140#define WM8993_SW_RESET_WIDTH 16 /* SW_RESET - [15:0] */
141
142/*
143 * R1 (0x01) - Power Management (1)
144 */
145#define WM8993_SPKOUTR_ENA 0x2000 /* SPKOUTR_ENA */
146#define WM8993_SPKOUTR_ENA_MASK 0x2000 /* SPKOUTR_ENA */
147#define WM8993_SPKOUTR_ENA_SHIFT 13 /* SPKOUTR_ENA */
148#define WM8993_SPKOUTR_ENA_WIDTH 1 /* SPKOUTR_ENA */
149#define WM8993_SPKOUTL_ENA 0x1000 /* SPKOUTL_ENA */
150#define WM8993_SPKOUTL_ENA_MASK 0x1000 /* SPKOUTL_ENA */
151#define WM8993_SPKOUTL_ENA_SHIFT 12 /* SPKOUTL_ENA */
152#define WM8993_SPKOUTL_ENA_WIDTH 1 /* SPKOUTL_ENA */
153#define WM8993_HPOUT2_ENA 0x0800 /* HPOUT2_ENA */
154#define WM8993_HPOUT2_ENA_MASK 0x0800 /* HPOUT2_ENA */
155#define WM8993_HPOUT2_ENA_SHIFT 11 /* HPOUT2_ENA */
156#define WM8993_HPOUT2_ENA_WIDTH 1 /* HPOUT2_ENA */
157#define WM8993_HPOUT1L_ENA 0x0200 /* HPOUT1L_ENA */
158#define WM8993_HPOUT1L_ENA_MASK 0x0200 /* HPOUT1L_ENA */
159#define WM8993_HPOUT1L_ENA_SHIFT 9 /* HPOUT1L_ENA */
160#define WM8993_HPOUT1L_ENA_WIDTH 1 /* HPOUT1L_ENA */
161#define WM8993_HPOUT1R_ENA 0x0100 /* HPOUT1R_ENA */
162#define WM8993_HPOUT1R_ENA_MASK 0x0100 /* HPOUT1R_ENA */
163#define WM8993_HPOUT1R_ENA_SHIFT 8 /* HPOUT1R_ENA */
164#define WM8993_HPOUT1R_ENA_WIDTH 1 /* HPOUT1R_ENA */
165#define WM8993_MICB2_ENA 0x0020 /* MICB2_ENA */
166#define WM8993_MICB2_ENA_MASK 0x0020 /* MICB2_ENA */
167#define WM8993_MICB2_ENA_SHIFT 5 /* MICB2_ENA */
168#define WM8993_MICB2_ENA_WIDTH 1 /* MICB2_ENA */
169#define WM8993_MICB1_ENA 0x0010 /* MICB1_ENA */
170#define WM8993_MICB1_ENA_MASK 0x0010 /* MICB1_ENA */
171#define WM8993_MICB1_ENA_SHIFT 4 /* MICB1_ENA */
172#define WM8993_MICB1_ENA_WIDTH 1 /* MICB1_ENA */
173#define WM8993_VMID_SEL_MASK 0x0006 /* VMID_SEL - [2:1] */
174#define WM8993_VMID_SEL_SHIFT 1 /* VMID_SEL - [2:1] */
175#define WM8993_VMID_SEL_WIDTH 2 /* VMID_SEL - [2:1] */
176#define WM8993_BIAS_ENA 0x0001 /* BIAS_ENA */
177#define WM8993_BIAS_ENA_MASK 0x0001 /* BIAS_ENA */
178#define WM8993_BIAS_ENA_SHIFT 0 /* BIAS_ENA */
179#define WM8993_BIAS_ENA_WIDTH 1 /* BIAS_ENA */
180
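/*
 * Usage sketch: the *_MASK/*_SHIFT/*_WIDTH triplets are intended for
 * read-modify-write helpers. For instance, enabling the bias generator
 * might look like this (illustration only, assuming a valid
 * struct snd_soc_codec *codec):
 *
 *	snd_soc_update_bits(codec, WM8993_POWER_MANAGEMENT_1,
 *			    WM8993_BIAS_ENA_MASK, WM8993_BIAS_ENA);
 */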
181/*
182 * R2 (0x02) - Power Management (2)
183 */
184#define WM8993_TSHUT_ENA 0x4000 /* TSHUT_ENA */
185#define WM8993_TSHUT_ENA_MASK 0x4000 /* TSHUT_ENA */
186#define WM8993_TSHUT_ENA_SHIFT 14 /* TSHUT_ENA */
187#define WM8993_TSHUT_ENA_WIDTH 1 /* TSHUT_ENA */
188#define WM8993_TSHUT_OPDIS 0x2000 /* TSHUT_OPDIS */
189#define WM8993_TSHUT_OPDIS_MASK 0x2000 /* TSHUT_OPDIS */
190#define WM8993_TSHUT_OPDIS_SHIFT 13 /* TSHUT_OPDIS */
191#define WM8993_TSHUT_OPDIS_WIDTH 1 /* TSHUT_OPDIS */
192#define WM8993_OPCLK_ENA 0x0800 /* OPCLK_ENA */
193#define WM8993_OPCLK_ENA_MASK 0x0800 /* OPCLK_ENA */
194#define WM8993_OPCLK_ENA_SHIFT 11 /* OPCLK_ENA */
195#define WM8993_OPCLK_ENA_WIDTH 1 /* OPCLK_ENA */
196#define WM8993_MIXINL_ENA 0x0200 /* MIXINL_ENA */
197#define WM8993_MIXINL_ENA_MASK 0x0200 /* MIXINL_ENA */
198#define WM8993_MIXINL_ENA_SHIFT 9 /* MIXINL_ENA */
199#define WM8993_MIXINL_ENA_WIDTH 1 /* MIXINL_ENA */
200#define WM8993_MIXINR_ENA 0x0100 /* MIXINR_ENA */
201#define WM8993_MIXINR_ENA_MASK 0x0100 /* MIXINR_ENA */
202#define WM8993_MIXINR_ENA_SHIFT 8 /* MIXINR_ENA */
203#define WM8993_MIXINR_ENA_WIDTH 1 /* MIXINR_ENA */
204#define WM8993_IN2L_ENA 0x0080 /* IN2L_ENA */
205#define WM8993_IN2L_ENA_MASK 0x0080 /* IN2L_ENA */
206#define WM8993_IN2L_ENA_SHIFT 7 /* IN2L_ENA */
207#define WM8993_IN2L_ENA_WIDTH 1 /* IN2L_ENA */
208#define WM8993_IN1L_ENA 0x0040 /* IN1L_ENA */
209#define WM8993_IN1L_ENA_MASK 0x0040 /* IN1L_ENA */
210#define WM8993_IN1L_ENA_SHIFT 6 /* IN1L_ENA */
211#define WM8993_IN1L_ENA_WIDTH 1 /* IN1L_ENA */
212#define WM8993_IN2R_ENA 0x0020 /* IN2R_ENA */
213#define WM8993_IN2R_ENA_MASK 0x0020 /* IN2R_ENA */
214#define WM8993_IN2R_ENA_SHIFT 5 /* IN2R_ENA */
215#define WM8993_IN2R_ENA_WIDTH 1 /* IN2R_ENA */
216#define WM8993_IN1R_ENA 0x0010 /* IN1R_ENA */
217#define WM8993_IN1R_ENA_MASK 0x0010 /* IN1R_ENA */
218#define WM8993_IN1R_ENA_SHIFT 4 /* IN1R_ENA */
219#define WM8993_IN1R_ENA_WIDTH 1 /* IN1R_ENA */
220#define WM8993_ADCL_ENA 0x0002 /* ADCL_ENA */
221#define WM8993_ADCL_ENA_MASK 0x0002 /* ADCL_ENA */
222#define WM8993_ADCL_ENA_SHIFT 1 /* ADCL_ENA */
223#define WM8993_ADCL_ENA_WIDTH 1 /* ADCL_ENA */
224#define WM8993_ADCR_ENA 0x0001 /* ADCR_ENA */
225#define WM8993_ADCR_ENA_MASK 0x0001 /* ADCR_ENA */
226#define WM8993_ADCR_ENA_SHIFT 0 /* ADCR_ENA */
227#define WM8993_ADCR_ENA_WIDTH 1 /* ADCR_ENA */
228
229/*
230 * R3 (0x03) - Power Management (3)
231 */
232#define WM8993_LINEOUT1N_ENA 0x2000 /* LINEOUT1N_ENA */
233#define WM8993_LINEOUT1N_ENA_MASK 0x2000 /* LINEOUT1N_ENA */
234#define WM8993_LINEOUT1N_ENA_SHIFT 13 /* LINEOUT1N_ENA */
235#define WM8993_LINEOUT1N_ENA_WIDTH 1 /* LINEOUT1N_ENA */
236#define WM8993_LINEOUT1P_ENA 0x1000 /* LINEOUT1P_ENA */
237#define WM8993_LINEOUT1P_ENA_MASK 0x1000 /* LINEOUT1P_ENA */
238#define WM8993_LINEOUT1P_ENA_SHIFT 12 /* LINEOUT1P_ENA */
239#define WM8993_LINEOUT1P_ENA_WIDTH 1 /* LINEOUT1P_ENA */
240#define WM8993_LINEOUT2N_ENA 0x0800 /* LINEOUT2N_ENA */
241#define WM8993_LINEOUT2N_ENA_MASK 0x0800 /* LINEOUT2N_ENA */
242#define WM8993_LINEOUT2N_ENA_SHIFT 11 /* LINEOUT2N_ENA */
243#define WM8993_LINEOUT2N_ENA_WIDTH 1 /* LINEOUT2N_ENA */
244#define WM8993_LINEOUT2P_ENA 0x0400 /* LINEOUT2P_ENA */
245#define WM8993_LINEOUT2P_ENA_MASK 0x0400 /* LINEOUT2P_ENA */
246#define WM8993_LINEOUT2P_ENA_SHIFT 10 /* LINEOUT2P_ENA */
247#define WM8993_LINEOUT2P_ENA_WIDTH 1 /* LINEOUT2P_ENA */
248#define WM8993_SPKRVOL_ENA 0x0200 /* SPKRVOL_ENA */
249#define WM8993_SPKRVOL_ENA_MASK 0x0200 /* SPKRVOL_ENA */
250#define WM8993_SPKRVOL_ENA_SHIFT 9 /* SPKRVOL_ENA */
251#define WM8993_SPKRVOL_ENA_WIDTH 1 /* SPKRVOL_ENA */
252#define WM8993_SPKLVOL_ENA 0x0100 /* SPKLVOL_ENA */
253#define WM8993_SPKLVOL_ENA_MASK 0x0100 /* SPKLVOL_ENA */
254#define WM8993_SPKLVOL_ENA_SHIFT 8 /* SPKLVOL_ENA */
255#define WM8993_SPKLVOL_ENA_WIDTH 1 /* SPKLVOL_ENA */
256#define WM8993_MIXOUTLVOL_ENA 0x0080 /* MIXOUTLVOL_ENA */
257#define WM8993_MIXOUTLVOL_ENA_MASK 0x0080 /* MIXOUTLVOL_ENA */
258#define WM8993_MIXOUTLVOL_ENA_SHIFT 7 /* MIXOUTLVOL_ENA */
259#define WM8993_MIXOUTLVOL_ENA_WIDTH 1 /* MIXOUTLVOL_ENA */
260#define WM8993_MIXOUTRVOL_ENA 0x0040 /* MIXOUTRVOL_ENA */
261#define WM8993_MIXOUTRVOL_ENA_MASK 0x0040 /* MIXOUTRVOL_ENA */
262#define WM8993_MIXOUTRVOL_ENA_SHIFT 6 /* MIXOUTRVOL_ENA */
263#define WM8993_MIXOUTRVOL_ENA_WIDTH 1 /* MIXOUTRVOL_ENA */
264#define WM8993_MIXOUTL_ENA 0x0020 /* MIXOUTL_ENA */
265#define WM8993_MIXOUTL_ENA_MASK 0x0020 /* MIXOUTL_ENA */
266#define WM8993_MIXOUTL_ENA_SHIFT 5 /* MIXOUTL_ENA */
267#define WM8993_MIXOUTL_ENA_WIDTH 1 /* MIXOUTL_ENA */
268#define WM8993_MIXOUTR_ENA 0x0010 /* MIXOUTR_ENA */
269#define WM8993_MIXOUTR_ENA_MASK 0x0010 /* MIXOUTR_ENA */
270#define WM8993_MIXOUTR_ENA_SHIFT 4 /* MIXOUTR_ENA */
271#define WM8993_MIXOUTR_ENA_WIDTH 1 /* MIXOUTR_ENA */
272#define WM8993_DACL_ENA 0x0002 /* DACL_ENA */
273#define WM8993_DACL_ENA_MASK 0x0002 /* DACL_ENA */
274#define WM8993_DACL_ENA_SHIFT 1 /* DACL_ENA */
275#define WM8993_DACL_ENA_WIDTH 1 /* DACL_ENA */
276#define WM8993_DACR_ENA 0x0001 /* DACR_ENA */
277#define WM8993_DACR_ENA_MASK 0x0001 /* DACR_ENA */
278#define WM8993_DACR_ENA_SHIFT 0 /* DACR_ENA */
279#define WM8993_DACR_ENA_WIDTH 1 /* DACR_ENA */
280
281/*
282 * R4 (0x04) - Audio Interface (1)
283 */
284#define WM8993_AIFADCL_SRC 0x8000 /* AIFADCL_SRC */
285#define WM8993_AIFADCL_SRC_MASK 0x8000 /* AIFADCL_SRC */
286#define WM8993_AIFADCL_SRC_SHIFT 15 /* AIFADCL_SRC */
287#define WM8993_AIFADCL_SRC_WIDTH 1 /* AIFADCL_SRC */
288#define WM8993_AIFADCR_SRC 0x4000 /* AIFADCR_SRC */
289#define WM8993_AIFADCR_SRC_MASK 0x4000 /* AIFADCR_SRC */
290#define WM8993_AIFADCR_SRC_SHIFT 14 /* AIFADCR_SRC */
291#define WM8993_AIFADCR_SRC_WIDTH 1 /* AIFADCR_SRC */
292#define WM8993_AIFADC_TDM 0x2000 /* AIFADC_TDM */
293#define WM8993_AIFADC_TDM_MASK 0x2000 /* AIFADC_TDM */
294#define WM8993_AIFADC_TDM_SHIFT 13 /* AIFADC_TDM */
295#define WM8993_AIFADC_TDM_WIDTH 1 /* AIFADC_TDM */
296#define WM8993_AIFADC_TDM_CHAN 0x1000 /* AIFADC_TDM_CHAN */
297#define WM8993_AIFADC_TDM_CHAN_MASK 0x1000 /* AIFADC_TDM_CHAN */
298#define WM8993_AIFADC_TDM_CHAN_SHIFT 12 /* AIFADC_TDM_CHAN */
299#define WM8993_AIFADC_TDM_CHAN_WIDTH 1 /* AIFADC_TDM_CHAN */
300#define WM8993_BCLK_DIR 0x0200 /* BCLK_DIR */
301#define WM8993_BCLK_DIR_MASK 0x0200 /* BCLK_DIR */
302#define WM8993_BCLK_DIR_SHIFT 9 /* BCLK_DIR */
303#define WM8993_BCLK_DIR_WIDTH 1 /* BCLK_DIR */
304#define WM8993_AIF_BCLK_INV 0x0100 /* AIF_BCLK_INV */
305#define WM8993_AIF_BCLK_INV_MASK 0x0100 /* AIF_BCLK_INV */
306#define WM8993_AIF_BCLK_INV_SHIFT 8 /* AIF_BCLK_INV */
307#define WM8993_AIF_BCLK_INV_WIDTH 1 /* AIF_BCLK_INV */
308#define WM8993_AIF_LRCLK_INV 0x0080 /* AIF_LRCLK_INV */
309#define WM8993_AIF_LRCLK_INV_MASK 0x0080 /* AIF_LRCLK_INV */
310#define WM8993_AIF_LRCLK_INV_SHIFT 7 /* AIF_LRCLK_INV */
311#define WM8993_AIF_LRCLK_INV_WIDTH 1 /* AIF_LRCLK_INV */
312#define WM8993_AIF_WL_MASK 0x0060 /* AIF_WL - [6:5] */
313#define WM8993_AIF_WL_SHIFT 5 /* AIF_WL - [6:5] */
314#define WM8993_AIF_WL_WIDTH 2 /* AIF_WL - [6:5] */
315#define WM8993_AIF_FMT_MASK 0x0018 /* AIF_FMT - [4:3] */
316#define WM8993_AIF_FMT_SHIFT 3 /* AIF_FMT - [4:3] */
317#define WM8993_AIF_FMT_WIDTH 2 /* AIF_FMT - [4:3] */
318
319/*
320 * R5 (0x05) - Audio Interface (2)
321 */
322#define WM8993_AIFDACL_SRC 0x8000 /* AIFDACL_SRC */
323#define WM8993_AIFDACL_SRC_MASK 0x8000 /* AIFDACL_SRC */
324#define WM8993_AIFDACL_SRC_SHIFT 15 /* AIFDACL_SRC */
325#define WM8993_AIFDACL_SRC_WIDTH 1 /* AIFDACL_SRC */
326#define WM8993_AIFDACR_SRC 0x4000 /* AIFDACR_SRC */
327#define WM8993_AIFDACR_SRC_MASK 0x4000 /* AIFDACR_SRC */
328#define WM8993_AIFDACR_SRC_SHIFT 14 /* AIFDACR_SRC */
329#define WM8993_AIFDACR_SRC_WIDTH 1 /* AIFDACR_SRC */
330#define WM8993_AIFDAC_TDM 0x2000 /* AIFDAC_TDM */
331#define WM8993_AIFDAC_TDM_MASK 0x2000 /* AIFDAC_TDM */
332#define WM8993_AIFDAC_TDM_SHIFT 13 /* AIFDAC_TDM */
333#define WM8993_AIFDAC_TDM_WIDTH 1 /* AIFDAC_TDM */
334#define WM8993_AIFDAC_TDM_CHAN 0x1000 /* AIFDAC_TDM_CHAN */
335#define WM8993_AIFDAC_TDM_CHAN_MASK 0x1000 /* AIFDAC_TDM_CHAN */
336#define WM8993_AIFDAC_TDM_CHAN_SHIFT 12 /* AIFDAC_TDM_CHAN */
337#define WM8993_AIFDAC_TDM_CHAN_WIDTH 1 /* AIFDAC_TDM_CHAN */
338#define WM8993_DAC_BOOST_MASK 0x0C00 /* DAC_BOOST - [11:10] */
339#define WM8993_DAC_BOOST_SHIFT 10 /* DAC_BOOST - [11:10] */
340#define WM8993_DAC_BOOST_WIDTH 2 /* DAC_BOOST - [11:10] */
341#define WM8993_DAC_COMP 0x0010 /* DAC_COMP */
342#define WM8993_DAC_COMP_MASK 0x0010 /* DAC_COMP */
343#define WM8993_DAC_COMP_SHIFT 4 /* DAC_COMP */
344#define WM8993_DAC_COMP_WIDTH 1 /* DAC_COMP */
345#define WM8993_DAC_COMPMODE 0x0008 /* DAC_COMPMODE */
346#define WM8993_DAC_COMPMODE_MASK 0x0008 /* DAC_COMPMODE */
347#define WM8993_DAC_COMPMODE_SHIFT 3 /* DAC_COMPMODE */
348#define WM8993_DAC_COMPMODE_WIDTH 1 /* DAC_COMPMODE */
349#define WM8993_ADC_COMP 0x0004 /* ADC_COMP */
350#define WM8993_ADC_COMP_MASK 0x0004 /* ADC_COMP */
351#define WM8993_ADC_COMP_SHIFT 2 /* ADC_COMP */
352#define WM8993_ADC_COMP_WIDTH 1 /* ADC_COMP */
353#define WM8993_ADC_COMPMODE 0x0002 /* ADC_COMPMODE */
354#define WM8993_ADC_COMPMODE_MASK 0x0002 /* ADC_COMPMODE */
355#define WM8993_ADC_COMPMODE_SHIFT 1 /* ADC_COMPMODE */
356#define WM8993_ADC_COMPMODE_WIDTH 1 /* ADC_COMPMODE */
357#define WM8993_LOOPBACK 0x0001 /* LOOPBACK */
358#define WM8993_LOOPBACK_MASK 0x0001 /* LOOPBACK */
359#define WM8993_LOOPBACK_SHIFT 0 /* LOOPBACK */
360#define WM8993_LOOPBACK_WIDTH 1 /* LOOPBACK */
361
362/*
363 * R6 (0x06) - Clocking 1
364 */
365#define WM8993_TOCLK_RATE 0x8000 /* TOCLK_RATE */
366#define WM8993_TOCLK_RATE_MASK 0x8000 /* TOCLK_RATE */
367#define WM8993_TOCLK_RATE_SHIFT 15 /* TOCLK_RATE */
368#define WM8993_TOCLK_RATE_WIDTH 1 /* TOCLK_RATE */
369#define WM8993_TOCLK_ENA 0x4000 /* TOCLK_ENA */
370#define WM8993_TOCLK_ENA_MASK 0x4000 /* TOCLK_ENA */
371#define WM8993_TOCLK_ENA_SHIFT 14 /* TOCLK_ENA */
372#define WM8993_TOCLK_ENA_WIDTH 1 /* TOCLK_ENA */
373#define WM8993_OPCLK_DIV_MASK 0x1E00 /* OPCLK_DIV - [12:9] */
374#define WM8993_OPCLK_DIV_SHIFT 9 /* OPCLK_DIV - [12:9] */
375#define WM8993_OPCLK_DIV_WIDTH 4 /* OPCLK_DIV - [12:9] */
376#define WM8993_DCLK_DIV_MASK 0x01C0 /* DCLK_DIV - [8:6] */
377#define WM8993_DCLK_DIV_SHIFT 6 /* DCLK_DIV - [8:6] */
378#define WM8993_DCLK_DIV_WIDTH 3 /* DCLK_DIV - [8:6] */
379#define WM8993_BCLK_DIV_MASK 0x001E /* BCLK_DIV - [4:1] */
380#define WM8993_BCLK_DIV_SHIFT 1 /* BCLK_DIV - [4:1] */
381#define WM8993_BCLK_DIV_WIDTH 4 /* BCLK_DIV - [4:1] */
382
383/*
384 * R7 (0x07) - Clocking 2
385 */
386#define WM8993_MCLK_SRC 0x8000 /* MCLK_SRC */
387#define WM8993_MCLK_SRC_MASK 0x8000 /* MCLK_SRC */
388#define WM8993_MCLK_SRC_SHIFT 15 /* MCLK_SRC */
389#define WM8993_MCLK_SRC_WIDTH 1 /* MCLK_SRC */
390#define WM8993_SYSCLK_SRC 0x4000 /* SYSCLK_SRC */
391#define WM8993_SYSCLK_SRC_MASK 0x4000 /* SYSCLK_SRC */
392#define WM8993_SYSCLK_SRC_SHIFT 14 /* SYSCLK_SRC */
393#define WM8993_SYSCLK_SRC_WIDTH 1 /* SYSCLK_SRC */
394#define WM8993_MCLK_DIV 0x1000 /* MCLK_DIV */
395#define WM8993_MCLK_DIV_MASK 0x1000 /* MCLK_DIV */
396#define WM8993_MCLK_DIV_SHIFT 12 /* MCLK_DIV */
397#define WM8993_MCLK_DIV_WIDTH 1 /* MCLK_DIV */
398#define WM8993_MCLK_INV 0x0400 /* MCLK_INV */
399#define WM8993_MCLK_INV_MASK 0x0400 /* MCLK_INV */
400#define WM8993_MCLK_INV_SHIFT 10 /* MCLK_INV */
401#define WM8993_MCLK_INV_WIDTH 1 /* MCLK_INV */
402#define WM8993_ADC_DIV_MASK 0x00E0 /* ADC_DIV - [7:5] */
403#define WM8993_ADC_DIV_SHIFT 5 /* ADC_DIV - [7:5] */
404#define WM8993_ADC_DIV_WIDTH 3 /* ADC_DIV - [7:5] */
405#define WM8993_DAC_DIV_MASK 0x001C /* DAC_DIV - [4:2] */
406#define WM8993_DAC_DIV_SHIFT 2 /* DAC_DIV - [4:2] */
407#define WM8993_DAC_DIV_WIDTH 3 /* DAC_DIV - [4:2] */
408
409/*
410 * R8 (0x08) - Audio Interface (3)
411 */
412#define WM8993_AIF_MSTR1 0x8000 /* AIF_MSTR1 */
413#define WM8993_AIF_MSTR1_MASK 0x8000 /* AIF_MSTR1 */
414#define WM8993_AIF_MSTR1_SHIFT 15 /* AIF_MSTR1 */
415#define WM8993_AIF_MSTR1_WIDTH 1 /* AIF_MSTR1 */
416
417/*
418 * R9 (0x09) - Audio Interface (4)
419 */
420#define WM8993_AIF_TRIS 0x2000 /* AIF_TRIS */
421#define WM8993_AIF_TRIS_MASK 0x2000 /* AIF_TRIS */
422#define WM8993_AIF_TRIS_SHIFT 13 /* AIF_TRIS */
423#define WM8993_AIF_TRIS_WIDTH 1 /* AIF_TRIS */
424#define WM8993_LRCLK_DIR 0x0800 /* LRCLK_DIR */
425#define WM8993_LRCLK_DIR_MASK 0x0800 /* LRCLK_DIR */
426#define WM8993_LRCLK_DIR_SHIFT 11 /* LRCLK_DIR */
427#define WM8993_LRCLK_DIR_WIDTH 1 /* LRCLK_DIR */
428#define WM8993_LRCLK_RATE_MASK 0x07FF /* LRCLK_RATE - [10:0] */
429#define WM8993_LRCLK_RATE_SHIFT 0 /* LRCLK_RATE - [10:0] */
430#define WM8993_LRCLK_RATE_WIDTH 11 /* LRCLK_RATE - [10:0] */
431
432/*
433 * R10 (0x0A) - DAC CTRL
434 */
435#define WM8993_DAC_OSR128 0x2000 /* DAC_OSR128 */
436#define WM8993_DAC_OSR128_MASK 0x2000 /* DAC_OSR128 */
437#define WM8993_DAC_OSR128_SHIFT 13 /* DAC_OSR128 */
438#define WM8993_DAC_OSR128_WIDTH 1 /* DAC_OSR128 */
439#define WM8993_DAC_MONO 0x0200 /* DAC_MONO */
440#define WM8993_DAC_MONO_MASK 0x0200 /* DAC_MONO */
441#define WM8993_DAC_MONO_SHIFT 9 /* DAC_MONO */
442#define WM8993_DAC_MONO_WIDTH 1 /* DAC_MONO */
443#define WM8993_DAC_SB_FILT 0x0100 /* DAC_SB_FILT */
444#define WM8993_DAC_SB_FILT_MASK 0x0100 /* DAC_SB_FILT */
445#define WM8993_DAC_SB_FILT_SHIFT 8 /* DAC_SB_FILT */
446#define WM8993_DAC_SB_FILT_WIDTH 1 /* DAC_SB_FILT */
447#define WM8993_DAC_MUTERATE 0x0080 /* DAC_MUTERATE */
448#define WM8993_DAC_MUTERATE_MASK 0x0080 /* DAC_MUTERATE */
449#define WM8993_DAC_MUTERATE_SHIFT 7 /* DAC_MUTERATE */
450#define WM8993_DAC_MUTERATE_WIDTH 1 /* DAC_MUTERATE */
451#define WM8993_DAC_UNMUTE_RAMP 0x0040 /* DAC_UNMUTE_RAMP */
452#define WM8993_DAC_UNMUTE_RAMP_MASK 0x0040 /* DAC_UNMUTE_RAMP */
453#define WM8993_DAC_UNMUTE_RAMP_SHIFT 6 /* DAC_UNMUTE_RAMP */
454#define WM8993_DAC_UNMUTE_RAMP_WIDTH 1 /* DAC_UNMUTE_RAMP */
455#define WM8993_DEEMPH_MASK 0x0030 /* DEEMPH - [5:4] */
456#define WM8993_DEEMPH_SHIFT 4 /* DEEMPH - [5:4] */
457#define WM8993_DEEMPH_WIDTH 2 /* DEEMPH - [5:4] */
458#define WM8993_DAC_MUTE 0x0004 /* DAC_MUTE */
459#define WM8993_DAC_MUTE_MASK 0x0004 /* DAC_MUTE */
460#define WM8993_DAC_MUTE_SHIFT 2 /* DAC_MUTE */
461#define WM8993_DAC_MUTE_WIDTH 1 /* DAC_MUTE */
462#define WM8993_DACL_DATINV 0x0002 /* DACL_DATINV */
463#define WM8993_DACL_DATINV_MASK 0x0002 /* DACL_DATINV */
464#define WM8993_DACL_DATINV_SHIFT 1 /* DACL_DATINV */
465#define WM8993_DACL_DATINV_WIDTH 1 /* DACL_DATINV */
466#define WM8993_DACR_DATINV 0x0001 /* DACR_DATINV */
467#define WM8993_DACR_DATINV_MASK 0x0001 /* DACR_DATINV */
468#define WM8993_DACR_DATINV_SHIFT 0 /* DACR_DATINV */
469#define WM8993_DACR_DATINV_WIDTH 1 /* DACR_DATINV */
470
471/*
472 * R11 (0x0B) - Left DAC Digital Volume
473 */
474#define WM8993_DAC_VU 0x0100 /* DAC_VU */
475#define WM8993_DAC_VU_MASK 0x0100 /* DAC_VU */
476#define WM8993_DAC_VU_SHIFT 8 /* DAC_VU */
477#define WM8993_DAC_VU_WIDTH 1 /* DAC_VU */
478#define WM8993_DACL_VOL_MASK 0x00FF /* DACL_VOL - [7:0] */
479#define WM8993_DACL_VOL_SHIFT 0 /* DACL_VOL - [7:0] */
480#define WM8993_DACL_VOL_WIDTH 8 /* DACL_VOL - [7:0] */
481
482/*
483 * R12 (0x0C) - Right DAC Digital Volume
484 */
485#define WM8993_DAC_VU 0x0100 /* DAC_VU */
486#define WM8993_DAC_VU_MASK 0x0100 /* DAC_VU */
487#define WM8993_DAC_VU_SHIFT 8 /* DAC_VU */
488#define WM8993_DAC_VU_WIDTH 1 /* DAC_VU */
489#define WM8993_DACR_VOL_MASK 0x00FF /* DACR_VOL - [7:0] */
490#define WM8993_DACR_VOL_SHIFT 0 /* DACR_VOL - [7:0] */
491#define WM8993_DACR_VOL_WIDTH 8 /* DACR_VOL - [7:0] */
492
493/*
494 * R13 (0x0D) - Digital Side Tone
495 */
496#define WM8993_ADCL_DAC_SVOL_MASK 0x1E00 /* ADCL_DAC_SVOL - [12:9] */
497#define WM8993_ADCL_DAC_SVOL_SHIFT 9 /* ADCL_DAC_SVOL - [12:9] */
498#define WM8993_ADCL_DAC_SVOL_WIDTH 4 /* ADCL_DAC_SVOL - [12:9] */
499#define WM8993_ADCR_DAC_SVOL_MASK 0x01E0 /* ADCR_DAC_SVOL - [8:5] */
500#define WM8993_ADCR_DAC_SVOL_SHIFT 5 /* ADCR_DAC_SVOL - [8:5] */
501#define WM8993_ADCR_DAC_SVOL_WIDTH 4 /* ADCR_DAC_SVOL - [8:5] */
502#define WM8993_ADC_TO_DACL_MASK 0x000C /* ADC_TO_DACL - [3:2] */
503#define WM8993_ADC_TO_DACL_SHIFT 2 /* ADC_TO_DACL - [3:2] */
504#define WM8993_ADC_TO_DACL_WIDTH 2 /* ADC_TO_DACL - [3:2] */
505#define WM8993_ADC_TO_DACR_MASK 0x0003 /* ADC_TO_DACR - [1:0] */
506#define WM8993_ADC_TO_DACR_SHIFT 0 /* ADC_TO_DACR - [1:0] */
507#define WM8993_ADC_TO_DACR_WIDTH 2 /* ADC_TO_DACR - [1:0] */
508
509/*
510 * R14 (0x0E) - ADC CTRL
511 */
512#define WM8993_ADC_OSR128 0x0200 /* ADC_OSR128 */
513#define WM8993_ADC_OSR128_MASK 0x0200 /* ADC_OSR128 */
514#define WM8993_ADC_OSR128_SHIFT 9 /* ADC_OSR128 */
515#define WM8993_ADC_OSR128_WIDTH 1 /* ADC_OSR128 */
516#define WM8993_ADC_HPF 0x0100 /* ADC_HPF */
517#define WM8993_ADC_HPF_MASK 0x0100 /* ADC_HPF */
518#define WM8993_ADC_HPF_SHIFT 8 /* ADC_HPF */
519#define WM8993_ADC_HPF_WIDTH 1 /* ADC_HPF */
520#define WM8993_ADC_HPF_CUT_MASK 0x0060 /* ADC_HPF_CUT - [6:5] */
521#define WM8993_ADC_HPF_CUT_SHIFT 5 /* ADC_HPF_CUT - [6:5] */
522#define WM8993_ADC_HPF_CUT_WIDTH 2 /* ADC_HPF_CUT - [6:5] */
523#define WM8993_ADCL_DATINV 0x0002 /* ADCL_DATINV */
524#define WM8993_ADCL_DATINV_MASK 0x0002 /* ADCL_DATINV */
525#define WM8993_ADCL_DATINV_SHIFT 1 /* ADCL_DATINV */
526#define WM8993_ADCL_DATINV_WIDTH 1 /* ADCL_DATINV */
527#define WM8993_ADCR_DATINV 0x0001 /* ADCR_DATINV */
528#define WM8993_ADCR_DATINV_MASK 0x0001 /* ADCR_DATINV */
529#define WM8993_ADCR_DATINV_SHIFT 0 /* ADCR_DATINV */
530#define WM8993_ADCR_DATINV_WIDTH 1 /* ADCR_DATINV */
531
532/*
533 * R15 (0x0F) - Left ADC Digital Volume
534 */
535#define WM8993_ADC_VU 0x0100 /* ADC_VU */
536#define WM8993_ADC_VU_MASK 0x0100 /* ADC_VU */
537#define WM8993_ADC_VU_SHIFT 8 /* ADC_VU */
538#define WM8993_ADC_VU_WIDTH 1 /* ADC_VU */
539#define WM8993_ADCL_VOL_MASK 0x00FF /* ADCL_VOL - [7:0] */
540#define WM8993_ADCL_VOL_SHIFT 0 /* ADCL_VOL - [7:0] */
541#define WM8993_ADCL_VOL_WIDTH 8 /* ADCL_VOL - [7:0] */
542
543/*
544 * R16 (0x10) - Right ADC Digital Volume
545 */
546#define WM8993_ADC_VU 0x0100 /* ADC_VU */
547#define WM8993_ADC_VU_MASK 0x0100 /* ADC_VU */
548#define WM8993_ADC_VU_SHIFT 8 /* ADC_VU */
549#define WM8993_ADC_VU_WIDTH 1 /* ADC_VU */
550#define WM8993_ADCR_VOL_MASK 0x00FF /* ADCR_VOL - [7:0] */
551#define WM8993_ADCR_VOL_SHIFT 0 /* ADCR_VOL - [7:0] */
552#define WM8993_ADCR_VOL_WIDTH 8 /* ADCR_VOL - [7:0] */
553
554/*
555 * R18 (0x12) - GPIO CTRL 1
556 */
557#define WM8993_JD2_SC_EINT 0x8000 /* JD2_SC_EINT */
558#define WM8993_JD2_SC_EINT_MASK 0x8000 /* JD2_SC_EINT */
559#define WM8993_JD2_SC_EINT_SHIFT 15 /* JD2_SC_EINT */
560#define WM8993_JD2_SC_EINT_WIDTH 1 /* JD2_SC_EINT */
561#define WM8993_JD2_EINT 0x4000 /* JD2_EINT */
562#define WM8993_JD2_EINT_MASK 0x4000 /* JD2_EINT */
563#define WM8993_JD2_EINT_SHIFT 14 /* JD2_EINT */
564#define WM8993_JD2_EINT_WIDTH 1 /* JD2_EINT */
565#define WM8993_WSEQ_EINT 0x2000 /* WSEQ_EINT */
566#define WM8993_WSEQ_EINT_MASK 0x2000 /* WSEQ_EINT */
567#define WM8993_WSEQ_EINT_SHIFT 13 /* WSEQ_EINT */
568#define WM8993_WSEQ_EINT_WIDTH 1 /* WSEQ_EINT */
569#define WM8993_IRQ 0x1000 /* IRQ */
570#define WM8993_IRQ_MASK 0x1000 /* IRQ */
571#define WM8993_IRQ_SHIFT 12 /* IRQ */
572#define WM8993_IRQ_WIDTH 1 /* IRQ */
573#define WM8993_TEMPOK_EINT 0x0800 /* TEMPOK_EINT */
574#define WM8993_TEMPOK_EINT_MASK 0x0800 /* TEMPOK_EINT */
575#define WM8993_TEMPOK_EINT_SHIFT 11 /* TEMPOK_EINT */
576#define WM8993_TEMPOK_EINT_WIDTH 1 /* TEMPOK_EINT */
577#define WM8993_JD1_SC_EINT 0x0400 /* JD1_SC_EINT */
578#define WM8993_JD1_SC_EINT_MASK 0x0400 /* JD1_SC_EINT */
579#define WM8993_JD1_SC_EINT_SHIFT 10 /* JD1_SC_EINT */
580#define WM8993_JD1_SC_EINT_WIDTH 1 /* JD1_SC_EINT */
581#define WM8993_JD1_EINT 0x0200 /* JD1_EINT */
582#define WM8993_JD1_EINT_MASK 0x0200 /* JD1_EINT */
583#define WM8993_JD1_EINT_SHIFT 9 /* JD1_EINT */
584#define WM8993_JD1_EINT_WIDTH 1 /* JD1_EINT */
585#define WM8993_FLL_LOCK_EINT 0x0100 /* FLL_LOCK_EINT */
586#define WM8993_FLL_LOCK_EINT_MASK 0x0100 /* FLL_LOCK_EINT */
587#define WM8993_FLL_LOCK_EINT_SHIFT 8 /* FLL_LOCK_EINT */
588#define WM8993_FLL_LOCK_EINT_WIDTH 1 /* FLL_LOCK_EINT */
589#define WM8993_GPI8_EINT 0x0080 /* GPI8_EINT */
590#define WM8993_GPI8_EINT_MASK 0x0080 /* GPI8_EINT */
591#define WM8993_GPI8_EINT_SHIFT 7 /* GPI8_EINT */
592#define WM8993_GPI8_EINT_WIDTH 1 /* GPI8_EINT */
593#define WM8993_GPI7_EINT 0x0040 /* GPI7_EINT */
594#define WM8993_GPI7_EINT_MASK 0x0040 /* GPI7_EINT */
595#define WM8993_GPI7_EINT_SHIFT 6 /* GPI7_EINT */
596#define WM8993_GPI7_EINT_WIDTH 1 /* GPI7_EINT */
597#define WM8993_GPIO1_EINT 0x0001 /* GPIO1_EINT */
598#define WM8993_GPIO1_EINT_MASK 0x0001 /* GPIO1_EINT */
599#define WM8993_GPIO1_EINT_SHIFT 0 /* GPIO1_EINT */
600#define WM8993_GPIO1_EINT_WIDTH 1 /* GPIO1_EINT */
601
602/*
603 * R19 (0x13) - GPIO1
604 */
605#define WM8993_GPIO1_PU 0x0020 /* GPIO1_PU */
606#define WM8993_GPIO1_PU_MASK 0x0020 /* GPIO1_PU */
607#define WM8993_GPIO1_PU_SHIFT 5 /* GPIO1_PU */
608#define WM8993_GPIO1_PU_WIDTH 1 /* GPIO1_PU */
609#define WM8993_GPIO1_PD 0x0010 /* GPIO1_PD */
610#define WM8993_GPIO1_PD_MASK 0x0010 /* GPIO1_PD */
611#define WM8993_GPIO1_PD_SHIFT 4 /* GPIO1_PD */
612#define WM8993_GPIO1_PD_WIDTH 1 /* GPIO1_PD */
613#define WM8993_GPIO1_SEL_MASK 0x000F /* GPIO1_SEL - [3:0] */
614#define WM8993_GPIO1_SEL_SHIFT 0 /* GPIO1_SEL - [3:0] */
615#define WM8993_GPIO1_SEL_WIDTH 4 /* GPIO1_SEL - [3:0] */
616
617/*
618 * R20 (0x14) - IRQ_DEBOUNCE
619 */
620#define WM8993_JD2_SC_DB 0x8000 /* JD2_SC_DB */
621#define WM8993_JD2_SC_DB_MASK 0x8000 /* JD2_SC_DB */
622#define WM8993_JD2_SC_DB_SHIFT 15 /* JD2_SC_DB */
623#define WM8993_JD2_SC_DB_WIDTH 1 /* JD2_SC_DB */
624#define WM8993_JD2_DB 0x4000 /* JD2_DB */
625#define WM8993_JD2_DB_MASK 0x4000 /* JD2_DB */
626#define WM8993_JD2_DB_SHIFT 14 /* JD2_DB */
627#define WM8993_JD2_DB_WIDTH 1 /* JD2_DB */
628#define WM8993_WSEQ_DB 0x2000 /* WSEQ_DB */
629#define WM8993_WSEQ_DB_MASK 0x2000 /* WSEQ_DB */
630#define WM8993_WSEQ_DB_SHIFT 13 /* WSEQ_DB */
631#define WM8993_WSEQ_DB_WIDTH 1 /* WSEQ_DB */
632#define WM8993_TEMPOK_DB 0x0800 /* TEMPOK_DB */
633#define WM8993_TEMPOK_DB_MASK 0x0800 /* TEMPOK_DB */
634#define WM8993_TEMPOK_DB_SHIFT 11 /* TEMPOK_DB */
635#define WM8993_TEMPOK_DB_WIDTH 1 /* TEMPOK_DB */
636#define WM8993_JD1_SC_DB 0x0400 /* JD1_SC_DB */
637#define WM8993_JD1_SC_DB_MASK 0x0400 /* JD1_SC_DB */
638#define WM8993_JD1_SC_DB_SHIFT 10 /* JD1_SC_DB */
639#define WM8993_JD1_SC_DB_WIDTH 1 /* JD1_SC_DB */
640#define WM8993_JD1_DB 0x0200 /* JD1_DB */
641#define WM8993_JD1_DB_MASK 0x0200 /* JD1_DB */
642#define WM8993_JD1_DB_SHIFT 9 /* JD1_DB */
643#define WM8993_JD1_DB_WIDTH 1 /* JD1_DB */
644#define WM8993_FLL_LOCK_DB 0x0100 /* FLL_LOCK_DB */
645#define WM8993_FLL_LOCK_DB_MASK 0x0100 /* FLL_LOCK_DB */
646#define WM8993_FLL_LOCK_DB_SHIFT 8 /* FLL_LOCK_DB */
647#define WM8993_FLL_LOCK_DB_WIDTH 1 /* FLL_LOCK_DB */
648#define WM8993_GPI8_DB 0x0080 /* GPI8_DB */
649#define WM8993_GPI8_DB_MASK 0x0080 /* GPI8_DB */
650#define WM8993_GPI8_DB_SHIFT 7 /* GPI8_DB */
651#define WM8993_GPI8_DB_WIDTH 1 /* GPI8_DB */
652#define WM8993_GPI7_DB 0x0008 /* GPI7_DB */
653#define WM8993_GPI7_DB_MASK 0x0008 /* GPI7_DB */
654#define WM8993_GPI7_DB_SHIFT 3 /* GPI7_DB */
655#define WM8993_GPI7_DB_WIDTH 1 /* GPI7_DB */
656#define WM8993_GPIO1_DB 0x0001 /* GPIO1_DB */
657#define WM8993_GPIO1_DB_MASK 0x0001 /* GPIO1_DB */
658#define WM8993_GPIO1_DB_SHIFT 0 /* GPIO1_DB */
659#define WM8993_GPIO1_DB_WIDTH 1 /* GPIO1_DB */
660
661/*
662 * R22 (0x16) - GPIOCTRL 2
663 */
664#define WM8993_IM_JD2_EINT 0x2000 /* IM_JD2_EINT */
665#define WM8993_IM_JD2_EINT_MASK 0x2000 /* IM_JD2_EINT */
666#define WM8993_IM_JD2_EINT_SHIFT 13 /* IM_JD2_EINT */
667#define WM8993_IM_JD2_EINT_WIDTH 1 /* IM_JD2_EINT */
668#define WM8993_IM_JD2_SC_EINT 0x1000 /* IM_JD2_SC_EINT */
669#define WM8993_IM_JD2_SC_EINT_MASK 0x1000 /* IM_JD2_SC_EINT */
670#define WM8993_IM_JD2_SC_EINT_SHIFT 12 /* IM_JD2_SC_EINT */
671#define WM8993_IM_JD2_SC_EINT_WIDTH 1 /* IM_JD2_SC_EINT */
672#define WM8993_IM_TEMPOK_EINT 0x0800 /* IM_TEMPOK_EINT */
673#define WM8993_IM_TEMPOK_EINT_MASK 0x0800 /* IM_TEMPOK_EINT */
674#define WM8993_IM_TEMPOK_EINT_SHIFT 11 /* IM_TEMPOK_EINT */
675#define WM8993_IM_TEMPOK_EINT_WIDTH 1 /* IM_TEMPOK_EINT */
676#define WM8993_IM_JD1_SC_EINT 0x0400 /* IM_JD1_SC_EINT */
677#define WM8993_IM_JD1_SC_EINT_MASK 0x0400 /* IM_JD1_SC_EINT */
678#define WM8993_IM_JD1_SC_EINT_SHIFT 10 /* IM_JD1_SC_EINT */
679#define WM8993_IM_JD1_SC_EINT_WIDTH 1 /* IM_JD1_SC_EINT */
680#define WM8993_IM_JD1_EINT 0x0200 /* IM_JD1_EINT */
681#define WM8993_IM_JD1_EINT_MASK 0x0200 /* IM_JD1_EINT */
682#define WM8993_IM_JD1_EINT_SHIFT 9 /* IM_JD1_EINT */
683#define WM8993_IM_JD1_EINT_WIDTH 1 /* IM_JD1_EINT */
684#define WM8993_IM_FLL_LOCK_EINT 0x0100 /* IM_FLL_LOCK_EINT */
685#define WM8993_IM_FLL_LOCK_EINT_MASK 0x0100 /* IM_FLL_LOCK_EINT */
686#define WM8993_IM_FLL_LOCK_EINT_SHIFT 8 /* IM_FLL_LOCK_EINT */
687#define WM8993_IM_FLL_LOCK_EINT_WIDTH 1 /* IM_FLL_LOCK_EINT */
688#define WM8993_IM_GPI8_EINT 0x0040 /* IM_GPI8_EINT */
689#define WM8993_IM_GPI8_EINT_MASK 0x0040 /* IM_GPI8_EINT */
690#define WM8993_IM_GPI8_EINT_SHIFT 6 /* IM_GPI8_EINT */
691#define WM8993_IM_GPI8_EINT_WIDTH 1 /* IM_GPI8_EINT */
692#define WM8993_IM_GPIO1_EINT 0x0020 /* IM_GPIO1_EINT */
693#define WM8993_IM_GPIO1_EINT_MASK 0x0020 /* IM_GPIO1_EINT */
694#define WM8993_IM_GPIO1_EINT_SHIFT 5 /* IM_GPIO1_EINT */
695#define WM8993_IM_GPIO1_EINT_WIDTH 1 /* IM_GPIO1_EINT */
696#define WM8993_GPI8_ENA 0x0010 /* GPI8_ENA */
697#define WM8993_GPI8_ENA_MASK 0x0010 /* GPI8_ENA */
698#define WM8993_GPI8_ENA_SHIFT 4 /* GPI8_ENA */
699#define WM8993_GPI8_ENA_WIDTH 1 /* GPI8_ENA */
700#define WM8993_IM_GPI7_EINT 0x0004 /* IM_GPI7_EINT */
701#define WM8993_IM_GPI7_EINT_MASK 0x0004 /* IM_GPI7_EINT */
702#define WM8993_IM_GPI7_EINT_SHIFT 2 /* IM_GPI7_EINT */
703#define WM8993_IM_GPI7_EINT_WIDTH 1 /* IM_GPI7_EINT */
704#define WM8993_IM_WSEQ_EINT 0x0002 /* IM_WSEQ_EINT */
705#define WM8993_IM_WSEQ_EINT_MASK 0x0002 /* IM_WSEQ_EINT */
706#define WM8993_IM_WSEQ_EINT_SHIFT 1 /* IM_WSEQ_EINT */
707#define WM8993_IM_WSEQ_EINT_WIDTH 1 /* IM_WSEQ_EINT */
708#define WM8993_GPI7_ENA 0x0001 /* GPI7_ENA */
709#define WM8993_GPI7_ENA_MASK 0x0001 /* GPI7_ENA */
710#define WM8993_GPI7_ENA_SHIFT 0 /* GPI7_ENA */
711#define WM8993_GPI7_ENA_WIDTH 1 /* GPI7_ENA */
712
713/*
714 * R23 (0x17) - GPIO_POL
715 */
716#define WM8993_JD2_SC_POL 0x8000 /* JD2_SC_POL */
717#define WM8993_JD2_SC_POL_MASK 0x8000 /* JD2_SC_POL */
718#define WM8993_JD2_SC_POL_SHIFT 15 /* JD2_SC_POL */
719#define WM8993_JD2_SC_POL_WIDTH 1 /* JD2_SC_POL */
720#define WM8993_JD2_POL 0x4000 /* JD2_POL */
721#define WM8993_JD2_POL_MASK 0x4000 /* JD2_POL */
722#define WM8993_JD2_POL_SHIFT 14 /* JD2_POL */
723#define WM8993_JD2_POL_WIDTH 1 /* JD2_POL */
724#define WM8993_WSEQ_POL 0x2000 /* WSEQ_POL */
725#define WM8993_WSEQ_POL_MASK 0x2000 /* WSEQ_POL */
726#define WM8993_WSEQ_POL_SHIFT 13 /* WSEQ_POL */
727#define WM8993_WSEQ_POL_WIDTH 1 /* WSEQ_POL */
728#define WM8993_IRQ_POL 0x1000 /* IRQ_POL */
729#define WM8993_IRQ_POL_MASK 0x1000 /* IRQ_POL */
730#define WM8993_IRQ_POL_SHIFT 12 /* IRQ_POL */
731#define WM8993_IRQ_POL_WIDTH 1 /* IRQ_POL */
732#define WM8993_TEMPOK_POL 0x0800 /* TEMPOK_POL */
733#define WM8993_TEMPOK_POL_MASK 0x0800 /* TEMPOK_POL */
734#define WM8993_TEMPOK_POL_SHIFT 11 /* TEMPOK_POL */
735#define WM8993_TEMPOK_POL_WIDTH 1 /* TEMPOK_POL */
736#define WM8993_JD1_SC_POL 0x0400 /* JD1_SC_POL */
737#define WM8993_JD1_SC_POL_MASK 0x0400 /* JD1_SC_POL */
738#define WM8993_JD1_SC_POL_SHIFT 10 /* JD1_SC_POL */
739#define WM8993_JD1_SC_POL_WIDTH 1 /* JD1_SC_POL */
740#define WM8993_JD1_POL 0x0200 /* JD1_POL */
741#define WM8993_JD1_POL_MASK 0x0200 /* JD1_POL */
742#define WM8993_JD1_POL_SHIFT 9 /* JD1_POL */
743#define WM8993_JD1_POL_WIDTH 1 /* JD1_POL */
744#define WM8993_FLL_LOCK_POL 0x0100 /* FLL_LOCK_POL */
745#define WM8993_FLL_LOCK_POL_MASK 0x0100 /* FLL_LOCK_POL */
746#define WM8993_FLL_LOCK_POL_SHIFT 8 /* FLL_LOCK_POL */
747#define WM8993_FLL_LOCK_POL_WIDTH 1 /* FLL_LOCK_POL */
748#define WM8993_GPI8_POL 0x0080 /* GPI8_POL */
749#define WM8993_GPI8_POL_MASK 0x0080 /* GPI8_POL */
750#define WM8993_GPI8_POL_SHIFT 7 /* GPI8_POL */
751#define WM8993_GPI8_POL_WIDTH 1 /* GPI8_POL */
752#define WM8993_GPI7_POL 0x0040 /* GPI7_POL */
753#define WM8993_GPI7_POL_MASK 0x0040 /* GPI7_POL */
754#define WM8993_GPI7_POL_SHIFT 6 /* GPI7_POL */
755#define WM8993_GPI7_POL_WIDTH 1 /* GPI7_POL */
756#define WM8993_GPIO1_POL 0x0001 /* GPIO1_POL */
757#define WM8993_GPIO1_POL_MASK 0x0001 /* GPIO1_POL */
758#define WM8993_GPIO1_POL_SHIFT 0 /* GPIO1_POL */
759#define WM8993_GPIO1_POL_WIDTH 1 /* GPIO1_POL */
760
761/*
762 * R24 (0x18) - Left Line Input 1&2 Volume
763 */
764#define WM8993_IN1_VU 0x0100 /* IN1_VU */
765#define WM8993_IN1_VU_MASK 0x0100 /* IN1_VU */
766#define WM8993_IN1_VU_SHIFT 8 /* IN1_VU */
767#define WM8993_IN1_VU_WIDTH 1 /* IN1_VU */
768#define WM8993_IN1L_MUTE 0x0080 /* IN1L_MUTE */
769#define WM8993_IN1L_MUTE_MASK 0x0080 /* IN1L_MUTE */
770#define WM8993_IN1L_MUTE_SHIFT 7 /* IN1L_MUTE */
771#define WM8993_IN1L_MUTE_WIDTH 1 /* IN1L_MUTE */
772#define WM8993_IN1L_ZC 0x0040 /* IN1L_ZC */
773#define WM8993_IN1L_ZC_MASK 0x0040 /* IN1L_ZC */
774#define WM8993_IN1L_ZC_SHIFT 6 /* IN1L_ZC */
775#define WM8993_IN1L_ZC_WIDTH 1 /* IN1L_ZC */
776#define WM8993_IN1L_VOL_MASK 0x001F /* IN1L_VOL - [4:0] */
777#define WM8993_IN1L_VOL_SHIFT 0 /* IN1L_VOL - [4:0] */
778#define WM8993_IN1L_VOL_WIDTH 5 /* IN1L_VOL - [4:0] */
779
780/*
781 * R25 (0x19) - Left Line Input 3&4 Volume
782 */
783#define WM8993_IN2_VU 0x0100 /* IN2_VU */
784#define WM8993_IN2_VU_MASK 0x0100 /* IN2_VU */
785#define WM8993_IN2_VU_SHIFT 8 /* IN2_VU */
786#define WM8993_IN2_VU_WIDTH 1 /* IN2_VU */
787#define WM8993_IN2L_MUTE 0x0080 /* IN2L_MUTE */
788#define WM8993_IN2L_MUTE_MASK 0x0080 /* IN2L_MUTE */
789#define WM8993_IN2L_MUTE_SHIFT 7 /* IN2L_MUTE */
790#define WM8993_IN2L_MUTE_WIDTH 1 /* IN2L_MUTE */
791#define WM8993_IN2L_ZC 0x0040 /* IN2L_ZC */
792#define WM8993_IN2L_ZC_MASK 0x0040 /* IN2L_ZC */
793#define WM8993_IN2L_ZC_SHIFT 6 /* IN2L_ZC */
794#define WM8993_IN2L_ZC_WIDTH 1 /* IN2L_ZC */
795#define WM8993_IN2L_VOL_MASK 0x001F /* IN2L_VOL - [4:0] */
796#define WM8993_IN2L_VOL_SHIFT 0 /* IN2L_VOL - [4:0] */
797#define WM8993_IN2L_VOL_WIDTH 5 /* IN2L_VOL - [4:0] */
798
799/*
800 * R26 (0x1A) - Right Line Input 1&2 Volume
801 */
802#define WM8993_IN1_VU 0x0100 /* IN1_VU */
803#define WM8993_IN1_VU_MASK 0x0100 /* IN1_VU */
804#define WM8993_IN1_VU_SHIFT 8 /* IN1_VU */
805#define WM8993_IN1_VU_WIDTH 1 /* IN1_VU */
806#define WM8993_IN1R_MUTE 0x0080 /* IN1R_MUTE */
807#define WM8993_IN1R_MUTE_MASK 0x0080 /* IN1R_MUTE */
808#define WM8993_IN1R_MUTE_SHIFT 7 /* IN1R_MUTE */
809#define WM8993_IN1R_MUTE_WIDTH 1 /* IN1R_MUTE */
810#define WM8993_IN1R_ZC 0x0040 /* IN1R_ZC */
811#define WM8993_IN1R_ZC_MASK 0x0040 /* IN1R_ZC */
812#define WM8993_IN1R_ZC_SHIFT 6 /* IN1R_ZC */
813#define WM8993_IN1R_ZC_WIDTH 1 /* IN1R_ZC */
814#define WM8993_IN1R_VOL_MASK 0x001F /* IN1R_VOL - [4:0] */
815#define WM8993_IN1R_VOL_SHIFT 0 /* IN1R_VOL - [4:0] */
816#define WM8993_IN1R_VOL_WIDTH 5 /* IN1R_VOL - [4:0] */
817
818/*
819 * R27 (0x1B) - Right Line Input 3&4 Volume
820 */
821#define WM8993_IN2_VU 0x0100 /* IN2_VU */
822#define WM8993_IN2_VU_MASK 0x0100 /* IN2_VU */
823#define WM8993_IN2_VU_SHIFT 8 /* IN2_VU */
824#define WM8993_IN2_VU_WIDTH 1 /* IN2_VU */
825#define WM8993_IN2R_MUTE 0x0080 /* IN2R_MUTE */
826#define WM8993_IN2R_MUTE_MASK 0x0080 /* IN2R_MUTE */
827#define WM8993_IN2R_MUTE_SHIFT 7 /* IN2R_MUTE */
828#define WM8993_IN2R_MUTE_WIDTH 1 /* IN2R_MUTE */
829#define WM8993_IN2R_ZC 0x0040 /* IN2R_ZC */
830#define WM8993_IN2R_ZC_MASK 0x0040 /* IN2R_ZC */
831#define WM8993_IN2R_ZC_SHIFT 6 /* IN2R_ZC */
832#define WM8993_IN2R_ZC_WIDTH 1 /* IN2R_ZC */
833#define WM8993_IN2R_VOL_MASK 0x001F /* IN2R_VOL - [4:0] */
834#define WM8993_IN2R_VOL_SHIFT 0 /* IN2R_VOL - [4:0] */
835#define WM8993_IN2R_VOL_WIDTH 5 /* IN2R_VOL - [4:0] */
836
837/*
838 * R28 (0x1C) - Left Output Volume
839 */
840#define WM8993_HPOUT1_VU 0x0100 /* HPOUT1_VU */
841#define WM8993_HPOUT1_VU_MASK 0x0100 /* HPOUT1_VU */
842#define WM8993_HPOUT1_VU_SHIFT 8 /* HPOUT1_VU */
843#define WM8993_HPOUT1_VU_WIDTH 1 /* HPOUT1_VU */
844#define WM8993_HPOUT1L_ZC 0x0080 /* HPOUT1L_ZC */
845#define WM8993_HPOUT1L_ZC_MASK 0x0080 /* HPOUT1L_ZC */
846#define WM8993_HPOUT1L_ZC_SHIFT 7 /* HPOUT1L_ZC */
847#define WM8993_HPOUT1L_ZC_WIDTH 1 /* HPOUT1L_ZC */
848#define WM8993_HPOUT1L_MUTE_N 0x0040 /* HPOUT1L_MUTE_N */
849#define WM8993_HPOUT1L_MUTE_N_MASK 0x0040 /* HPOUT1L_MUTE_N */
850#define WM8993_HPOUT1L_MUTE_N_SHIFT 6 /* HPOUT1L_MUTE_N */
851#define WM8993_HPOUT1L_MUTE_N_WIDTH 1 /* HPOUT1L_MUTE_N */
852#define WM8993_HPOUT1L_VOL_MASK 0x003F /* HPOUT1L_VOL - [5:0] */
853#define WM8993_HPOUT1L_VOL_SHIFT 0 /* HPOUT1L_VOL - [5:0] */
854#define WM8993_HPOUT1L_VOL_WIDTH 6 /* HPOUT1L_VOL - [5:0] */
855
856/*
857 * R29 (0x1D) - Right Output Volume
858 */
859#define WM8993_HPOUT1_VU 0x0100 /* HPOUT1_VU */
860#define WM8993_HPOUT1_VU_MASK 0x0100 /* HPOUT1_VU */
861#define WM8993_HPOUT1_VU_SHIFT 8 /* HPOUT1_VU */
862#define WM8993_HPOUT1_VU_WIDTH 1 /* HPOUT1_VU */
863#define WM8993_HPOUT1R_ZC 0x0080 /* HPOUT1R_ZC */
864#define WM8993_HPOUT1R_ZC_MASK 0x0080 /* HPOUT1R_ZC */
865#define WM8993_HPOUT1R_ZC_SHIFT 7 /* HPOUT1R_ZC */
866#define WM8993_HPOUT1R_ZC_WIDTH 1 /* HPOUT1R_ZC */
867#define WM8993_HPOUT1R_MUTE_N 0x0040 /* HPOUT1R_MUTE_N */
868#define WM8993_HPOUT1R_MUTE_N_MASK 0x0040 /* HPOUT1R_MUTE_N */
869#define WM8993_HPOUT1R_MUTE_N_SHIFT 6 /* HPOUT1R_MUTE_N */
870#define WM8993_HPOUT1R_MUTE_N_WIDTH 1 /* HPOUT1R_MUTE_N */
871#define WM8993_HPOUT1R_VOL_MASK 0x003F /* HPOUT1R_VOL - [5:0] */
872#define WM8993_HPOUT1R_VOL_SHIFT 0 /* HPOUT1R_VOL - [5:0] */
873#define WM8993_HPOUT1R_VOL_WIDTH 6 /* HPOUT1R_VOL - [5:0] */
874
875/*
876 * R30 (0x1E) - Line Outputs Volume
877 */
878#define WM8993_LINEOUT1N_MUTE 0x0040 /* LINEOUT1N_MUTE */
879#define WM8993_LINEOUT1N_MUTE_MASK 0x0040 /* LINEOUT1N_MUTE */
880#define WM8993_LINEOUT1N_MUTE_SHIFT 6 /* LINEOUT1N_MUTE */
881#define WM8993_LINEOUT1N_MUTE_WIDTH 1 /* LINEOUT1N_MUTE */
882#define WM8993_LINEOUT1P_MUTE 0x0020 /* LINEOUT1P_MUTE */
883#define WM8993_LINEOUT1P_MUTE_MASK 0x0020 /* LINEOUT1P_MUTE */
884#define WM8993_LINEOUT1P_MUTE_SHIFT 5 /* LINEOUT1P_MUTE */
885#define WM8993_LINEOUT1P_MUTE_WIDTH 1 /* LINEOUT1P_MUTE */
886#define WM8993_LINEOUT1_VOL 0x0010 /* LINEOUT1_VOL */
887#define WM8993_LINEOUT1_VOL_MASK 0x0010 /* LINEOUT1_VOL */
888#define WM8993_LINEOUT1_VOL_SHIFT 4 /* LINEOUT1_VOL */
889#define WM8993_LINEOUT1_VOL_WIDTH 1 /* LINEOUT1_VOL */
890#define WM8993_LINEOUT2N_MUTE 0x0004 /* LINEOUT2N_MUTE */
891#define WM8993_LINEOUT2N_MUTE_MASK 0x0004 /* LINEOUT2N_MUTE */
892#define WM8993_LINEOUT2N_MUTE_SHIFT 2 /* LINEOUT2N_MUTE */
893#define WM8993_LINEOUT2N_MUTE_WIDTH 1 /* LINEOUT2N_MUTE */
894#define WM8993_LINEOUT2P_MUTE 0x0002 /* LINEOUT2P_MUTE */
895#define WM8993_LINEOUT2P_MUTE_MASK 0x0002 /* LINEOUT2P_MUTE */
896#define WM8993_LINEOUT2P_MUTE_SHIFT 1 /* LINEOUT2P_MUTE */
897#define WM8993_LINEOUT2P_MUTE_WIDTH 1 /* LINEOUT2P_MUTE */
898#define WM8993_LINEOUT2_VOL 0x0001 /* LINEOUT2_VOL */
899#define WM8993_LINEOUT2_VOL_MASK 0x0001 /* LINEOUT2_VOL */
900#define WM8993_LINEOUT2_VOL_SHIFT 0 /* LINEOUT2_VOL */
901#define WM8993_LINEOUT2_VOL_WIDTH 1 /* LINEOUT2_VOL */
902
903/*
904 * R31 (0x1F) - HPOUT2 Volume
905 */
906#define WM8993_HPOUT2_MUTE 0x0020 /* HPOUT2_MUTE */
907#define WM8993_HPOUT2_MUTE_MASK 0x0020 /* HPOUT2_MUTE */
908#define WM8993_HPOUT2_MUTE_SHIFT 5 /* HPOUT2_MUTE */
909#define WM8993_HPOUT2_MUTE_WIDTH 1 /* HPOUT2_MUTE */
910#define WM8993_HPOUT2_VOL 0x0010 /* HPOUT2_VOL */
911#define WM8993_HPOUT2_VOL_MASK 0x0010 /* HPOUT2_VOL */
912#define WM8993_HPOUT2_VOL_SHIFT 4 /* HPOUT2_VOL */
913#define WM8993_HPOUT2_VOL_WIDTH 1 /* HPOUT2_VOL */
914
915/*
916 * R32 (0x20) - Left OPGA Volume
917 */
918#define WM8993_MIXOUT_VU 0x0100 /* MIXOUT_VU */
919#define WM8993_MIXOUT_VU_MASK 0x0100 /* MIXOUT_VU */
920#define WM8993_MIXOUT_VU_SHIFT 8 /* MIXOUT_VU */
921#define WM8993_MIXOUT_VU_WIDTH 1 /* MIXOUT_VU */
922#define WM8993_MIXOUTL_ZC 0x0080 /* MIXOUTL_ZC */
923#define WM8993_MIXOUTL_ZC_MASK 0x0080 /* MIXOUTL_ZC */
924#define WM8993_MIXOUTL_ZC_SHIFT 7 /* MIXOUTL_ZC */
925#define WM8993_MIXOUTL_ZC_WIDTH 1 /* MIXOUTL_ZC */
926#define WM8993_MIXOUTL_MUTE_N 0x0040 /* MIXOUTL_MUTE_N */
927#define WM8993_MIXOUTL_MUTE_N_MASK 0x0040 /* MIXOUTL_MUTE_N */
928#define WM8993_MIXOUTL_MUTE_N_SHIFT 6 /* MIXOUTL_MUTE_N */
929#define WM8993_MIXOUTL_MUTE_N_WIDTH 1 /* MIXOUTL_MUTE_N */
930#define WM8993_MIXOUTL_VOL_MASK 0x003F /* MIXOUTL_VOL - [5:0] */
931#define WM8993_MIXOUTL_VOL_SHIFT 0 /* MIXOUTL_VOL - [5:0] */
932#define WM8993_MIXOUTL_VOL_WIDTH 6 /* MIXOUTL_VOL - [5:0] */
933
934/*
935 * R33 (0x21) - Right OPGA Volume
936 */
937#define WM8993_MIXOUT_VU 0x0100 /* MIXOUT_VU */
938#define WM8993_MIXOUT_VU_MASK 0x0100 /* MIXOUT_VU */
939#define WM8993_MIXOUT_VU_SHIFT 8 /* MIXOUT_VU */
940#define WM8993_MIXOUT_VU_WIDTH 1 /* MIXOUT_VU */
941#define WM8993_MIXOUTR_ZC 0x0080 /* MIXOUTR_ZC */
942#define WM8993_MIXOUTR_ZC_MASK 0x0080 /* MIXOUTR_ZC */
943#define WM8993_MIXOUTR_ZC_SHIFT 7 /* MIXOUTR_ZC */
944#define WM8993_MIXOUTR_ZC_WIDTH 1 /* MIXOUTR_ZC */
945#define WM8993_MIXOUTR_MUTE_N 0x0040 /* MIXOUTR_MUTE_N */
946#define WM8993_MIXOUTR_MUTE_N_MASK 0x0040 /* MIXOUTR_MUTE_N */
947#define WM8993_MIXOUTR_MUTE_N_SHIFT 6 /* MIXOUTR_MUTE_N */
948#define WM8993_MIXOUTR_MUTE_N_WIDTH 1 /* MIXOUTR_MUTE_N */
949#define WM8993_MIXOUTR_VOL_MASK 0x003F /* MIXOUTR_VOL - [5:0] */
950#define WM8993_MIXOUTR_VOL_SHIFT 0 /* MIXOUTR_VOL - [5:0] */
951#define WM8993_MIXOUTR_VOL_WIDTH 6 /* MIXOUTR_VOL - [5:0] */
952
953/*
954 * R34 (0x22) - SPKMIXL Attenuation
955 */
956#define WM8993_MIXINL_SPKMIXL_VOL 0x0020 /* MIXINL_SPKMIXL_VOL */
957#define WM8993_MIXINL_SPKMIXL_VOL_MASK 0x0020 /* MIXINL_SPKMIXL_VOL */
958#define WM8993_MIXINL_SPKMIXL_VOL_SHIFT 5 /* MIXINL_SPKMIXL_VOL */
959#define WM8993_MIXINL_SPKMIXL_VOL_WIDTH 1 /* MIXINL_SPKMIXL_VOL */
960#define WM8993_IN1LP_SPKMIXL_VOL 0x0010 /* IN1LP_SPKMIXL_VOL */
961#define WM8993_IN1LP_SPKMIXL_VOL_MASK 0x0010 /* IN1LP_SPKMIXL_VOL */
962#define WM8993_IN1LP_SPKMIXL_VOL_SHIFT 4 /* IN1LP_SPKMIXL_VOL */
963#define WM8993_IN1LP_SPKMIXL_VOL_WIDTH 1 /* IN1LP_SPKMIXL_VOL */
964#define WM8993_MIXOUTL_SPKMIXL_VOL 0x0008 /* MIXOUTL_SPKMIXL_VOL */
965#define WM8993_MIXOUTL_SPKMIXL_VOL_MASK 0x0008 /* MIXOUTL_SPKMIXL_VOL */
966#define WM8993_MIXOUTL_SPKMIXL_VOL_SHIFT 3 /* MIXOUTL_SPKMIXL_VOL */
967#define WM8993_MIXOUTL_SPKMIXL_VOL_WIDTH 1 /* MIXOUTL_SPKMIXL_VOL */
968#define WM8993_DACL_SPKMIXL_VOL 0x0004 /* DACL_SPKMIXL_VOL */
969#define WM8993_DACL_SPKMIXL_VOL_MASK 0x0004 /* DACL_SPKMIXL_VOL */
970#define WM8993_DACL_SPKMIXL_VOL_SHIFT 2 /* DACL_SPKMIXL_VOL */
971#define WM8993_DACL_SPKMIXL_VOL_WIDTH 1 /* DACL_SPKMIXL_VOL */
972#define WM8993_SPKMIXL_VOL_MASK 0x0003 /* SPKMIXL_VOL - [1:0] */
973#define WM8993_SPKMIXL_VOL_SHIFT 0 /* SPKMIXL_VOL - [1:0] */
974#define WM8993_SPKMIXL_VOL_WIDTH 2 /* SPKMIXL_VOL - [1:0] */
975
976/*
977 * R35 (0x23) - SPKMIXR Attenuation
978 */
979#define WM8993_SPKOUT_CLASSAB_MODE 0x0100 /* SPKOUT_CLASSAB_MODE */
980#define WM8993_SPKOUT_CLASSAB_MODE_MASK 0x0100 /* SPKOUT_CLASSAB_MODE */
981#define WM8993_SPKOUT_CLASSAB_MODE_SHIFT 8 /* SPKOUT_CLASSAB_MODE */
982#define WM8993_SPKOUT_CLASSAB_MODE_WIDTH 1 /* SPKOUT_CLASSAB_MODE */
983#define WM8993_MIXINR_SPKMIXR_VOL 0x0020 /* MIXINR_SPKMIXR_VOL */
984#define WM8993_MIXINR_SPKMIXR_VOL_MASK 0x0020 /* MIXINR_SPKMIXR_VOL */
985#define WM8993_MIXINR_SPKMIXR_VOL_SHIFT 5 /* MIXINR_SPKMIXR_VOL */
986#define WM8993_MIXINR_SPKMIXR_VOL_WIDTH 1 /* MIXINR_SPKMIXR_VOL */
987#define WM8993_IN1RP_SPKMIXR_VOL 0x0010 /* IN1RP_SPKMIXR_VOL */
988#define WM8993_IN1RP_SPKMIXR_VOL_MASK 0x0010 /* IN1RP_SPKMIXR_VOL */
989#define WM8993_IN1RP_SPKMIXR_VOL_SHIFT 4 /* IN1RP_SPKMIXR_VOL */
990#define WM8993_IN1RP_SPKMIXR_VOL_WIDTH 1 /* IN1RP_SPKMIXR_VOL */
991#define WM8993_MIXOUTR_SPKMIXR_VOL 0x0008 /* MIXOUTR_SPKMIXR_VOL */
992#define WM8993_MIXOUTR_SPKMIXR_VOL_MASK 0x0008 /* MIXOUTR_SPKMIXR_VOL */
993#define WM8993_MIXOUTR_SPKMIXR_VOL_SHIFT 3 /* MIXOUTR_SPKMIXR_VOL */
994#define WM8993_MIXOUTR_SPKMIXR_VOL_WIDTH 1 /* MIXOUTR_SPKMIXR_VOL */
995#define WM8993_DACR_SPKMIXR_VOL 0x0004 /* DACR_SPKMIXR_VOL */
996#define WM8993_DACR_SPKMIXR_VOL_MASK 0x0004 /* DACR_SPKMIXR_VOL */
997#define WM8993_DACR_SPKMIXR_VOL_SHIFT 2 /* DACR_SPKMIXR_VOL */
998#define WM8993_DACR_SPKMIXR_VOL_WIDTH 1 /* DACR_SPKMIXR_VOL */
999#define WM8993_SPKMIXR_VOL_MASK 0x0003 /* SPKMIXR_VOL - [1:0] */
1000#define WM8993_SPKMIXR_VOL_SHIFT 0 /* SPKMIXR_VOL - [1:0] */
1001#define WM8993_SPKMIXR_VOL_WIDTH 2 /* SPKMIXR_VOL - [1:0] */
1002
1003/*
1004 * R36 (0x24) - SPKOUT Mixers
1005 */
1006#define WM8993_VRX_TO_SPKOUTL 0x0020 /* VRX_TO_SPKOUTL */
1007#define WM8993_VRX_TO_SPKOUTL_MASK 0x0020 /* VRX_TO_SPKOUTL */
1008#define WM8993_VRX_TO_SPKOUTL_SHIFT 5 /* VRX_TO_SPKOUTL */
1009#define WM8993_VRX_TO_SPKOUTL_WIDTH 1 /* VRX_TO_SPKOUTL */
1010#define WM8993_SPKMIXL_TO_SPKOUTL 0x0010 /* SPKMIXL_TO_SPKOUTL */
1011#define WM8993_SPKMIXL_TO_SPKOUTL_MASK 0x0010 /* SPKMIXL_TO_SPKOUTL */
1012#define WM8993_SPKMIXL_TO_SPKOUTL_SHIFT 4 /* SPKMIXL_TO_SPKOUTL */
1013#define WM8993_SPKMIXL_TO_SPKOUTL_WIDTH 1 /* SPKMIXL_TO_SPKOUTL */
1014#define WM8993_SPKMIXR_TO_SPKOUTL 0x0008 /* SPKMIXR_TO_SPKOUTL */
1015#define WM8993_SPKMIXR_TO_SPKOUTL_MASK 0x0008 /* SPKMIXR_TO_SPKOUTL */
1016#define WM8993_SPKMIXR_TO_SPKOUTL_SHIFT 3 /* SPKMIXR_TO_SPKOUTL */
1017#define WM8993_SPKMIXR_TO_SPKOUTL_WIDTH 1 /* SPKMIXR_TO_SPKOUTL */
1018#define WM8993_VRX_TO_SPKOUTR 0x0004 /* VRX_TO_SPKOUTR */
1019#define WM8993_VRX_TO_SPKOUTR_MASK 0x0004 /* VRX_TO_SPKOUTR */
1020#define WM8993_VRX_TO_SPKOUTR_SHIFT 2 /* VRX_TO_SPKOUTR */
1021#define WM8993_VRX_TO_SPKOUTR_WIDTH 1 /* VRX_TO_SPKOUTR */
1022#define WM8993_SPKMIXL_TO_SPKOUTR 0x0002 /* SPKMIXL_TO_SPKOUTR */
1023#define WM8993_SPKMIXL_TO_SPKOUTR_MASK 0x0002 /* SPKMIXL_TO_SPKOUTR */
1024#define WM8993_SPKMIXL_TO_SPKOUTR_SHIFT 1 /* SPKMIXL_TO_SPKOUTR */
1025#define WM8993_SPKMIXL_TO_SPKOUTR_WIDTH 1 /* SPKMIXL_TO_SPKOUTR */
1026#define WM8993_SPKMIXR_TO_SPKOUTR 0x0001 /* SPKMIXR_TO_SPKOUTR */
1027#define WM8993_SPKMIXR_TO_SPKOUTR_MASK 0x0001 /* SPKMIXR_TO_SPKOUTR */
1028#define WM8993_SPKMIXR_TO_SPKOUTR_SHIFT 0 /* SPKMIXR_TO_SPKOUTR */
1029#define WM8993_SPKMIXR_TO_SPKOUTR_WIDTH 1 /* SPKMIXR_TO_SPKOUTR */
1030
1031/*
1032 * R37 (0x25) - SPKOUT Boost
1033 */
1034#define WM8993_SPKOUTL_BOOST_MASK 0x0038 /* SPKOUTL_BOOST - [5:3] */
1035#define WM8993_SPKOUTL_BOOST_SHIFT 3 /* SPKOUTL_BOOST - [5:3] */
1036#define WM8993_SPKOUTL_BOOST_WIDTH 3 /* SPKOUTL_BOOST - [5:3] */
1037#define WM8993_SPKOUTR_BOOST_MASK 0x0007 /* SPKOUTR_BOOST - [2:0] */
1038#define WM8993_SPKOUTR_BOOST_SHIFT 0 /* SPKOUTR_BOOST - [2:0] */
1039#define WM8993_SPKOUTR_BOOST_WIDTH 3 /* SPKOUTR_BOOST - [2:0] */
1040
1041/*
1042 * R38 (0x26) - Speaker Volume Left
1043 */
1044#define WM8993_SPKOUT_VU 0x0100 /* SPKOUT_VU */
1045#define WM8993_SPKOUT_VU_MASK 0x0100 /* SPKOUT_VU */
1046#define WM8993_SPKOUT_VU_SHIFT 8 /* SPKOUT_VU */
1047#define WM8993_SPKOUT_VU_WIDTH 1 /* SPKOUT_VU */
1048#define WM8993_SPKOUTL_ZC 0x0080 /* SPKOUTL_ZC */
1049#define WM8993_SPKOUTL_ZC_MASK 0x0080 /* SPKOUTL_ZC */
1050#define WM8993_SPKOUTL_ZC_SHIFT 7 /* SPKOUTL_ZC */
1051#define WM8993_SPKOUTL_ZC_WIDTH 1 /* SPKOUTL_ZC */
1052#define WM8993_SPKOUTL_MUTE_N 0x0040 /* SPKOUTL_MUTE_N */
1053#define WM8993_SPKOUTL_MUTE_N_MASK 0x0040 /* SPKOUTL_MUTE_N */
1054#define WM8993_SPKOUTL_MUTE_N_SHIFT 6 /* SPKOUTL_MUTE_N */
1055#define WM8993_SPKOUTL_MUTE_N_WIDTH 1 /* SPKOUTL_MUTE_N */
1056#define WM8993_SPKOUTL_VOL_MASK 0x003F /* SPKOUTL_VOL - [5:0] */
1057#define WM8993_SPKOUTL_VOL_SHIFT 0 /* SPKOUTL_VOL - [5:0] */
1058#define WM8993_SPKOUTL_VOL_WIDTH 6 /* SPKOUTL_VOL - [5:0] */
1059
1060/*
1061 * R39 (0x27) - Speaker Volume Right
1062 */
1063#define WM8993_SPKOUT_VU 0x0100 /* SPKOUT_VU */
1064#define WM8993_SPKOUT_VU_MASK 0x0100 /* SPKOUT_VU */
1065#define WM8993_SPKOUT_VU_SHIFT 8 /* SPKOUT_VU */
1066#define WM8993_SPKOUT_VU_WIDTH 1 /* SPKOUT_VU */
1067#define WM8993_SPKOUTR_ZC 0x0080 /* SPKOUTR_ZC */
1068#define WM8993_SPKOUTR_ZC_MASK 0x0080 /* SPKOUTR_ZC */
1069#define WM8993_SPKOUTR_ZC_SHIFT 7 /* SPKOUTR_ZC */
1070#define WM8993_SPKOUTR_ZC_WIDTH 1 /* SPKOUTR_ZC */
1071#define WM8993_SPKOUTR_MUTE_N 0x0040 /* SPKOUTR_MUTE_N */
1072#define WM8993_SPKOUTR_MUTE_N_MASK 0x0040 /* SPKOUTR_MUTE_N */
1073#define WM8993_SPKOUTR_MUTE_N_SHIFT 6 /* SPKOUTR_MUTE_N */
1074#define WM8993_SPKOUTR_MUTE_N_WIDTH 1 /* SPKOUTR_MUTE_N */
1075#define WM8993_SPKOUTR_VOL_MASK 0x003F /* SPKOUTR_VOL - [5:0] */
1076#define WM8993_SPKOUTR_VOL_SHIFT 0 /* SPKOUTR_VOL - [5:0] */
1077#define WM8993_SPKOUTR_VOL_WIDTH 6 /* SPKOUTR_VOL - [5:0] */
1078
1079/*
1080 * R40 (0x28) - Input Mixer2
1081 */
1082#define WM8993_IN2LP_TO_IN2L 0x0080 /* IN2LP_TO_IN2L */
1083#define WM8993_IN2LP_TO_IN2L_MASK 0x0080 /* IN2LP_TO_IN2L */
1084#define WM8993_IN2LP_TO_IN2L_SHIFT 7 /* IN2LP_TO_IN2L */
1085#define WM8993_IN2LP_TO_IN2L_WIDTH 1 /* IN2LP_TO_IN2L */
1086#define WM8993_IN2LN_TO_IN2L 0x0040 /* IN2LN_TO_IN2L */
1087#define WM8993_IN2LN_TO_IN2L_MASK 0x0040 /* IN2LN_TO_IN2L */
1088#define WM8993_IN2LN_TO_IN2L_SHIFT 6 /* IN2LN_TO_IN2L */
1089#define WM8993_IN2LN_TO_IN2L_WIDTH 1 /* IN2LN_TO_IN2L */
1090#define WM8993_IN1LP_TO_IN1L 0x0020 /* IN1LP_TO_IN1L */
1091#define WM8993_IN1LP_TO_IN1L_MASK 0x0020 /* IN1LP_TO_IN1L */
1092#define WM8993_IN1LP_TO_IN1L_SHIFT 5 /* IN1LP_TO_IN1L */
1093#define WM8993_IN1LP_TO_IN1L_WIDTH 1 /* IN1LP_TO_IN1L */
1094#define WM8993_IN1LN_TO_IN1L 0x0010 /* IN1LN_TO_IN1L */
1095#define WM8993_IN1LN_TO_IN1L_MASK 0x0010 /* IN1LN_TO_IN1L */
1096#define WM8993_IN1LN_TO_IN1L_SHIFT 4 /* IN1LN_TO_IN1L */
1097#define WM8993_IN1LN_TO_IN1L_WIDTH 1 /* IN1LN_TO_IN1L */
1098#define WM8993_IN2RP_TO_IN2R 0x0008 /* IN2RP_TO_IN2R */
1099#define WM8993_IN2RP_TO_IN2R_MASK 0x0008 /* IN2RP_TO_IN2R */
1100#define WM8993_IN2RP_TO_IN2R_SHIFT 3 /* IN2RP_TO_IN2R */
1101#define WM8993_IN2RP_TO_IN2R_WIDTH 1 /* IN2RP_TO_IN2R */
1102#define WM8993_IN2RN_TO_IN2R 0x0004 /* IN2RN_TO_IN2R */
1103#define WM8993_IN2RN_TO_IN2R_MASK 0x0004 /* IN2RN_TO_IN2R */
1104#define WM8993_IN2RN_TO_IN2R_SHIFT 2 /* IN2RN_TO_IN2R */
1105#define WM8993_IN2RN_TO_IN2R_WIDTH 1 /* IN2RN_TO_IN2R */
1106#define WM8993_IN1RP_TO_IN1R 0x0002 /* IN1RP_TO_IN1R */
1107#define WM8993_IN1RP_TO_IN1R_MASK 0x0002 /* IN1RP_TO_IN1R */
1108#define WM8993_IN1RP_TO_IN1R_SHIFT 1 /* IN1RP_TO_IN1R */
1109#define WM8993_IN1RP_TO_IN1R_WIDTH 1 /* IN1RP_TO_IN1R */
1110#define WM8993_IN1RN_TO_IN1R 0x0001 /* IN1RN_TO_IN1R */
1111#define WM8993_IN1RN_TO_IN1R_MASK 0x0001 /* IN1RN_TO_IN1R */
1112#define WM8993_IN1RN_TO_IN1R_SHIFT 0 /* IN1RN_TO_IN1R */
1113#define WM8993_IN1RN_TO_IN1R_WIDTH 1 /* IN1RN_TO_IN1R */
1114
1115/*
1116 * R41 (0x29) - Input Mixer3
1117 */
1118#define WM8993_IN2L_TO_MIXINL 0x0100 /* IN2L_TO_MIXINL */
1119#define WM8993_IN2L_TO_MIXINL_MASK 0x0100 /* IN2L_TO_MIXINL */
1120#define WM8993_IN2L_TO_MIXINL_SHIFT 8 /* IN2L_TO_MIXINL */
1121#define WM8993_IN2L_TO_MIXINL_WIDTH 1 /* IN2L_TO_MIXINL */
1122#define WM8993_IN2L_MIXINL_VOL 0x0080 /* IN2L_MIXINL_VOL */
1123#define WM8993_IN2L_MIXINL_VOL_MASK 0x0080 /* IN2L_MIXINL_VOL */
1124#define WM8993_IN2L_MIXINL_VOL_SHIFT 7 /* IN2L_MIXINL_VOL */
1125#define WM8993_IN2L_MIXINL_VOL_WIDTH 1 /* IN2L_MIXINL_VOL */
1126#define WM8993_IN1L_TO_MIXINL 0x0020 /* IN1L_TO_MIXINL */
1127#define WM8993_IN1L_TO_MIXINL_MASK 0x0020 /* IN1L_TO_MIXINL */
1128#define WM8993_IN1L_TO_MIXINL_SHIFT 5 /* IN1L_TO_MIXINL */
1129#define WM8993_IN1L_TO_MIXINL_WIDTH 1 /* IN1L_TO_MIXINL */
1130#define WM8993_IN1L_MIXINL_VOL 0x0010 /* IN1L_MIXINL_VOL */
1131#define WM8993_IN1L_MIXINL_VOL_MASK 0x0010 /* IN1L_MIXINL_VOL */
1132#define WM8993_IN1L_MIXINL_VOL_SHIFT 4 /* IN1L_MIXINL_VOL */
1133#define WM8993_IN1L_MIXINL_VOL_WIDTH 1 /* IN1L_MIXINL_VOL */
1134#define WM8993_MIXOUTL_MIXINL_VOL_MASK 0x0007 /* MIXOUTL_MIXINL_VOL - [2:0] */
1135#define WM8993_MIXOUTL_MIXINL_VOL_SHIFT 0 /* MIXOUTL_MIXINL_VOL - [2:0] */
1136#define WM8993_MIXOUTL_MIXINL_VOL_WIDTH 3 /* MIXOUTL_MIXINL_VOL - [2:0] */
1137
1138/*
1139 * R42 (0x2A) - Input Mixer4
1140 */
1141#define WM8993_IN2R_TO_MIXINR 0x0100 /* IN2R_TO_MIXINR */
1142#define WM8993_IN2R_TO_MIXINR_MASK 0x0100 /* IN2R_TO_MIXINR */
1143#define WM8993_IN2R_TO_MIXINR_SHIFT 8 /* IN2R_TO_MIXINR */
1144#define WM8993_IN2R_TO_MIXINR_WIDTH 1 /* IN2R_TO_MIXINR */
1145#define WM8993_IN2R_MIXINR_VOL 0x0080 /* IN2R_MIXINR_VOL */
1146#define WM8993_IN2R_MIXINR_VOL_MASK 0x0080 /* IN2R_MIXINR_VOL */
1147#define WM8993_IN2R_MIXINR_VOL_SHIFT 7 /* IN2R_MIXINR_VOL */
1148#define WM8993_IN2R_MIXINR_VOL_WIDTH 1 /* IN2R_MIXINR_VOL */
1149#define WM8993_IN1R_TO_MIXINR 0x0020 /* IN1R_TO_MIXINR */
1150#define WM8993_IN1R_TO_MIXINR_MASK 0x0020 /* IN1R_TO_MIXINR */
1151#define WM8993_IN1R_TO_MIXINR_SHIFT 5 /* IN1R_TO_MIXINR */
1152#define WM8993_IN1R_TO_MIXINR_WIDTH 1 /* IN1R_TO_MIXINR */
1153#define WM8993_IN1R_MIXINR_VOL 0x0010 /* IN1R_MIXINR_VOL */
1154#define WM8993_IN1R_MIXINR_VOL_MASK 0x0010 /* IN1R_MIXINR_VOL */
1155#define WM8993_IN1R_MIXINR_VOL_SHIFT 4 /* IN1R_MIXINR_VOL */
1156#define WM8993_IN1R_MIXINR_VOL_WIDTH 1 /* IN1R_MIXINR_VOL */
1157#define WM8993_MIXOUTR_MIXINR_VOL_MASK 0x0007 /* MIXOUTR_MIXINR_VOL - [2:0] */
1158#define WM8993_MIXOUTR_MIXINR_VOL_SHIFT 0 /* MIXOUTR_MIXINR_VOL - [2:0] */
1159#define WM8993_MIXOUTR_MIXINR_VOL_WIDTH 3 /* MIXOUTR_MIXINR_VOL - [2:0] */
1160
1161/*
1162 * R43 (0x2B) - Input Mixer5
1163 */
1164#define WM8993_IN1LP_MIXINL_VOL_MASK 0x01C0 /* IN1LP_MIXINL_VOL - [8:6] */
1165#define WM8993_IN1LP_MIXINL_VOL_SHIFT 6 /* IN1LP_MIXINL_VOL - [8:6] */
1166#define WM8993_IN1LP_MIXINL_VOL_WIDTH 3 /* IN1LP_MIXINL_VOL - [8:6] */
1167#define WM8993_VRX_MIXINL_VOL_MASK 0x0007 /* VRX_MIXINL_VOL - [2:0] */
1168#define WM8993_VRX_MIXINL_VOL_SHIFT 0 /* VRX_MIXINL_VOL - [2:0] */
1169#define WM8993_VRX_MIXINL_VOL_WIDTH 3 /* VRX_MIXINL_VOL - [2:0] */
1170
1171/*
1172 * R44 (0x2C) - Input Mixer6
1173 */
1174#define WM8993_IN1RP_MIXINR_VOL_MASK 0x01C0 /* IN1RP_MIXINR_VOL - [8:6] */
1175#define WM8993_IN1RP_MIXINR_VOL_SHIFT 6 /* IN1RP_MIXINR_VOL - [8:6] */
1176#define WM8993_IN1RP_MIXINR_VOL_WIDTH 3 /* IN1RP_MIXINR_VOL - [8:6] */
1177#define WM8993_VRX_MIXINR_VOL_MASK 0x0007 /* VRX_MIXINR_VOL - [2:0] */
1178#define WM8993_VRX_MIXINR_VOL_SHIFT 0 /* VRX_MIXINR_VOL - [2:0] */
1179#define WM8993_VRX_MIXINR_VOL_WIDTH 3 /* VRX_MIXINR_VOL - [2:0] */
1180
1181/*
1182 * R45 (0x2D) - Output Mixer1
1183 */
1184#define WM8993_DACL_TO_HPOUT1L 0x0100 /* DACL_TO_HPOUT1L */
1185#define WM8993_DACL_TO_HPOUT1L_MASK 0x0100 /* DACL_TO_HPOUT1L */
1186#define WM8993_DACL_TO_HPOUT1L_SHIFT 8 /* DACL_TO_HPOUT1L */
1187#define WM8993_DACL_TO_HPOUT1L_WIDTH 1 /* DACL_TO_HPOUT1L */
1188#define WM8993_MIXINR_TO_MIXOUTL 0x0080 /* MIXINR_TO_MIXOUTL */
1189#define WM8993_MIXINR_TO_MIXOUTL_MASK 0x0080 /* MIXINR_TO_MIXOUTL */
1190#define WM8993_MIXINR_TO_MIXOUTL_SHIFT 7 /* MIXINR_TO_MIXOUTL */
1191#define WM8993_MIXINR_TO_MIXOUTL_WIDTH 1 /* MIXINR_TO_MIXOUTL */
1192#define WM8993_MIXINL_TO_MIXOUTL 0x0040 /* MIXINL_TO_MIXOUTL */
1193#define WM8993_MIXINL_TO_MIXOUTL_MASK 0x0040 /* MIXINL_TO_MIXOUTL */
1194#define WM8993_MIXINL_TO_MIXOUTL_SHIFT 6 /* MIXINL_TO_MIXOUTL */
1195#define WM8993_MIXINL_TO_MIXOUTL_WIDTH 1 /* MIXINL_TO_MIXOUTL */
1196#define WM8993_IN2RN_TO_MIXOUTL 0x0020 /* IN2RN_TO_MIXOUTL */
1197#define WM8993_IN2RN_TO_MIXOUTL_MASK 0x0020 /* IN2RN_TO_MIXOUTL */
1198#define WM8993_IN2RN_TO_MIXOUTL_SHIFT 5 /* IN2RN_TO_MIXOUTL */
1199#define WM8993_IN2RN_TO_MIXOUTL_WIDTH 1 /* IN2RN_TO_MIXOUTL */
1200#define WM8993_IN2LN_TO_MIXOUTL 0x0010 /* IN2LN_TO_MIXOUTL */
1201#define WM8993_IN2LN_TO_MIXOUTL_MASK 0x0010 /* IN2LN_TO_MIXOUTL */
1202#define WM8993_IN2LN_TO_MIXOUTL_SHIFT 4 /* IN2LN_TO_MIXOUTL */
1203#define WM8993_IN2LN_TO_MIXOUTL_WIDTH 1 /* IN2LN_TO_MIXOUTL */
1204#define WM8993_IN1R_TO_MIXOUTL 0x0008 /* IN1R_TO_MIXOUTL */
1205#define WM8993_IN1R_TO_MIXOUTL_MASK 0x0008 /* IN1R_TO_MIXOUTL */
1206#define WM8993_IN1R_TO_MIXOUTL_SHIFT 3 /* IN1R_TO_MIXOUTL */
1207#define WM8993_IN1R_TO_MIXOUTL_WIDTH 1 /* IN1R_TO_MIXOUTL */
1208#define WM8993_IN1L_TO_MIXOUTL 0x0004 /* IN1L_TO_MIXOUTL */
1209#define WM8993_IN1L_TO_MIXOUTL_MASK 0x0004 /* IN1L_TO_MIXOUTL */
1210#define WM8993_IN1L_TO_MIXOUTL_SHIFT 2 /* IN1L_TO_MIXOUTL */
1211#define WM8993_IN1L_TO_MIXOUTL_WIDTH 1 /* IN1L_TO_MIXOUTL */
1212#define WM8993_IN2LP_TO_MIXOUTL 0x0002 /* IN2LP_TO_MIXOUTL */
1213#define WM8993_IN2LP_TO_MIXOUTL_MASK 0x0002 /* IN2LP_TO_MIXOUTL */
1214#define WM8993_IN2LP_TO_MIXOUTL_SHIFT 1 /* IN2LP_TO_MIXOUTL */
1215#define WM8993_IN2LP_TO_MIXOUTL_WIDTH 1 /* IN2LP_TO_MIXOUTL */
1216#define WM8993_DACL_TO_MIXOUTL 0x0001 /* DACL_TO_MIXOUTL */
1217#define WM8993_DACL_TO_MIXOUTL_MASK 0x0001 /* DACL_TO_MIXOUTL */
1218#define WM8993_DACL_TO_MIXOUTL_SHIFT 0 /* DACL_TO_MIXOUTL */
1219#define WM8993_DACL_TO_MIXOUTL_WIDTH 1 /* DACL_TO_MIXOUTL */
1220
1221/*
1222 * R46 (0x2E) - Output Mixer2
1223 */
1224#define WM8993_DACR_TO_HPOUT1R 0x0100 /* DACR_TO_HPOUT1R */
1225#define WM8993_DACR_TO_HPOUT1R_MASK 0x0100 /* DACR_TO_HPOUT1R */
1226#define WM8993_DACR_TO_HPOUT1R_SHIFT 8 /* DACR_TO_HPOUT1R */
1227#define WM8993_DACR_TO_HPOUT1R_WIDTH 1 /* DACR_TO_HPOUT1R */
1228#define WM8993_MIXINL_TO_MIXOUTR 0x0080 /* MIXINL_TO_MIXOUTR */
1229#define WM8993_MIXINL_TO_MIXOUTR_MASK 0x0080 /* MIXINL_TO_MIXOUTR */
1230#define WM8993_MIXINL_TO_MIXOUTR_SHIFT 7 /* MIXINL_TO_MIXOUTR */
1231#define WM8993_MIXINL_TO_MIXOUTR_WIDTH 1 /* MIXINL_TO_MIXOUTR */
1232#define WM8993_MIXINR_TO_MIXOUTR 0x0040 /* MIXINR_TO_MIXOUTR */
1233#define WM8993_MIXINR_TO_MIXOUTR_MASK 0x0040 /* MIXINR_TO_MIXOUTR */
1234#define WM8993_MIXINR_TO_MIXOUTR_SHIFT 6 /* MIXINR_TO_MIXOUTR */
1235#define WM8993_MIXINR_TO_MIXOUTR_WIDTH 1 /* MIXINR_TO_MIXOUTR */
1236#define WM8993_IN2LN_TO_MIXOUTR 0x0020 /* IN2LN_TO_MIXOUTR */
1237#define WM8993_IN2LN_TO_MIXOUTR_MASK 0x0020 /* IN2LN_TO_MIXOUTR */
1238#define WM8993_IN2LN_TO_MIXOUTR_SHIFT 5 /* IN2LN_TO_MIXOUTR */
1239#define WM8993_IN2LN_TO_MIXOUTR_WIDTH 1 /* IN2LN_TO_MIXOUTR */
1240#define WM8993_IN2RN_TO_MIXOUTR 0x0010 /* IN2RN_TO_MIXOUTR */
1241#define WM8993_IN2RN_TO_MIXOUTR_MASK 0x0010 /* IN2RN_TO_MIXOUTR */
1242#define WM8993_IN2RN_TO_MIXOUTR_SHIFT 4 /* IN2RN_TO_MIXOUTR */
1243#define WM8993_IN2RN_TO_MIXOUTR_WIDTH 1 /* IN2RN_TO_MIXOUTR */
1244#define WM8993_IN1L_TO_MIXOUTR 0x0008 /* IN1L_TO_MIXOUTR */
1245#define WM8993_IN1L_TO_MIXOUTR_MASK 0x0008 /* IN1L_TO_MIXOUTR */
1246#define WM8993_IN1L_TO_MIXOUTR_SHIFT 3 /* IN1L_TO_MIXOUTR */
1247#define WM8993_IN1L_TO_MIXOUTR_WIDTH 1 /* IN1L_TO_MIXOUTR */
1248#define WM8993_IN1R_TO_MIXOUTR 0x0004 /* IN1R_TO_MIXOUTR */
1249#define WM8993_IN1R_TO_MIXOUTR_MASK 0x0004 /* IN1R_TO_MIXOUTR */
1250#define WM8993_IN1R_TO_MIXOUTR_SHIFT 2 /* IN1R_TO_MIXOUTR */
1251#define WM8993_IN1R_TO_MIXOUTR_WIDTH 1 /* IN1R_TO_MIXOUTR */
1252#define WM8993_IN2RP_TO_MIXOUTR 0x0002 /* IN2RP_TO_MIXOUTR */
1253#define WM8993_IN2RP_TO_MIXOUTR_MASK 0x0002 /* IN2RP_TO_MIXOUTR */
1254#define WM8993_IN2RP_TO_MIXOUTR_SHIFT 1 /* IN2RP_TO_MIXOUTR */
1255#define WM8993_IN2RP_TO_MIXOUTR_WIDTH 1 /* IN2RP_TO_MIXOUTR */
1256#define WM8993_DACR_TO_MIXOUTR 0x0001 /* DACR_TO_MIXOUTR */
1257#define WM8993_DACR_TO_MIXOUTR_MASK 0x0001 /* DACR_TO_MIXOUTR */
1258#define WM8993_DACR_TO_MIXOUTR_SHIFT 0 /* DACR_TO_MIXOUTR */
1259#define WM8993_DACR_TO_MIXOUTR_WIDTH 1 /* DACR_TO_MIXOUTR */
1260
1261/*
1262 * R47 (0x2F) - Output Mixer3
1263 */
1264#define WM8993_IN2LP_MIXOUTL_VOL_MASK 0x0E00 /* IN2LP_MIXOUTL_VOL - [11:9] */
1265#define WM8993_IN2LP_MIXOUTL_VOL_SHIFT 9 /* IN2LP_MIXOUTL_VOL - [11:9] */
1266#define WM8993_IN2LP_MIXOUTL_VOL_WIDTH 3 /* IN2LP_MIXOUTL_VOL - [11:9] */
1267#define WM8993_IN2LN_MIXOUTL_VOL_MASK 0x01C0 /* IN2LN_MIXOUTL_VOL - [8:6] */
1268#define WM8993_IN2LN_MIXOUTL_VOL_SHIFT 6 /* IN2LN_MIXOUTL_VOL - [8:6] */
1269#define WM8993_IN2LN_MIXOUTL_VOL_WIDTH 3 /* IN2LN_MIXOUTL_VOL - [8:6] */
1270#define WM8993_IN1R_MIXOUTL_VOL_MASK 0x0038 /* IN1R_MIXOUTL_VOL - [5:3] */
1271#define WM8993_IN1R_MIXOUTL_VOL_SHIFT 3 /* IN1R_MIXOUTL_VOL - [5:3] */
1272#define WM8993_IN1R_MIXOUTL_VOL_WIDTH 3 /* IN1R_MIXOUTL_VOL - [5:3] */
1273#define WM8993_IN1L_MIXOUTL_VOL_MASK 0x0007 /* IN1L_MIXOUTL_VOL - [2:0] */
1274#define WM8993_IN1L_MIXOUTL_VOL_SHIFT 0 /* IN1L_MIXOUTL_VOL - [2:0] */
1275#define WM8993_IN1L_MIXOUTL_VOL_WIDTH 3 /* IN1L_MIXOUTL_VOL - [2:0] */
1276
1277/*
1278 * R48 (0x30) - Output Mixer4
1279 */
1280#define WM8993_IN2RP_MIXOUTR_VOL_MASK 0x0E00 /* IN2RP_MIXOUTR_VOL - [11:9] */
1281#define WM8993_IN2RP_MIXOUTR_VOL_SHIFT 9 /* IN2RP_MIXOUTR_VOL - [11:9] */
1282#define WM8993_IN2RP_MIXOUTR_VOL_WIDTH 3 /* IN2RP_MIXOUTR_VOL - [11:9] */
1283#define WM8993_IN2RN_MIXOUTR_VOL_MASK 0x01C0 /* IN2RN_MIXOUTR_VOL - [8:6] */
1284#define WM8993_IN2RN_MIXOUTR_VOL_SHIFT 6 /* IN2RN_MIXOUTR_VOL - [8:6] */
1285#define WM8993_IN2RN_MIXOUTR_VOL_WIDTH 3 /* IN2RN_MIXOUTR_VOL - [8:6] */
1286#define WM8993_IN1L_MIXOUTR_VOL_MASK 0x0038 /* IN1L_MIXOUTR_VOL - [5:3] */
1287#define WM8993_IN1L_MIXOUTR_VOL_SHIFT 3 /* IN1L_MIXOUTR_VOL - [5:3] */
1288#define WM8993_IN1L_MIXOUTR_VOL_WIDTH 3 /* IN1L_MIXOUTR_VOL - [5:3] */
1289#define WM8993_IN1R_MIXOUTR_VOL_MASK 0x0007 /* IN1R_MIXOUTR_VOL - [2:0] */
1290#define WM8993_IN1R_MIXOUTR_VOL_SHIFT 0 /* IN1R_MIXOUTR_VOL - [2:0] */
1291#define WM8993_IN1R_MIXOUTR_VOL_WIDTH 3 /* IN1R_MIXOUTR_VOL - [2:0] */
1292
1293/*
1294 * R49 (0x31) - Output Mixer5
1295 */
1296#define WM8993_DACL_MIXOUTL_VOL_MASK 0x0E00 /* DACL_MIXOUTL_VOL - [11:9] */
1297#define WM8993_DACL_MIXOUTL_VOL_SHIFT 9 /* DACL_MIXOUTL_VOL - [11:9] */
1298#define WM8993_DACL_MIXOUTL_VOL_WIDTH 3 /* DACL_MIXOUTL_VOL - [11:9] */
1299#define WM8993_IN2RN_MIXOUTL_VOL_MASK 0x01C0 /* IN2RN_MIXOUTL_VOL - [8:6] */
1300#define WM8993_IN2RN_MIXOUTL_VOL_SHIFT 6 /* IN2RN_MIXOUTL_VOL - [8:6] */
1301#define WM8993_IN2RN_MIXOUTL_VOL_WIDTH 3 /* IN2RN_MIXOUTL_VOL - [8:6] */
1302#define WM8993_MIXINR_MIXOUTL_VOL_MASK 0x0038 /* MIXINR_MIXOUTL_VOL - [5:3] */
1303#define WM8993_MIXINR_MIXOUTL_VOL_SHIFT 3 /* MIXINR_MIXOUTL_VOL - [5:3] */
1304#define WM8993_MIXINR_MIXOUTL_VOL_WIDTH 3 /* MIXINR_MIXOUTL_VOL - [5:3] */
1305#define WM8993_MIXINL_MIXOUTL_VOL_MASK 0x0007 /* MIXINL_MIXOUTL_VOL - [2:0] */
1306#define WM8993_MIXINL_MIXOUTL_VOL_SHIFT 0 /* MIXINL_MIXOUTL_VOL - [2:0] */
1307#define WM8993_MIXINL_MIXOUTL_VOL_WIDTH 3 /* MIXINL_MIXOUTL_VOL - [2:0] */
1308
1309/*
1310 * R50 (0x32) - Output Mixer6
1311 */
1312#define WM8993_DACR_MIXOUTR_VOL_MASK 0x0E00 /* DACR_MIXOUTR_VOL - [11:9] */
1313#define WM8993_DACR_MIXOUTR_VOL_SHIFT 9 /* DACR_MIXOUTR_VOL - [11:9] */
1314#define WM8993_DACR_MIXOUTR_VOL_WIDTH 3 /* DACR_MIXOUTR_VOL - [11:9] */
1315#define WM8993_IN2LN_MIXOUTR_VOL_MASK 0x01C0 /* IN2LN_MIXOUTR_VOL - [8:6] */
1316#define WM8993_IN2LN_MIXOUTR_VOL_SHIFT 6 /* IN2LN_MIXOUTR_VOL - [8:6] */
1317#define WM8993_IN2LN_MIXOUTR_VOL_WIDTH 3 /* IN2LN_MIXOUTR_VOL - [8:6] */
1318#define WM8993_MIXINL_MIXOUTR_VOL_MASK 0x0038 /* MIXINL_MIXOUTR_VOL - [5:3] */
1319#define WM8993_MIXINL_MIXOUTR_VOL_SHIFT 3 /* MIXINL_MIXOUTR_VOL - [5:3] */
1320#define WM8993_MIXINL_MIXOUTR_VOL_WIDTH 3 /* MIXINL_MIXOUTR_VOL - [5:3] */
1321#define WM8993_MIXINR_MIXOUTR_VOL_MASK 0x0007 /* MIXINR_MIXOUTR_VOL - [2:0] */
1322#define WM8993_MIXINR_MIXOUTR_VOL_SHIFT 0 /* MIXINR_MIXOUTR_VOL - [2:0] */
1323#define WM8993_MIXINR_MIXOUTR_VOL_WIDTH 3 /* MIXINR_MIXOUTR_VOL - [2:0] */
1324
1325/*
1326 * R51 (0x33) - HPOUT2 Mixer
1327 */
1328#define WM8993_VRX_TO_HPOUT2 0x0020 /* VRX_TO_HPOUT2 */
1329#define WM8993_VRX_TO_HPOUT2_MASK 0x0020 /* VRX_TO_HPOUT2 */
1330#define WM8993_VRX_TO_HPOUT2_SHIFT 5 /* VRX_TO_HPOUT2 */
1331#define WM8993_VRX_TO_HPOUT2_WIDTH 1 /* VRX_TO_HPOUT2 */
1332#define WM8993_MIXOUTLVOL_TO_HPOUT2 0x0010 /* MIXOUTLVOL_TO_HPOUT2 */
1333#define WM8993_MIXOUTLVOL_TO_HPOUT2_MASK 0x0010 /* MIXOUTLVOL_TO_HPOUT2 */
1334#define WM8993_MIXOUTLVOL_TO_HPOUT2_SHIFT 4 /* MIXOUTLVOL_TO_HPOUT2 */
1335#define WM8993_MIXOUTLVOL_TO_HPOUT2_WIDTH 1 /* MIXOUTLVOL_TO_HPOUT2 */
1336#define WM8993_MIXOUTRVOL_TO_HPOUT2 0x0008 /* MIXOUTRVOL_TO_HPOUT2 */
1337#define WM8993_MIXOUTRVOL_TO_HPOUT2_MASK 0x0008 /* MIXOUTRVOL_TO_HPOUT2 */
1338#define WM8993_MIXOUTRVOL_TO_HPOUT2_SHIFT 3 /* MIXOUTRVOL_TO_HPOUT2 */
1339#define WM8993_MIXOUTRVOL_TO_HPOUT2_WIDTH 1 /* MIXOUTRVOL_TO_HPOUT2 */
1340
1341/*
1342 * R52 (0x34) - Line Mixer1
1343 */
1344#define WM8993_MIXOUTL_TO_LINEOUT1N 0x0040 /* MIXOUTL_TO_LINEOUT1N */
1345#define WM8993_MIXOUTL_TO_LINEOUT1N_MASK 0x0040 /* MIXOUTL_TO_LINEOUT1N */
1346#define WM8993_MIXOUTL_TO_LINEOUT1N_SHIFT 6 /* MIXOUTL_TO_LINEOUT1N */
1347#define WM8993_MIXOUTL_TO_LINEOUT1N_WIDTH 1 /* MIXOUTL_TO_LINEOUT1N */
1348#define WM8993_MIXOUTR_TO_LINEOUT1N 0x0020 /* MIXOUTR_TO_LINEOUT1N */
1349#define WM8993_MIXOUTR_TO_LINEOUT1N_MASK 0x0020 /* MIXOUTR_TO_LINEOUT1N */
1350#define WM8993_MIXOUTR_TO_LINEOUT1N_SHIFT 5 /* MIXOUTR_TO_LINEOUT1N */
1351#define WM8993_MIXOUTR_TO_LINEOUT1N_WIDTH 1 /* MIXOUTR_TO_LINEOUT1N */
1352#define WM8993_LINEOUT1_MODE 0x0010 /* LINEOUT1_MODE */
1353#define WM8993_LINEOUT1_MODE_MASK 0x0010 /* LINEOUT1_MODE */
1354#define WM8993_LINEOUT1_MODE_SHIFT 4 /* LINEOUT1_MODE */
1355#define WM8993_LINEOUT1_MODE_WIDTH 1 /* LINEOUT1_MODE */
1356#define WM8993_IN1R_TO_LINEOUT1P 0x0004 /* IN1R_TO_LINEOUT1P */
1357#define WM8993_IN1R_TO_LINEOUT1P_MASK 0x0004 /* IN1R_TO_LINEOUT1P */
1358#define WM8993_IN1R_TO_LINEOUT1P_SHIFT 2 /* IN1R_TO_LINEOUT1P */
1359#define WM8993_IN1R_TO_LINEOUT1P_WIDTH 1 /* IN1R_TO_LINEOUT1P */
1360#define WM8993_IN1L_TO_LINEOUT1P 0x0002 /* IN1L_TO_LINEOUT1P */
1361#define WM8993_IN1L_TO_LINEOUT1P_MASK 0x0002 /* IN1L_TO_LINEOUT1P */
1362#define WM8993_IN1L_TO_LINEOUT1P_SHIFT 1 /* IN1L_TO_LINEOUT1P */
1363#define WM8993_IN1L_TO_LINEOUT1P_WIDTH 1 /* IN1L_TO_LINEOUT1P */
1364#define WM8993_MIXOUTL_TO_LINEOUT1P 0x0001 /* MIXOUTL_TO_LINEOUT1P */
1365#define WM8993_MIXOUTL_TO_LINEOUT1P_MASK 0x0001 /* MIXOUTL_TO_LINEOUT1P */
1366#define WM8993_MIXOUTL_TO_LINEOUT1P_SHIFT 0 /* MIXOUTL_TO_LINEOUT1P */
1367#define WM8993_MIXOUTL_TO_LINEOUT1P_WIDTH 1 /* MIXOUTL_TO_LINEOUT1P */
1368
1369/*
1370 * R53 (0x35) - Line Mixer2
1371 */
1372#define WM8993_MIXOUTR_TO_LINEOUT2N 0x0040 /* MIXOUTR_TO_LINEOUT2N */
1373#define WM8993_MIXOUTR_TO_LINEOUT2N_MASK 0x0040 /* MIXOUTR_TO_LINEOUT2N */
1374#define WM8993_MIXOUTR_TO_LINEOUT2N_SHIFT 6 /* MIXOUTR_TO_LINEOUT2N */
1375#define WM8993_MIXOUTR_TO_LINEOUT2N_WIDTH 1 /* MIXOUTR_TO_LINEOUT2N */
1376#define WM8993_MIXOUTL_TO_LINEOUT2N 0x0020 /* MIXOUTL_TO_LINEOUT2N */
1377#define WM8993_MIXOUTL_TO_LINEOUT2N_MASK 0x0020 /* MIXOUTL_TO_LINEOUT2N */
1378#define WM8993_MIXOUTL_TO_LINEOUT2N_SHIFT 5 /* MIXOUTL_TO_LINEOUT2N */
1379#define WM8993_MIXOUTL_TO_LINEOUT2N_WIDTH 1 /* MIXOUTL_TO_LINEOUT2N */
1380#define WM8993_LINEOUT2_MODE 0x0010 /* LINEOUT2_MODE */
1381#define WM8993_LINEOUT2_MODE_MASK 0x0010 /* LINEOUT2_MODE */
1382#define WM8993_LINEOUT2_MODE_SHIFT 4 /* LINEOUT2_MODE */
1383#define WM8993_LINEOUT2_MODE_WIDTH 1 /* LINEOUT2_MODE */
1384#define WM8993_IN1L_TO_LINEOUT2P 0x0004 /* IN1L_TO_LINEOUT2P */
1385#define WM8993_IN1L_TO_LINEOUT2P_MASK 0x0004 /* IN1L_TO_LINEOUT2P */
1386#define WM8993_IN1L_TO_LINEOUT2P_SHIFT 2 /* IN1L_TO_LINEOUT2P */
1387#define WM8993_IN1L_TO_LINEOUT2P_WIDTH 1 /* IN1L_TO_LINEOUT2P */
1388#define WM8993_IN1R_TO_LINEOUT2P 0x0002 /* IN1R_TO_LINEOUT2P */
1389#define WM8993_IN1R_TO_LINEOUT2P_MASK 0x0002 /* IN1R_TO_LINEOUT2P */
1390#define WM8993_IN1R_TO_LINEOUT2P_SHIFT 1 /* IN1R_TO_LINEOUT2P */
1391#define WM8993_IN1R_TO_LINEOUT2P_WIDTH 1 /* IN1R_TO_LINEOUT2P */
1392#define WM8993_MIXOUTR_TO_LINEOUT2P 0x0001 /* MIXOUTR_TO_LINEOUT2P */
1393#define WM8993_MIXOUTR_TO_LINEOUT2P_MASK 0x0001 /* MIXOUTR_TO_LINEOUT2P */
1394#define WM8993_MIXOUTR_TO_LINEOUT2P_SHIFT 0 /* MIXOUTR_TO_LINEOUT2P */
1395#define WM8993_MIXOUTR_TO_LINEOUT2P_WIDTH 1 /* MIXOUTR_TO_LINEOUT2P */
1396
1397/*
1398 * R54 (0x36) - Speaker Mixer
1399 */
1400#define WM8993_SPKAB_REF_SEL 0x0100 /* SPKAB_REF_SEL */
1401#define WM8993_SPKAB_REF_SEL_MASK 0x0100 /* SPKAB_REF_SEL */
1402#define WM8993_SPKAB_REF_SEL_SHIFT 8 /* SPKAB_REF_SEL */
1403#define WM8993_SPKAB_REF_SEL_WIDTH 1 /* SPKAB_REF_SEL */
1404#define WM8993_MIXINL_TO_SPKMIXL 0x0080 /* MIXINL_TO_SPKMIXL */
1405#define WM8993_MIXINL_TO_SPKMIXL_MASK 0x0080 /* MIXINL_TO_SPKMIXL */
1406#define WM8993_MIXINL_TO_SPKMIXL_SHIFT 7 /* MIXINL_TO_SPKMIXL */
1407#define WM8993_MIXINL_TO_SPKMIXL_WIDTH 1 /* MIXINL_TO_SPKMIXL */
1408#define WM8993_MIXINR_TO_SPKMIXR 0x0040 /* MIXINR_TO_SPKMIXR */
1409#define WM8993_MIXINR_TO_SPKMIXR_MASK 0x0040 /* MIXINR_TO_SPKMIXR */
1410#define WM8993_MIXINR_TO_SPKMIXR_SHIFT 6 /* MIXINR_TO_SPKMIXR */
1411#define WM8993_MIXINR_TO_SPKMIXR_WIDTH 1 /* MIXINR_TO_SPKMIXR */
1412#define WM8993_IN1LP_TO_SPKMIXL 0x0020 /* IN1LP_TO_SPKMIXL */
1413#define WM8993_IN1LP_TO_SPKMIXL_MASK 0x0020 /* IN1LP_TO_SPKMIXL */
1414#define WM8993_IN1LP_TO_SPKMIXL_SHIFT 5 /* IN1LP_TO_SPKMIXL */
1415#define WM8993_IN1LP_TO_SPKMIXL_WIDTH 1 /* IN1LP_TO_SPKMIXL */
1416#define WM8993_IN1RP_TO_SPKMIXR 0x0010 /* IN1RP_TO_SPKMIXR */
1417#define WM8993_IN1RP_TO_SPKMIXR_MASK 0x0010 /* IN1RP_TO_SPKMIXR */
1418#define WM8993_IN1RP_TO_SPKMIXR_SHIFT 4 /* IN1RP_TO_SPKMIXR */
1419#define WM8993_IN1RP_TO_SPKMIXR_WIDTH 1 /* IN1RP_TO_SPKMIXR */
1420#define WM8993_MIXOUTL_TO_SPKMIXL 0x0008 /* MIXOUTL_TO_SPKMIXL */
1421#define WM8993_MIXOUTL_TO_SPKMIXL_MASK 0x0008 /* MIXOUTL_TO_SPKMIXL */
1422#define WM8993_MIXOUTL_TO_SPKMIXL_SHIFT 3 /* MIXOUTL_TO_SPKMIXL */
1423#define WM8993_MIXOUTL_TO_SPKMIXL_WIDTH 1 /* MIXOUTL_TO_SPKMIXL */
1424#define WM8993_MIXOUTR_TO_SPKMIXR 0x0004 /* MIXOUTR_TO_SPKMIXR */
1425#define WM8993_MIXOUTR_TO_SPKMIXR_MASK 0x0004 /* MIXOUTR_TO_SPKMIXR */
1426#define WM8993_MIXOUTR_TO_SPKMIXR_SHIFT 2 /* MIXOUTR_TO_SPKMIXR */
1427#define WM8993_MIXOUTR_TO_SPKMIXR_WIDTH 1 /* MIXOUTR_TO_SPKMIXR */
1428#define WM8993_DACL_TO_SPKMIXL 0x0002 /* DACL_TO_SPKMIXL */
1429#define WM8993_DACL_TO_SPKMIXL_MASK 0x0002 /* DACL_TO_SPKMIXL */
1430#define WM8993_DACL_TO_SPKMIXL_SHIFT 1 /* DACL_TO_SPKMIXL */
1431#define WM8993_DACL_TO_SPKMIXL_WIDTH 1 /* DACL_TO_SPKMIXL */
1432#define WM8993_DACR_TO_SPKMIXR 0x0001 /* DACR_TO_SPKMIXR */
1433#define WM8993_DACR_TO_SPKMIXR_MASK 0x0001 /* DACR_TO_SPKMIXR */
1434#define WM8993_DACR_TO_SPKMIXR_SHIFT 0 /* DACR_TO_SPKMIXR */
1435#define WM8993_DACR_TO_SPKMIXR_WIDTH 1 /* DACR_TO_SPKMIXR */
1436
1437/*
1438 * R55 (0x37) - Additional Control
1439 */
1440#define WM8993_LINEOUT1_FB 0x0080 /* LINEOUT1_FB */
1441#define WM8993_LINEOUT1_FB_MASK 0x0080 /* LINEOUT1_FB */
1442#define WM8993_LINEOUT1_FB_SHIFT 7 /* LINEOUT1_FB */
1443#define WM8993_LINEOUT1_FB_WIDTH 1 /* LINEOUT1_FB */
1444#define WM8993_LINEOUT2_FB 0x0040 /* LINEOUT2_FB */
1445#define WM8993_LINEOUT2_FB_MASK 0x0040 /* LINEOUT2_FB */
1446#define WM8993_LINEOUT2_FB_SHIFT 6 /* LINEOUT2_FB */
1447#define WM8993_LINEOUT2_FB_WIDTH 1 /* LINEOUT2_FB */
1448#define WM8993_VROI 0x0001 /* VROI */
1449#define WM8993_VROI_MASK 0x0001 /* VROI */
1450#define WM8993_VROI_SHIFT 0 /* VROI */
1451#define WM8993_VROI_WIDTH 1 /* VROI */
1452
1453/*
1454 * R56 (0x38) - AntiPOP1
1455 */
1456#define WM8993_LINEOUT_VMID_BUF_ENA 0x0080 /* LINEOUT_VMID_BUF_ENA */
1457#define WM8993_LINEOUT_VMID_BUF_ENA_MASK 0x0080 /* LINEOUT_VMID_BUF_ENA */
1458#define WM8993_LINEOUT_VMID_BUF_ENA_SHIFT 7 /* LINEOUT_VMID_BUF_ENA */
1459#define WM8993_LINEOUT_VMID_BUF_ENA_WIDTH 1 /* LINEOUT_VMID_BUF_ENA */
1460#define WM8993_HPOUT2_IN_ENA 0x0040 /* HPOUT2_IN_ENA */
1461#define WM8993_HPOUT2_IN_ENA_MASK 0x0040 /* HPOUT2_IN_ENA */
1462#define WM8993_HPOUT2_IN_ENA_SHIFT 6 /* HPOUT2_IN_ENA */
1463#define WM8993_HPOUT2_IN_ENA_WIDTH 1 /* HPOUT2_IN_ENA */
1464#define WM8993_LINEOUT1_DISCH 0x0020 /* LINEOUT1_DISCH */
1465#define WM8993_LINEOUT1_DISCH_MASK 0x0020 /* LINEOUT1_DISCH */
1466#define WM8993_LINEOUT1_DISCH_SHIFT 5 /* LINEOUT1_DISCH */
1467#define WM8993_LINEOUT1_DISCH_WIDTH 1 /* LINEOUT1_DISCH */
1468#define WM8993_LINEOUT2_DISCH 0x0010 /* LINEOUT2_DISCH */
1469#define WM8993_LINEOUT2_DISCH_MASK 0x0010 /* LINEOUT2_DISCH */
1470#define WM8993_LINEOUT2_DISCH_SHIFT 4 /* LINEOUT2_DISCH */
1471#define WM8993_LINEOUT2_DISCH_WIDTH 1 /* LINEOUT2_DISCH */
1472
1473/*
1474 * R57 (0x39) - AntiPOP2
1475 */
1476#define WM8993_VMID_RAMP_MASK 0x0060 /* VMID_RAMP - [6:5] */
1477#define WM8993_VMID_RAMP_SHIFT 5 /* VMID_RAMP - [6:5] */
1478#define WM8993_VMID_RAMP_WIDTH 2 /* VMID_RAMP - [6:5] */
1479#define WM8993_VMID_BUF_ENA 0x0008 /* VMID_BUF_ENA */
1480#define WM8993_VMID_BUF_ENA_MASK 0x0008 /* VMID_BUF_ENA */
1481#define WM8993_VMID_BUF_ENA_SHIFT 3 /* VMID_BUF_ENA */
1482#define WM8993_VMID_BUF_ENA_WIDTH 1 /* VMID_BUF_ENA */
1483#define WM8993_STARTUP_BIAS_ENA 0x0004 /* STARTUP_BIAS_ENA */
1484#define WM8993_STARTUP_BIAS_ENA_MASK 0x0004 /* STARTUP_BIAS_ENA */
1485#define WM8993_STARTUP_BIAS_ENA_SHIFT 2 /* STARTUP_BIAS_ENA */
1486#define WM8993_STARTUP_BIAS_ENA_WIDTH 1 /* STARTUP_BIAS_ENA */
1487#define WM8993_BIAS_SRC 0x0002 /* BIAS_SRC */
1488#define WM8993_BIAS_SRC_MASK 0x0002 /* BIAS_SRC */
1489#define WM8993_BIAS_SRC_SHIFT 1 /* BIAS_SRC */
1490#define WM8993_BIAS_SRC_WIDTH 1 /* BIAS_SRC */
1491#define WM8993_VMID_DISCH 0x0001 /* VMID_DISCH */
1492#define WM8993_VMID_DISCH_MASK 0x0001 /* VMID_DISCH */
1493#define WM8993_VMID_DISCH_SHIFT 0 /* VMID_DISCH */
1494#define WM8993_VMID_DISCH_WIDTH 1 /* VMID_DISCH */
1495
1496/*
1497 * R58 (0x3A) - MICBIAS
1498 */
1499#define WM8993_JD_SCTHR_MASK 0x00C0 /* JD_SCTHR - [7:6] */
1500#define WM8993_JD_SCTHR_SHIFT 6 /* JD_SCTHR - [7:6] */
1501#define WM8993_JD_SCTHR_WIDTH 2 /* JD_SCTHR - [7:6] */
1502#define WM8993_JD_THR_MASK 0x0030 /* JD_THR - [5:4] */
1503#define WM8993_JD_THR_SHIFT 4 /* JD_THR - [5:4] */
1504#define WM8993_JD_THR_WIDTH 2 /* JD_THR - [5:4] */
1505#define WM8993_JD_ENA 0x0004 /* JD_ENA */
1506#define WM8993_JD_ENA_MASK 0x0004 /* JD_ENA */
1507#define WM8993_JD_ENA_SHIFT 2 /* JD_ENA */
1508#define WM8993_JD_ENA_WIDTH 1 /* JD_ENA */
1509#define WM8993_MICB2_LVL 0x0002 /* MICB2_LVL */
1510#define WM8993_MICB2_LVL_MASK 0x0002 /* MICB2_LVL */
1511#define WM8993_MICB2_LVL_SHIFT 1 /* MICB2_LVL */
1512#define WM8993_MICB2_LVL_WIDTH 1 /* MICB2_LVL */
1513#define WM8993_MICB1_LVL 0x0001 /* MICB1_LVL */
1514#define WM8993_MICB1_LVL_MASK 0x0001 /* MICB1_LVL */
1515#define WM8993_MICB1_LVL_SHIFT 0 /* MICB1_LVL */
1516#define WM8993_MICB1_LVL_WIDTH 1 /* MICB1_LVL */
1517
1518/*
1519 * R60 (0x3C) - FLL Control 1
1520 */
1521#define WM8993_FLL_FRAC 0x0004 /* FLL_FRAC */
1522#define WM8993_FLL_FRAC_MASK 0x0004 /* FLL_FRAC */
1523#define WM8993_FLL_FRAC_SHIFT 2 /* FLL_FRAC */
1524#define WM8993_FLL_FRAC_WIDTH 1 /* FLL_FRAC */
1525#define WM8993_FLL_OSC_ENA 0x0002 /* FLL_OSC_ENA */
1526#define WM8993_FLL_OSC_ENA_MASK 0x0002 /* FLL_OSC_ENA */
1527#define WM8993_FLL_OSC_ENA_SHIFT 1 /* FLL_OSC_ENA */
1528#define WM8993_FLL_OSC_ENA_WIDTH 1 /* FLL_OSC_ENA */
1529#define WM8993_FLL_ENA 0x0001 /* FLL_ENA */
1530#define WM8993_FLL_ENA_MASK 0x0001 /* FLL_ENA */
1531#define WM8993_FLL_ENA_SHIFT 0 /* FLL_ENA */
1532#define WM8993_FLL_ENA_WIDTH 1 /* FLL_ENA */
1533
1534/*
1535 * R61 (0x3D) - FLL Control 2
1536 */
1537#define WM8993_FLL_OUTDIV_MASK 0x0700 /* FLL_OUTDIV - [10:8] */
1538#define WM8993_FLL_OUTDIV_SHIFT 8 /* FLL_OUTDIV - [10:8] */
1539#define WM8993_FLL_OUTDIV_WIDTH 3 /* FLL_OUTDIV - [10:8] */
1540#define WM8993_FLL_CTRL_RATE_MASK 0x0070 /* FLL_CTRL_RATE - [6:4] */
1541#define WM8993_FLL_CTRL_RATE_SHIFT 4 /* FLL_CTRL_RATE - [6:4] */
1542#define WM8993_FLL_CTRL_RATE_WIDTH 3 /* FLL_CTRL_RATE - [6:4] */
1543#define WM8993_FLL_FRATIO_MASK 0x0007 /* FLL_FRATIO - [2:0] */
1544#define WM8993_FLL_FRATIO_SHIFT 0 /* FLL_FRATIO - [2:0] */
1545#define WM8993_FLL_FRATIO_WIDTH 3 /* FLL_FRATIO - [2:0] */
1546
1547/*
1548 * R62 (0x3E) - FLL Control 3
1549 */
1550#define WM8993_FLL_K_MASK 0xFFFF /* FLL_K - [15:0] */
1551#define WM8993_FLL_K_SHIFT 0 /* FLL_K - [15:0] */
1552#define WM8993_FLL_K_WIDTH 16 /* FLL_K - [15:0] */
1553
1554/*
1555 * R63 (0x3F) - FLL Control 4
1556 */
1557#define WM8993_FLL_N_MASK 0x7FE0 /* FLL_N - [14:5] */
1558#define WM8993_FLL_N_SHIFT 5 /* FLL_N - [14:5] */
1559#define WM8993_FLL_N_WIDTH 10 /* FLL_N - [14:5] */
1560#define WM8993_FLL_GAIN_MASK 0x000F /* FLL_GAIN - [3:0] */
1561#define WM8993_FLL_GAIN_SHIFT 0 /* FLL_GAIN - [3:0] */
1562#define WM8993_FLL_GAIN_WIDTH 4 /* FLL_GAIN - [3:0] */
1563
1564/*
1565 * R64 (0x40) - FLL Control 5
1566 */
1567#define WM8993_FLL_FRC_NCO_VAL_MASK 0x1F80 /* FLL_FRC_NCO_VAL - [12:7] */
1568#define WM8993_FLL_FRC_NCO_VAL_SHIFT 7 /* FLL_FRC_NCO_VAL - [12:7] */
1569#define WM8993_FLL_FRC_NCO_VAL_WIDTH 6 /* FLL_FRC_NCO_VAL - [12:7] */
1570#define WM8993_FLL_FRC_NCO 0x0040 /* FLL_FRC_NCO */
1571#define WM8993_FLL_FRC_NCO_MASK 0x0040 /* FLL_FRC_NCO */
1572#define WM8993_FLL_FRC_NCO_SHIFT 6 /* FLL_FRC_NCO */
1573#define WM8993_FLL_FRC_NCO_WIDTH 1 /* FLL_FRC_NCO */
1574#define WM8993_FLL_CLK_REF_DIV_MASK 0x0018 /* FLL_CLK_REF_DIV - [4:3] */
1575#define WM8993_FLL_CLK_REF_DIV_SHIFT 3 /* FLL_CLK_REF_DIV - [4:3] */
1576#define WM8993_FLL_CLK_REF_DIV_WIDTH 2 /* FLL_CLK_REF_DIV - [4:3] */
1577#define WM8993_FLL_CLK_SRC_MASK 0x0003 /* FLL_CLK_SRC - [1:0] */
1578#define WM8993_FLL_CLK_SRC_SHIFT 0 /* FLL_CLK_SRC - [1:0] */
1579#define WM8993_FLL_CLK_SRC_WIDTH 2 /* FLL_CLK_SRC - [1:0] */
1580
1581/*
1582 * R65 (0x41) - Clocking 3
1583 */
1584#define WM8993_CLK_DCS_DIV_MASK 0x3C00 /* CLK_DCS_DIV - [13:10] */
1585#define WM8993_CLK_DCS_DIV_SHIFT 10 /* CLK_DCS_DIV - [13:10] */
1586#define WM8993_CLK_DCS_DIV_WIDTH 4 /* CLK_DCS_DIV - [13:10] */
1587#define WM8993_SAMPLE_RATE_MASK 0x0380 /* SAMPLE_RATE - [9:7] */
1588#define WM8993_SAMPLE_RATE_SHIFT 7 /* SAMPLE_RATE - [9:7] */
1589#define WM8993_SAMPLE_RATE_WIDTH 3 /* SAMPLE_RATE - [9:7] */
1590#define WM8993_CLK_SYS_RATE_MASK 0x001E /* CLK_SYS_RATE - [4:1] */
1591#define WM8993_CLK_SYS_RATE_SHIFT 1 /* CLK_SYS_RATE - [4:1] */
1592#define WM8993_CLK_SYS_RATE_WIDTH 4 /* CLK_SYS_RATE - [4:1] */
1593#define WM8993_CLK_DSP_ENA 0x0001 /* CLK_DSP_ENA */
1594#define WM8993_CLK_DSP_ENA_MASK 0x0001 /* CLK_DSP_ENA */
1595#define WM8993_CLK_DSP_ENA_SHIFT 0 /* CLK_DSP_ENA */
1596#define WM8993_CLK_DSP_ENA_WIDTH 1 /* CLK_DSP_ENA */
1597
1598/*
1599 * R66 (0x42) - Clocking 4
1600 */
1601#define WM8993_DAC_DIV4 0x0200 /* DAC_DIV4 */
1602#define WM8993_DAC_DIV4_MASK 0x0200 /* DAC_DIV4 */
1603#define WM8993_DAC_DIV4_SHIFT 9 /* DAC_DIV4 */
1604#define WM8993_DAC_DIV4_WIDTH 1 /* DAC_DIV4 */
1605#define WM8993_CLK_256K_DIV_MASK 0x007E /* CLK_256K_DIV - [6:1] */
1606#define WM8993_CLK_256K_DIV_SHIFT 1 /* CLK_256K_DIV - [6:1] */
1607#define WM8993_CLK_256K_DIV_WIDTH 6 /* CLK_256K_DIV - [6:1] */
1608#define WM8993_SR_MODE 0x0001 /* SR_MODE */
1609#define WM8993_SR_MODE_MASK 0x0001 /* SR_MODE */
1610#define WM8993_SR_MODE_SHIFT 0 /* SR_MODE */
1611#define WM8993_SR_MODE_WIDTH 1 /* SR_MODE */
1612
1613/*
1614 * R67 (0x43) - MW Slave Control
1615 */
1616#define WM8993_MASK_WRITE_ENA 0x0001 /* MASK_WRITE_ENA */
1617#define WM8993_MASK_WRITE_ENA_MASK 0x0001 /* MASK_WRITE_ENA */
1618#define WM8993_MASK_WRITE_ENA_SHIFT 0 /* MASK_WRITE_ENA */
1619#define WM8993_MASK_WRITE_ENA_WIDTH 1 /* MASK_WRITE_ENA */
1620
1621/*
1622 * R69 (0x45) - Bus Control 1
1623 */
1624#define WM8993_CLK_SYS_ENA 0x0002 /* CLK_SYS_ENA */
1625#define WM8993_CLK_SYS_ENA_MASK 0x0002 /* CLK_SYS_ENA */
1626#define WM8993_CLK_SYS_ENA_SHIFT 1 /* CLK_SYS_ENA */
1627#define WM8993_CLK_SYS_ENA_WIDTH 1 /* CLK_SYS_ENA */
1628
1629/*
1630 * R70 (0x46) - Write Sequencer 0
1631 */
1632#define WM8993_WSEQ_ENA 0x0100 /* WSEQ_ENA */
1633#define WM8993_WSEQ_ENA_MASK 0x0100 /* WSEQ_ENA */
1634#define WM8993_WSEQ_ENA_SHIFT 8 /* WSEQ_ENA */
1635#define WM8993_WSEQ_ENA_WIDTH 1 /* WSEQ_ENA */
1636#define WM8993_WSEQ_WRITE_INDEX_MASK 0x001F /* WSEQ_WRITE_INDEX - [4:0] */
1637#define WM8993_WSEQ_WRITE_INDEX_SHIFT 0 /* WSEQ_WRITE_INDEX - [4:0] */
1638#define WM8993_WSEQ_WRITE_INDEX_WIDTH 5 /* WSEQ_WRITE_INDEX - [4:0] */
1639
1640/*
1641 * R71 (0x47) - Write Sequencer 1
1642 */
1643#define WM8993_WSEQ_DATA_WIDTH_MASK 0x7000 /* WSEQ_DATA_WIDTH - [14:12] */
1644#define WM8993_WSEQ_DATA_WIDTH_SHIFT 12 /* WSEQ_DATA_WIDTH - [14:12] */
1645#define WM8993_WSEQ_DATA_WIDTH_WIDTH 3 /* WSEQ_DATA_WIDTH - [14:12] */
1646#define WM8993_WSEQ_DATA_START_MASK 0x0F00 /* WSEQ_DATA_START - [11:8] */
1647#define WM8993_WSEQ_DATA_START_SHIFT 8 /* WSEQ_DATA_START - [11:8] */
1648#define WM8993_WSEQ_DATA_START_WIDTH 4 /* WSEQ_DATA_START - [11:8] */
1649#define WM8993_WSEQ_ADDR_MASK 0x00FF /* WSEQ_ADDR - [7:0] */
1650#define WM8993_WSEQ_ADDR_SHIFT 0 /* WSEQ_ADDR - [7:0] */
1651#define WM8993_WSEQ_ADDR_WIDTH 8 /* WSEQ_ADDR - [7:0] */
1652
1653/*
1654 * R72 (0x48) - Write Sequencer 2
1655 */
1656#define WM8993_WSEQ_EOS 0x4000 /* WSEQ_EOS */
1657#define WM8993_WSEQ_EOS_MASK 0x4000 /* WSEQ_EOS */
1658#define WM8993_WSEQ_EOS_SHIFT 14 /* WSEQ_EOS */
1659#define WM8993_WSEQ_EOS_WIDTH 1 /* WSEQ_EOS */
1660#define WM8993_WSEQ_DELAY_MASK 0x0F00 /* WSEQ_DELAY - [11:8] */
1661#define WM8993_WSEQ_DELAY_SHIFT 8 /* WSEQ_DELAY - [11:8] */
1662#define WM8993_WSEQ_DELAY_WIDTH 4 /* WSEQ_DELAY - [11:8] */
1663#define WM8993_WSEQ_DATA_MASK 0x00FF /* WSEQ_DATA - [7:0] */
1664#define WM8993_WSEQ_DATA_SHIFT 0 /* WSEQ_DATA - [7:0] */
1665#define WM8993_WSEQ_DATA_WIDTH 8 /* WSEQ_DATA - [7:0] */
1666
1667/*
1668 * R73 (0x49) - Write Sequencer 3
1669 */
1670#define WM8993_WSEQ_ABORT 0x0200 /* WSEQ_ABORT */
1671#define WM8993_WSEQ_ABORT_MASK 0x0200 /* WSEQ_ABORT */
1672#define WM8993_WSEQ_ABORT_SHIFT 9 /* WSEQ_ABORT */
1673#define WM8993_WSEQ_ABORT_WIDTH 1 /* WSEQ_ABORT */
1674#define WM8993_WSEQ_START 0x0100 /* WSEQ_START */
1675#define WM8993_WSEQ_START_MASK 0x0100 /* WSEQ_START */
1676#define WM8993_WSEQ_START_SHIFT 8 /* WSEQ_START */
1677#define WM8993_WSEQ_START_WIDTH 1 /* WSEQ_START */
1678#define WM8993_WSEQ_START_INDEX_MASK 0x003F /* WSEQ_START_INDEX - [5:0] */
1679#define WM8993_WSEQ_START_INDEX_SHIFT 0 /* WSEQ_START_INDEX - [5:0] */
1680#define WM8993_WSEQ_START_INDEX_WIDTH 6 /* WSEQ_START_INDEX - [5:0] */
1681
1682/*
1683 * R74 (0x4A) - Write Sequencer 4
1684 */
1685#define WM8993_WSEQ_BUSY 0x0001 /* WSEQ_BUSY */
1686#define WM8993_WSEQ_BUSY_MASK 0x0001 /* WSEQ_BUSY */
1687#define WM8993_WSEQ_BUSY_SHIFT 0 /* WSEQ_BUSY */
1688#define WM8993_WSEQ_BUSY_WIDTH 1 /* WSEQ_BUSY */
1689
1690/*
1691 * R75 (0x4B) - Write Sequencer 5
1692 */
1693#define WM8993_WSEQ_CURRENT_INDEX_MASK 0x003F /* WSEQ_CURRENT_INDEX - [5:0] */
1694#define WM8993_WSEQ_CURRENT_INDEX_SHIFT 0 /* WSEQ_CURRENT_INDEX - [5:0] */
1695#define WM8993_WSEQ_CURRENT_INDEX_WIDTH 6 /* WSEQ_CURRENT_INDEX - [5:0] */
1696
1697/*
1698 * R76 (0x4C) - Charge Pump 1
1699 */
1700#define WM8993_CP_ENA 0x8000 /* CP_ENA */
1701#define WM8993_CP_ENA_MASK 0x8000 /* CP_ENA */
1702#define WM8993_CP_ENA_SHIFT 15 /* CP_ENA */
1703#define WM8993_CP_ENA_WIDTH 1 /* CP_ENA */
1704
1705/*
1706 * R81 (0x51) - Class W 0
1707 */
1708#define WM8993_CP_DYN_FREQ 0x0002 /* CP_DYN_FREQ */
1709#define WM8993_CP_DYN_FREQ_MASK 0x0002 /* CP_DYN_FREQ */
1710#define WM8993_CP_DYN_FREQ_SHIFT 1 /* CP_DYN_FREQ */
1711#define WM8993_CP_DYN_FREQ_WIDTH 1 /* CP_DYN_FREQ */
1712#define WM8993_CP_DYN_V 0x0001 /* CP_DYN_V */
1713#define WM8993_CP_DYN_V_MASK 0x0001 /* CP_DYN_V */
1714#define WM8993_CP_DYN_V_SHIFT 0 /* CP_DYN_V */
1715#define WM8993_CP_DYN_V_WIDTH 1 /* CP_DYN_V */
1716
1717/*
1718 * R84 (0x54) - DC Servo 0
1719 */
1720#define WM8993_DCS_TRIG_SINGLE_1 0x2000 /* DCS_TRIG_SINGLE_1 */
1721#define WM8993_DCS_TRIG_SINGLE_1_MASK 0x2000 /* DCS_TRIG_SINGLE_1 */
1722#define WM8993_DCS_TRIG_SINGLE_1_SHIFT 13 /* DCS_TRIG_SINGLE_1 */
1723#define WM8993_DCS_TRIG_SINGLE_1_WIDTH 1 /* DCS_TRIG_SINGLE_1 */
1724#define WM8993_DCS_TRIG_SINGLE_0 0x1000 /* DCS_TRIG_SINGLE_0 */
1725#define WM8993_DCS_TRIG_SINGLE_0_MASK 0x1000 /* DCS_TRIG_SINGLE_0 */
1726#define WM8993_DCS_TRIG_SINGLE_0_SHIFT 12 /* DCS_TRIG_SINGLE_0 */
1727#define WM8993_DCS_TRIG_SINGLE_0_WIDTH 1 /* DCS_TRIG_SINGLE_0 */
1728#define WM8993_DCS_TRIG_SERIES_1 0x0200 /* DCS_TRIG_SERIES_1 */
1729#define WM8993_DCS_TRIG_SERIES_1_MASK 0x0200 /* DCS_TRIG_SERIES_1 */
1730#define WM8993_DCS_TRIG_SERIES_1_SHIFT 9 /* DCS_TRIG_SERIES_1 */
1731#define WM8993_DCS_TRIG_SERIES_1_WIDTH 1 /* DCS_TRIG_SERIES_1 */
1732#define WM8993_DCS_TRIG_SERIES_0 0x0100 /* DCS_TRIG_SERIES_0 */
1733#define WM8993_DCS_TRIG_SERIES_0_MASK 0x0100 /* DCS_TRIG_SERIES_0 */
1734#define WM8993_DCS_TRIG_SERIES_0_SHIFT 8 /* DCS_TRIG_SERIES_0 */
1735#define WM8993_DCS_TRIG_SERIES_0_WIDTH 1 /* DCS_TRIG_SERIES_0 */
1736#define WM8993_DCS_TRIG_STARTUP_1 0x0020 /* DCS_TRIG_STARTUP_1 */
1737#define WM8993_DCS_TRIG_STARTUP_1_MASK 0x0020 /* DCS_TRIG_STARTUP_1 */
1738#define WM8993_DCS_TRIG_STARTUP_1_SHIFT 5 /* DCS_TRIG_STARTUP_1 */
1739#define WM8993_DCS_TRIG_STARTUP_1_WIDTH 1 /* DCS_TRIG_STARTUP_1 */
1740#define WM8993_DCS_TRIG_STARTUP_0 0x0010 /* DCS_TRIG_STARTUP_0 */
1741#define WM8993_DCS_TRIG_STARTUP_0_MASK 0x0010 /* DCS_TRIG_STARTUP_0 */
1742#define WM8993_DCS_TRIG_STARTUP_0_SHIFT 4 /* DCS_TRIG_STARTUP_0 */
1743#define WM8993_DCS_TRIG_STARTUP_0_WIDTH 1 /* DCS_TRIG_STARTUP_0 */
1744#define WM8993_DCS_TRIG_DAC_WR_1 0x0008 /* DCS_TRIG_DAC_WR_1 */
1745#define WM8993_DCS_TRIG_DAC_WR_1_MASK 0x0008 /* DCS_TRIG_DAC_WR_1 */
1746#define WM8993_DCS_TRIG_DAC_WR_1_SHIFT 3 /* DCS_TRIG_DAC_WR_1 */
1747#define WM8993_DCS_TRIG_DAC_WR_1_WIDTH 1 /* DCS_TRIG_DAC_WR_1 */
1748#define WM8993_DCS_TRIG_DAC_WR_0 0x0004 /* DCS_TRIG_DAC_WR_0 */
1749#define WM8993_DCS_TRIG_DAC_WR_0_MASK 0x0004 /* DCS_TRIG_DAC_WR_0 */
1750#define WM8993_DCS_TRIG_DAC_WR_0_SHIFT 2 /* DCS_TRIG_DAC_WR_0 */
1751#define WM8993_DCS_TRIG_DAC_WR_0_WIDTH 1 /* DCS_TRIG_DAC_WR_0 */
1752#define WM8993_DCS_ENA_CHAN_1 0x0002 /* DCS_ENA_CHAN_1 */
1753#define WM8993_DCS_ENA_CHAN_1_MASK 0x0002 /* DCS_ENA_CHAN_1 */
1754#define WM8993_DCS_ENA_CHAN_1_SHIFT 1 /* DCS_ENA_CHAN_1 */
1755#define WM8993_DCS_ENA_CHAN_1_WIDTH 1 /* DCS_ENA_CHAN_1 */
1756#define WM8993_DCS_ENA_CHAN_0 0x0001 /* DCS_ENA_CHAN_0 */
1757#define WM8993_DCS_ENA_CHAN_0_MASK 0x0001 /* DCS_ENA_CHAN_0 */
1758#define WM8993_DCS_ENA_CHAN_0_SHIFT 0 /* DCS_ENA_CHAN_0 */
1759#define WM8993_DCS_ENA_CHAN_0_WIDTH 1 /* DCS_ENA_CHAN_0 */
1760
1761/*
1762 * R85 (0x55) - DC Servo 1
1763 */
1764#define WM8993_DCS_SERIES_NO_01_MASK 0x0FE0 /* DCS_SERIES_NO_01 - [11:5] */
1765#define WM8993_DCS_SERIES_NO_01_SHIFT 5 /* DCS_SERIES_NO_01 - [11:5] */
1766#define WM8993_DCS_SERIES_NO_01_WIDTH 7 /* DCS_SERIES_NO_01 - [11:5] */
1767#define WM8993_DCS_TIMER_PERIOD_01_MASK 0x000F /* DCS_TIMER_PERIOD_01 - [3:0] */
1768#define WM8993_DCS_TIMER_PERIOD_01_SHIFT 0 /* DCS_TIMER_PERIOD_01 - [3:0] */
1769#define WM8993_DCS_TIMER_PERIOD_01_WIDTH 4 /* DCS_TIMER_PERIOD_01 - [3:0] */
1770
1771/*
1772 * R87 (0x57) - DC Servo 3
1773 */
1774#define WM8993_DCS_DAC_WR_VAL_1_MASK 0xFF00 /* DCS_DAC_WR_VAL_1 - [15:8] */
1775#define WM8993_DCS_DAC_WR_VAL_1_SHIFT 8 /* DCS_DAC_WR_VAL_1 - [15:8] */
1776#define WM8993_DCS_DAC_WR_VAL_1_WIDTH 8 /* DCS_DAC_WR_VAL_1 - [15:8] */
1777#define WM8993_DCS_DAC_WR_VAL_0_MASK 0x00FF /* DCS_DAC_WR_VAL_0 - [7:0] */
1778#define WM8993_DCS_DAC_WR_VAL_0_SHIFT 0 /* DCS_DAC_WR_VAL_0 - [7:0] */
1779#define WM8993_DCS_DAC_WR_VAL_0_WIDTH 8 /* DCS_DAC_WR_VAL_0 - [7:0] */
1780
1781/*
1782 * R88 (0x58) - DC Servo Readback 0
1783 */
1784#define WM8993_DCS_DATAPATH_BUSY 0x4000 /* DCS_DATAPATH_BUSY */
1785#define WM8993_DCS_DATAPATH_BUSY_MASK 0x4000 /* DCS_DATAPATH_BUSY */
1786#define WM8993_DCS_DATAPATH_BUSY_SHIFT 14 /* DCS_DATAPATH_BUSY */
1787#define WM8993_DCS_DATAPATH_BUSY_WIDTH 1 /* DCS_DATAPATH_BUSY */
1788#define WM8993_DCS_CHANNEL_MASK 0x3000 /* DCS_CHANNEL - [13:12] */
1789#define WM8993_DCS_CHANNEL_SHIFT 12 /* DCS_CHANNEL - [13:12] */
1790#define WM8993_DCS_CHANNEL_WIDTH 2 /* DCS_CHANNEL - [13:12] */
1791#define WM8993_DCS_CAL_COMPLETE_MASK 0x0300 /* DCS_CAL_COMPLETE - [9:8] */
1792#define WM8993_DCS_CAL_COMPLETE_SHIFT 8 /* DCS_CAL_COMPLETE - [9:8] */
1793#define WM8993_DCS_CAL_COMPLETE_WIDTH 2 /* DCS_CAL_COMPLETE - [9:8] */
1794#define WM8993_DCS_DAC_WR_COMPLETE_MASK 0x0030 /* DCS_DAC_WR_COMPLETE - [5:4] */
1795#define WM8993_DCS_DAC_WR_COMPLETE_SHIFT 4 /* DCS_DAC_WR_COMPLETE - [5:4] */
1796#define WM8993_DCS_DAC_WR_COMPLETE_WIDTH 2 /* DCS_DAC_WR_COMPLETE - [5:4] */
1797#define WM8993_DCS_STARTUP_COMPLETE_MASK 0x0003 /* DCS_STARTUP_COMPLETE - [1:0] */
1798#define WM8993_DCS_STARTUP_COMPLETE_SHIFT 0 /* DCS_STARTUP_COMPLETE - [1:0] */
1799#define WM8993_DCS_STARTUP_COMPLETE_WIDTH 2 /* DCS_STARTUP_COMPLETE - [1:0] */
1800
1801/*
1802 * R89 (0x59) - DC Servo Readback 1
1803 */
1804#define WM8993_DCS_INTEG_CHAN_1_MASK 0x00FF /* DCS_INTEG_CHAN_1 - [7:0] */
1805#define WM8993_DCS_INTEG_CHAN_1_SHIFT 0 /* DCS_INTEG_CHAN_1 - [7:0] */
1806#define WM8993_DCS_INTEG_CHAN_1_WIDTH 8 /* DCS_INTEG_CHAN_1 - [7:0] */
1807
1808/*
1809 * R90 (0x5A) - DC Servo Readback 2
1810 */
1811#define WM8993_DCS_INTEG_CHAN_0_MASK 0x00FF /* DCS_INTEG_CHAN_0 - [7:0] */
1812#define WM8993_DCS_INTEG_CHAN_0_SHIFT 0 /* DCS_INTEG_CHAN_0 - [7:0] */
1813#define WM8993_DCS_INTEG_CHAN_0_WIDTH 8 /* DCS_INTEG_CHAN_0 - [7:0] */
1814
1815/*
1816 * R96 (0x60) - Analogue HP 0
1817 */
1818#define WM8993_HPOUT1_AUTO_PU 0x0100 /* HPOUT1_AUTO_PU */
1819#define WM8993_HPOUT1_AUTO_PU_MASK 0x0100 /* HPOUT1_AUTO_PU */
1820#define WM8993_HPOUT1_AUTO_PU_SHIFT 8 /* HPOUT1_AUTO_PU */
1821#define WM8993_HPOUT1_AUTO_PU_WIDTH 1 /* HPOUT1_AUTO_PU */
1822#define WM8993_HPOUT1L_RMV_SHORT 0x0080 /* HPOUT1L_RMV_SHORT */
1823#define WM8993_HPOUT1L_RMV_SHORT_MASK 0x0080 /* HPOUT1L_RMV_SHORT */
1824#define WM8993_HPOUT1L_RMV_SHORT_SHIFT 7 /* HPOUT1L_RMV_SHORT */
1825#define WM8993_HPOUT1L_RMV_SHORT_WIDTH 1 /* HPOUT1L_RMV_SHORT */
1826#define WM8993_HPOUT1L_OUTP 0x0040 /* HPOUT1L_OUTP */
1827#define WM8993_HPOUT1L_OUTP_MASK 0x0040 /* HPOUT1L_OUTP */
1828#define WM8993_HPOUT1L_OUTP_SHIFT 6 /* HPOUT1L_OUTP */
1829#define WM8993_HPOUT1L_OUTP_WIDTH 1 /* HPOUT1L_OUTP */
1830#define WM8993_HPOUT1L_DLY 0x0020 /* HPOUT1L_DLY */
1831#define WM8993_HPOUT1L_DLY_MASK 0x0020 /* HPOUT1L_DLY */
1832#define WM8993_HPOUT1L_DLY_SHIFT 5 /* HPOUT1L_DLY */
1833#define WM8993_HPOUT1L_DLY_WIDTH 1 /* HPOUT1L_DLY */
1834#define WM8993_HPOUT1R_RMV_SHORT 0x0008 /* HPOUT1R_RMV_SHORT */
1835#define WM8993_HPOUT1R_RMV_SHORT_MASK 0x0008 /* HPOUT1R_RMV_SHORT */
1836#define WM8993_HPOUT1R_RMV_SHORT_SHIFT 3 /* HPOUT1R_RMV_SHORT */
1837#define WM8993_HPOUT1R_RMV_SHORT_WIDTH 1 /* HPOUT1R_RMV_SHORT */
1838#define WM8993_HPOUT1R_OUTP 0x0004 /* HPOUT1R_OUTP */
1839#define WM8993_HPOUT1R_OUTP_MASK 0x0004 /* HPOUT1R_OUTP */
1840#define WM8993_HPOUT1R_OUTP_SHIFT 2 /* HPOUT1R_OUTP */
1841#define WM8993_HPOUT1R_OUTP_WIDTH 1 /* HPOUT1R_OUTP */
1842#define WM8993_HPOUT1R_DLY 0x0002 /* HPOUT1R_DLY */
1843#define WM8993_HPOUT1R_DLY_MASK 0x0002 /* HPOUT1R_DLY */
1844#define WM8993_HPOUT1R_DLY_SHIFT 1 /* HPOUT1R_DLY */
1845#define WM8993_HPOUT1R_DLY_WIDTH 1 /* HPOUT1R_DLY */
1846
1847/*
1848 * R98 (0x62) - EQ1
1849 */
1850#define WM8993_EQ_ENA 0x0001 /* EQ_ENA */
1851#define WM8993_EQ_ENA_MASK 0x0001 /* EQ_ENA */
1852#define WM8993_EQ_ENA_SHIFT 0 /* EQ_ENA */
1853#define WM8993_EQ_ENA_WIDTH 1 /* EQ_ENA */
1854
1855/*
1856 * R99 (0x63) - EQ2
1857 */
1858#define WM8993_EQ_B1_GAIN_MASK 0x001F /* EQ_B1_GAIN - [4:0] */
1859#define WM8993_EQ_B1_GAIN_SHIFT 0 /* EQ_B1_GAIN - [4:0] */
1860#define WM8993_EQ_B1_GAIN_WIDTH 5 /* EQ_B1_GAIN - [4:0] */
1861
1862/*
1863 * R100 (0x64) - EQ3
1864 */
1865#define WM8993_EQ_B2_GAIN_MASK 0x001F /* EQ_B2_GAIN - [4:0] */
1866#define WM8993_EQ_B2_GAIN_SHIFT 0 /* EQ_B2_GAIN - [4:0] */
1867#define WM8993_EQ_B2_GAIN_WIDTH 5 /* EQ_B2_GAIN - [4:0] */
1868
1869/*
1870 * R101 (0x65) - EQ4
1871 */
1872#define WM8993_EQ_B3_GAIN_MASK 0x001F /* EQ_B3_GAIN - [4:0] */
1873#define WM8993_EQ_B3_GAIN_SHIFT 0 /* EQ_B3_GAIN - [4:0] */
1874#define WM8993_EQ_B3_GAIN_WIDTH 5 /* EQ_B3_GAIN - [4:0] */
1875
1876/*
1877 * R102 (0x66) - EQ5
1878 */
1879#define WM8993_EQ_B4_GAIN_MASK 0x001F /* EQ_B4_GAIN - [4:0] */
1880#define WM8993_EQ_B4_GAIN_SHIFT 0 /* EQ_B4_GAIN - [4:0] */
1881#define WM8993_EQ_B4_GAIN_WIDTH 5 /* EQ_B4_GAIN - [4:0] */
1882
1883/*
1884 * R103 (0x67) - EQ6
1885 */
1886#define WM8993_EQ_B5_GAIN_MASK 0x001F /* EQ_B5_GAIN - [4:0] */
1887#define WM8993_EQ_B5_GAIN_SHIFT 0 /* EQ_B5_GAIN - [4:0] */
1888#define WM8993_EQ_B5_GAIN_WIDTH 5 /* EQ_B5_GAIN - [4:0] */
1889
1890/*
1891 * R104 (0x68) - EQ7
1892 */
1893#define WM8993_EQ_B1_A_MASK 0xFFFF /* EQ_B1_A - [15:0] */
1894#define WM8993_EQ_B1_A_SHIFT 0 /* EQ_B1_A - [15:0] */
1895#define WM8993_EQ_B1_A_WIDTH 16 /* EQ_B1_A - [15:0] */
1896
1897/*
1898 * R105 (0x69) - EQ8
1899 */
1900#define WM8993_EQ_B1_B_MASK 0xFFFF /* EQ_B1_B - [15:0] */
1901#define WM8993_EQ_B1_B_SHIFT 0 /* EQ_B1_B - [15:0] */
1902#define WM8993_EQ_B1_B_WIDTH 16 /* EQ_B1_B - [15:0] */
1903
1904/*
1905 * R106 (0x6A) - EQ9
1906 */
1907#define WM8993_EQ_B1_PG_MASK 0xFFFF /* EQ_B1_PG - [15:0] */
1908#define WM8993_EQ_B1_PG_SHIFT 0 /* EQ_B1_PG - [15:0] */
1909#define WM8993_EQ_B1_PG_WIDTH 16 /* EQ_B1_PG - [15:0] */
1910
1911/*
1912 * R107 (0x6B) - EQ10
1913 */
1914#define WM8993_EQ_B2_A_MASK 0xFFFF /* EQ_B2_A - [15:0] */
1915#define WM8993_EQ_B2_A_SHIFT 0 /* EQ_B2_A - [15:0] */
1916#define WM8993_EQ_B2_A_WIDTH 16 /* EQ_B2_A - [15:0] */
1917
1918/*
1919 * R108 (0x6C) - EQ11
1920 */
1921#define WM8993_EQ_B2_B_MASK 0xFFFF /* EQ_B2_B - [15:0] */
1922#define WM8993_EQ_B2_B_SHIFT 0 /* EQ_B2_B - [15:0] */
1923#define WM8993_EQ_B2_B_WIDTH 16 /* EQ_B2_B - [15:0] */
1924
1925/*
1926 * R109 (0x6D) - EQ12
1927 */
1928#define WM8993_EQ_B2_C_MASK 0xFFFF /* EQ_B2_C - [15:0] */
1929#define WM8993_EQ_B2_C_SHIFT 0 /* EQ_B2_C - [15:0] */
1930#define WM8993_EQ_B2_C_WIDTH 16 /* EQ_B2_C - [15:0] */
1931
1932/*
1933 * R110 (0x6E) - EQ13
1934 */
1935#define WM8993_EQ_B2_PG_MASK 0xFFFF /* EQ_B2_PG - [15:0] */
1936#define WM8993_EQ_B2_PG_SHIFT 0 /* EQ_B2_PG - [15:0] */
1937#define WM8993_EQ_B2_PG_WIDTH 16 /* EQ_B2_PG - [15:0] */
1938
1939/*
1940 * R111 (0x6F) - EQ14
1941 */
1942#define WM8993_EQ_B3_A_MASK 0xFFFF /* EQ_B3_A - [15:0] */
1943#define WM8993_EQ_B3_A_SHIFT 0 /* EQ_B3_A - [15:0] */
1944#define WM8993_EQ_B3_A_WIDTH 16 /* EQ_B3_A - [15:0] */
1945
1946/*
1947 * R112 (0x70) - EQ15
1948 */
1949#define WM8993_EQ_B3_B_MASK 0xFFFF /* EQ_B3_B - [15:0] */
1950#define WM8993_EQ_B3_B_SHIFT 0 /* EQ_B3_B - [15:0] */
1951#define WM8993_EQ_B3_B_WIDTH 16 /* EQ_B3_B - [15:0] */
1952
1953/*
1954 * R113 (0x71) - EQ16
1955 */
1956#define WM8993_EQ_B3_C_MASK 0xFFFF /* EQ_B3_C - [15:0] */
1957#define WM8993_EQ_B3_C_SHIFT 0 /* EQ_B3_C - [15:0] */
1958#define WM8993_EQ_B3_C_WIDTH 16 /* EQ_B3_C - [15:0] */
1959
1960/*
1961 * R114 (0x72) - EQ17
1962 */
1963#define WM8993_EQ_B3_PG_MASK 0xFFFF /* EQ_B3_PG - [15:0] */
1964#define WM8993_EQ_B3_PG_SHIFT 0 /* EQ_B3_PG - [15:0] */
1965#define WM8993_EQ_B3_PG_WIDTH 16 /* EQ_B3_PG - [15:0] */
1966
1967/*
1968 * R115 (0x73) - EQ18
1969 */
1970#define WM8993_EQ_B4_A_MASK 0xFFFF /* EQ_B4_A - [15:0] */
1971#define WM8993_EQ_B4_A_SHIFT 0 /* EQ_B4_A - [15:0] */
1972#define WM8993_EQ_B4_A_WIDTH 16 /* EQ_B4_A - [15:0] */
1973
1974/*
1975 * R116 (0x74) - EQ19
1976 */
1977#define WM8993_EQ_B4_B_MASK 0xFFFF /* EQ_B4_B - [15:0] */
1978#define WM8993_EQ_B4_B_SHIFT 0 /* EQ_B4_B - [15:0] */
1979#define WM8993_EQ_B4_B_WIDTH 16 /* EQ_B4_B - [15:0] */
1980
1981/*
1982 * R117 (0x75) - EQ20
1983 */
1984#define WM8993_EQ_B4_C_MASK 0xFFFF /* EQ_B4_C - [15:0] */
1985#define WM8993_EQ_B4_C_SHIFT 0 /* EQ_B4_C - [15:0] */
1986#define WM8993_EQ_B4_C_WIDTH 16 /* EQ_B4_C - [15:0] */
1987
1988/*
1989 * R118 (0x76) - EQ21
1990 */
1991#define WM8993_EQ_B4_PG_MASK 0xFFFF /* EQ_B4_PG - [15:0] */
1992#define WM8993_EQ_B4_PG_SHIFT 0 /* EQ_B4_PG - [15:0] */
1993#define WM8993_EQ_B4_PG_WIDTH 16 /* EQ_B4_PG - [15:0] */
1994
1995/*
1996 * R119 (0x77) - EQ22
1997 */
1998#define WM8993_EQ_B5_A_MASK 0xFFFF /* EQ_B5_A - [15:0] */
1999#define WM8993_EQ_B5_A_SHIFT 0 /* EQ_B5_A - [15:0] */
2000#define WM8993_EQ_B5_A_WIDTH 16 /* EQ_B5_A - [15:0] */
2001
2002/*
2003 * R120 (0x78) - EQ23
2004 */
2005#define WM8993_EQ_B5_B_MASK 0xFFFF /* EQ_B5_B - [15:0] */
2006#define WM8993_EQ_B5_B_SHIFT 0 /* EQ_B5_B - [15:0] */
2007#define WM8993_EQ_B5_B_WIDTH 16 /* EQ_B5_B - [15:0] */
2008
2009/*
2010 * R121 (0x79) - EQ24
2011 */
2012#define WM8993_EQ_B5_PG_MASK 0xFFFF /* EQ_B5_PG - [15:0] */
2013#define WM8993_EQ_B5_PG_SHIFT 0 /* EQ_B5_PG - [15:0] */
2014#define WM8993_EQ_B5_PG_WIDTH 16 /* EQ_B5_PG - [15:0] */
2015
2016/*
2017 * R122 (0x7A) - Digital Pulls
2018 */
2019#define WM8993_MCLK_PU 0x0080 /* MCLK_PU */
2020#define WM8993_MCLK_PU_MASK 0x0080 /* MCLK_PU */
2021#define WM8993_MCLK_PU_SHIFT 7 /* MCLK_PU */
2022#define WM8993_MCLK_PU_WIDTH 1 /* MCLK_PU */
2023#define WM8993_MCLK_PD 0x0040 /* MCLK_PD */
2024#define WM8993_MCLK_PD_MASK 0x0040 /* MCLK_PD */
2025#define WM8993_MCLK_PD_SHIFT 6 /* MCLK_PD */
2026#define WM8993_MCLK_PD_WIDTH 1 /* MCLK_PD */
2027#define WM8993_DACDAT_PU 0x0020 /* DACDAT_PU */
2028#define WM8993_DACDAT_PU_MASK 0x0020 /* DACDAT_PU */
2029#define WM8993_DACDAT_PU_SHIFT 5 /* DACDAT_PU */
2030#define WM8993_DACDAT_PU_WIDTH 1 /* DACDAT_PU */
2031#define WM8993_DACDAT_PD 0x0010 /* DACDAT_PD */
2032#define WM8993_DACDAT_PD_MASK 0x0010 /* DACDAT_PD */
2033#define WM8993_DACDAT_PD_SHIFT 4 /* DACDAT_PD */
2034#define WM8993_DACDAT_PD_WIDTH 1 /* DACDAT_PD */
2035#define WM8993_LRCLK_PU 0x0008 /* LRCLK_PU */
2036#define WM8993_LRCLK_PU_MASK 0x0008 /* LRCLK_PU */
2037#define WM8993_LRCLK_PU_SHIFT 3 /* LRCLK_PU */
2038#define WM8993_LRCLK_PU_WIDTH 1 /* LRCLK_PU */
2039#define WM8993_LRCLK_PD 0x0004 /* LRCLK_PD */
2040#define WM8993_LRCLK_PD_MASK 0x0004 /* LRCLK_PD */
2041#define WM8993_LRCLK_PD_SHIFT 2 /* LRCLK_PD */
2042#define WM8993_LRCLK_PD_WIDTH 1 /* LRCLK_PD */
2043#define WM8993_BCLK_PU 0x0002 /* BCLK_PU */
2044#define WM8993_BCLK_PU_MASK 0x0002 /* BCLK_PU */
2045#define WM8993_BCLK_PU_SHIFT 1 /* BCLK_PU */
2046#define WM8993_BCLK_PU_WIDTH 1 /* BCLK_PU */
2047#define WM8993_BCLK_PD 0x0001 /* BCLK_PD */
2048#define WM8993_BCLK_PD_MASK 0x0001 /* BCLK_PD */
2049#define WM8993_BCLK_PD_SHIFT 0 /* BCLK_PD */
2050#define WM8993_BCLK_PD_WIDTH 1 /* BCLK_PD */
2051
2052/*
2053 * R123 (0x7B) - DRC Control 1
2054 */
2055#define WM8993_DRC_ENA 0x8000 /* DRC_ENA */
2056#define WM8993_DRC_ENA_MASK 0x8000 /* DRC_ENA */
2057#define WM8993_DRC_ENA_SHIFT 15 /* DRC_ENA */
2058#define WM8993_DRC_ENA_WIDTH 1 /* DRC_ENA */
2059#define WM8993_DRC_DAC_PATH 0x4000 /* DRC_DAC_PATH */
2060#define WM8993_DRC_DAC_PATH_MASK 0x4000 /* DRC_DAC_PATH */
2061#define WM8993_DRC_DAC_PATH_SHIFT 14 /* DRC_DAC_PATH */
2062#define WM8993_DRC_DAC_PATH_WIDTH 1 /* DRC_DAC_PATH */
2063#define WM8993_DRC_SMOOTH_ENA 0x0800 /* DRC_SMOOTH_ENA */
2064#define WM8993_DRC_SMOOTH_ENA_MASK 0x0800 /* DRC_SMOOTH_ENA */
2065#define WM8993_DRC_SMOOTH_ENA_SHIFT 11 /* DRC_SMOOTH_ENA */
2066#define WM8993_DRC_SMOOTH_ENA_WIDTH 1 /* DRC_SMOOTH_ENA */
2067#define WM8993_DRC_QR_ENA 0x0400 /* DRC_QR_ENA */
2068#define WM8993_DRC_QR_ENA_MASK 0x0400 /* DRC_QR_ENA */
2069#define WM8993_DRC_QR_ENA_SHIFT 10 /* DRC_QR_ENA */
2070#define WM8993_DRC_QR_ENA_WIDTH 1 /* DRC_QR_ENA */
2071#define WM8993_DRC_ANTICLIP_ENA 0x0200 /* DRC_ANTICLIP_ENA */
2072#define WM8993_DRC_ANTICLIP_ENA_MASK 0x0200 /* DRC_ANTICLIP_ENA */
2073#define WM8993_DRC_ANTICLIP_ENA_SHIFT 9 /* DRC_ANTICLIP_ENA */
2074#define WM8993_DRC_ANTICLIP_ENA_WIDTH 1 /* DRC_ANTICLIP_ENA */
2075#define WM8993_DRC_HYST_ENA 0x0100 /* DRC_HYST_ENA */
2076#define WM8993_DRC_HYST_ENA_MASK 0x0100 /* DRC_HYST_ENA */
2077#define WM8993_DRC_HYST_ENA_SHIFT 8 /* DRC_HYST_ENA */
2078#define WM8993_DRC_HYST_ENA_WIDTH 1 /* DRC_HYST_ENA */
2079#define WM8993_DRC_THRESH_HYST_MASK 0x0030 /* DRC_THRESH_HYST - [5:4] */
2080#define WM8993_DRC_THRESH_HYST_SHIFT 4 /* DRC_THRESH_HYST - [5:4] */
2081#define WM8993_DRC_THRESH_HYST_WIDTH 2 /* DRC_THRESH_HYST - [5:4] */
2082#define WM8993_DRC_MINGAIN_MASK 0x000C /* DRC_MINGAIN - [3:2] */
2083#define WM8993_DRC_MINGAIN_SHIFT 2 /* DRC_MINGAIN - [3:2] */
2084#define WM8993_DRC_MINGAIN_WIDTH 2 /* DRC_MINGAIN - [3:2] */
2085#define WM8993_DRC_MAXGAIN_MASK 0x0003 /* DRC_MAXGAIN - [1:0] */
2086#define WM8993_DRC_MAXGAIN_SHIFT 0 /* DRC_MAXGAIN - [1:0] */
2087#define WM8993_DRC_MAXGAIN_WIDTH 2 /* DRC_MAXGAIN - [1:0] */
2088
2089/*
2090 * R124 (0x7C) - DRC Control 2
2091 */
2092#define WM8993_DRC_ATTACK_RATE_MASK 0xF000 /* DRC_ATTACK_RATE - [15:12] */
2093#define WM8993_DRC_ATTACK_RATE_SHIFT 12 /* DRC_ATTACK_RATE - [15:12] */
2094#define WM8993_DRC_ATTACK_RATE_WIDTH 4 /* DRC_ATTACK_RATE - [15:12] */
2095#define WM8993_DRC_DECAY_RATE_MASK 0x0F00 /* DRC_DECAY_RATE - [11:8] */
2096#define WM8993_DRC_DECAY_RATE_SHIFT 8 /* DRC_DECAY_RATE - [11:8] */
2097#define WM8993_DRC_DECAY_RATE_WIDTH 4 /* DRC_DECAY_RATE - [11:8] */
2098#define WM8993_DRC_THRESH_COMP_MASK 0x00FC /* DRC_THRESH_COMP - [7:2] */
2099#define WM8993_DRC_THRESH_COMP_SHIFT 2 /* DRC_THRESH_COMP - [7:2] */
2100#define WM8993_DRC_THRESH_COMP_WIDTH 6 /* DRC_THRESH_COMP - [7:2] */
2101
2102/*
2103 * R125 (0x7D) - DRC Control 3
2104 */
2105#define WM8993_DRC_AMP_COMP_MASK 0xF800 /* DRC_AMP_COMP - [15:11] */
2106#define WM8993_DRC_AMP_COMP_SHIFT 11 /* DRC_AMP_COMP - [15:11] */
2107#define WM8993_DRC_AMP_COMP_WIDTH 5 /* DRC_AMP_COMP - [15:11] */
2108#define WM8993_DRC_R0_SLOPE_COMP_MASK 0x0700 /* DRC_R0_SLOPE_COMP - [10:8] */
2109#define WM8993_DRC_R0_SLOPE_COMP_SHIFT 8 /* DRC_R0_SLOPE_COMP - [10:8] */
2110#define WM8993_DRC_R0_SLOPE_COMP_WIDTH 3 /* DRC_R0_SLOPE_COMP - [10:8] */
2111#define WM8993_DRC_FF_DELAY 0x0080 /* DRC_FF_DELAY */
2112#define WM8993_DRC_FF_DELAY_MASK 0x0080 /* DRC_FF_DELAY */
2113#define WM8993_DRC_FF_DELAY_SHIFT 7 /* DRC_FF_DELAY */
2114#define WM8993_DRC_FF_DELAY_WIDTH 1 /* DRC_FF_DELAY */
2115#define WM8993_DRC_THRESH_QR_MASK 0x000C /* DRC_THRESH_QR - [3:2] */
2116#define WM8993_DRC_THRESH_QR_SHIFT 2 /* DRC_THRESH_QR - [3:2] */
2117#define WM8993_DRC_THRESH_QR_WIDTH 2 /* DRC_THRESH_QR - [3:2] */
2118#define WM8993_DRC_RATE_QR_MASK 0x0003 /* DRC_RATE_QR - [1:0] */
2119#define WM8993_DRC_RATE_QR_SHIFT 0 /* DRC_RATE_QR - [1:0] */
2120#define WM8993_DRC_RATE_QR_WIDTH 2 /* DRC_RATE_QR - [1:0] */
2121
2122/*
2123 * R126 (0x7E) - DRC Control 4
2124 */
2125#define WM8993_DRC_R1_SLOPE_COMP_MASK 0xE000 /* DRC_R1_SLOPE_COMP - [15:13] */
2126#define WM8993_DRC_R1_SLOPE_COMP_SHIFT 13 /* DRC_R1_SLOPE_COMP - [15:13] */
2127#define WM8993_DRC_R1_SLOPE_COMP_WIDTH 3 /* DRC_R1_SLOPE_COMP - [15:13] */
2128#define WM8993_DRC_STARTUP_GAIN_MASK 0x1F00 /* DRC_STARTUP_GAIN - [12:8] */
2129#define WM8993_DRC_STARTUP_GAIN_SHIFT 8 /* DRC_STARTUP_GAIN - [12:8] */
2130#define WM8993_DRC_STARTUP_GAIN_WIDTH 5 /* DRC_STARTUP_GAIN - [12:8] */
2131
2132#endif
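
For reference, each field in the register map above is described by a value/_MASK/_SHIFT/_WIDTH group, and a driver normally programs such a field through the ASoC register helpers rather than an open-coded read-modify-write. A minimal sketch, assuming a valid struct snd_soc_codec pointer and the register index macro WM8993_FLL_CONTROL_2 defined near the top of this header; the helper name and the outdiv parameter are illustrative only, not part of this patch:

	#include <sound/soc.h>

	/* Illustrative only: set FLL_OUTDIV (bits [10:8] of R61) while leaving
	 * the other fields of the register untouched. */
	static int wm8993_set_fll_outdiv(struct snd_soc_codec *codec,
					 unsigned int outdiv)
	{
		return snd_soc_update_bits(codec, WM8993_FLL_CONTROL_2,
					   WM8993_FLL_OUTDIV_MASK,
					   outdiv << WM8993_FLL_OUTDIV_SHIFT);
	}
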
diff --git a/sound/soc/codecs/wm9081.c b/sound/soc/codecs/wm9081.c
index 86fc57e25f97..c64e55aa63b6 100644
--- a/sound/soc/codecs/wm9081.c
+++ b/sound/soc/codecs/wm9081.c
@@ -165,87 +165,23 @@ struct wm9081_priv {
 	int master;
 	int fll_fref;
 	int fll_fout;
+	int tdm_width;
 	struct wm9081_retune_mobile_config *retune;
 };
 
-static int wm9081_reg_is_volatile(int reg)
+static int wm9081_volatile_register(unsigned int reg)
 {
 	switch (reg) {
+	case WM9081_SOFTWARE_RESET:
+		return 1;
 	default:
 		return 0;
 	}
 }
 
-static unsigned int wm9081_read_reg_cache(struct snd_soc_codec *codec,
-					  unsigned int reg)
-{
-	u16 *cache = codec->reg_cache;
-	BUG_ON(reg > WM9081_MAX_REGISTER);
-	return cache[reg];
-}
-
-static unsigned int wm9081_read_hw(struct snd_soc_codec *codec, u8 reg)
-{
-	struct i2c_msg xfer[2];
-	u16 data;
-	int ret;
-	struct i2c_client *client = codec->control_data;
-
-	BUG_ON(reg > WM9081_MAX_REGISTER);
-
-	/* Write register */
-	xfer[0].addr = client->addr;
-	xfer[0].flags = 0;
-	xfer[0].len = 1;
-	xfer[0].buf = &reg;
-
-	/* Read data */
-	xfer[1].addr = client->addr;
-	xfer[1].flags = I2C_M_RD;
-	xfer[1].len = 2;
-	xfer[1].buf = (u8 *)&data;
-
-	ret = i2c_transfer(client->adapter, xfer, 2);
-	if (ret != 2) {
-		dev_err(&client->dev, "i2c_transfer() returned %d\n", ret);
-		return 0;
-	}
-
-	return (data >> 8) | ((data & 0xff) << 8);
-}
-
-static unsigned int wm9081_read(struct snd_soc_codec *codec, unsigned int reg)
-{
-	if (wm9081_reg_is_volatile(reg))
-		return wm9081_read_hw(codec, reg);
-	else
-		return wm9081_read_reg_cache(codec, reg);
-}
-
-static int wm9081_write(struct snd_soc_codec *codec, unsigned int reg,
-			unsigned int value)
-{
-	u16 *cache = codec->reg_cache;
-	u8 data[3];
-
-	BUG_ON(reg > WM9081_MAX_REGISTER);
-
-	if (!wm9081_reg_is_volatile(reg))
-		cache[reg] = value;
-
-	data[0] = reg;
-	data[1] = value >> 8;
-	data[2] = value & 0x00ff;
-
-	if (codec->hw_write(codec->control_data, data, 3) == 3)
-		return 0;
-	else
-		return -EIO;
-}
-
 static int wm9081_reset(struct snd_soc_codec *codec)
 {
-	return wm9081_write(codec, WM9081_SOFTWARE_RESET, 0);
+	return snd_soc_write(codec, WM9081_SOFTWARE_RESET, 0);
 }
 
 static const DECLARE_TLV_DB_SCALE(drc_in_tlv, -4500, 75, 0);
@@ -356,7 +292,7 @@ static int speaker_mode_get(struct snd_kcontrol *kcontrol,
 	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
 	unsigned int reg;
 
-	reg = wm9081_read(codec, WM9081_ANALOGUE_SPEAKER_2);
+	reg = snd_soc_read(codec, WM9081_ANALOGUE_SPEAKER_2);
 	if (reg & WM9081_SPK_MODE)
 		ucontrol->value.integer.value[0] = 1;
 	else
@@ -375,8 +311,8 @@ static int speaker_mode_put(struct snd_kcontrol *kcontrol,
 			    struct snd_ctl_elem_value *ucontrol)
 {
 	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
-	unsigned int reg_pwr = wm9081_read(codec, WM9081_POWER_MANAGEMENT);
-	unsigned int reg2 = wm9081_read(codec, WM9081_ANALOGUE_SPEAKER_2);
+	unsigned int reg_pwr = snd_soc_read(codec, WM9081_POWER_MANAGEMENT);
+	unsigned int reg2 = snd_soc_read(codec, WM9081_ANALOGUE_SPEAKER_2);
 
 	/* Are we changing anything? */
 	if (ucontrol->value.integer.value[0] ==
@@ -397,7 +333,7 @@ static int speaker_mode_put(struct snd_kcontrol *kcontrol,
 		reg2 &= ~WM9081_SPK_MODE;
 	}
 
-	wm9081_write(codec, WM9081_ANALOGUE_SPEAKER_2, reg2);
+	snd_soc_write(codec, WM9081_ANALOGUE_SPEAKER_2, reg2);
 
 	return 0;
 }
@@ -456,7 +392,7 @@ static int speaker_event(struct snd_soc_dapm_widget *w,
 			 struct snd_kcontrol *kcontrol, int event)
 {
 	struct snd_soc_codec *codec = w->codec;
-	unsigned int reg = wm9081_read(codec, WM9081_POWER_MANAGEMENT);
+	unsigned int reg = snd_soc_read(codec, WM9081_POWER_MANAGEMENT);
 
 	switch (event) {
 	case SND_SOC_DAPM_POST_PMU:
@@ -468,7 +404,7 @@ static int speaker_event(struct snd_soc_dapm_widget *w,
 		break;
 	}
 
-	wm9081_write(codec, WM9081_POWER_MANAGEMENT, reg);
+	snd_soc_write(codec, WM9081_POWER_MANAGEMENT, reg);
 
 	return 0;
 }
@@ -607,7 +543,7 @@ static int wm9081_set_fll(struct snd_soc_codec *codec, int fll_id,
 	if (ret != 0)
 		return ret;
 
-	reg5 = wm9081_read(codec, WM9081_FLL_CONTROL_5);
+	reg5 = snd_soc_read(codec, WM9081_FLL_CONTROL_5);
 	reg5 &= ~WM9081_FLL_CLK_SRC_MASK;
 
 	switch (fll_id) {
@@ -621,44 +557,44 @@ static int wm9081_set_fll(struct snd_soc_codec *codec, int fll_id,
 	}
 
 	/* Disable CLK_SYS while we reconfigure */
-	clk_sys_reg = wm9081_read(codec, WM9081_CLOCK_CONTROL_3);
+	clk_sys_reg = snd_soc_read(codec, WM9081_CLOCK_CONTROL_3);
 	if (clk_sys_reg & WM9081_CLK_SYS_ENA)
-		wm9081_write(codec, WM9081_CLOCK_CONTROL_3,
+		snd_soc_write(codec, WM9081_CLOCK_CONTROL_3,
 			     clk_sys_reg & ~WM9081_CLK_SYS_ENA);
 
 	/* Any FLL configuration change requires that the FLL be
 	 * disabled first. */
-	reg1 = wm9081_read(codec, WM9081_FLL_CONTROL_1);
+	reg1 = snd_soc_read(codec, WM9081_FLL_CONTROL_1);
 	reg1 &= ~WM9081_FLL_ENA;
-	wm9081_write(codec, WM9081_FLL_CONTROL_1, reg1);
+	snd_soc_write(codec, WM9081_FLL_CONTROL_1, reg1);
 
 	/* Apply the configuration */
 	if (fll_div.k)
 		reg1 |= WM9081_FLL_FRAC_MASK;
 	else
 		reg1 &= ~WM9081_FLL_FRAC_MASK;
-	wm9081_write(codec, WM9081_FLL_CONTROL_1, reg1);
+	snd_soc_write(codec, WM9081_FLL_CONTROL_1, reg1);
 
-	wm9081_write(codec, WM9081_FLL_CONTROL_2,
+	snd_soc_write(codec, WM9081_FLL_CONTROL_2,
 		     (fll_div.fll_outdiv << WM9081_FLL_OUTDIV_SHIFT) |
 		     (fll_div.fll_fratio << WM9081_FLL_FRATIO_SHIFT));
-	wm9081_write(codec, WM9081_FLL_CONTROL_3, fll_div.k);
+	snd_soc_write(codec, WM9081_FLL_CONTROL_3, fll_div.k);
 
-	reg4 = wm9081_read(codec, WM9081_FLL_CONTROL_4);
+	reg4 = snd_soc_read(codec, WM9081_FLL_CONTROL_4);
 	reg4 &= ~WM9081_FLL_N_MASK;
 	reg4 |= fll_div.n << WM9081_FLL_N_SHIFT;
-	wm9081_write(codec, WM9081_FLL_CONTROL_4, reg4);
+	snd_soc_write(codec, WM9081_FLL_CONTROL_4, reg4);
 
 	reg5 &= ~WM9081_FLL_CLK_REF_DIV_MASK;
 	reg5 |= fll_div.fll_clk_ref_div << WM9081_FLL_CLK_REF_DIV_SHIFT;
-	wm9081_write(codec, WM9081_FLL_CONTROL_5, reg5);
+	snd_soc_write(codec, WM9081_FLL_CONTROL_5, reg5);
 
 	/* Enable the FLL */
-	wm9081_write(codec, WM9081_FLL_CONTROL_1, reg1 | WM9081_FLL_ENA);
+	snd_soc_write(codec, WM9081_FLL_CONTROL_1, reg1 | WM9081_FLL_ENA);
 
 	/* Then bring CLK_SYS up again if it was disabled */
 	if (clk_sys_reg & WM9081_CLK_SYS_ENA)
-		wm9081_write(codec, WM9081_CLOCK_CONTROL_3, clk_sys_reg);
+		snd_soc_write(codec, WM9081_CLOCK_CONTROL_3, clk_sys_reg);
 
 	dev_dbg(codec->dev, "FLL enabled at %dHz->%dHz\n", Fref, Fout);
 
@@ -707,6 +643,10 @@ static int configure_clock(struct snd_soc_codec *codec)
 			    target > 3000000)
 				break;
 		}
+
+		if (i == ARRAY_SIZE(clk_sys_rates))
+			return -EINVAL;
+
 	} else if (wm9081->fs) {
 		for (i = 0; i < ARRAY_SIZE(clk_sys_rates); i++) {
 			new_sysclk = clk_sys_rates[i].ratio
@@ -714,6 +654,10 @@ static int configure_clock(struct snd_soc_codec *codec)
 			if (new_sysclk > 3000000)
 				break;
 		}
+
+		if (i == ARRAY_SIZE(clk_sys_rates))
+			return -EINVAL;
+
 	} else {
 		new_sysclk = 12288000;
 	}
@@ -734,19 +678,19 @@ static int configure_clock(struct snd_soc_codec *codec)
 		return -EINVAL;
 	}
 
-	reg = wm9081_read(codec, WM9081_CLOCK_CONTROL_1);
+	reg = snd_soc_read(codec, WM9081_CLOCK_CONTROL_1);
 	if (mclkdiv)
 		reg |= WM9081_MCLKDIV2;
 	else
 		reg &= ~WM9081_MCLKDIV2;
-	wm9081_write(codec, WM9081_CLOCK_CONTROL_1, reg);
+	snd_soc_write(codec, WM9081_CLOCK_CONTROL_1, reg);
 
-	reg = wm9081_read(codec, WM9081_CLOCK_CONTROL_3);
+	reg = snd_soc_read(codec, WM9081_CLOCK_CONTROL_3);
 	if (fll)
 		reg |= WM9081_CLK_SRC_SEL;
 	else
 		reg &= ~WM9081_CLK_SRC_SEL;
-	wm9081_write(codec, WM9081_CLOCK_CONTROL_3, reg);
+	snd_soc_write(codec, WM9081_CLOCK_CONTROL_3, reg);
 
 	dev_dbg(codec->dev, "CLK_SYS is %dHz\n", wm9081->sysclk_rate);
 
@@ -846,76 +790,76 @@ static int wm9081_set_bias_level(struct snd_soc_codec *codec,
846 790
847 case SND_SOC_BIAS_PREPARE: 791 case SND_SOC_BIAS_PREPARE:
848 /* VMID=2*40k */ 792 /* VMID=2*40k */
849 reg = wm9081_read(codec, WM9081_VMID_CONTROL); 793 reg = snd_soc_read(codec, WM9081_VMID_CONTROL);
850 reg &= ~WM9081_VMID_SEL_MASK; 794 reg &= ~WM9081_VMID_SEL_MASK;
851 reg |= 0x2; 795 reg |= 0x2;
852 wm9081_write(codec, WM9081_VMID_CONTROL, reg); 796 snd_soc_write(codec, WM9081_VMID_CONTROL, reg);
853 797
854 /* Normal bias current */ 798 /* Normal bias current */
855 reg = wm9081_read(codec, WM9081_BIAS_CONTROL_1); 799 reg = snd_soc_read(codec, WM9081_BIAS_CONTROL_1);
856 reg &= ~WM9081_STBY_BIAS_ENA; 800 reg &= ~WM9081_STBY_BIAS_ENA;
857 wm9081_write(codec, WM9081_BIAS_CONTROL_1, reg); 801 snd_soc_write(codec, WM9081_BIAS_CONTROL_1, reg);
858 break; 802 break;
859 803
860 case SND_SOC_BIAS_STANDBY: 804 case SND_SOC_BIAS_STANDBY:
861 /* Initial cold start */ 805 /* Initial cold start */
862 if (codec->bias_level == SND_SOC_BIAS_OFF) { 806 if (codec->bias_level == SND_SOC_BIAS_OFF) {
863 /* Disable LINEOUT discharge */ 807 /* Disable LINEOUT discharge */
864 reg = wm9081_read(codec, WM9081_ANTI_POP_CONTROL); 808 reg = snd_soc_read(codec, WM9081_ANTI_POP_CONTROL);
865 reg &= ~WM9081_LINEOUT_DISCH; 809 reg &= ~WM9081_LINEOUT_DISCH;
866 wm9081_write(codec, WM9081_ANTI_POP_CONTROL, reg); 810 snd_soc_write(codec, WM9081_ANTI_POP_CONTROL, reg);
867 811
868 /* Select startup bias source */ 812 /* Select startup bias source */
869 reg = wm9081_read(codec, WM9081_BIAS_CONTROL_1); 813 reg = snd_soc_read(codec, WM9081_BIAS_CONTROL_1);
870 reg |= WM9081_BIAS_SRC | WM9081_BIAS_ENA; 814 reg |= WM9081_BIAS_SRC | WM9081_BIAS_ENA;
871 wm9081_write(codec, WM9081_BIAS_CONTROL_1, reg); 815 snd_soc_write(codec, WM9081_BIAS_CONTROL_1, reg);
872 816
873 /* VMID 2*4k; Soft VMID ramp enable */ 817 /* VMID 2*4k; Soft VMID ramp enable */
874 reg = wm9081_read(codec, WM9081_VMID_CONTROL); 818 reg = snd_soc_read(codec, WM9081_VMID_CONTROL);
875 reg |= WM9081_VMID_RAMP | 0x6; 819 reg |= WM9081_VMID_RAMP | 0x6;
876 wm9081_write(codec, WM9081_VMID_CONTROL, reg); 820 snd_soc_write(codec, WM9081_VMID_CONTROL, reg);
877 821
878 mdelay(100); 822 mdelay(100);
879 823
880 /* Normal bias enable & soft start off */ 824 /* Normal bias enable & soft start off */
881 reg |= WM9081_BIAS_ENA; 825 reg |= WM9081_BIAS_ENA;
882 reg &= ~WM9081_VMID_RAMP; 826 reg &= ~WM9081_VMID_RAMP;
883 wm9081_write(codec, WM9081_VMID_CONTROL, reg); 827 snd_soc_write(codec, WM9081_VMID_CONTROL, reg);
884 828
885 /* Standard bias source */ 829 /* Standard bias source */
886 reg = wm9081_read(codec, WM9081_BIAS_CONTROL_1); 830 reg = snd_soc_read(codec, WM9081_BIAS_CONTROL_1);
887 reg &= ~WM9081_BIAS_SRC; 831 reg &= ~WM9081_BIAS_SRC;
888 wm9081_write(codec, WM9081_BIAS_CONTROL_1, reg); 832 snd_soc_write(codec, WM9081_BIAS_CONTROL_1, reg);
889 } 833 }
890 834
891 /* VMID 2*240k */ 835 /* VMID 2*240k */
892 reg = wm9081_read(codec, WM9081_BIAS_CONTROL_1); 836 reg = snd_soc_read(codec, WM9081_BIAS_CONTROL_1);
893 reg &= ~WM9081_VMID_SEL_MASK; 837 reg &= ~WM9081_VMID_SEL_MASK;
894 reg |= 0x40; 838 reg |= 0x40;
895 wm9081_write(codec, WM9081_VMID_CONTROL, reg); 839 snd_soc_write(codec, WM9081_VMID_CONTROL, reg);
896 840
897 /* Standby bias current on */ 841 /* Standby bias current on */
898 reg = wm9081_read(codec, WM9081_BIAS_CONTROL_1); 842 reg = snd_soc_read(codec, WM9081_BIAS_CONTROL_1);
899 reg |= WM9081_STBY_BIAS_ENA; 843 reg |= WM9081_STBY_BIAS_ENA;
900 wm9081_write(codec, WM9081_BIAS_CONTROL_1, reg); 844 snd_soc_write(codec, WM9081_BIAS_CONTROL_1, reg);
901 break; 845 break;
902 846
903 case SND_SOC_BIAS_OFF: 847 case SND_SOC_BIAS_OFF:
904 /* Startup bias source */ 848 /* Startup bias source */
905 reg = wm9081_read(codec, WM9081_BIAS_CONTROL_1); 849 reg = snd_soc_read(codec, WM9081_BIAS_CONTROL_1);
906 reg |= WM9081_BIAS_SRC; 850 reg |= WM9081_BIAS_SRC;
907 wm9081_write(codec, WM9081_BIAS_CONTROL_1, reg); 851 snd_soc_write(codec, WM9081_BIAS_CONTROL_1, reg);
908 852
909 /* Disable VMID and biases with soft ramping */ 853 /* Disable VMID and biases with soft ramping */
910 reg = wm9081_read(codec, WM9081_VMID_CONTROL); 854 reg = snd_soc_read(codec, WM9081_VMID_CONTROL);
911 reg &= ~(WM9081_VMID_SEL_MASK | WM9081_BIAS_ENA); 855 reg &= ~(WM9081_VMID_SEL_MASK | WM9081_BIAS_ENA);
912 reg |= WM9081_VMID_RAMP; 856 reg |= WM9081_VMID_RAMP;
913 wm9081_write(codec, WM9081_VMID_CONTROL, reg); 857 snd_soc_write(codec, WM9081_VMID_CONTROL, reg);
914 858
915 /* Actively discharge LINEOUT */ 859 /* Actively discharge LINEOUT */
916 reg = wm9081_read(codec, WM9081_ANTI_POP_CONTROL); 860 reg = snd_soc_read(codec, WM9081_ANTI_POP_CONTROL);
917 reg |= WM9081_LINEOUT_DISCH; 861 reg |= WM9081_LINEOUT_DISCH;
918 wm9081_write(codec, WM9081_ANTI_POP_CONTROL, reg); 862 snd_soc_write(codec, WM9081_ANTI_POP_CONTROL, reg);
919 break; 863 break;
920 } 864 }
921 865
@@ -929,7 +873,7 @@ static int wm9081_set_dai_fmt(struct snd_soc_dai *dai,
929{ 873{
930 struct snd_soc_codec *codec = dai->codec; 874 struct snd_soc_codec *codec = dai->codec;
931 struct wm9081_priv *wm9081 = codec->private_data; 875 struct wm9081_priv *wm9081 = codec->private_data;
932 unsigned int aif2 = wm9081_read(codec, WM9081_AUDIO_INTERFACE_2); 876 unsigned int aif2 = snd_soc_read(codec, WM9081_AUDIO_INTERFACE_2);
933 877
934 aif2 &= ~(WM9081_AIF_BCLK_INV | WM9081_AIF_LRCLK_INV | 878 aif2 &= ~(WM9081_AIF_BCLK_INV | WM9081_AIF_LRCLK_INV |
935 WM9081_BCLK_DIR | WM9081_LRCLK_DIR | WM9081_AIF_FMT_MASK); 879 WM9081_BCLK_DIR | WM9081_LRCLK_DIR | WM9081_AIF_FMT_MASK);
@@ -1010,7 +954,7 @@ static int wm9081_set_dai_fmt(struct snd_soc_dai *dai,
1010 return -EINVAL; 954 return -EINVAL;
1011 } 955 }
1012 956
1013 wm9081_write(codec, WM9081_AUDIO_INTERFACE_2, aif2); 957 snd_soc_write(codec, WM9081_AUDIO_INTERFACE_2, aif2);
1014 958
1015 return 0; 959 return 0;
1016} 960}
@@ -1024,47 +968,51 @@ static int wm9081_hw_params(struct snd_pcm_substream *substream,
1024 int ret, i, best, best_val, cur_val; 968 int ret, i, best, best_val, cur_val;
1025 unsigned int clk_ctrl2, aif1, aif2, aif3, aif4; 969 unsigned int clk_ctrl2, aif1, aif2, aif3, aif4;
1026 970
1027 clk_ctrl2 = wm9081_read(codec, WM9081_CLOCK_CONTROL_2); 971 clk_ctrl2 = snd_soc_read(codec, WM9081_CLOCK_CONTROL_2);
1028 clk_ctrl2 &= ~(WM9081_CLK_SYS_RATE_MASK | WM9081_SAMPLE_RATE_MASK); 972 clk_ctrl2 &= ~(WM9081_CLK_SYS_RATE_MASK | WM9081_SAMPLE_RATE_MASK);
1029 973
1030 aif1 = wm9081_read(codec, WM9081_AUDIO_INTERFACE_1); 974 aif1 = snd_soc_read(codec, WM9081_AUDIO_INTERFACE_1);
1031 975
1032 aif2 = wm9081_read(codec, WM9081_AUDIO_INTERFACE_2); 976 aif2 = snd_soc_read(codec, WM9081_AUDIO_INTERFACE_2);
1033 aif2 &= ~WM9081_AIF_WL_MASK; 977 aif2 &= ~WM9081_AIF_WL_MASK;
1034 978
1035 aif3 = wm9081_read(codec, WM9081_AUDIO_INTERFACE_3); 979 aif3 = snd_soc_read(codec, WM9081_AUDIO_INTERFACE_3);
1036 aif3 &= ~WM9081_BCLK_DIV_MASK; 980 aif3 &= ~WM9081_BCLK_DIV_MASK;
1037 981
1038 aif4 = wm9081_read(codec, WM9081_AUDIO_INTERFACE_4); 982 aif4 = snd_soc_read(codec, WM9081_AUDIO_INTERFACE_4);
1039 aif4 &= ~WM9081_LRCLK_RATE_MASK; 983 aif4 &= ~WM9081_LRCLK_RATE_MASK;
1040 984
1041 /* What BCLK do we need? */
1042 wm9081->fs = params_rate(params); 985 wm9081->fs = params_rate(params);
1043 wm9081->bclk = 2 * wm9081->fs;
1044 switch (params_format(params)) {
1045 case SNDRV_PCM_FORMAT_S16_LE:
1046 wm9081->bclk *= 16;
1047 break;
1048 case SNDRV_PCM_FORMAT_S20_3LE:
1049 wm9081->bclk *= 20;
1050 aif2 |= 0x4;
1051 break;
1052 case SNDRV_PCM_FORMAT_S24_LE:
1053 wm9081->bclk *= 24;
1054 aif2 |= 0x8;
1055 break;
1056 case SNDRV_PCM_FORMAT_S32_LE:
1057 wm9081->bclk *= 32;
1058 aif2 |= 0xc;
1059 break;
1060 default:
1061 return -EINVAL;
1062 }
1063 986
1064 if (aif1 & WM9081_AIFDAC_TDM_MODE_MASK) { 987 if (wm9081->tdm_width) {
988 /* If TDM is set up then that fixes our BCLK. */
1065 int slots = ((aif1 & WM9081_AIFDAC_TDM_MODE_MASK) >> 989 int slots = ((aif1 & WM9081_AIFDAC_TDM_MODE_MASK) >>
1066 WM9081_AIFDAC_TDM_MODE_SHIFT) + 1; 990 WM9081_AIFDAC_TDM_MODE_SHIFT) + 1;
1067 wm9081->bclk *= slots; 991
992 wm9081->bclk = wm9081->fs * wm9081->tdm_width * slots;
993 } else {
994 /* Otherwise work out a BCLK from the sample size */
995 wm9081->bclk = 2 * wm9081->fs;
996
997 switch (params_format(params)) {
998 case SNDRV_PCM_FORMAT_S16_LE:
999 wm9081->bclk *= 16;
1000 break;
1001 case SNDRV_PCM_FORMAT_S20_3LE:
1002 wm9081->bclk *= 20;
1003 aif2 |= 0x4;
1004 break;
1005 case SNDRV_PCM_FORMAT_S24_LE:
1006 wm9081->bclk *= 24;
1007 aif2 |= 0x8;
1008 break;
1009 case SNDRV_PCM_FORMAT_S32_LE:
1010 wm9081->bclk *= 32;
1011 aif2 |= 0xc;
1012 break;
1013 default:
1014 return -EINVAL;
1015 }
1068 } 1016 }
1069 1017
1070 dev_dbg(codec->dev, "Target BCLK is %dHz\n", wm9081->bclk); 1018 dev_dbg(codec->dev, "Target BCLK is %dHz\n", wm9081->bclk);
@@ -1149,22 +1097,22 @@ static int wm9081_hw_params(struct snd_pcm_substream *substream,
1149 s->name, s->rate); 1097 s->name, s->rate);
1150 1098
1151 /* If the EQ is enabled then disable it while we write out */ 1099 /* If the EQ is enabled then disable it while we write out */
1152 eq1 = wm9081_read(codec, WM9081_EQ_1) & WM9081_EQ_ENA; 1100 eq1 = snd_soc_read(codec, WM9081_EQ_1) & WM9081_EQ_ENA;
1153 if (eq1 & WM9081_EQ_ENA) 1101 if (eq1 & WM9081_EQ_ENA)
1154 wm9081_write(codec, WM9081_EQ_1, 0); 1102 snd_soc_write(codec, WM9081_EQ_1, 0);
1155 1103
1156 /* Write out the other values */ 1104 /* Write out the other values */
1157 for (i = 1; i < ARRAY_SIZE(s->config); i++) 1105 for (i = 1; i < ARRAY_SIZE(s->config); i++)
1158 wm9081_write(codec, WM9081_EQ_1 + i, s->config[i]); 1106 snd_soc_write(codec, WM9081_EQ_1 + i, s->config[i]);
1159 1107
1160 eq1 |= (s->config[0] & ~WM9081_EQ_ENA); 1108 eq1 |= (s->config[0] & ~WM9081_EQ_ENA);
1161 wm9081_write(codec, WM9081_EQ_1, eq1); 1109 snd_soc_write(codec, WM9081_EQ_1, eq1);
1162 } 1110 }
1163 1111
1164 wm9081_write(codec, WM9081_CLOCK_CONTROL_2, clk_ctrl2); 1112 snd_soc_write(codec, WM9081_CLOCK_CONTROL_2, clk_ctrl2);
1165 wm9081_write(codec, WM9081_AUDIO_INTERFACE_2, aif2); 1113 snd_soc_write(codec, WM9081_AUDIO_INTERFACE_2, aif2);
1166 wm9081_write(codec, WM9081_AUDIO_INTERFACE_3, aif3); 1114 snd_soc_write(codec, WM9081_AUDIO_INTERFACE_3, aif3);
1167 wm9081_write(codec, WM9081_AUDIO_INTERFACE_4, aif4); 1115 snd_soc_write(codec, WM9081_AUDIO_INTERFACE_4, aif4);
1168 1116
1169 return 0; 1117 return 0;
1170} 1118}
@@ -1174,14 +1122,14 @@ static int wm9081_digital_mute(struct snd_soc_dai *codec_dai, int mute)
1174 struct snd_soc_codec *codec = codec_dai->codec; 1122 struct snd_soc_codec *codec = codec_dai->codec;
1175 unsigned int reg; 1123 unsigned int reg;
1176 1124
1177 reg = wm9081_read(codec, WM9081_DAC_DIGITAL_2); 1125 reg = snd_soc_read(codec, WM9081_DAC_DIGITAL_2);
1178 1126
1179 if (mute) 1127 if (mute)
1180 reg |= WM9081_DAC_MUTE; 1128 reg |= WM9081_DAC_MUTE;
1181 else 1129 else
1182 reg &= ~WM9081_DAC_MUTE; 1130 reg &= ~WM9081_DAC_MUTE;
1183 1131
1184 wm9081_write(codec, WM9081_DAC_DIGITAL_2, reg); 1132 snd_soc_write(codec, WM9081_DAC_DIGITAL_2, reg);
1185 1133
1186 return 0; 1134 return 0;
1187} 1135}
@@ -1207,19 +1155,25 @@ static int wm9081_set_sysclk(struct snd_soc_dai *codec_dai,
1207} 1155}
1208 1156
1209static int wm9081_set_tdm_slot(struct snd_soc_dai *dai, 1157static int wm9081_set_tdm_slot(struct snd_soc_dai *dai,
1210 unsigned int mask, int slots) 1158 unsigned int tx_mask, unsigned int rx_mask, int slots, int slot_width)
1211{ 1159{
1212 struct snd_soc_codec *codec = dai->codec; 1160 struct snd_soc_codec *codec = dai->codec;
1213 unsigned int aif1 = wm9081_read(codec, WM9081_AUDIO_INTERFACE_1); 1161 struct wm9081_priv *wm9081 = codec->private_data;
1162 unsigned int aif1 = snd_soc_read(codec, WM9081_AUDIO_INTERFACE_1);
1214 1163
1215 aif1 &= ~(WM9081_AIFDAC_TDM_SLOT_MASK | WM9081_AIFDAC_TDM_MODE_MASK); 1164 aif1 &= ~(WM9081_AIFDAC_TDM_SLOT_MASK | WM9081_AIFDAC_TDM_MODE_MASK);
1216 1165
1217 if (slots < 1 || slots > 4) 1166 if (slots < 0 || slots > 4)
1218 return -EINVAL; 1167 return -EINVAL;
1219 1168
1169 wm9081->tdm_width = slot_width;
1170
1171 if (slots == 0)
1172 slots = 1;
1173
1220 aif1 |= (slots - 1) << WM9081_AIFDAC_TDM_MODE_SHIFT; 1174 aif1 |= (slots - 1) << WM9081_AIFDAC_TDM_MODE_SHIFT;
1221 1175
1222 switch (mask) { 1176 switch (rx_mask) {
1223 case 1: 1177 case 1:
1224 break; 1178 break;
1225 case 2: 1179 case 2:
@@ -1235,7 +1189,7 @@ static int wm9081_set_tdm_slot(struct snd_soc_dai *dai,
1235 return -EINVAL; 1189 return -EINVAL;
1236 } 1190 }
1237 1191
1238 wm9081_write(codec, WM9081_AUDIO_INTERFACE_1, aif1); 1192 snd_soc_write(codec, WM9081_AUDIO_INTERFACE_1, aif1);
1239 1193
1240 return 0; 1194 return 0;
1241} 1195}
@@ -1357,7 +1311,7 @@ static int wm9081_resume(struct platform_device *pdev)
1357 if (i == WM9081_SOFTWARE_RESET) 1311 if (i == WM9081_SOFTWARE_RESET)
1358 continue; 1312 continue;
1359 1313
1360 wm9081_write(codec, i, reg_cache[i]); 1314 snd_soc_write(codec, i, reg_cache[i]);
1361 } 1315 }
1362 1316
1363 wm9081_set_bias_level(codec, SND_SOC_BIAS_STANDBY); 1317 wm9081_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
@@ -1377,7 +1331,8 @@ struct snd_soc_codec_device soc_codec_dev_wm9081 = {
1377}; 1331};
1378EXPORT_SYMBOL_GPL(soc_codec_dev_wm9081); 1332EXPORT_SYMBOL_GPL(soc_codec_dev_wm9081);
1379 1333
1380static int wm9081_register(struct wm9081_priv *wm9081) 1334static int wm9081_register(struct wm9081_priv *wm9081,
1335 enum snd_soc_control_type control)
1381{ 1336{
1382 struct snd_soc_codec *codec = &wm9081->codec; 1337 struct snd_soc_codec *codec = &wm9081->codec;
1383 int ret; 1338 int ret;
@@ -1396,19 +1351,24 @@ static int wm9081_register(struct wm9081_priv *wm9081)
1396 codec->private_data = wm9081; 1351 codec->private_data = wm9081;
1397 codec->name = "WM9081"; 1352 codec->name = "WM9081";
1398 codec->owner = THIS_MODULE; 1353 codec->owner = THIS_MODULE;
1399 codec->read = wm9081_read;
1400 codec->write = wm9081_write;
1401 codec->dai = &wm9081_dai; 1354 codec->dai = &wm9081_dai;
1402 codec->num_dai = 1; 1355 codec->num_dai = 1;
1403 codec->reg_cache_size = ARRAY_SIZE(wm9081->reg_cache); 1356 codec->reg_cache_size = ARRAY_SIZE(wm9081->reg_cache);
1404 codec->reg_cache = &wm9081->reg_cache; 1357 codec->reg_cache = &wm9081->reg_cache;
1405 codec->bias_level = SND_SOC_BIAS_OFF; 1358 codec->bias_level = SND_SOC_BIAS_OFF;
1406 codec->set_bias_level = wm9081_set_bias_level; 1359 codec->set_bias_level = wm9081_set_bias_level;
1360 codec->volatile_register = wm9081_volatile_register;
1407 1361
1408 memcpy(codec->reg_cache, wm9081_reg_defaults, 1362 memcpy(codec->reg_cache, wm9081_reg_defaults,
1409 sizeof(wm9081_reg_defaults)); 1363 sizeof(wm9081_reg_defaults));
1410 1364
1411 reg = wm9081_read_hw(codec, WM9081_SOFTWARE_RESET); 1365 ret = snd_soc_codec_set_cache_io(codec, 8, 16, control);
1366 if (ret != 0) {
1367 dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
1368 return ret;
1369 }
1370
1371 reg = snd_soc_read(codec, WM9081_SOFTWARE_RESET);
1412 if (reg != 0x9081) { 1372 if (reg != 0x9081) {
1413 dev_err(codec->dev, "Device is not a WM9081: ID=0x%x\n", reg); 1373 dev_err(codec->dev, "Device is not a WM9081: ID=0x%x\n", reg);
1414 ret = -EINVAL; 1374 ret = -EINVAL;
@@ -1424,10 +1384,10 @@ static int wm9081_register(struct wm9081_priv *wm9081)
1424 wm9081_set_bias_level(codec, SND_SOC_BIAS_STANDBY); 1384 wm9081_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
1425 1385
1426 /* Enable zero cross by default */ 1386 /* Enable zero cross by default */
1427 reg = wm9081_read(codec, WM9081_ANALOGUE_LINEOUT); 1387 reg = snd_soc_read(codec, WM9081_ANALOGUE_LINEOUT);
1428 wm9081_write(codec, WM9081_ANALOGUE_LINEOUT, reg | WM9081_LINEOUTZC); 1388 snd_soc_write(codec, WM9081_ANALOGUE_LINEOUT, reg | WM9081_LINEOUTZC);
1429 reg = wm9081_read(codec, WM9081_ANALOGUE_SPEAKER_PGA); 1389 reg = snd_soc_read(codec, WM9081_ANALOGUE_SPEAKER_PGA);
1430 wm9081_write(codec, WM9081_ANALOGUE_SPEAKER_PGA, 1390 snd_soc_write(codec, WM9081_ANALOGUE_SPEAKER_PGA,
1431 reg | WM9081_SPKPGAZC); 1391 reg | WM9081_SPKPGAZC);
1432 1392
1433 wm9081_dai.dev = codec->dev; 1393 wm9081_dai.dev = codec->dev;
@@ -1482,7 +1442,7 @@ static __devinit int wm9081_i2c_probe(struct i2c_client *i2c,
1482 1442
1483 codec->dev = &i2c->dev; 1443 codec->dev = &i2c->dev;
1484 1444
1485 return wm9081_register(wm9081); 1445 return wm9081_register(wm9081, SND_SOC_I2C);
1486} 1446}
1487 1447
1488static __devexit int wm9081_i2c_remove(struct i2c_client *client) 1448static __devexit int wm9081_i2c_remove(struct i2c_client *client)
@@ -1492,6 +1452,21 @@ static __devexit int wm9081_i2c_remove(struct i2c_client *client)
1492 return 0; 1452 return 0;
1493} 1453}
1494 1454
1455#ifdef CONFIG_PM
1456static int wm9081_i2c_suspend(struct i2c_client *client, pm_message_t msg)
1457{
1458 return snd_soc_suspend_device(&client->dev);
1459}
1460
1461static int wm9081_i2c_resume(struct i2c_client *client)
1462{
1463 return snd_soc_resume_device(&client->dev);
1464}
1465#else
1466#define wm9081_i2c_suspend NULL
1467#define wm9081_i2c_resume NULL
1468#endif
1469
1495static const struct i2c_device_id wm9081_i2c_id[] = { 1470static const struct i2c_device_id wm9081_i2c_id[] = {
1496 { "wm9081", 0 }, 1471 { "wm9081", 0 },
1497 { } 1472 { }
@@ -1505,6 +1480,8 @@ static struct i2c_driver wm9081_i2c_driver = {
1505 }, 1480 },
1506 .probe = wm9081_i2c_probe, 1481 .probe = wm9081_i2c_probe,
1507 .remove = __devexit_p(wm9081_i2c_remove), 1482 .remove = __devexit_p(wm9081_i2c_remove),
1483 .suspend = wm9081_i2c_suspend,
1484 .resume = wm9081_i2c_resume,
1508 .id_table = wm9081_i2c_id, 1485 .id_table = wm9081_i2c_id,
1509}; 1486};
1510 1487
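The wm9081.c changes above move the driver onto the shared snd_soc_read()/snd_soc_write() register-cache helpers, add I2C suspend/resume hooks, and extend set_tdm_slot() so that a configured TDM slot width, rather than the sample format, fixes BCLK. A minimal sketch of the new calling convention from a machine driver follows; it assumes the matching five-argument snd_soc_dai_set_tdm_slot() core wrapper, and the function name and slot layout are illustrative only:

#include <sound/soc.h>

/* Hypothetical board setup fragment: put the WM9081 on a four-slot,
 * 32-bit TDM bus.  With tdm_width set, wm9081_hw_params() above derives
 * BCLK = fs * slot_width * slots (48000 * 32 * 4 = 6144000Hz at 48kHz);
 * without TDM it falls back to BCLK = 2 * fs * sample_width
 * (e.g. 2 * 48000 * 16 = 1536000Hz for S16_LE).
 */
static int example_board_init(struct snd_soc_dai *codec_dai)
{
	/* The WM9081 is playback-only, so only rx_mask is used here:
	 * receive on slot 0 of the four slots. */
	return snd_soc_dai_set_tdm_slot(codec_dai, 0, 1, 4, 32);
}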
diff --git a/sound/soc/codecs/wm9705.c b/sound/soc/codecs/wm9705.c
index fa88b463e71f..e7d2840d9e59 100644
--- a/sound/soc/codecs/wm9705.c
+++ b/sound/soc/codecs/wm9705.c
@@ -406,7 +406,7 @@ static int wm9705_soc_probe(struct platform_device *pdev)
406 ret = snd_soc_init_card(socdev); 406 ret = snd_soc_init_card(socdev);
407 if (ret < 0) { 407 if (ret < 0) {
408 printk(KERN_ERR "wm9705: failed to register card\n"); 408 printk(KERN_ERR "wm9705: failed to register card\n");
409 goto pcm_err; 409 goto reset_err;
410 } 410 }
411 411
412 return 0; 412 return 0;
diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
new file mode 100644
index 000000000000..e542027eea89
--- /dev/null
+++ b/sound/soc/codecs/wm_hubs.c
@@ -0,0 +1,743 @@
1/*
2 * wm_hubs.c -- WM8993/4 common code
3 *
4 * Copyright 2009 Wolfson Microelectronics plc
5 *
6 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
7 *
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/module.h>
15#include <linux/moduleparam.h>
16#include <linux/init.h>
17#include <linux/delay.h>
18#include <linux/pm.h>
19#include <linux/i2c.h>
20#include <linux/platform_device.h>
21#include <sound/core.h>
22#include <sound/pcm.h>
23#include <sound/pcm_params.h>
24#include <sound/soc.h>
25#include <sound/soc-dapm.h>
26#include <sound/initval.h>
27#include <sound/tlv.h>
28
29#include "wm8993.h"
30#include "wm_hubs.h"
31
32const DECLARE_TLV_DB_SCALE(wm_hubs_spkmix_tlv, -300, 300, 0);
33EXPORT_SYMBOL_GPL(wm_hubs_spkmix_tlv);
34
35static const DECLARE_TLV_DB_SCALE(inpga_tlv, -1650, 150, 0);
36static const DECLARE_TLV_DB_SCALE(inmix_sw_tlv, 0, 3000, 0);
37static const DECLARE_TLV_DB_SCALE(inmix_tlv, -1500, 300, 1);
38static const DECLARE_TLV_DB_SCALE(earpiece_tlv, -600, 600, 0);
39static const DECLARE_TLV_DB_SCALE(outmix_tlv, -2100, 300, 0);
40static const DECLARE_TLV_DB_SCALE(spkmixout_tlv, -1800, 600, 1);
41static const DECLARE_TLV_DB_SCALE(outpga_tlv, -5700, 100, 0);
42static const unsigned int spkboost_tlv[] = {
43 TLV_DB_RANGE_HEAD(7),
44 0, 6, TLV_DB_SCALE_ITEM(0, 150, 0),
45 7, 7, TLV_DB_SCALE_ITEM(1200, 0, 0),
46};
47static const DECLARE_TLV_DB_SCALE(line_tlv, -600, 600, 0);
48
49static const char *speaker_ref_text[] = {
50 "SPKVDD/2",
51 "VMID",
52};
53
54static const struct soc_enum speaker_ref =
55 SOC_ENUM_SINGLE(WM8993_SPEAKER_MIXER, 8, 2, speaker_ref_text);
56
57static const char *speaker_mode_text[] = {
58 "Class D",
59 "Class AB",
60};
61
62static const struct soc_enum speaker_mode =
63 SOC_ENUM_SINGLE(WM8993_SPKMIXR_ATTENUATION, 8, 2, speaker_mode_text);
64
65static void wait_for_dc_servo(struct snd_soc_codec *codec)
66{
67 unsigned int reg;
68 int count = 0;
69
70 dev_dbg(codec->dev, "Waiting for DC servo...\n");
71 do {
72 count++;
73 msleep(1);
74 reg = snd_soc_read(codec, WM8993_DC_SERVO_READBACK_0);
75 dev_dbg(codec->dev, "DC servo status: %x\n", reg);
76 } while ((reg & WM8993_DCS_CAL_COMPLETE_MASK)
77 != WM8993_DCS_CAL_COMPLETE_MASK && count < 1000);
78
79 if ((reg & WM8993_DCS_CAL_COMPLETE_MASK)
80 != WM8993_DCS_CAL_COMPLETE_MASK)
81 dev_err(codec->dev, "Timed out waiting for DC Servo\n");
82}
83
84/*
85 * Update the DC servo calibration on gain changes
86 */
87static int wm8993_put_dc_servo(struct snd_kcontrol *kcontrol,
88 struct snd_ctl_elem_value *ucontrol)
89{
90 struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
91 int ret;
92
93 ret = snd_soc_put_volsw_2r(kcontrol, ucontrol);
94
95 /* Only need to do this if the outputs are active */
96 if (snd_soc_read(codec, WM8993_POWER_MANAGEMENT_1)
97 & (WM8993_HPOUT1L_ENA | WM8993_HPOUT1R_ENA))
98 snd_soc_update_bits(codec,
99 WM8993_DC_SERVO_0,
100 WM8993_DCS_TRIG_SINGLE_0 |
101 WM8993_DCS_TRIG_SINGLE_1,
102 WM8993_DCS_TRIG_SINGLE_0 |
103 WM8993_DCS_TRIG_SINGLE_1);
104
105 return ret;
106}
107
108static const struct snd_kcontrol_new analogue_snd_controls[] = {
109SOC_SINGLE_TLV("IN1L Volume", WM8993_LEFT_LINE_INPUT_1_2_VOLUME, 0, 31, 0,
110 inpga_tlv),
111SOC_SINGLE("IN1L Switch", WM8993_LEFT_LINE_INPUT_1_2_VOLUME, 7, 1, 1),
112SOC_SINGLE("IN1L ZC Switch", WM8993_LEFT_LINE_INPUT_1_2_VOLUME, 7, 1, 0),
113
114SOC_SINGLE_TLV("IN1R Volume", WM8993_RIGHT_LINE_INPUT_1_2_VOLUME, 0, 31, 0,
115 inpga_tlv),
116SOC_SINGLE("IN1R Switch", WM8993_RIGHT_LINE_INPUT_1_2_VOLUME, 7, 1, 1),
117SOC_SINGLE("IN1R ZC Switch", WM8993_RIGHT_LINE_INPUT_1_2_VOLUME, 7, 1, 0),
118
119
120SOC_SINGLE_TLV("IN2L Volume", WM8993_LEFT_LINE_INPUT_3_4_VOLUME, 0, 31, 0,
121 inpga_tlv),
122SOC_SINGLE("IN2L Switch", WM8993_LEFT_LINE_INPUT_3_4_VOLUME, 7, 1, 1),
123SOC_SINGLE("IN2L ZC Switch", WM8993_LEFT_LINE_INPUT_3_4_VOLUME, 7, 1, 0),
124
125SOC_SINGLE_TLV("IN2R Volume", WM8993_RIGHT_LINE_INPUT_3_4_VOLUME, 0, 31, 0,
126 inpga_tlv),
127SOC_SINGLE("IN2R Switch", WM8993_RIGHT_LINE_INPUT_3_4_VOLUME, 7, 1, 1),
128SOC_SINGLE("IN2R ZC Switch", WM8993_RIGHT_LINE_INPUT_3_4_VOLUME, 7, 1, 0),
129
130SOC_SINGLE_TLV("MIXINL IN2L Volume", WM8993_INPUT_MIXER3, 7, 1, 0,
131 inmix_sw_tlv),
132SOC_SINGLE_TLV("MIXINL IN1L Volume", WM8993_INPUT_MIXER3, 4, 1, 0,
133 inmix_sw_tlv),
134SOC_SINGLE_TLV("MIXINL Output Record Volume", WM8993_INPUT_MIXER3, 0, 7, 0,
135 inmix_tlv),
136SOC_SINGLE_TLV("MIXINL IN1LP Volume", WM8993_INPUT_MIXER5, 6, 7, 0, inmix_tlv),
137SOC_SINGLE_TLV("MIXINL Direct Voice Volume", WM8993_INPUT_MIXER5, 0, 6, 0,
138 inmix_tlv),
139
140SOC_SINGLE_TLV("MIXINR IN2R Volume", WM8993_INPUT_MIXER4, 7, 1, 0,
141 inmix_sw_tlv),
142SOC_SINGLE_TLV("MIXINR IN1R Volume", WM8993_INPUT_MIXER4, 4, 1, 0,
143 inmix_sw_tlv),
144SOC_SINGLE_TLV("MIXINR Output Record Volume", WM8993_INPUT_MIXER4, 0, 7, 0,
145 inmix_tlv),
146SOC_SINGLE_TLV("MIXINR IN1RP Volume", WM8993_INPUT_MIXER6, 6, 7, 0, inmix_tlv),
147SOC_SINGLE_TLV("MIXINR Direct Voice Volume", WM8993_INPUT_MIXER6, 0, 6, 0,
148 inmix_tlv),
149
150SOC_SINGLE_TLV("Left Output Mixer IN2RN Volume", WM8993_OUTPUT_MIXER5, 6, 7, 1,
151 outmix_tlv),
152SOC_SINGLE_TLV("Left Output Mixer IN2LN Volume", WM8993_OUTPUT_MIXER3, 6, 7, 1,
153 outmix_tlv),
154SOC_SINGLE_TLV("Left Output Mixer IN2LP Volume", WM8993_OUTPUT_MIXER3, 9, 7, 1,
155 outmix_tlv),
156SOC_SINGLE_TLV("Left Output Mixer IN1L Volume", WM8993_OUTPUT_MIXER3, 0, 7, 1,
157 outmix_tlv),
158SOC_SINGLE_TLV("Left Output Mixer IN1R Volume", WM8993_OUTPUT_MIXER3, 3, 7, 1,
159 outmix_tlv),
160SOC_SINGLE_TLV("Left Output Mixer Right Input Volume",
161 WM8993_OUTPUT_MIXER5, 3, 7, 1, outmix_tlv),
162SOC_SINGLE_TLV("Left Output Mixer Left Input Volume",
163 WM8993_OUTPUT_MIXER5, 0, 7, 1, outmix_tlv),
164SOC_SINGLE_TLV("Left Output Mixer DAC Volume", WM8993_OUTPUT_MIXER5, 9, 7, 1,
165 outmix_tlv),
166
167SOC_SINGLE_TLV("Right Output Mixer IN2LN Volume",
168 WM8993_OUTPUT_MIXER6, 6, 7, 1, outmix_tlv),
169SOC_SINGLE_TLV("Right Output Mixer IN2RN Volume",
170 WM8993_OUTPUT_MIXER4, 6, 7, 1, outmix_tlv),
171SOC_SINGLE_TLV("Right Output Mixer IN1L Volume",
172 WM8993_OUTPUT_MIXER4, 3, 7, 1, outmix_tlv),
173SOC_SINGLE_TLV("Right Output Mixer IN1R Volume",
174 WM8993_OUTPUT_MIXER4, 0, 7, 1, outmix_tlv),
175SOC_SINGLE_TLV("Right Output Mixer IN2RP Volume",
176 WM8993_OUTPUT_MIXER4, 9, 7, 1, outmix_tlv),
177SOC_SINGLE_TLV("Right Output Mixer Left Input Volume",
178 WM8993_OUTPUT_MIXER6, 3, 7, 1, outmix_tlv),
179SOC_SINGLE_TLV("Right Output Mixer Right Input Volume",
180 WM8993_OUTPUT_MIXER6, 6, 7, 1, outmix_tlv),
181SOC_SINGLE_TLV("Right Output Mixer DAC Volume",
182 WM8993_OUTPUT_MIXER6, 9, 7, 1, outmix_tlv),
183
184SOC_DOUBLE_R_TLV("Output Volume", WM8993_LEFT_OPGA_VOLUME,
185 WM8993_RIGHT_OPGA_VOLUME, 0, 63, 0, outpga_tlv),
186SOC_DOUBLE_R("Output Switch", WM8993_LEFT_OPGA_VOLUME,
187 WM8993_RIGHT_OPGA_VOLUME, 6, 1, 0),
188SOC_DOUBLE_R("Output ZC Switch", WM8993_LEFT_OPGA_VOLUME,
189 WM8993_RIGHT_OPGA_VOLUME, 7, 1, 0),
190
191SOC_SINGLE("Earpiece Switch", WM8993_HPOUT2_VOLUME, 5, 1, 1),
192SOC_SINGLE_TLV("Earpiece Volume", WM8993_HPOUT2_VOLUME, 4, 1, 1, earpiece_tlv),
193
194SOC_SINGLE_TLV("SPKL Input Volume", WM8993_SPKMIXL_ATTENUATION,
195 5, 1, 1, wm_hubs_spkmix_tlv),
196SOC_SINGLE_TLV("SPKL IN1LP Volume", WM8993_SPKMIXL_ATTENUATION,
197 4, 1, 1, wm_hubs_spkmix_tlv),
198SOC_SINGLE_TLV("SPKL Output Volume", WM8993_SPKMIXL_ATTENUATION,
199 3, 1, 1, wm_hubs_spkmix_tlv),
200
201SOC_SINGLE_TLV("SPKR Input Volume", WM8993_SPKMIXR_ATTENUATION,
202 5, 1, 1, wm_hubs_spkmix_tlv),
203SOC_SINGLE_TLV("SPKR IN1RP Volume", WM8993_SPKMIXR_ATTENUATION,
204 4, 1, 1, wm_hubs_spkmix_tlv),
205SOC_SINGLE_TLV("SPKR Output Volume", WM8993_SPKMIXR_ATTENUATION,
206 3, 1, 1, wm_hubs_spkmix_tlv),
207
208SOC_DOUBLE_R_TLV("Speaker Mixer Volume",
209 WM8993_SPKMIXL_ATTENUATION, WM8993_SPKMIXR_ATTENUATION,
210 0, 3, 1, spkmixout_tlv),
211SOC_DOUBLE_R_TLV("Speaker Volume",
212 WM8993_SPEAKER_VOLUME_LEFT, WM8993_SPEAKER_VOLUME_RIGHT,
213 0, 63, 0, outpga_tlv),
214SOC_DOUBLE_R("Speaker Switch",
215 WM8993_SPEAKER_VOLUME_LEFT, WM8993_SPEAKER_VOLUME_RIGHT,
216 6, 1, 0),
217SOC_DOUBLE_R("Speaker ZC Switch",
218 WM8993_SPEAKER_VOLUME_LEFT, WM8993_SPEAKER_VOLUME_RIGHT,
219 7, 1, 0),
220SOC_DOUBLE_TLV("Speaker Boost Volume", WM8993_SPKOUT_BOOST, 0, 3, 7, 0,
221 spkboost_tlv),
222SOC_ENUM("Speaker Reference", speaker_ref),
223SOC_ENUM("Speaker Mode", speaker_mode),
224
225{
226 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Headphone Volume",
227 .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |
228 SNDRV_CTL_ELEM_ACCESS_READWRITE,
229 .tlv.p = outpga_tlv,
230 .info = snd_soc_info_volsw_2r,
231 .get = snd_soc_get_volsw_2r, .put = wm8993_put_dc_servo,
232 .private_value = (unsigned long)&(struct soc_mixer_control) {
233 .reg = WM8993_LEFT_OUTPUT_VOLUME,
234 .rreg = WM8993_RIGHT_OUTPUT_VOLUME,
235 .shift = 0, .max = 63
236 },
237},
238SOC_DOUBLE_R("Headphone Switch", WM8993_LEFT_OUTPUT_VOLUME,
239 WM8993_RIGHT_OUTPUT_VOLUME, 6, 1, 0),
240SOC_DOUBLE_R("Headphone ZC Switch", WM8993_LEFT_OUTPUT_VOLUME,
241 WM8993_RIGHT_OUTPUT_VOLUME, 7, 1, 0),
242
243SOC_SINGLE("LINEOUT1N Switch", WM8993_LINE_OUTPUTS_VOLUME, 6, 1, 1),
244SOC_SINGLE("LINEOUT1P Switch", WM8993_LINE_OUTPUTS_VOLUME, 5, 1, 1),
245SOC_SINGLE_TLV("LINEOUT1 Volume", WM8993_LINE_OUTPUTS_VOLUME, 4, 1, 1,
246 line_tlv),
247
248SOC_SINGLE("LINEOUT2N Switch", WM8993_LINE_OUTPUTS_VOLUME, 2, 1, 1),
249SOC_SINGLE("LINEOUT2P Switch", WM8993_LINE_OUTPUTS_VOLUME, 1, 1, 1),
250SOC_SINGLE_TLV("LINEOUT2 Volume", WM8993_LINE_OUTPUTS_VOLUME, 0, 1, 1,
251 line_tlv),
252};
253
254static int hp_event(struct snd_soc_dapm_widget *w,
255 struct snd_kcontrol *kcontrol, int event)
256{
257 struct snd_soc_codec *codec = w->codec;
258 unsigned int reg = snd_soc_read(codec, WM8993_ANALOGUE_HP_0);
259
260 switch (event) {
261 case SND_SOC_DAPM_POST_PMU:
262 snd_soc_update_bits(codec, WM8993_CHARGE_PUMP_1,
263 WM8993_CP_ENA, WM8993_CP_ENA);
264
265 msleep(5);
266
267 snd_soc_update_bits(codec, WM8993_POWER_MANAGEMENT_1,
268 WM8993_HPOUT1L_ENA | WM8993_HPOUT1R_ENA,
269 WM8993_HPOUT1L_ENA | WM8993_HPOUT1R_ENA);
270
271 reg |= WM8993_HPOUT1L_DLY | WM8993_HPOUT1R_DLY;
272 snd_soc_write(codec, WM8993_ANALOGUE_HP_0, reg);
273
274 /* Start the DC servo */
275 snd_soc_update_bits(codec, WM8993_DC_SERVO_0,
276 0xFFFF,
277 WM8993_DCS_ENA_CHAN_0 |
278 WM8993_DCS_ENA_CHAN_1 |
279 WM8993_DCS_TRIG_STARTUP_1 |
280 WM8993_DCS_TRIG_STARTUP_0);
281 wait_for_dc_servo(codec);
282
283 reg |= WM8993_HPOUT1R_OUTP | WM8993_HPOUT1R_RMV_SHORT |
284 WM8993_HPOUT1L_OUTP | WM8993_HPOUT1L_RMV_SHORT;
285 snd_soc_write(codec, WM8993_ANALOGUE_HP_0, reg);
286 break;
287
288 case SND_SOC_DAPM_PRE_PMD:
289 reg &= ~(WM8993_HPOUT1L_RMV_SHORT |
290 WM8993_HPOUT1L_DLY |
291 WM8993_HPOUT1L_OUTP |
292 WM8993_HPOUT1R_RMV_SHORT |
293 WM8993_HPOUT1R_DLY |
294 WM8993_HPOUT1R_OUTP);
295
296 snd_soc_update_bits(codec, WM8993_DC_SERVO_0,
297 0xffff, 0);
298
299 snd_soc_write(codec, WM8993_ANALOGUE_HP_0, reg);
300 snd_soc_update_bits(codec, WM8993_POWER_MANAGEMENT_1,
301 WM8993_HPOUT1L_ENA | WM8993_HPOUT1R_ENA,
302 0);
303
304 snd_soc_update_bits(codec, WM8993_CHARGE_PUMP_1,
305 WM8993_CP_ENA, 0);
306 break;
307 }
308
309 return 0;
310}
311
312static int earpiece_event(struct snd_soc_dapm_widget *w,
313 struct snd_kcontrol *control, int event)
314{
315 struct snd_soc_codec *codec = w->codec;
316 u16 reg = snd_soc_read(codec, WM8993_ANTIPOP1) & ~WM8993_HPOUT2_IN_ENA;
317
318 switch (event) {
319 case SND_SOC_DAPM_PRE_PMU:
320 reg |= WM8993_HPOUT2_IN_ENA;
321 snd_soc_write(codec, WM8993_ANTIPOP1, reg);
322 udelay(50);
323 break;
324
325 case SND_SOC_DAPM_POST_PMD:
326 snd_soc_write(codec, WM8993_ANTIPOP1, reg);
327 break;
328
329 default:
330 BUG();
331 break;
332 }
333
334 return 0;
335}
336
337static const struct snd_kcontrol_new in1l_pga[] = {
338SOC_DAPM_SINGLE("IN1LP Switch", WM8993_INPUT_MIXER2, 5, 1, 0),
339SOC_DAPM_SINGLE("IN1LN Switch", WM8993_INPUT_MIXER2, 4, 1, 0),
340};
341
342static const struct snd_kcontrol_new in1r_pga[] = {
343SOC_DAPM_SINGLE("IN1RP Switch", WM8993_INPUT_MIXER2, 1, 1, 0),
344SOC_DAPM_SINGLE("IN1RN Switch", WM8993_INPUT_MIXER2, 0, 1, 0),
345};
346
347static const struct snd_kcontrol_new in2l_pga[] = {
348SOC_DAPM_SINGLE("IN2LP Switch", WM8993_INPUT_MIXER2, 7, 1, 0),
349SOC_DAPM_SINGLE("IN2LN Switch", WM8993_INPUT_MIXER2, 6, 1, 0),
350};
351
352static const struct snd_kcontrol_new in2r_pga[] = {
353SOC_DAPM_SINGLE("IN2RP Switch", WM8993_INPUT_MIXER2, 3, 1, 0),
354SOC_DAPM_SINGLE("IN2RN Switch", WM8993_INPUT_MIXER2, 2, 1, 0),
355};
356
357static const struct snd_kcontrol_new mixinl[] = {
358SOC_DAPM_SINGLE("IN2L Switch", WM8993_INPUT_MIXER3, 8, 1, 0),
359SOC_DAPM_SINGLE("IN1L Switch", WM8993_INPUT_MIXER3, 5, 1, 0),
360};
361
362static const struct snd_kcontrol_new mixinr[] = {
363SOC_DAPM_SINGLE("IN2R Switch", WM8993_INPUT_MIXER4, 8, 1, 0),
364SOC_DAPM_SINGLE("IN1R Switch", WM8993_INPUT_MIXER4, 5, 1, 0),
365};
366
367static const struct snd_kcontrol_new left_output_mixer[] = {
368SOC_DAPM_SINGLE("Right Input Switch", WM8993_OUTPUT_MIXER1, 7, 1, 0),
369SOC_DAPM_SINGLE("Left Input Switch", WM8993_OUTPUT_MIXER1, 6, 1, 0),
370SOC_DAPM_SINGLE("IN2RN Switch", WM8993_OUTPUT_MIXER1, 5, 1, 0),
371SOC_DAPM_SINGLE("IN2LN Switch", WM8993_OUTPUT_MIXER1, 4, 1, 0),
372SOC_DAPM_SINGLE("IN2LP Switch", WM8993_OUTPUT_MIXER1, 1, 1, 0),
373SOC_DAPM_SINGLE("IN1R Switch", WM8993_OUTPUT_MIXER1, 3, 1, 0),
374SOC_DAPM_SINGLE("IN1L Switch", WM8993_OUTPUT_MIXER1, 2, 1, 0),
375SOC_DAPM_SINGLE("DAC Switch", WM8993_OUTPUT_MIXER1, 0, 1, 0),
376};
377
378static const struct snd_kcontrol_new right_output_mixer[] = {
379SOC_DAPM_SINGLE("Left Input Switch", WM8993_OUTPUT_MIXER2, 7, 1, 0),
380SOC_DAPM_SINGLE("Right Input Switch", WM8993_OUTPUT_MIXER2, 6, 1, 0),
381SOC_DAPM_SINGLE("IN2LN Switch", WM8993_OUTPUT_MIXER2, 5, 1, 0),
382SOC_DAPM_SINGLE("IN2RN Switch", WM8993_OUTPUT_MIXER2, 4, 1, 0),
383SOC_DAPM_SINGLE("IN1L Switch", WM8993_OUTPUT_MIXER2, 3, 1, 0),
384SOC_DAPM_SINGLE("IN1R Switch", WM8993_OUTPUT_MIXER2, 2, 1, 0),
385SOC_DAPM_SINGLE("IN2RP Switch", WM8993_OUTPUT_MIXER2, 1, 1, 0),
386SOC_DAPM_SINGLE("DAC Switch", WM8993_OUTPUT_MIXER2, 0, 1, 0),
387};
388
389static const struct snd_kcontrol_new earpiece_mixer[] = {
390SOC_DAPM_SINGLE("Direct Voice Switch", WM8993_HPOUT2_MIXER, 5, 1, 0),
391SOC_DAPM_SINGLE("Left Output Switch", WM8993_HPOUT2_MIXER, 4, 1, 0),
392SOC_DAPM_SINGLE("Right Output Switch", WM8993_HPOUT2_MIXER, 3, 1, 0),
393};
394
395static const struct snd_kcontrol_new left_speaker_boost[] = {
396SOC_DAPM_SINGLE("Direct Voice Switch", WM8993_SPKOUT_MIXERS, 5, 1, 0),
397SOC_DAPM_SINGLE("SPKL Switch", WM8993_SPKOUT_MIXERS, 4, 1, 0),
398SOC_DAPM_SINGLE("SPKR Switch", WM8993_SPKOUT_MIXERS, 3, 1, 0),
399};
400
401static const struct snd_kcontrol_new right_speaker_boost[] = {
402SOC_DAPM_SINGLE("Direct Voice Switch", WM8993_SPKOUT_MIXERS, 2, 1, 0),
403SOC_DAPM_SINGLE("SPKL Switch", WM8993_SPKOUT_MIXERS, 1, 1, 0),
404SOC_DAPM_SINGLE("SPKR Switch", WM8993_SPKOUT_MIXERS, 0, 1, 0),
405};
406
407static const struct snd_kcontrol_new line1_mix[] = {
408SOC_DAPM_SINGLE("IN1R Switch", WM8993_LINE_MIXER1, 2, 1, 0),
409SOC_DAPM_SINGLE("IN1L Switch", WM8993_LINE_MIXER1, 1, 1, 0),
410SOC_DAPM_SINGLE("Output Switch", WM8993_LINE_MIXER1, 0, 1, 0),
411};
412
413static const struct snd_kcontrol_new line1n_mix[] = {
414SOC_DAPM_SINGLE("Left Output Switch", WM8993_LINE_MIXER1, 6, 1, 0),
415SOC_DAPM_SINGLE("Right Output Switch", WM8993_LINE_MIXER1, 5, 1, 0),
416};
417
418static const struct snd_kcontrol_new line1p_mix[] = {
419SOC_DAPM_SINGLE("Left Output Switch", WM8993_LINE_MIXER1, 0, 1, 0),
420};
421
422static const struct snd_kcontrol_new line2_mix[] = {
423SOC_DAPM_SINGLE("IN2R Switch", WM8993_LINE_MIXER2, 2, 1, 0),
424SOC_DAPM_SINGLE("IN2L Switch", WM8993_LINE_MIXER2, 1, 1, 0),
425SOC_DAPM_SINGLE("Output Switch", WM8993_LINE_MIXER2, 0, 1, 0),
426};
427
428static const struct snd_kcontrol_new line2n_mix[] = {
429SOC_DAPM_SINGLE("Left Output Switch", WM8993_LINE_MIXER2, 6, 1, 0),
430SOC_DAPM_SINGLE("Right Output Switch", WM8993_LINE_MIXER2, 5, 1, 0),
431};
432
433static const struct snd_kcontrol_new line2p_mix[] = {
434SOC_DAPM_SINGLE("Right Output Switch", WM8993_LINE_MIXER2, 0, 1, 0),
435};
436
437static const struct snd_soc_dapm_widget analogue_dapm_widgets[] = {
438SND_SOC_DAPM_INPUT("IN1LN"),
439SND_SOC_DAPM_INPUT("IN1LP"),
440SND_SOC_DAPM_INPUT("IN2LN"),
441SND_SOC_DAPM_INPUT("IN2LP/VXRN"),
442SND_SOC_DAPM_INPUT("IN1RN"),
443SND_SOC_DAPM_INPUT("IN1RP"),
444SND_SOC_DAPM_INPUT("IN2RN"),
445SND_SOC_DAPM_INPUT("IN2RP/VXRP"),
446
447SND_SOC_DAPM_MICBIAS("MICBIAS2", WM8993_POWER_MANAGEMENT_1, 5, 0),
448SND_SOC_DAPM_MICBIAS("MICBIAS1", WM8993_POWER_MANAGEMENT_1, 4, 0),
449
450SND_SOC_DAPM_MIXER("IN1L PGA", WM8993_POWER_MANAGEMENT_2, 6, 0,
451 in1l_pga, ARRAY_SIZE(in1l_pga)),
452SND_SOC_DAPM_MIXER("IN1R PGA", WM8993_POWER_MANAGEMENT_2, 4, 0,
453 in1r_pga, ARRAY_SIZE(in1r_pga)),
454
455SND_SOC_DAPM_MIXER("IN2L PGA", WM8993_POWER_MANAGEMENT_2, 7, 0,
456 in2l_pga, ARRAY_SIZE(in2l_pga)),
457SND_SOC_DAPM_MIXER("IN2R PGA", WM8993_POWER_MANAGEMENT_2, 5, 0,
458 in2r_pga, ARRAY_SIZE(in2r_pga)),
459
460/* Dummy widgets to represent differential paths */
461SND_SOC_DAPM_PGA("Direct Voice", SND_SOC_NOPM, 0, 0, NULL, 0),
462
463SND_SOC_DAPM_MIXER("MIXINL", WM8993_POWER_MANAGEMENT_2, 9, 0,
464 mixinl, ARRAY_SIZE(mixinl)),
465SND_SOC_DAPM_MIXER("MIXINR", WM8993_POWER_MANAGEMENT_2, 8, 0,
466 mixinr, ARRAY_SIZE(mixinr)),
467
468SND_SOC_DAPM_MIXER("Left Output Mixer", WM8993_POWER_MANAGEMENT_3, 5, 0,
469 left_output_mixer, ARRAY_SIZE(left_output_mixer)),
470SND_SOC_DAPM_MIXER("Right Output Mixer", WM8993_POWER_MANAGEMENT_3, 4, 0,
471 right_output_mixer, ARRAY_SIZE(right_output_mixer)),
472
473SND_SOC_DAPM_PGA("Left Output PGA", WM8993_POWER_MANAGEMENT_3, 7, 0, NULL, 0),
474SND_SOC_DAPM_PGA("Right Output PGA", WM8993_POWER_MANAGEMENT_3, 6, 0, NULL, 0),
475
476SND_SOC_DAPM_PGA_E("Headphone PGA", SND_SOC_NOPM, 0, 0,
477 NULL, 0,
478 hp_event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
479
480SND_SOC_DAPM_MIXER("Earpiece Mixer", SND_SOC_NOPM, 0, 0,
481 earpiece_mixer, ARRAY_SIZE(earpiece_mixer)),
482SND_SOC_DAPM_PGA_E("Earpiece Driver", WM8993_POWER_MANAGEMENT_1, 11, 0,
483 NULL, 0, earpiece_event,
484 SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
485
486SND_SOC_DAPM_MIXER("SPKL Boost", SND_SOC_NOPM, 0, 0,
487 left_speaker_boost, ARRAY_SIZE(left_speaker_boost)),
488SND_SOC_DAPM_MIXER("SPKR Boost", SND_SOC_NOPM, 0, 0,
489 right_speaker_boost, ARRAY_SIZE(right_speaker_boost)),
490
491SND_SOC_DAPM_PGA("SPKL Driver", WM8993_POWER_MANAGEMENT_1, 12, 0,
492 NULL, 0),
493SND_SOC_DAPM_PGA("SPKR Driver", WM8993_POWER_MANAGEMENT_1, 13, 0,
494 NULL, 0),
495
496SND_SOC_DAPM_MIXER("LINEOUT1 Mixer", SND_SOC_NOPM, 0, 0,
497 line1_mix, ARRAY_SIZE(line1_mix)),
498SND_SOC_DAPM_MIXER("LINEOUT2 Mixer", SND_SOC_NOPM, 0, 0,
499 line2_mix, ARRAY_SIZE(line2_mix)),
500
501SND_SOC_DAPM_MIXER("LINEOUT1N Mixer", SND_SOC_NOPM, 0, 0,
502 line1n_mix, ARRAY_SIZE(line1n_mix)),
503SND_SOC_DAPM_MIXER("LINEOUT1P Mixer", SND_SOC_NOPM, 0, 0,
504 line1p_mix, ARRAY_SIZE(line1p_mix)),
505SND_SOC_DAPM_MIXER("LINEOUT2N Mixer", SND_SOC_NOPM, 0, 0,
506 line2n_mix, ARRAY_SIZE(line2n_mix)),
507SND_SOC_DAPM_MIXER("LINEOUT2P Mixer", SND_SOC_NOPM, 0, 0,
508 line2p_mix, ARRAY_SIZE(line2p_mix)),
509
510SND_SOC_DAPM_PGA("LINEOUT1N Driver", WM8993_POWER_MANAGEMENT_3, 13, 0,
511 NULL, 0),
512SND_SOC_DAPM_PGA("LINEOUT1P Driver", WM8993_POWER_MANAGEMENT_3, 12, 0,
513 NULL, 0),
514SND_SOC_DAPM_PGA("LINEOUT2N Driver", WM8993_POWER_MANAGEMENT_3, 11, 0,
515 NULL, 0),
516SND_SOC_DAPM_PGA("LINEOUT2P Driver", WM8993_POWER_MANAGEMENT_3, 10, 0,
517 NULL, 0),
518
519SND_SOC_DAPM_OUTPUT("SPKOUTLP"),
520SND_SOC_DAPM_OUTPUT("SPKOUTLN"),
521SND_SOC_DAPM_OUTPUT("SPKOUTRP"),
522SND_SOC_DAPM_OUTPUT("SPKOUTRN"),
523SND_SOC_DAPM_OUTPUT("HPOUT1L"),
524SND_SOC_DAPM_OUTPUT("HPOUT1R"),
525SND_SOC_DAPM_OUTPUT("HPOUT2P"),
526SND_SOC_DAPM_OUTPUT("HPOUT2N"),
527SND_SOC_DAPM_OUTPUT("LINEOUT1P"),
528SND_SOC_DAPM_OUTPUT("LINEOUT1N"),
529SND_SOC_DAPM_OUTPUT("LINEOUT2P"),
530SND_SOC_DAPM_OUTPUT("LINEOUT2N"),
531};
532
533static const struct snd_soc_dapm_route analogue_routes[] = {
534 { "IN1L PGA", "IN1LP Switch", "IN1LP" },
535 { "IN1L PGA", "IN1LN Switch", "IN1LN" },
536
537 { "IN1R PGA", "IN1RP Switch", "IN1RP" },
538 { "IN1R PGA", "IN1RN Switch", "IN1RN" },
539
540 { "IN2L PGA", "IN2LP Switch", "IN2LP/VXRN" },
541 { "IN2L PGA", "IN2LN Switch", "IN2LN" },
542
543 { "IN2R PGA", "IN2RP Switch", "IN2RP/VXRP" },
544 { "IN2R PGA", "IN2RN Switch", "IN2RN" },
545
546 { "Direct Voice", NULL, "IN2LP/VXRN" },
547 { "Direct Voice", NULL, "IN2RP/VXRP" },
548
549 { "MIXINL", "IN1L Switch", "IN1L PGA" },
550 { "MIXINL", "IN2L Switch", "IN2L PGA" },
551 { "MIXINL", NULL, "Direct Voice" },
552 { "MIXINL", NULL, "IN1LP" },
553 { "MIXINL", NULL, "Left Output Mixer" },
554
555 { "MIXINR", "IN1R Switch", "IN1R PGA" },
556 { "MIXINR", "IN2R Switch", "IN2R PGA" },
557 { "MIXINR", NULL, "Direct Voice" },
558 { "MIXINR", NULL, "IN1RP" },
559 { "MIXINR", NULL, "Right Output Mixer" },
560
561 { "ADCL", NULL, "MIXINL" },
562 { "ADCR", NULL, "MIXINR" },
563
564 { "Left Output Mixer", "Left Input Switch", "MIXINL" },
565 { "Left Output Mixer", "Right Input Switch", "MIXINR" },
566 { "Left Output Mixer", "IN2RN Switch", "IN2RN" },
567 { "Left Output Mixer", "IN2LN Switch", "IN2LN" },
568 { "Left Output Mixer", "IN2LP Switch", "IN2LP/VXRN" },
569 { "Left Output Mixer", "IN1L Switch", "IN1L PGA" },
570 { "Left Output Mixer", "IN1R Switch", "IN1R PGA" },
571
572 { "Right Output Mixer", "Left Input Switch", "MIXINL" },
573 { "Right Output Mixer", "Right Input Switch", "MIXINR" },
574 { "Right Output Mixer", "IN2LN Switch", "IN2LN" },
575 { "Right Output Mixer", "IN2RN Switch", "IN2RN" },
576 { "Right Output Mixer", "IN2RP Switch", "IN2RP/VXRP" },
577 { "Right Output Mixer", "IN1L Switch", "IN1L PGA" },
578 { "Right Output Mixer", "IN1R Switch", "IN1R PGA" },
579
580 { "Left Output PGA", NULL, "Left Output Mixer" },
581 { "Left Output PGA", NULL, "TOCLK" },
582
583 { "Right Output PGA", NULL, "Right Output Mixer" },
584 { "Right Output PGA", NULL, "TOCLK" },
585
586 { "Earpiece Mixer", "Direct Voice Switch", "Direct Voice" },
587 { "Earpiece Mixer", "Left Output Switch", "Left Output PGA" },
588 { "Earpiece Mixer", "Right Output Switch", "Right Output PGA" },
589
590 { "Earpiece Driver", NULL, "Earpiece Mixer" },
591 { "HPOUT2N", NULL, "Earpiece Driver" },
592 { "HPOUT2P", NULL, "Earpiece Driver" },
593
594 { "SPKL", "Input Switch", "MIXINL" },
595 { "SPKL", "IN1LP Switch", "IN1LP" },
596 { "SPKL", "Output Switch", "Left Output Mixer" },
597 { "SPKL", NULL, "TOCLK" },
598
599 { "SPKR", "Input Switch", "MIXINR" },
600 { "SPKR", "IN1RP Switch", "IN1RP" },
601 { "SPKR", "Output Switch", "Right Output Mixer" },
602 { "SPKR", NULL, "TOCLK" },
603
604 { "SPKL Boost", "Direct Voice Switch", "Direct Voice" },
605 { "SPKL Boost", "SPKL Switch", "SPKL" },
606 { "SPKL Boost", "SPKR Switch", "SPKR" },
607
608 { "SPKR Boost", "Direct Voice Switch", "Direct Voice" },
609 { "SPKR Boost", "SPKR Switch", "SPKR" },
610 { "SPKR Boost", "SPKL Switch", "SPKL" },
611
612 { "SPKL Driver", NULL, "SPKL Boost" },
613 { "SPKL Driver", NULL, "CLK_SYS" },
614
615 { "SPKR Driver", NULL, "SPKR Boost" },
616 { "SPKR Driver", NULL, "CLK_SYS" },
617
618 { "SPKOUTLP", NULL, "SPKL Driver" },
619 { "SPKOUTLN", NULL, "SPKL Driver" },
620 { "SPKOUTRP", NULL, "SPKR Driver" },
621 { "SPKOUTRN", NULL, "SPKR Driver" },
622
623 { "Left Headphone Mux", "Mixer", "Left Output Mixer" },
624 { "Right Headphone Mux", "Mixer", "Right Output Mixer" },
625
626 { "Headphone PGA", NULL, "Left Headphone Mux" },
627 { "Headphone PGA", NULL, "Right Headphone Mux" },
628 { "Headphone PGA", NULL, "CLK_SYS" },
629
630 { "HPOUT1L", NULL, "Headphone PGA" },
631 { "HPOUT1R", NULL, "Headphone PGA" },
632
633 { "LINEOUT1N", NULL, "LINEOUT1N Driver" },
634 { "LINEOUT1P", NULL, "LINEOUT1P Driver" },
635 { "LINEOUT2N", NULL, "LINEOUT2N Driver" },
636 { "LINEOUT2P", NULL, "LINEOUT2P Driver" },
637};
638
639static const struct snd_soc_dapm_route lineout1_diff_routes[] = {
640 { "LINEOUT1 Mixer", "IN1L Switch", "IN1L PGA" },
641 { "LINEOUT1 Mixer", "IN1R Switch", "IN1R PGA" },
642 { "LINEOUT1 Mixer", "Output Switch", "Left Output Mixer" },
643
644 { "LINEOUT1N Driver", NULL, "LINEOUT1 Mixer" },
645 { "LINEOUT1P Driver", NULL, "LINEOUT1 Mixer" },
646};
647
648static const struct snd_soc_dapm_route lineout1_se_routes[] = {
649 { "LINEOUT1N Mixer", "Left Output Switch", "Left Output Mixer" },
650 { "LINEOUT1N Mixer", "Right Output Switch", "Left Output Mixer" },
651
652 { "LINEOUT1P Mixer", "Left Output Switch", "Left Output Mixer" },
653
654 { "LINEOUT1N Driver", NULL, "LINEOUT1N Mixer" },
655 { "LINEOUT1P Driver", NULL, "LINEOUT1P Mixer" },
656};
657
658static const struct snd_soc_dapm_route lineout2_diff_routes[] = {
659 { "LINEOUT2 Mixer", "IN2L Switch", "IN2L PGA" },
660 { "LINEOUT2 Mixer", "IN2R Switch", "IN2R PGA" },
661 { "LINEOUT2 Mixer", "Output Switch", "Right Output Mixer" },
662
663 { "LINEOUT2N Driver", NULL, "LINEOUT2 Mixer" },
664 { "LINEOUT2P Driver", NULL, "LINEOUT2 Mixer" },
665};
666
667static const struct snd_soc_dapm_route lineout2_se_routes[] = {
668 { "LINEOUT2N Mixer", "Left Output Switch", "Left Output Mixer" },
669 { "LINEOUT2N Mixer", "Right Output Switch", "Left Output Mixer" },
670
671 { "LINEOUT2P Mixer", "Right Output Switch", "Right Output Mixer" },
672
673 { "LINEOUT2N Driver", NULL, "LINEOUT2N Mixer" },
674 { "LINEOUT2P Driver", NULL, "LINEOUT2P Mixer" },
675};
676
677int wm_hubs_add_analogue_controls(struct snd_soc_codec *codec)
678{
679 /* Latch volume update bits & default ZC on */
680 snd_soc_update_bits(codec, WM8993_LEFT_LINE_INPUT_1_2_VOLUME,
681 WM8993_IN1_VU, WM8993_IN1_VU);
682 snd_soc_update_bits(codec, WM8993_RIGHT_LINE_INPUT_1_2_VOLUME,
683 WM8993_IN1_VU, WM8993_IN1_VU);
684 snd_soc_update_bits(codec, WM8993_LEFT_LINE_INPUT_3_4_VOLUME,
685 WM8993_IN2_VU, WM8993_IN2_VU);
686 snd_soc_update_bits(codec, WM8993_RIGHT_LINE_INPUT_3_4_VOLUME,
687 WM8993_IN2_VU, WM8993_IN2_VU);
688
689 snd_soc_update_bits(codec, WM8993_SPEAKER_VOLUME_RIGHT,
690 WM8993_SPKOUT_VU, WM8993_SPKOUT_VU);
691
692 snd_soc_update_bits(codec, WM8993_LEFT_OUTPUT_VOLUME,
693 WM8993_HPOUT1L_ZC, WM8993_HPOUT1L_ZC);
694 snd_soc_update_bits(codec, WM8993_RIGHT_OUTPUT_VOLUME,
695 WM8993_HPOUT1_VU | WM8993_HPOUT1R_ZC,
696 WM8993_HPOUT1_VU | WM8993_HPOUT1R_ZC);
697
698 snd_soc_update_bits(codec, WM8993_LEFT_OPGA_VOLUME,
699 WM8993_MIXOUTL_ZC, WM8993_MIXOUTL_ZC);
700 snd_soc_update_bits(codec, WM8993_RIGHT_OPGA_VOLUME,
701 WM8993_MIXOUTR_ZC | WM8993_MIXOUT_VU,
702 WM8993_MIXOUTR_ZC | WM8993_MIXOUT_VU);
703
704 snd_soc_add_controls(codec, analogue_snd_controls,
705 ARRAY_SIZE(analogue_snd_controls));
706
707 snd_soc_dapm_new_controls(codec, analogue_dapm_widgets,
708 ARRAY_SIZE(analogue_dapm_widgets));
709 return 0;
710}
711EXPORT_SYMBOL_GPL(wm_hubs_add_analogue_controls);
712
713int wm_hubs_add_analogue_routes(struct snd_soc_codec *codec,
714 int lineout1_diff, int lineout2_diff)
715{
716 snd_soc_dapm_add_routes(codec, analogue_routes,
717 ARRAY_SIZE(analogue_routes));
718
719 if (lineout1_diff)
720 snd_soc_dapm_add_routes(codec,
721 lineout1_diff_routes,
722 ARRAY_SIZE(lineout1_diff_routes));
723 else
724 snd_soc_dapm_add_routes(codec,
725 lineout1_se_routes,
726 ARRAY_SIZE(lineout1_se_routes));
727
728 if (lineout2_diff)
729 snd_soc_dapm_add_routes(codec,
730 lineout2_diff_routes,
731 ARRAY_SIZE(lineout2_diff_routes));
732 else
733 snd_soc_dapm_add_routes(codec,
734 lineout2_se_routes,
735 ARRAY_SIZE(lineout2_se_routes));
736
737 return 0;
738}
739EXPORT_SYMBOL_GPL(wm_hubs_add_analogue_routes);
740
741MODULE_DESCRIPTION("Shared support for Wolfson hubs products");
742MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
743MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/wm_hubs.h b/sound/soc/codecs/wm_hubs.h
new file mode 100644
index 000000000000..ec09cb6a2939
--- /dev/null
+++ b/sound/soc/codecs/wm_hubs.h
@@ -0,0 +1,24 @@
1/*
2 * wm_hubs.h -- WM899x common code
3 *
4 * Copyright 2009 Wolfson Microelectronics plc
5 *
6 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
7 *
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#ifndef _WM_HUBS_H
15#define _WM_HUBS_H
16
17struct snd_soc_codec;
18
19extern const unsigned int wm_hubs_spkmix_tlv[];
20
21extern int wm_hubs_add_analogue_controls(struct snd_soc_codec *);
22extern int wm_hubs_add_analogue_routes(struct snd_soc_codec *, int, int);
23
24#endif
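The new wm_hubs.c and wm_hubs.h factor the analogue input and output paths shared by the WM8993-class devices into a small library that individual codec drivers link against. A hedged sketch of how a codec driver might pull it in; the probe fragment and the source of the lineout flags (normally board platform data) are assumptions, and only the two exported wm_hubs_* calls come from the header above:

#include <sound/soc.h>
#include "wm_hubs.h"

/* Hypothetical probe fragment: register the shared analogue controls
 * and DAPM widgets, then choose differential or single-ended LINEOUT
 * routing according to the board wiring. */
static int example_codec_probe(struct snd_soc_codec *codec,
			       int lineout1_diff, int lineout2_diff)
{
	wm_hubs_add_analogue_controls(codec);

	return wm_hubs_add_analogue_routes(codec, lineout1_diff,
					   lineout2_diff);
}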
diff --git a/sound/soc/davinci/Kconfig b/sound/soc/davinci/Kconfig
index 411a710be660..4dfd4ad9d90e 100644
--- a/sound/soc/davinci/Kconfig
+++ b/sound/soc/davinci/Kconfig
@@ -9,6 +9,9 @@ config SND_DAVINCI_SOC
9config SND_DAVINCI_SOC_I2S 9config SND_DAVINCI_SOC_I2S
10 tristate 10 tristate
11 11
12config SND_DAVINCI_SOC_MCASP
13 tristate
14
12config SND_DAVINCI_SOC_EVM 15config SND_DAVINCI_SOC_EVM
13 tristate "SoC Audio support for DaVinci DM6446 or DM355 EVM" 16 tristate "SoC Audio support for DaVinci DM6446 or DM355 EVM"
14 depends on SND_DAVINCI_SOC 17 depends on SND_DAVINCI_SOC
@@ -19,6 +22,16 @@ config SND_DAVINCI_SOC_EVM
19 Say Y if you want to add support for SoC audio on TI 22 Say Y if you want to add support for SoC audio on TI
20 DaVinci DM6446 or DM355 EVM platforms. 23 DaVinci DM6446 or DM355 EVM platforms.
21 24
25config SND_DM6467_SOC_EVM
26 tristate "SoC Audio support for DaVinci DM6467 EVM"
27 depends on SND_DAVINCI_SOC && MACH_DAVINCI_DM6467_EVM
28 select SND_DAVINCI_SOC_MCASP
29 select SND_SOC_TLV320AIC3X
30 select SND_SOC_SPDIF
31
32 help
 33	  Say Y if you want to add support for SoC audio on TI DaVinci DM6467 EVM.
34
22config SND_DAVINCI_SOC_SFFSDR 35config SND_DAVINCI_SOC_SFFSDR
23 tristate "SoC Audio support for SFFSDR" 36 tristate "SoC Audio support for SFFSDR"
24 depends on SND_DAVINCI_SOC && MACH_SFFSDR 37 depends on SND_DAVINCI_SOC && MACH_SFFSDR
@@ -28,3 +41,23 @@ config SND_DAVINCI_SOC_SFFSDR
28 help 41 help
29 Say Y if you want to add support for SoC audio on 42 Say Y if you want to add support for SoC audio on
30 Lyrtech SFFSDR board. 43 Lyrtech SFFSDR board.
44
45config SND_DA830_SOC_EVM
46 tristate "SoC Audio support for DA830/OMAP-L137 EVM"
47 depends on SND_DAVINCI_SOC && MACH_DAVINCI_DA830_EVM
48 select SND_DAVINCI_SOC_MCASP
49 select SND_SOC_TLV320AIC3X
50
51 help
52 Say Y if you want to add support for SoC audio on TI
53 DA830/OMAP-L137 EVM
54
55config SND_DA850_SOC_EVM
56 tristate "SoC Audio support for DA850/OMAP-L138 EVM"
57 depends on SND_DAVINCI_SOC && MACH_DAVINCI_DA850_EVM
58 select SND_DAVINCI_SOC_MCASP
59 select SND_SOC_TLV320AIC3X
60 help
61 Say Y if you want to add support for SoC audio on TI
62 DA850/OMAP-L138 EVM
63
diff --git a/sound/soc/davinci/Makefile b/sound/soc/davinci/Makefile
index ca8bae1fc3f6..a6939d71b988 100644
--- a/sound/soc/davinci/Makefile
+++ b/sound/soc/davinci/Makefile
@@ -1,13 +1,18 @@
1# DAVINCI Platform Support 1# DAVINCI Platform Support
2snd-soc-davinci-objs := davinci-pcm.o 2snd-soc-davinci-objs := davinci-pcm.o
3snd-soc-davinci-i2s-objs := davinci-i2s.o 3snd-soc-davinci-i2s-objs := davinci-i2s.o
4snd-soc-davinci-mcasp-objs:= davinci-mcasp.o
4 5
5obj-$(CONFIG_SND_DAVINCI_SOC) += snd-soc-davinci.o 6obj-$(CONFIG_SND_DAVINCI_SOC) += snd-soc-davinci.o
6obj-$(CONFIG_SND_DAVINCI_SOC_I2S) += snd-soc-davinci-i2s.o 7obj-$(CONFIG_SND_DAVINCI_SOC_I2S) += snd-soc-davinci-i2s.o
8obj-$(CONFIG_SND_DAVINCI_SOC_MCASP) += snd-soc-davinci-mcasp.o
7 9
8# DAVINCI Machine Support 10# DAVINCI Machine Support
9snd-soc-evm-objs := davinci-evm.o 11snd-soc-evm-objs := davinci-evm.o
10snd-soc-sffsdr-objs := davinci-sffsdr.o 12snd-soc-sffsdr-objs := davinci-sffsdr.o
11 13
12obj-$(CONFIG_SND_DAVINCI_SOC_EVM) += snd-soc-evm.o 14obj-$(CONFIG_SND_DAVINCI_SOC_EVM) += snd-soc-evm.o
15obj-$(CONFIG_SND_DM6467_SOC_EVM) += snd-soc-evm.o
16obj-$(CONFIG_SND_DA830_SOC_EVM) += snd-soc-evm.o
17obj-$(CONFIG_SND_DA850_SOC_EVM) += snd-soc-evm.o
13obj-$(CONFIG_SND_DAVINCI_SOC_SFFSDR) += snd-soc-sffsdr.o 18obj-$(CONFIG_SND_DAVINCI_SOC_SFFSDR) += snd-soc-sffsdr.o
diff --git a/sound/soc/davinci/davinci-evm.c b/sound/soc/davinci/davinci-evm.c
index 58fd1cbedd88..67414f659405 100644
--- a/sound/soc/davinci/davinci-evm.c
+++ b/sound/soc/davinci/davinci-evm.c
@@ -14,6 +14,7 @@
14#include <linux/timer.h> 14#include <linux/timer.h>
15#include <linux/interrupt.h> 15#include <linux/interrupt.h>
16#include <linux/platform_device.h> 16#include <linux/platform_device.h>
17#include <linux/i2c.h>
17#include <sound/core.h> 18#include <sound/core.h>
18#include <sound/pcm.h> 19#include <sound/pcm.h>
19#include <sound/soc.h> 20#include <sound/soc.h>
@@ -27,9 +28,10 @@
27#include <mach/mux.h> 28#include <mach/mux.h>
28 29
29#include "../codecs/tlv320aic3x.h" 30#include "../codecs/tlv320aic3x.h"
31#include "../codecs/spdif_transciever.h"
30#include "davinci-pcm.h" 32#include "davinci-pcm.h"
31#include "davinci-i2s.h" 33#include "davinci-i2s.h"
32 34#include "davinci-mcasp.h"
33 35
34#define AUDIO_FORMAT (SND_SOC_DAIFMT_DSP_B | \ 36#define AUDIO_FORMAT (SND_SOC_DAIFMT_DSP_B | \
35 SND_SOC_DAIFMT_CBM_CFM | SND_SOC_DAIFMT_IB_NF) 37 SND_SOC_DAIFMT_CBM_CFM | SND_SOC_DAIFMT_IB_NF)
@@ -43,7 +45,7 @@ static int evm_hw_params(struct snd_pcm_substream *substream,
43 unsigned sysclk; 45 unsigned sysclk;
44 46
45 /* ASP1 on DM355 EVM is clocked by an external oscillator */ 47 /* ASP1 on DM355 EVM is clocked by an external oscillator */
46 if (machine_is_davinci_dm355_evm()) 48 if (machine_is_davinci_dm355_evm() || machine_is_davinci_dm6467_evm())
47 sysclk = 27000000; 49 sysclk = 27000000;
48 50
49 /* ASP0 in DM6446 EVM is clocked by U55, as configured by 51 /* ASP0 in DM6446 EVM is clocked by U55, as configured by
@@ -53,6 +55,10 @@ static int evm_hw_params(struct snd_pcm_substream *substream,
53 else if (machine_is_davinci_evm()) 55 else if (machine_is_davinci_evm())
54 sysclk = 12288000; 56 sysclk = 12288000;
55 57
58 else if (machine_is_davinci_da830_evm() ||
59 machine_is_davinci_da850_evm())
60 sysclk = 24576000;
61
56 else 62 else
57 return -EINVAL; 63 return -EINVAL;
58 64
@@ -144,6 +150,32 @@ static struct snd_soc_dai_link evm_dai = {
144 .ops = &evm_ops, 150 .ops = &evm_ops,
145}; 151};
146 152
153static struct snd_soc_dai_link dm6467_evm_dai[] = {
154 {
155 .name = "TLV320AIC3X",
156 .stream_name = "AIC3X",
157 .cpu_dai = &davinci_mcasp_dai[DAVINCI_MCASP_I2S_DAI],
158 .codec_dai = &aic3x_dai,
159 .init = evm_aic3x_init,
160 .ops = &evm_ops,
161 },
162 {
163 .name = "McASP",
164 .stream_name = "spdif",
165 .cpu_dai = &davinci_mcasp_dai[DAVINCI_MCASP_DIT_DAI],
166 .codec_dai = &dit_stub_dai,
167 .ops = &evm_ops,
168 },
169};
170static struct snd_soc_dai_link da8xx_evm_dai = {
171 .name = "TLV320AIC3X",
172 .stream_name = "AIC3X",
173 .cpu_dai = &davinci_mcasp_dai[DAVINCI_MCASP_I2S_DAI],
174 .codec_dai = &aic3x_dai,
175 .init = evm_aic3x_init,
176 .ops = &evm_ops,
177};
178
147/* davinci-evm audio machine driver */ 179/* davinci-evm audio machine driver */
148static struct snd_soc_card snd_soc_card_evm = { 180static struct snd_soc_card snd_soc_card_evm = {
149 .name = "DaVinci EVM", 181 .name = "DaVinci EVM",
@@ -152,73 +184,80 @@ static struct snd_soc_card snd_soc_card_evm = {
152 .num_links = 1, 184 .num_links = 1,
153}; 185};
154 186
155/* evm audio private data */ 187/* davinci dm6467 evm audio machine driver */
156static struct aic3x_setup_data evm_aic3x_setup = { 188static struct snd_soc_card dm6467_snd_soc_card_evm = {
157 .i2c_bus = 1, 189 .name = "DaVinci DM6467 EVM",
158 .i2c_address = 0x1b, 190 .platform = &davinci_soc_platform,
191 .dai_link = dm6467_evm_dai,
192 .num_links = ARRAY_SIZE(dm6467_evm_dai),
159}; 193};
160 194
195static struct snd_soc_card da830_snd_soc_card = {
196 .name = "DA830/OMAP-L137 EVM",
197 .dai_link = &da8xx_evm_dai,
198 .platform = &davinci_soc_platform,
199 .num_links = 1,
200};
201
202static struct snd_soc_card da850_snd_soc_card = {
203 .name = "DA850/OMAP-L138 EVM",
204 .dai_link = &da8xx_evm_dai,
205 .platform = &davinci_soc_platform,
206 .num_links = 1,
207};
208
209static struct aic3x_setup_data aic3x_setup;
210
161/* evm audio subsystem */ 211/* evm audio subsystem */
162static struct snd_soc_device evm_snd_devdata = { 212static struct snd_soc_device evm_snd_devdata = {
163 .card = &snd_soc_card_evm, 213 .card = &snd_soc_card_evm,
164 .codec_dev = &soc_codec_dev_aic3x, 214 .codec_dev = &soc_codec_dev_aic3x,
165 .codec_data = &evm_aic3x_setup, 215 .codec_data = &aic3x_setup,
166};
167
168/* DM6446 EVM uses ASP0; line-out is a pair of RCA jacks */
169static struct resource evm_snd_resources[] = {
170 {
171 .start = DAVINCI_ASP0_BASE,
172 .end = DAVINCI_ASP0_BASE + SZ_8K - 1,
173 .flags = IORESOURCE_MEM,
174 },
175}; 216};
176 217
177static struct evm_snd_platform_data evm_snd_data = { 218/* evm audio subsystem */
178 .tx_dma_ch = DAVINCI_DMA_ASP0_TX, 219static struct snd_soc_device dm6467_evm_snd_devdata = {
179 .rx_dma_ch = DAVINCI_DMA_ASP0_RX, 220 .card = &dm6467_snd_soc_card_evm,
221 .codec_dev = &soc_codec_dev_aic3x,
222 .codec_data = &aic3x_setup,
180}; 223};
181 224
182/* DM335 EVM uses ASP1; line-out is a stereo mini-jack */ 225/* evm audio subsystem */
183static struct resource dm335evm_snd_resources[] = { 226static struct snd_soc_device da830_evm_snd_devdata = {
184 { 227 .card = &da830_snd_soc_card,
185 .start = DAVINCI_ASP1_BASE, 228 .codec_dev = &soc_codec_dev_aic3x,
186 .end = DAVINCI_ASP1_BASE + SZ_8K - 1, 229 .codec_data = &aic3x_setup,
187 .flags = IORESOURCE_MEM,
188 },
189}; 230};
190 231
191static struct evm_snd_platform_data dm335evm_snd_data = { 232static struct snd_soc_device da850_evm_snd_devdata = {
192 .tx_dma_ch = DAVINCI_DMA_ASP1_TX, 233 .card = &da850_snd_soc_card,
193 .rx_dma_ch = DAVINCI_DMA_ASP1_RX, 234 .codec_dev = &soc_codec_dev_aic3x,
235 .codec_data = &aic3x_setup,
194}; 236};
195 237
196static struct platform_device *evm_snd_device; 238static struct platform_device *evm_snd_device;
197 239
198static int __init evm_init(void) 240static int __init evm_init(void)
199{ 241{
200 struct resource *resources; 242 struct snd_soc_device *evm_snd_dev_data;
201 unsigned num_resources;
202 struct evm_snd_platform_data *data;
203 int index; 243 int index;
204 int ret; 244 int ret;
205 245
206 if (machine_is_davinci_evm()) { 246 if (machine_is_davinci_evm()) {
207 davinci_cfg_reg(DM644X_MCBSP); 247 evm_snd_dev_data = &evm_snd_devdata;
208
209 resources = evm_snd_resources;
210 num_resources = ARRAY_SIZE(evm_snd_resources);
211 data = &evm_snd_data;
212 index = 0; 248 index = 0;
213 } else if (machine_is_davinci_dm355_evm()) { 249 } else if (machine_is_davinci_dm355_evm()) {
214 /* we don't use ASP1 IRQs, or we'd need to mux them ... */ 250 evm_snd_dev_data = &evm_snd_devdata;
215 davinci_cfg_reg(DM355_EVT8_ASP1_TX); 251 index = 1;
216 davinci_cfg_reg(DM355_EVT9_ASP1_RX); 252 } else if (machine_is_davinci_dm6467_evm()) {
217 253 evm_snd_dev_data = &dm6467_evm_snd_devdata;
218 resources = dm335evm_snd_resources; 254 index = 0;
219 num_resources = ARRAY_SIZE(dm335evm_snd_resources); 255 } else if (machine_is_davinci_da830_evm()) {
220 data = &dm335evm_snd_data; 256 evm_snd_dev_data = &da830_evm_snd_devdata;
221 index = 1; 257 index = 1;
258 } else if (machine_is_davinci_da850_evm()) {
259 evm_snd_dev_data = &da850_evm_snd_devdata;
260 index = 0;
222 } else 261 } else
223 return -EINVAL; 262 return -EINVAL;
224 263
@@ -226,17 +265,8 @@ static int __init evm_init(void)
226 if (!evm_snd_device) 265 if (!evm_snd_device)
227 return -ENOMEM; 266 return -ENOMEM;
228 267
229 platform_set_drvdata(evm_snd_device, &evm_snd_devdata); 268 platform_set_drvdata(evm_snd_device, evm_snd_dev_data);
230 evm_snd_devdata.dev = &evm_snd_device->dev; 269 evm_snd_dev_data->dev = &evm_snd_device->dev;
231 platform_device_add_data(evm_snd_device, data, sizeof(*data));
232
233 ret = platform_device_add_resources(evm_snd_device, resources,
234 num_resources);
235 if (ret) {
236 platform_device_put(evm_snd_device);
237 return ret;
238 }
239
240 ret = platform_device_add(evm_snd_device); 270 ret = platform_device_add(evm_snd_device);
241 if (ret) 271 if (ret)
242 platform_device_put(evm_snd_device); 272 platform_device_put(evm_snd_device);
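Aside: after this hunk evm_init() no longer assembles per-board memory/DMA resources and platform data; it only chooses which snd_soc_device to attach to the "soc-audio" platform device and which device id to use. That mapping reduces to a small table. The sketch below is a standalone, illustrative restatement of it (the strings mirror the symbols in the hunk above; nothing here is part of the patch):

#include <stdio.h>

struct board_audio {
	const char *machine;   /* board, as tested by machine_is_*() */
	const char *devdata;   /* snd_soc_device chosen for it */
	int index;             /* id given to the "soc-audio" device */
};

static const struct board_audio boards[] = {
	{ "davinci_evm",        "evm_snd_devdata",        0 },
	{ "davinci_dm355_evm",  "evm_snd_devdata",        1 },
	{ "davinci_dm6467_evm", "dm6467_evm_snd_devdata", 0 },
	{ "davinci_da830_evm",  "da830_evm_snd_devdata",  1 },
	{ "davinci_da850_evm",  "da850_evm_snd_devdata",  0 },
};

int main(void)
{
	for (unsigned i = 0; i < sizeof(boards) / sizeof(boards[0]); i++)
		printf("%-22s -> %s, soc-audio.%d\n",
		       boards[i].machine, boards[i].devdata, boards[i].index);
	return 0;
}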
diff --git a/sound/soc/davinci/davinci-i2s.c b/sound/soc/davinci/davinci-i2s.c
index b1ea52fc83c7..12a6c549ee6e 100644
--- a/sound/soc/davinci/davinci-i2s.c
+++ b/sound/soc/davinci/davinci-i2s.c
@@ -22,6 +22,8 @@
22#include <sound/initval.h> 22#include <sound/initval.h>
23#include <sound/soc.h> 23#include <sound/soc.h>
24 24
25#include <mach/asp.h>
26
25#include "davinci-pcm.h" 27#include "davinci-pcm.h"
26 28
27 29
@@ -63,6 +65,7 @@
63#define DAVINCI_MCBSP_RCR_RWDLEN1(v) ((v) << 5) 65#define DAVINCI_MCBSP_RCR_RWDLEN1(v) ((v) << 5)
64#define DAVINCI_MCBSP_RCR_RFRLEN1(v) ((v) << 8) 66#define DAVINCI_MCBSP_RCR_RFRLEN1(v) ((v) << 8)
65#define DAVINCI_MCBSP_RCR_RDATDLY(v) ((v) << 16) 67#define DAVINCI_MCBSP_RCR_RDATDLY(v) ((v) << 16)
68#define DAVINCI_MCBSP_RCR_RFIG (1 << 18)
66#define DAVINCI_MCBSP_RCR_RWDLEN2(v) ((v) << 21) 69#define DAVINCI_MCBSP_RCR_RWDLEN2(v) ((v) << 21)
67 70
68#define DAVINCI_MCBSP_XCR_XWDLEN1(v) ((v) << 5) 71#define DAVINCI_MCBSP_XCR_XWDLEN1(v) ((v) << 5)
@@ -85,14 +88,6 @@
85#define DAVINCI_MCBSP_PCR_FSRM (1 << 10) 88#define DAVINCI_MCBSP_PCR_FSRM (1 << 10)
86#define DAVINCI_MCBSP_PCR_FSXM (1 << 11) 89#define DAVINCI_MCBSP_PCR_FSXM (1 << 11)
87 90
88#define MOD_REG_BIT(val, mask, set) do { \
89 if (set) { \
90 val |= mask; \
91 } else { \
92 val &= ~mask; \
93 } \
94} while (0)
95
96enum { 91enum {
97 DAVINCI_MCBSP_WORD_8 = 0, 92 DAVINCI_MCBSP_WORD_8 = 0,
98 DAVINCI_MCBSP_WORD_12, 93 DAVINCI_MCBSP_WORD_12,
@@ -112,6 +107,10 @@ static struct davinci_pcm_dma_params davinci_i2s_pcm_in = {
112 107
113struct davinci_mcbsp_dev { 108struct davinci_mcbsp_dev {
114 void __iomem *base; 109 void __iomem *base;
110#define MOD_DSP_A 0
111#define MOD_DSP_B 1
112 int mode;
113 u32 pcr;
115 struct clk *clk; 114 struct clk *clk;
116 struct davinci_pcm_dma_params *dma_params[2]; 115 struct davinci_pcm_dma_params *dma_params[2];
117}; 116};
@@ -127,96 +126,100 @@ static inline u32 davinci_mcbsp_read_reg(struct davinci_mcbsp_dev *dev, int reg)
127 return __raw_readl(dev->base + reg); 126 return __raw_readl(dev->base + reg);
128} 127}
129 128
130static void davinci_mcbsp_start(struct snd_pcm_substream *substream) 129static void toggle_clock(struct davinci_mcbsp_dev *dev, int playback)
130{
131 u32 m = playback ? DAVINCI_MCBSP_PCR_CLKXP : DAVINCI_MCBSP_PCR_CLKRP;
132 /* The clock needs to toggle to complete reset.
133 * So, fake it by toggling the clk polarity.
134 */
135 davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_PCR_REG, dev->pcr ^ m);
136 davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_PCR_REG, dev->pcr);
137}
138
139static void davinci_mcbsp_start(struct davinci_mcbsp_dev *dev,
140 struct snd_pcm_substream *substream)
131{ 141{
132 struct snd_soc_pcm_runtime *rtd = substream->private_data; 142 struct snd_soc_pcm_runtime *rtd = substream->private_data;
133 struct davinci_mcbsp_dev *dev = rtd->dai->cpu_dai->private_data;
134 struct snd_soc_device *socdev = rtd->socdev; 143 struct snd_soc_device *socdev = rtd->socdev;
135 struct snd_soc_platform *platform = socdev->card->platform; 144 struct snd_soc_platform *platform = socdev->card->platform;
136 u32 w; 145 int playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
137 int ret; 146 u32 spcr;
138 147 u32 mask = playback ? DAVINCI_MCBSP_SPCR_XRST : DAVINCI_MCBSP_SPCR_RRST;
139 /* Start the sample generator and enable transmitter/receiver */ 148 spcr = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG);
140 w = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG); 149 if (spcr & mask) {
141 MOD_REG_BIT(w, DAVINCI_MCBSP_SPCR_GRST, 1); 150 /* start off disabled */
142 davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, w); 151 davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG,
152 spcr & ~mask);
153 toggle_clock(dev, playback);
154 }
155 if (dev->pcr & (DAVINCI_MCBSP_PCR_FSXM | DAVINCI_MCBSP_PCR_FSRM |
156 DAVINCI_MCBSP_PCR_CLKXM | DAVINCI_MCBSP_PCR_CLKRM)) {
157 /* Start the sample generator */
158 spcr |= DAVINCI_MCBSP_SPCR_GRST;
159 davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr);
160 }
143 161
144 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { 162 if (playback) {
145 /* Stop the DMA to avoid data loss */ 163 /* Stop the DMA to avoid data loss */
146 /* while the transmitter is out of reset to handle XSYNCERR */ 164 /* while the transmitter is out of reset to handle XSYNCERR */
147 if (platform->pcm_ops->trigger) { 165 if (platform->pcm_ops->trigger) {
148 ret = platform->pcm_ops->trigger(substream, 166 int ret = platform->pcm_ops->trigger(substream,
149 SNDRV_PCM_TRIGGER_STOP); 167 SNDRV_PCM_TRIGGER_STOP);
150 if (ret < 0) 168 if (ret < 0)
151 printk(KERN_DEBUG "Playback DMA stop failed\n"); 169 printk(KERN_DEBUG "Playback DMA stop failed\n");
152 } 170 }
153 171
154 /* Enable the transmitter */ 172 /* Enable the transmitter */
155 w = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG); 173 spcr = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG);
156 MOD_REG_BIT(w, DAVINCI_MCBSP_SPCR_XRST, 1); 174 spcr |= DAVINCI_MCBSP_SPCR_XRST;
157 davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, w); 175 davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr);
158 176
159 /* wait for any unexpected frame sync error to occur */ 177 /* wait for any unexpected frame sync error to occur */
160 udelay(100); 178 udelay(100);
161 179
162 /* Disable the transmitter to clear any outstanding XSYNCERR */ 180 /* Disable the transmitter to clear any outstanding XSYNCERR */
163 w = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG); 181 spcr = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG);
164 MOD_REG_BIT(w, DAVINCI_MCBSP_SPCR_XRST, 0); 182 spcr &= ~DAVINCI_MCBSP_SPCR_XRST;
165 davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, w); 183 davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr);
184 toggle_clock(dev, playback);
166 185
167 /* Restart the DMA */ 186 /* Restart the DMA */
168 if (platform->pcm_ops->trigger) { 187 if (platform->pcm_ops->trigger) {
169 ret = platform->pcm_ops->trigger(substream, 188 int ret = platform->pcm_ops->trigger(substream,
170 SNDRV_PCM_TRIGGER_START); 189 SNDRV_PCM_TRIGGER_START);
171 if (ret < 0) 190 if (ret < 0)
172 printk(KERN_DEBUG "Playback DMA start failed\n"); 191 printk(KERN_DEBUG "Playback DMA start failed\n");
173 } 192 }
174 /* Enable the transmitter */
175 w = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG);
176 MOD_REG_BIT(w, DAVINCI_MCBSP_SPCR_XRST, 1);
177 davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, w);
178
179 } else {
180
181 /* Enable the reciever */
182 w = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG);
183 MOD_REG_BIT(w, DAVINCI_MCBSP_SPCR_RRST, 1);
184 davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, w);
185 } 193 }
186 194
195 /* Enable transmitter or receiver */
196 spcr = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG);
197 spcr |= mask;
187 198
188 /* Start frame sync */ 199 if (dev->pcr & (DAVINCI_MCBSP_PCR_FSXM | DAVINCI_MCBSP_PCR_FSRM)) {
189 w = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG); 200 /* Start frame sync */
190 MOD_REG_BIT(w, DAVINCI_MCBSP_SPCR_FRST, 1); 201 spcr |= DAVINCI_MCBSP_SPCR_FRST;
191 davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, w); 202 }
203 davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr);
192} 204}
193 205
194static void davinci_mcbsp_stop(struct snd_pcm_substream *substream) 206static void davinci_mcbsp_stop(struct davinci_mcbsp_dev *dev, int playback)
195{ 207{
196 struct snd_soc_pcm_runtime *rtd = substream->private_data; 208 u32 spcr;
197 struct davinci_mcbsp_dev *dev = rtd->dai->cpu_dai->private_data;
198 u32 w;
199 209
200 /* Reset transmitter/receiver and sample rate/frame sync generators */ 210 /* Reset transmitter/receiver and sample rate/frame sync generators */
201 w = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG); 211 spcr = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG);
202 MOD_REG_BIT(w, DAVINCI_MCBSP_SPCR_GRST | 212 spcr &= ~(DAVINCI_MCBSP_SPCR_GRST | DAVINCI_MCBSP_SPCR_FRST);
203 DAVINCI_MCBSP_SPCR_FRST, 0); 213 spcr &= playback ? ~DAVINCI_MCBSP_SPCR_XRST : ~DAVINCI_MCBSP_SPCR_RRST;
204 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) 214 davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr);
205 MOD_REG_BIT(w, DAVINCI_MCBSP_SPCR_XRST, 0); 215 toggle_clock(dev, playback);
206 else
207 MOD_REG_BIT(w, DAVINCI_MCBSP_SPCR_RRST, 0);
208 davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, w);
209} 216}
210 217
211static int davinci_i2s_startup(struct snd_pcm_substream *substream, 218static int davinci_i2s_startup(struct snd_pcm_substream *substream,
212 struct snd_soc_dai *dai) 219 struct snd_soc_dai *cpu_dai)
213{ 220{
214 struct snd_soc_pcm_runtime *rtd = substream->private_data; 221 struct davinci_mcbsp_dev *dev = cpu_dai->private_data;
215 struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
216 struct davinci_mcbsp_dev *dev = rtd->dai->cpu_dai->private_data;
217
218 cpu_dai->dma_data = dev->dma_params[substream->stream]; 222 cpu_dai->dma_data = dev->dma_params[substream->stream];
219
220 return 0; 223 return 0;
221} 224}
222 225
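Aside: the rewritten start/stop path drops the MOD_REG_BIT() helper in favour of plain |= and &= on a local spcr copy, caches the programmed pin-control value in dev->pcr, and adds toggle_clock(), which fakes the clock edge the McBSP needs to come out of reset by writing PCR once with the relevant polarity bit inverted and once with the cached value. A minimal standalone illustration of that XOR trick (the bit positions below are placeholders, not the driver's real CLKXP/CLKRP values):

#include <stdint.h>
#include <stdio.h>

/* placeholder polarity bits standing in for DAVINCI_MCBSP_PCR_CLKXP/CLKRP */
#define PCR_CLKXP (1u << 1)
#define PCR_CLKRP (1u << 0)

static void toggle_clock(volatile uint32_t *pcr_reg, uint32_t pcr, int playback)
{
	uint32_t m = playback ? PCR_CLKXP : PCR_CLKRP;
	*pcr_reg = pcr ^ m;   /* one write with the polarity flipped ... */
	*pcr_reg = pcr;       /* ... then restore the cached PCR value */
}

int main(void)
{
	uint32_t fake_pcr_reg = 0, cached_pcr = 0x0c;
	toggle_clock(&fake_pcr_reg, cached_pcr, 1);
	printf("PCR ends up back at %#x\n", fake_pcr_reg);
	return 0;
}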
@@ -228,12 +231,11 @@ static int davinci_i2s_set_dai_fmt(struct snd_soc_dai *cpu_dai,
228 struct davinci_mcbsp_dev *dev = cpu_dai->private_data; 231 struct davinci_mcbsp_dev *dev = cpu_dai->private_data;
229 unsigned int pcr; 232 unsigned int pcr;
230 unsigned int srgr; 233 unsigned int srgr;
231 unsigned int rcr;
232 unsigned int xcr;
233 srgr = DAVINCI_MCBSP_SRGR_FSGM | 234 srgr = DAVINCI_MCBSP_SRGR_FSGM |
234 DAVINCI_MCBSP_SRGR_FPER(DEFAULT_BITPERSAMPLE * 2 - 1) | 235 DAVINCI_MCBSP_SRGR_FPER(DEFAULT_BITPERSAMPLE * 2 - 1) |
235 DAVINCI_MCBSP_SRGR_FWID(DEFAULT_BITPERSAMPLE - 1); 236 DAVINCI_MCBSP_SRGR_FWID(DEFAULT_BITPERSAMPLE - 1);
236 237
238 /* set master/slave audio interface */
237 switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { 239 switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
238 case SND_SOC_DAIFMT_CBS_CFS: 240 case SND_SOC_DAIFMT_CBS_CFS:
239 /* cpu is master */ 241 /* cpu is master */
@@ -258,11 +260,8 @@ static int davinci_i2s_set_dai_fmt(struct snd_soc_dai *cpu_dai,
258 return -EINVAL; 260 return -EINVAL;
259 } 261 }
260 262
261 rcr = DAVINCI_MCBSP_RCR_RFRLEN1(1); 263 /* interface format */
262 xcr = DAVINCI_MCBSP_XCR_XFIG | DAVINCI_MCBSP_XCR_XFRLEN1(1);
263 switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { 264 switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
264 case SND_SOC_DAIFMT_DSP_B:
265 break;
266 case SND_SOC_DAIFMT_I2S: 265 case SND_SOC_DAIFMT_I2S:
267 /* Davinci doesn't support TRUE I2S, but some codecs will have 266 /* Davinci doesn't support TRUE I2S, but some codecs will have
268 * the left and right channels contiguous. This allows 267 * the left and right channels contiguous. This allows
@@ -282,8 +281,10 @@ static int davinci_i2s_set_dai_fmt(struct snd_soc_dai *cpu_dai,
282 */ 281 */
283 fmt ^= SND_SOC_DAIFMT_NB_IF; 282 fmt ^= SND_SOC_DAIFMT_NB_IF;
284 case SND_SOC_DAIFMT_DSP_A: 283 case SND_SOC_DAIFMT_DSP_A:
285 rcr |= DAVINCI_MCBSP_RCR_RDATDLY(1); 284 dev->mode = MOD_DSP_A;
286 xcr |= DAVINCI_MCBSP_XCR_XDATDLY(1); 285 break;
286 case SND_SOC_DAIFMT_DSP_B:
287 dev->mode = MOD_DSP_B;
287 break; 288 break;
288 default: 289 default:
289 printk(KERN_ERR "%s:bad format\n", __func__); 290 printk(KERN_ERR "%s:bad format\n", __func__);
@@ -343,9 +344,8 @@ static int davinci_i2s_set_dai_fmt(struct snd_soc_dai *cpu_dai,
343 return -EINVAL; 344 return -EINVAL;
344 } 345 }
345 davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SRGR_REG, srgr); 346 davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SRGR_REG, srgr);
347 dev->pcr = pcr;
346 davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_PCR_REG, pcr); 348 davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_PCR_REG, pcr);
347 davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_RCR_REG, rcr);
348 davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_XCR_REG, xcr);
349 return 0; 349 return 0;
350} 350}
351 351
@@ -353,31 +353,40 @@ static int davinci_i2s_hw_params(struct snd_pcm_substream *substream,
353 struct snd_pcm_hw_params *params, 353 struct snd_pcm_hw_params *params,
354 struct snd_soc_dai *dai) 354 struct snd_soc_dai *dai)
355{ 355{
356 struct snd_soc_pcm_runtime *rtd = substream->private_data; 356 struct davinci_pcm_dma_params *dma_params = dai->dma_data;
357 struct davinci_pcm_dma_params *dma_params = rtd->dai->cpu_dai->dma_data; 357 struct davinci_mcbsp_dev *dev = dai->private_data;
358 struct davinci_mcbsp_dev *dev = rtd->dai->cpu_dai->private_data;
359 struct snd_interval *i = NULL; 358 struct snd_interval *i = NULL;
360 int mcbsp_word_length; 359 int mcbsp_word_length;
361 u32 w; 360 unsigned int rcr, xcr, srgr;
361 u32 spcr;
362 362
363 /* general line settings */ 363 /* general line settings */
364 w = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG); 364 spcr = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG);
365 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) { 365 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
366 w |= DAVINCI_MCBSP_SPCR_RINTM(3) | DAVINCI_MCBSP_SPCR_FREE; 366 spcr |= DAVINCI_MCBSP_SPCR_RINTM(3) | DAVINCI_MCBSP_SPCR_FREE;
367 davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, w); 367 davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr);
368 } else { 368 } else {
369 w |= DAVINCI_MCBSP_SPCR_XINTM(3) | DAVINCI_MCBSP_SPCR_FREE; 369 spcr |= DAVINCI_MCBSP_SPCR_XINTM(3) | DAVINCI_MCBSP_SPCR_FREE;
370 davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, w); 370 davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr);
371 } 371 }
372 372
373 i = hw_param_interval(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS); 373 i = hw_param_interval(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS);
374 w = DAVINCI_MCBSP_SRGR_FSGM; 374 srgr = DAVINCI_MCBSP_SRGR_FSGM;
375 MOD_REG_BIT(w, DAVINCI_MCBSP_SRGR_FWID(snd_interval_value(i) - 1), 1); 375 srgr |= DAVINCI_MCBSP_SRGR_FWID(snd_interval_value(i) - 1);
376 376
377 i = hw_param_interval(params, SNDRV_PCM_HW_PARAM_FRAME_BITS); 377 i = hw_param_interval(params, SNDRV_PCM_HW_PARAM_FRAME_BITS);
378 MOD_REG_BIT(w, DAVINCI_MCBSP_SRGR_FPER(snd_interval_value(i) - 1), 1); 378 srgr |= DAVINCI_MCBSP_SRGR_FPER(snd_interval_value(i) - 1);
379 davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SRGR_REG, w); 379 davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SRGR_REG, srgr);
380 380
381 rcr = DAVINCI_MCBSP_RCR_RFIG;
382 xcr = DAVINCI_MCBSP_XCR_XFIG;
383 if (dev->mode == MOD_DSP_B) {
384 rcr |= DAVINCI_MCBSP_RCR_RDATDLY(0);
385 xcr |= DAVINCI_MCBSP_XCR_XDATDLY(0);
386 } else {
387 rcr |= DAVINCI_MCBSP_RCR_RDATDLY(1);
388 xcr |= DAVINCI_MCBSP_XCR_XDATDLY(1);
389 }
381 /* Determine xfer data type */ 390 /* Determine xfer data type */
382 switch (params_format(params)) { 391 switch (params_format(params)) {
383 case SNDRV_PCM_FORMAT_S8: 392 case SNDRV_PCM_FORMAT_S8:
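Aside: with the format programming split between set_dai_fmt() (which now only records dev->mode) and hw_params() (which writes RCR/XCR), the data delay follows directly from the mode: DSP_B starts data in the same bit clock as the frame sync (delay 0), while DSP_A, and the I2S-emulation path that falls through to it, uses a one-bit delay. A trivial standalone restatement:

#include <stdio.h>

#define MOD_DSP_A 0   /* data one bit clock after frame sync (I2S-style path) */
#define MOD_DSP_B 1   /* data aligned with the frame sync */

static int data_delay_bits(int mode)
{
	return (mode == MOD_DSP_B) ? 0 : 1;   /* value fed to R/XDATDLY */
}

int main(void)
{
	printf("DSP_A delay=%d bit, DSP_B delay=%d bits\n",
	       data_delay_bits(MOD_DSP_A), data_delay_bits(MOD_DSP_B));
	return 0;
}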
@@ -397,18 +406,31 @@ static int davinci_i2s_hw_params(struct snd_pcm_substream *substream,
397 return -EINVAL; 406 return -EINVAL;
398 } 407 }
399 408
400 if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) { 409 dma_params->acnt = dma_params->data_type;
401 w = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_RCR_REG); 410 rcr |= DAVINCI_MCBSP_RCR_RFRLEN1(1);
402 MOD_REG_BIT(w, DAVINCI_MCBSP_RCR_RWDLEN1(mcbsp_word_length) | 411 xcr |= DAVINCI_MCBSP_XCR_XFRLEN1(1);
403 DAVINCI_MCBSP_RCR_RWDLEN2(mcbsp_word_length), 1);
404 davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_RCR_REG, w);
405 412
406 } else { 413 rcr |= DAVINCI_MCBSP_RCR_RWDLEN1(mcbsp_word_length) |
407 w = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_XCR_REG); 414 DAVINCI_MCBSP_RCR_RWDLEN2(mcbsp_word_length);
408 MOD_REG_BIT(w, DAVINCI_MCBSP_XCR_XWDLEN1(mcbsp_word_length) | 415 xcr |= DAVINCI_MCBSP_XCR_XWDLEN1(mcbsp_word_length) |
409 DAVINCI_MCBSP_XCR_XWDLEN2(mcbsp_word_length), 1); 416 DAVINCI_MCBSP_XCR_XWDLEN2(mcbsp_word_length);
410 davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_XCR_REG, w);
411 417
418 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
419 davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_XCR_REG, xcr);
420 else
421 davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_RCR_REG, rcr);
422 return 0;
423}
424
425static int davinci_i2s_prepare(struct snd_pcm_substream *substream,
426 struct snd_soc_dai *dai)
427{
428 struct davinci_mcbsp_dev *dev = dai->private_data;
429 int playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
430 davinci_mcbsp_stop(dev, playback);
431 if ((dev->pcr & DAVINCI_MCBSP_PCR_FSXM) == 0) {
432 /* codec is master */
433 davinci_mcbsp_start(dev, substream);
412 } 434 }
413 return 0; 435 return 0;
414} 436}
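Aside: the new prepare() callback covers the codec-is-master case: if FSXM is clear in the cached PCR the CPU does not own the frame sync, so the McBSP is stopped and restarted from prepare() and trigger() returns immediately; when the CPU is master, start/stop stay in trigger(). A standalone sketch of that decision, using the FSXM bit position from the driver's own define:

#include <stdint.h>
#include <stdio.h>

#define PCR_FSXM (1u << 11)   /* same value as DAVINCI_MCBSP_PCR_FSXM */

static const char *who_starts(uint32_t pcr)
{
	return (pcr & PCR_FSXM) ? "trigger() (CPU is frame master)"
				: "prepare() (codec is frame master)";
}

int main(void)
{
	printf("pcr=0x800 -> %s\n", who_starts(0x800));
	printf("pcr=0x000 -> %s\n", who_starts(0x000));
	return 0;
}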
@@ -416,35 +438,72 @@ static int davinci_i2s_hw_params(struct snd_pcm_substream *substream,
416static int davinci_i2s_trigger(struct snd_pcm_substream *substream, int cmd, 438static int davinci_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
417 struct snd_soc_dai *dai) 439 struct snd_soc_dai *dai)
418{ 440{
441 struct davinci_mcbsp_dev *dev = dai->private_data;
419 int ret = 0; 442 int ret = 0;
443 int playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
444 if ((dev->pcr & DAVINCI_MCBSP_PCR_FSXM) == 0)
445 return 0; /* return if codec is master */
420 446
421 switch (cmd) { 447 switch (cmd) {
422 case SNDRV_PCM_TRIGGER_START: 448 case SNDRV_PCM_TRIGGER_START:
423 case SNDRV_PCM_TRIGGER_RESUME: 449 case SNDRV_PCM_TRIGGER_RESUME:
424 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: 450 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
425 davinci_mcbsp_start(substream); 451 davinci_mcbsp_start(dev, substream);
426 break; 452 break;
427 case SNDRV_PCM_TRIGGER_STOP: 453 case SNDRV_PCM_TRIGGER_STOP:
428 case SNDRV_PCM_TRIGGER_SUSPEND: 454 case SNDRV_PCM_TRIGGER_SUSPEND:
429 case SNDRV_PCM_TRIGGER_PAUSE_PUSH: 455 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
430 davinci_mcbsp_stop(substream); 456 davinci_mcbsp_stop(dev, playback);
431 break; 457 break;
432 default: 458 default:
433 ret = -EINVAL; 459 ret = -EINVAL;
434 } 460 }
435
436 return ret; 461 return ret;
437} 462}
438 463
439static int davinci_i2s_probe(struct platform_device *pdev, 464static void davinci_i2s_shutdown(struct snd_pcm_substream *substream,
440 struct snd_soc_dai *dai) 465 struct snd_soc_dai *dai)
466{
467 struct davinci_mcbsp_dev *dev = dai->private_data;
468 int playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
469 davinci_mcbsp_stop(dev, playback);
470}
471
472#define DAVINCI_I2S_RATES SNDRV_PCM_RATE_8000_96000
473
474static struct snd_soc_dai_ops davinci_i2s_dai_ops = {
475 .startup = davinci_i2s_startup,
476 .shutdown = davinci_i2s_shutdown,
477 .prepare = davinci_i2s_prepare,
478 .trigger = davinci_i2s_trigger,
479 .hw_params = davinci_i2s_hw_params,
480 .set_fmt = davinci_i2s_set_dai_fmt,
481
482};
483
484struct snd_soc_dai davinci_i2s_dai = {
485 .name = "davinci-i2s",
486 .id = 0,
487 .playback = {
488 .channels_min = 2,
489 .channels_max = 2,
490 .rates = DAVINCI_I2S_RATES,
491 .formats = SNDRV_PCM_FMTBIT_S16_LE,},
492 .capture = {
493 .channels_min = 2,
494 .channels_max = 2,
495 .rates = DAVINCI_I2S_RATES,
496 .formats = SNDRV_PCM_FMTBIT_S16_LE,},
497 .ops = &davinci_i2s_dai_ops,
498
499};
500EXPORT_SYMBOL_GPL(davinci_i2s_dai);
501
502static int davinci_i2s_probe(struct platform_device *pdev)
441{ 503{
442 struct snd_soc_device *socdev = platform_get_drvdata(pdev); 504 struct snd_platform_data *pdata = pdev->dev.platform_data;
443 struct snd_soc_card *card = socdev->card;
444 struct snd_soc_dai *cpu_dai = card->dai_link->cpu_dai;
445 struct davinci_mcbsp_dev *dev; 505 struct davinci_mcbsp_dev *dev;
446 struct resource *mem, *ioarea; 506 struct resource *mem, *ioarea, *res;
447 struct evm_snd_platform_data *pdata;
448 int ret; 507 int ret;
449 508
450 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 509 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -466,8 +525,6 @@ static int davinci_i2s_probe(struct platform_device *pdev,
466 goto err_release_region; 525 goto err_release_region;
467 } 526 }
468 527
469 cpu_dai->private_data = dev;
470
471 dev->clk = clk_get(&pdev->dev, NULL); 528 dev->clk = clk_get(&pdev->dev, NULL);
472 if (IS_ERR(dev->clk)) { 529 if (IS_ERR(dev->clk)) {
473 ret = -ENODEV; 530 ret = -ENODEV;
@@ -476,18 +533,37 @@ static int davinci_i2s_probe(struct platform_device *pdev,
476 clk_enable(dev->clk); 533 clk_enable(dev->clk);
477 534
478 dev->base = (void __iomem *)IO_ADDRESS(mem->start); 535 dev->base = (void __iomem *)IO_ADDRESS(mem->start);
479 pdata = pdev->dev.platform_data;
480 536
481 dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK] = &davinci_i2s_pcm_out; 537 dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK] = &davinci_i2s_pcm_out;
482 dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK]->channel = pdata->tx_dma_ch;
483 dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK]->dma_addr = 538 dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK]->dma_addr =
484 (dma_addr_t)(io_v2p(dev->base) + DAVINCI_MCBSP_DXR_REG); 539 (dma_addr_t)(io_v2p(dev->base) + DAVINCI_MCBSP_DXR_REG);
485 540
486 dev->dma_params[SNDRV_PCM_STREAM_CAPTURE] = &davinci_i2s_pcm_in; 541 dev->dma_params[SNDRV_PCM_STREAM_CAPTURE] = &davinci_i2s_pcm_in;
487 dev->dma_params[SNDRV_PCM_STREAM_CAPTURE]->channel = pdata->rx_dma_ch;
488 dev->dma_params[SNDRV_PCM_STREAM_CAPTURE]->dma_addr = 542 dev->dma_params[SNDRV_PCM_STREAM_CAPTURE]->dma_addr =
489 (dma_addr_t)(io_v2p(dev->base) + DAVINCI_MCBSP_DRR_REG); 543 (dma_addr_t)(io_v2p(dev->base) + DAVINCI_MCBSP_DRR_REG);
490 544
545 /* first TX, then RX */
546 res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
547 if (!res) {
548 dev_err(&pdev->dev, "no DMA resource\n");
549 ret = -ENXIO;
550 goto err_free_mem;
551 }
552 dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK]->channel = res->start;
553
554 res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
555 if (!res) {
556 dev_err(&pdev->dev, "no DMA resource\n");
557 ret = -ENXIO;
558 goto err_free_mem;
559 }
560 dev->dma_params[SNDRV_PCM_STREAM_CAPTURE]->channel = res->start;
561
562 davinci_i2s_dai.private_data = dev;
563 ret = snd_soc_register_dai(&davinci_i2s_dai);
564 if (ret != 0)
565 goto err_free_mem;
566
491 return 0; 567 return 0;
492 568
493err_free_mem: 569err_free_mem:
@@ -498,62 +574,40 @@ err_release_region:
498 return ret; 574 return ret;
499} 575}
500 576
501static void davinci_i2s_remove(struct platform_device *pdev, 577static int davinci_i2s_remove(struct platform_device *pdev)
502 struct snd_soc_dai *dai)
503{ 578{
504 struct snd_soc_device *socdev = platform_get_drvdata(pdev); 579 struct davinci_mcbsp_dev *dev = davinci_i2s_dai.private_data;
505 struct snd_soc_card *card = socdev->card;
506 struct snd_soc_dai *cpu_dai = card->dai_link->cpu_dai;
507 struct davinci_mcbsp_dev *dev = cpu_dai->private_data;
508 struct resource *mem; 580 struct resource *mem;
509 581
582 snd_soc_unregister_dai(&davinci_i2s_dai);
510 clk_disable(dev->clk); 583 clk_disable(dev->clk);
511 clk_put(dev->clk); 584 clk_put(dev->clk);
512 dev->clk = NULL; 585 dev->clk = NULL;
513
514 kfree(dev); 586 kfree(dev);
515
516 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 587 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
517 release_mem_region(mem->start, (mem->end - mem->start) + 1); 588 release_mem_region(mem->start, (mem->end - mem->start) + 1);
518}
519 589
520#define DAVINCI_I2S_RATES SNDRV_PCM_RATE_8000_96000 590 return 0;
521 591}
522static struct snd_soc_dai_ops davinci_i2s_dai_ops = {
523 .startup = davinci_i2s_startup,
524 .trigger = davinci_i2s_trigger,
525 .hw_params = davinci_i2s_hw_params,
526 .set_fmt = davinci_i2s_set_dai_fmt,
527};
528 592
529struct snd_soc_dai davinci_i2s_dai = { 593static struct platform_driver davinci_mcbsp_driver = {
530 .name = "davinci-i2s", 594 .probe = davinci_i2s_probe,
531 .id = 0, 595 .remove = davinci_i2s_remove,
532 .probe = davinci_i2s_probe, 596 .driver = {
533 .remove = davinci_i2s_remove, 597 .name = "davinci-asp",
534 .playback = { 598 .owner = THIS_MODULE,
535 .channels_min = 2, 599 },
536 .channels_max = 2,
537 .rates = DAVINCI_I2S_RATES,
538 .formats = SNDRV_PCM_FMTBIT_S16_LE,},
539 .capture = {
540 .channels_min = 2,
541 .channels_max = 2,
542 .rates = DAVINCI_I2S_RATES,
543 .formats = SNDRV_PCM_FMTBIT_S16_LE,},
544 .ops = &davinci_i2s_dai_ops,
545}; 600};
546EXPORT_SYMBOL_GPL(davinci_i2s_dai);
547 601
548static int __init davinci_i2s_init(void) 602static int __init davinci_i2s_init(void)
549{ 603{
550 return snd_soc_register_dai(&davinci_i2s_dai); 604 return platform_driver_register(&davinci_mcbsp_driver);
551} 605}
552module_init(davinci_i2s_init); 606module_init(davinci_i2s_init);
553 607
554static void __exit davinci_i2s_exit(void) 608static void __exit davinci_i2s_exit(void)
555{ 609{
556 snd_soc_unregister_dai(&davinci_i2s_dai); 610 platform_driver_unregister(&davinci_mcbsp_driver);
557} 611}
558module_exit(davinci_i2s_exit); 612module_exit(davinci_i2s_exit);
559 613
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
new file mode 100644
index 000000000000..eca22d7829d2
--- /dev/null
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -0,0 +1,973 @@
1/*
2 * ALSA SoC McASP Audio Layer for TI DAVINCI processor
3 *
4 * Multi-channel Audio Serial Port Driver
5 *
6 * Author: Nirmal Pandey <n-pandey@ti.com>,
7 * Suresh Rajashekara <suresh.r@ti.com>
8 * Steve Chen <schen@mvista.com>

9 *
10 * Copyright: (C) 2009 MontaVista Software, Inc., <source@mvista.com>
11 * Copyright: (C) 2009 Texas Instruments, India
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
16 */
17
18#include <linux/init.h>
19#include <linux/module.h>
20#include <linux/device.h>
21#include <linux/delay.h>
22#include <linux/io.h>
23#include <linux/clk.h>
24
25#include <sound/core.h>
26#include <sound/pcm.h>
27#include <sound/pcm_params.h>
28#include <sound/initval.h>
29#include <sound/soc.h>
30
31#include "davinci-pcm.h"
32#include "davinci-mcasp.h"
33
34/*
35 * McASP register definitions
36 */
37#define DAVINCI_MCASP_PID_REG 0x00
38#define DAVINCI_MCASP_PWREMUMGT_REG 0x04
39
40#define DAVINCI_MCASP_PFUNC_REG 0x10
41#define DAVINCI_MCASP_PDIR_REG 0x14
42#define DAVINCI_MCASP_PDOUT_REG 0x18
43#define DAVINCI_MCASP_PDSET_REG 0x1c
44
45#define DAVINCI_MCASP_PDCLR_REG 0x20
46
47#define DAVINCI_MCASP_TLGC_REG 0x30
48#define DAVINCI_MCASP_TLMR_REG 0x34
49
50#define DAVINCI_MCASP_GBLCTL_REG 0x44
51#define DAVINCI_MCASP_AMUTE_REG 0x48
52#define DAVINCI_MCASP_LBCTL_REG 0x4c
53
54#define DAVINCI_MCASP_TXDITCTL_REG 0x50
55
56#define DAVINCI_MCASP_GBLCTLR_REG 0x60
57#define DAVINCI_MCASP_RXMASK_REG 0x64
58#define DAVINCI_MCASP_RXFMT_REG 0x68
59#define DAVINCI_MCASP_RXFMCTL_REG 0x6c
60
61#define DAVINCI_MCASP_ACLKRCTL_REG 0x70
62#define DAVINCI_MCASP_AHCLKRCTL_REG 0x74
63#define DAVINCI_MCASP_RXTDM_REG 0x78
64#define DAVINCI_MCASP_EVTCTLR_REG 0x7c
65
66#define DAVINCI_MCASP_RXSTAT_REG 0x80
67#define DAVINCI_MCASP_RXTDMSLOT_REG 0x84
68#define DAVINCI_MCASP_RXCLKCHK_REG 0x88
69#define DAVINCI_MCASP_REVTCTL_REG 0x8c
70
71#define DAVINCI_MCASP_GBLCTLX_REG 0xa0
72#define DAVINCI_MCASP_TXMASK_REG 0xa4
73#define DAVINCI_MCASP_TXFMT_REG 0xa8
74#define DAVINCI_MCASP_TXFMCTL_REG 0xac
75
76#define DAVINCI_MCASP_ACLKXCTL_REG 0xb0
77#define DAVINCI_MCASP_AHCLKXCTL_REG 0xb4
78#define DAVINCI_MCASP_TXTDM_REG 0xb8
79#define DAVINCI_MCASP_EVTCTLX_REG 0xbc
80
81#define DAVINCI_MCASP_TXSTAT_REG 0xc0
82#define DAVINCI_MCASP_TXTDMSLOT_REG 0xc4
83#define DAVINCI_MCASP_TXCLKCHK_REG 0xc8
84#define DAVINCI_MCASP_XEVTCTL_REG 0xcc
85
86/* Left(even TDM Slot) Channel Status Register File */
87#define DAVINCI_MCASP_DITCSRA_REG 0x100
88/* Right(odd TDM slot) Channel Status Register File */
89#define DAVINCI_MCASP_DITCSRB_REG 0x118
90/* Left(even TDM slot) User Data Register File */
91#define DAVINCI_MCASP_DITUDRA_REG 0x130
92/* Right(odd TDM Slot) User Data Register File */
93#define DAVINCI_MCASP_DITUDRB_REG 0x148
94
95/* Serializer n Control Register */
96#define DAVINCI_MCASP_XRSRCTL_BASE_REG 0x180
97#define DAVINCI_MCASP_XRSRCTL_REG(n) (DAVINCI_MCASP_XRSRCTL_BASE_REG + \
98 (n << 2))
99
100/* Transmit Buffer for Serializer n */
101#define DAVINCI_MCASP_TXBUF_REG 0x200
102/* Receive Buffer for Serializer n */
103#define DAVINCI_MCASP_RXBUF_REG 0x280
104
105/* McASP FIFO Registers */
106#define DAVINCI_MCASP_WFIFOCTL (0x1010)
107#define DAVINCI_MCASP_WFIFOSTS (0x1014)
108#define DAVINCI_MCASP_RFIFOCTL (0x1018)
109#define DAVINCI_MCASP_RFIFOSTS (0x101C)
110
111/*
112 * DAVINCI_MCASP_PWREMUMGT_REG - Power Down and Emulation Management
113 * Register Bits
114 */
115#define MCASP_FREE BIT(0)
116#define MCASP_SOFT BIT(1)
117
118/*
119 * DAVINCI_MCASP_PFUNC_REG - Pin Function / GPIO Enable Register Bits
120 */
121#define AXR(n) (1<<n)
122#define PFUNC_AMUTE BIT(25)
123#define ACLKX BIT(26)
124#define AHCLKX BIT(27)
125#define AFSX BIT(28)
126#define ACLKR BIT(29)
127#define AHCLKR BIT(30)
128#define AFSR BIT(31)
129
130/*
131 * DAVINCI_MCASP_PDIR_REG - Pin Direction Register Bits
132 */
133#define AXR(n) (1<<n)
134#define PDIR_AMUTE BIT(25)
135#define ACLKX BIT(26)
136#define AHCLKX BIT(27)
137#define AFSX BIT(28)
138#define ACLKR BIT(29)
139#define AHCLKR BIT(30)
140#define AFSR BIT(31)
141
142/*
143 * DAVINCI_MCASP_TXDITCTL_REG - Transmit DIT Control Register Bits
144 */
145#define DITEN BIT(0) /* Transmit DIT mode enable/disable */
146#define VA BIT(2)
147#define VB BIT(3)
148
149/*
150 * DAVINCI_MCASP_TXFMT_REG - Transmit Bitstream Format Register Bits
151 */
152#define TXROT(val) (val)
153#define TXSEL BIT(3)
154#define TXSSZ(val) (val<<4)
155#define TXPBIT(val) (val<<8)
156#define TXPAD(val) (val<<13)
157#define TXORD BIT(15)
158#define FSXDLY(val) (val<<16)
159
160/*
161 * DAVINCI_MCASP_RXFMT_REG - Receive Bitstream Format Register Bits
162 */
163#define RXROT(val) (val)
164#define RXSEL BIT(3)
165#define RXSSZ(val) (val<<4)
166#define RXPBIT(val) (val<<8)
167#define RXPAD(val) (val<<13)
168#define RXORD BIT(15)
169#define FSRDLY(val) (val<<16)
170
171/*
172 * DAVINCI_MCASP_TXFMCTL_REG - Transmit Frame Control Register Bits
173 */
174#define FSXPOL BIT(0)
175#define AFSXE BIT(1)
176#define FSXDUR BIT(4)
177#define FSXMOD(val) (val<<7)
178
179/*
180 * DAVINCI_MCASP_RXFMCTL_REG - Receive Frame Control Register Bits
181 */
182#define FSRPOL BIT(0)
183#define AFSRE BIT(1)
184#define FSRDUR BIT(4)
185#define FSRMOD(val) (val<<7)
186
187/*
188 * DAVINCI_MCASP_ACLKXCTL_REG - Transmit Clock Control Register Bits
189 */
190#define ACLKXDIV(val) (val)
191#define ACLKXE BIT(5)
192#define TX_ASYNC BIT(6)
193#define ACLKXPOL BIT(7)
194
195/*
196 * DAVINCI_MCASP_ACLKRCTL_REG Receive Clock Control Register Bits
197 */
198#define ACLKRDIV(val) (val)
199#define ACLKRE BIT(5)
200#define RX_ASYNC BIT(6)
201#define ACLKRPOL BIT(7)
202
203/*
204 * DAVINCI_MCASP_AHCLKXCTL_REG - High Frequency Transmit Clock Control
205 * Register Bits
206 */
207#define AHCLKXDIV(val) (val)
208#define AHCLKXPOL BIT(14)
209#define AHCLKXE BIT(15)
210
211/*
212 * DAVINCI_MCASP_AHCLKRCTL_REG - High Frequency Receive Clock Control
213 * Register Bits
214 */
215#define AHCLKRDIV(val) (val)
216#define AHCLKRPOL BIT(14)
217#define AHCLKRE BIT(15)
218
219/*
220 * DAVINCI_MCASP_XRSRCTL_BASE_REG - Serializer Control Register Bits
221 */
222#define MODE(val) (val)
223#define DISMOD(val) (val<<2)
224#define TXSTATE BIT(4)
225#define RXSTATE BIT(5)
226
227/*
228 * DAVINCI_MCASP_LBCTL_REG - Loop Back Control Register Bits
229 */
230#define LBEN BIT(0)
231#define LBORD BIT(1)
232#define LBGENMODE(val) (val<<2)
233
234/*
235 * DAVINCI_MCASP_TXTDMSLOT_REG - Transmit TDM Slot Register configuration
236 */
237#define TXTDMS(n) (1<<n)
238
239/*
240 * DAVINCI_MCASP_RXTDMSLOT_REG - Receive TDM Slot Register configuration
241 */
242#define RXTDMS(n) (1<<n)
243
244/*
245 * DAVINCI_MCASP_GBLCTL_REG - Global Control Register Bits
246 */
247#define RXCLKRST BIT(0) /* Receiver Clock Divider Reset */
248#define RXHCLKRST BIT(1) /* Receiver High Frequency Clock Divider */
249#define RXSERCLR BIT(2) /* Receiver Serializer Clear */
250#define RXSMRST BIT(3) /* Receiver State Machine Reset */
251#define RXFSRST BIT(4) /* Frame Sync Generator Reset */
252#define TXCLKRST BIT(8) /* Transmitter Clock Divider Reset */
253#define TXHCLKRST BIT(9) /* Transmitter High Frequency Clock Divider*/
254#define TXSERCLR BIT(10) /* Transmit Serializer Clear */
255#define TXSMRST BIT(11) /* Transmitter State Machine Reset */
256#define TXFSRST BIT(12) /* Frame Sync Generator Reset */
257
258/*
259 * DAVINCI_MCASP_AMUTE_REG - Mute Control Register Bits
260 */
261#define MUTENA(val) (val)
262#define MUTEINPOL BIT(2)
263#define MUTEINENA BIT(3)
264#define MUTEIN BIT(4)
265#define MUTER BIT(5)
266#define MUTEX BIT(6)
267#define MUTEFSR BIT(7)
268#define MUTEFSX BIT(8)
269#define MUTEBADCLKR BIT(9)
270#define MUTEBADCLKX BIT(10)
271#define MUTERXDMAERR BIT(11)
272#define MUTETXDMAERR BIT(12)
273
274/*
275 * DAVINCI_MCASP_REVTCTL_REG - Receiver DMA Event Control Register bits
276 */
277#define RXDATADMADIS BIT(0)
278
279/*
280 * DAVINCI_MCASP_XEVTCTL_REG - Transmitter DMA Event Control Register bits
281 */
282#define TXDATADMADIS BIT(0)
283
284/*
285 * DAVINCI_MCASP_W[R]FIFOCTL - Write/Read FIFO Control Register bits
286 */
287#define FIFO_ENABLE BIT(16)
288#define NUMEVT_MASK (0xFF << 8)
289#define NUMDMA_MASK (0xFF)
290
291#define DAVINCI_MCASP_NUM_SERIALIZER 16
292
293static inline void mcasp_set_bits(void __iomem *reg, u32 val)
294{
295 __raw_writel(__raw_readl(reg) | val, reg);
296}
297
298static inline void mcasp_clr_bits(void __iomem *reg, u32 val)
299{
300 __raw_writel((__raw_readl(reg) & ~(val)), reg);
301}
302
303static inline void mcasp_mod_bits(void __iomem *reg, u32 val, u32 mask)
304{
305 __raw_writel((__raw_readl(reg) & ~mask) | val, reg);
306}
307
308static inline void mcasp_set_reg(void __iomem *reg, u32 val)
309{
310 __raw_writel(val, reg);
311}
312
313static inline u32 mcasp_get_reg(void __iomem *reg)
314{
315 return (unsigned int)__raw_readl(reg);
316}
317
318static inline void mcasp_set_ctl_reg(void __iomem *regs, u32 val)
319{
320 int i = 0;
321
322 mcasp_set_bits(regs, val);
323
324 /* programming GBLCTL needs to read back from GBLCTL and verify */
325 /* loop count is to avoid the lock-up */
326 for (i = 0; i < 1000; i++) {
327 if ((mcasp_get_reg(regs) & val) == val)
328 break;
329 }
330
331 if (i == 1000 && ((mcasp_get_reg(regs) & val) != val))
332 printk(KERN_ERR "GBLCTL write error\n");
333}
334
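Aside: as the comment in mcasp_set_ctl_reg() notes, GBLCTL writes only take effect once the hardware reflects them, so the driver reads the register back and bounds the wait rather than spinning forever. The same pattern in isolation, runnable in user space against a fake register:

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg;   /* stands in for the memory-mapped GBLCTL */

static int set_and_verify(volatile uint32_t *reg, uint32_t val, int max_tries)
{
	*reg |= val;                        /* request the state change */
	for (int i = 0; i < max_tries; i++)
		if ((*reg & val) == val)    /* hardware reflects the new state */
			return 0;
	return -1;                          /* give up instead of locking up */
}

int main(void)
{
	printf("verify result: %d\n", set_and_verify(&fake_reg, 0x5, 1000));
	return 0;
}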
335static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
336 struct snd_soc_dai *cpu_dai)
337{
338 struct davinci_audio_dev *dev = cpu_dai->private_data;
339 cpu_dai->dma_data = dev->dma_params[substream->stream];
340 return 0;
341}
342
343static void mcasp_start_rx(struct davinci_audio_dev *dev)
344{
345 mcasp_set_ctl_reg(dev->base + DAVINCI_MCASP_GBLCTLR_REG, RXHCLKRST);
346 mcasp_set_ctl_reg(dev->base + DAVINCI_MCASP_GBLCTLR_REG, RXCLKRST);
347 mcasp_set_ctl_reg(dev->base + DAVINCI_MCASP_GBLCTLR_REG, RXSERCLR);
348 mcasp_set_reg(dev->base + DAVINCI_MCASP_RXBUF_REG, 0);
349
350 mcasp_set_ctl_reg(dev->base + DAVINCI_MCASP_GBLCTLR_REG, RXSMRST);
351 mcasp_set_ctl_reg(dev->base + DAVINCI_MCASP_GBLCTLR_REG, RXFSRST);
352 mcasp_set_reg(dev->base + DAVINCI_MCASP_RXBUF_REG, 0);
353
354 mcasp_set_ctl_reg(dev->base + DAVINCI_MCASP_GBLCTLR_REG, RXSMRST);
355 mcasp_set_ctl_reg(dev->base + DAVINCI_MCASP_GBLCTLR_REG, RXFSRST);
356}
357
358static void mcasp_start_tx(struct davinci_audio_dev *dev)
359{
360 u8 offset = 0, i;
361 u32 cnt;
362
363 mcasp_set_ctl_reg(dev->base + DAVINCI_MCASP_GBLCTLX_REG, TXHCLKRST);
364 mcasp_set_ctl_reg(dev->base + DAVINCI_MCASP_GBLCTLX_REG, TXCLKRST);
365 mcasp_set_ctl_reg(dev->base + DAVINCI_MCASP_GBLCTLX_REG, TXSERCLR);
366 mcasp_set_reg(dev->base + DAVINCI_MCASP_TXBUF_REG, 0);
367
368 mcasp_set_ctl_reg(dev->base + DAVINCI_MCASP_GBLCTLX_REG, TXSMRST);
369 mcasp_set_ctl_reg(dev->base + DAVINCI_MCASP_GBLCTLX_REG, TXFSRST);
370 mcasp_set_reg(dev->base + DAVINCI_MCASP_TXBUF_REG, 0);
371 for (i = 0; i < dev->num_serializer; i++) {
372 if (dev->serial_dir[i] == TX_MODE) {
373 offset = i;
374 break;
375 }
376 }
377
378 /* wait for TX ready */
379 cnt = 0;
380 while (!(mcasp_get_reg(dev->base + DAVINCI_MCASP_XRSRCTL_REG(offset)) &
381 TXSTATE) && (cnt < 100000))
382 cnt++;
383
384 mcasp_set_reg(dev->base + DAVINCI_MCASP_TXBUF_REG, 0);
385}
386
387static void davinci_mcasp_start(struct davinci_audio_dev *dev, int stream)
388{
389 if (stream == SNDRV_PCM_STREAM_PLAYBACK)
390 mcasp_start_tx(dev);
391 else
392 mcasp_start_rx(dev);
393
394 /* enable FIFO */
395 if (dev->txnumevt)
396 mcasp_set_bits(dev->base + DAVINCI_MCASP_WFIFOCTL, FIFO_ENABLE);
397
398 if (dev->rxnumevt)
399 mcasp_set_bits(dev->base + DAVINCI_MCASP_RFIFOCTL, FIFO_ENABLE);
400}
401
402static void mcasp_stop_rx(struct davinci_audio_dev *dev)
403{
404 mcasp_set_reg(dev->base + DAVINCI_MCASP_GBLCTLR_REG, 0);
405 mcasp_set_reg(dev->base + DAVINCI_MCASP_RXSTAT_REG, 0xFFFFFFFF);
406}
407
408static void mcasp_stop_tx(struct davinci_audio_dev *dev)
409{
410 mcasp_set_reg(dev->base + DAVINCI_MCASP_GBLCTLX_REG, 0);
411 mcasp_set_reg(dev->base + DAVINCI_MCASP_TXSTAT_REG, 0xFFFFFFFF);
412}
413
414static void davinci_mcasp_stop(struct davinci_audio_dev *dev, int stream)
415{
416 if (stream == SNDRV_PCM_STREAM_PLAYBACK)
417 mcasp_stop_tx(dev);
418 else
419 mcasp_stop_rx(dev);
420
421 /* disable FIFO */
422 if (dev->txnumevt)
423 mcasp_clr_bits(dev->base + DAVINCI_MCASP_WFIFOCTL, FIFO_ENABLE);
424
425 if (dev->rxnumevt)
426 mcasp_clr_bits(dev->base + DAVINCI_MCASP_RFIFOCTL, FIFO_ENABLE);
427}
428
429static int davinci_mcasp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
430 unsigned int fmt)
431{
432 struct davinci_audio_dev *dev = cpu_dai->private_data;
433 void __iomem *base = dev->base;
434
435 switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
436 case SND_SOC_DAIFMT_CBS_CFS:
437 /* codec is clock and frame slave */
438 mcasp_set_bits(base + DAVINCI_MCASP_ACLKXCTL_REG, ACLKXE);
439 mcasp_set_bits(base + DAVINCI_MCASP_TXFMCTL_REG, AFSXE);
440
441 mcasp_set_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRE);
442 mcasp_set_bits(base + DAVINCI_MCASP_RXFMCTL_REG, AFSRE);
443
444 mcasp_set_bits(base + DAVINCI_MCASP_PDIR_REG, (0x7 << 26));
445 break;
446 case SND_SOC_DAIFMT_CBM_CFS:
447 /* codec is clock master and frame slave */
448 mcasp_set_bits(base + DAVINCI_MCASP_ACLKXCTL_REG, ACLKXE);
449 mcasp_set_bits(base + DAVINCI_MCASP_TXFMCTL_REG, AFSXE);
450
451 mcasp_set_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRE);
452 mcasp_set_bits(base + DAVINCI_MCASP_RXFMCTL_REG, AFSRE);
453
454 mcasp_set_bits(base + DAVINCI_MCASP_PDIR_REG, (0x2d << 26));
455 break;
456 case SND_SOC_DAIFMT_CBM_CFM:
457 /* codec is clock and frame master */
458 mcasp_clr_bits(base + DAVINCI_MCASP_ACLKXCTL_REG, ACLKXE);
459 mcasp_clr_bits(base + DAVINCI_MCASP_TXFMCTL_REG, AFSXE);
460
461 mcasp_clr_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRE);
462 mcasp_clr_bits(base + DAVINCI_MCASP_RXFMCTL_REG, AFSRE);
463
464 mcasp_clr_bits(base + DAVINCI_MCASP_PDIR_REG, (0x3f << 26));
465 break;
466
467 default:
468 return -EINVAL;
469 }
470
471 switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
472 case SND_SOC_DAIFMT_IB_NF:
473 mcasp_clr_bits(base + DAVINCI_MCASP_ACLKXCTL_REG, ACLKXPOL);
474 mcasp_clr_bits(base + DAVINCI_MCASP_TXFMCTL_REG, FSXPOL);
475
476 mcasp_set_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRPOL);
477 mcasp_clr_bits(base + DAVINCI_MCASP_RXFMCTL_REG, FSRPOL);
478 break;
479
480 case SND_SOC_DAIFMT_NB_IF:
481 mcasp_set_bits(base + DAVINCI_MCASP_ACLKXCTL_REG, ACLKXPOL);
482 mcasp_set_bits(base + DAVINCI_MCASP_TXFMCTL_REG, FSXPOL);
483
484 mcasp_clr_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRPOL);
485 mcasp_set_bits(base + DAVINCI_MCASP_RXFMCTL_REG, FSRPOL);
486 break;
487
488 case SND_SOC_DAIFMT_IB_IF:
489 mcasp_clr_bits(base + DAVINCI_MCASP_ACLKXCTL_REG, ACLKXPOL);
490 mcasp_set_bits(base + DAVINCI_MCASP_TXFMCTL_REG, FSXPOL);
491
492 mcasp_set_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRPOL);
493 mcasp_set_bits(base + DAVINCI_MCASP_RXFMCTL_REG, FSRPOL);
494 break;
495
496 case SND_SOC_DAIFMT_NB_NF:
497 mcasp_set_bits(base + DAVINCI_MCASP_ACLKXCTL_REG, ACLKXPOL);
498 mcasp_clr_bits(base + DAVINCI_MCASP_TXFMCTL_REG, FSXPOL);
499
500 mcasp_clr_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRPOL);
501 mcasp_clr_bits(base + DAVINCI_MCASP_RXFMCTL_REG, FSRPOL);
502 break;
503
504 default:
505 return -EINVAL;
506 }
507
508 return 0;
509}
510
511static int davinci_config_channel_size(struct davinci_audio_dev *dev,
512 int channel_size)
513{
514 u32 fmt = 0;
515
516 switch (channel_size) {
517 case DAVINCI_AUDIO_WORD_8:
518 fmt = 0x03;
519 break;
520
521 case DAVINCI_AUDIO_WORD_12:
522 fmt = 0x05;
523 break;
524
525 case DAVINCI_AUDIO_WORD_16:
526 fmt = 0x07;
527 break;
528
529 case DAVINCI_AUDIO_WORD_20:
530 fmt = 0x09;
531 break;
532
533 case DAVINCI_AUDIO_WORD_24:
534 fmt = 0x0B;
535 break;
536
537 case DAVINCI_AUDIO_WORD_28:
538 fmt = 0x0D;
539 break;
540
541 case DAVINCI_AUDIO_WORD_32:
542 fmt = 0x0F;
543 break;
544
545 default:
546 return -EINVAL;
547 }
548
549 mcasp_mod_bits(dev->base + DAVINCI_MCASP_RXFMT_REG,
550 RXSSZ(fmt), RXSSZ(0x0F));
551 mcasp_mod_bits(dev->base + DAVINCI_MCASP_TXFMT_REG,
552 TXSSZ(fmt), TXSSZ(0x0F));
553 return 0;
554}
555
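Aside: the per-word-length codes in davinci_config_channel_size() all follow one formula, slot-size field = bits/2 - 1 (8 -> 0x3, 16 -> 0x7, 24 -> 0xB, 32 -> 0xF); the switch simply spells that out for the supported sizes. A standalone check:

#include <stdio.h>

int main(void)
{
	for (int bits = 8; bits <= 32; bits += 4) {
		unsigned code = bits / 2 - 1;   /* value placed in R/TXSSZ */
		printf("%2d-bit word -> slot-size code 0x%02X\n", bits, code);
	}
	return 0;
}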
556static void davinci_hw_common_param(struct davinci_audio_dev *dev, int stream)
557{
558 int i;
559 u8 tx_ser = 0;
560 u8 rx_ser = 0;
561
562 /* Default configuration */
563 mcasp_set_bits(dev->base + DAVINCI_MCASP_PWREMUMGT_REG, MCASP_SOFT);
564
565 /* All PINS as McASP */
566 mcasp_set_reg(dev->base + DAVINCI_MCASP_PFUNC_REG, 0x00000000);
567
568 if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
569 mcasp_set_reg(dev->base + DAVINCI_MCASP_TXSTAT_REG, 0xFFFFFFFF);
570 mcasp_clr_bits(dev->base + DAVINCI_MCASP_XEVTCTL_REG,
571 TXDATADMADIS);
572 } else {
573 mcasp_set_reg(dev->base + DAVINCI_MCASP_RXSTAT_REG, 0xFFFFFFFF);
574 mcasp_clr_bits(dev->base + DAVINCI_MCASP_REVTCTL_REG,
575 RXDATADMADIS);
576 }
577
578 for (i = 0; i < dev->num_serializer; i++) {
579 mcasp_set_bits(dev->base + DAVINCI_MCASP_XRSRCTL_REG(i),
580 dev->serial_dir[i]);
581 if (dev->serial_dir[i] == TX_MODE) {
582 mcasp_set_bits(dev->base + DAVINCI_MCASP_PDIR_REG,
583 AXR(i));
584 tx_ser++;
585 } else if (dev->serial_dir[i] == RX_MODE) {
586 mcasp_clr_bits(dev->base + DAVINCI_MCASP_PDIR_REG,
587 AXR(i));
588 rx_ser++;
589 }
590 }
591
592 if (dev->txnumevt && stream == SNDRV_PCM_STREAM_PLAYBACK) {
593 if (dev->txnumevt * tx_ser > 64)
594 dev->txnumevt = 1;
595
596 mcasp_mod_bits(dev->base + DAVINCI_MCASP_WFIFOCTL, tx_ser,
597 NUMDMA_MASK);
598 mcasp_mod_bits(dev->base + DAVINCI_MCASP_WFIFOCTL,
599 ((dev->txnumevt * tx_ser) << 8), NUMEVT_MASK);
600 mcasp_set_bits(dev->base + DAVINCI_MCASP_WFIFOCTL, FIFO_ENABLE);
601 }
602
603 if (dev->rxnumevt && stream == SNDRV_PCM_STREAM_CAPTURE) {
604 if (dev->rxnumevt * rx_ser > 64)
605 dev->rxnumevt = 1;
606
607 mcasp_mod_bits(dev->base + DAVINCI_MCASP_RFIFOCTL, rx_ser,
608 NUMDMA_MASK);
609 mcasp_mod_bits(dev->base + DAVINCI_MCASP_RFIFOCTL,
610 ((dev->rxnumevt * rx_ser) << 8), NUMEVT_MASK);
611 mcasp_set_bits(dev->base + DAVINCI_MCASP_RFIFOCTL, FIFO_ENABLE);
612 }
613}
614
615static void davinci_hw_param(struct davinci_audio_dev *dev, int stream)
616{
617 int i, active_slots;
618 u32 mask = 0;
619
620 active_slots = (dev->tdm_slots > 31) ? 32 : dev->tdm_slots;
621 for (i = 0; i < active_slots; i++)
622 mask |= (1 << i);
623
624 mcasp_clr_bits(dev->base + DAVINCI_MCASP_ACLKXCTL_REG, TX_ASYNC);
625
626 if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
627 /* bit stream is MSB first with no delay */
628 /* DSP_B mode */
629 mcasp_set_bits(dev->base + DAVINCI_MCASP_AHCLKXCTL_REG,
630 AHCLKXE);
631 mcasp_set_reg(dev->base + DAVINCI_MCASP_TXTDM_REG, mask);
632 mcasp_set_bits(dev->base + DAVINCI_MCASP_TXFMT_REG, TXORD);
633
634 if ((dev->tdm_slots >= 2) && (dev->tdm_slots <= 32))
635 mcasp_mod_bits(dev->base + DAVINCI_MCASP_TXFMCTL_REG,
636 FSXMOD(dev->tdm_slots), FSXMOD(0x1FF));
637 else
638 printk(KERN_ERR "playback tdm slot %d not supported\n",
639 dev->tdm_slots);
640
641 mcasp_set_reg(dev->base + DAVINCI_MCASP_TXMASK_REG, 0xFFFFFFFF);
642 mcasp_clr_bits(dev->base + DAVINCI_MCASP_TXFMCTL_REG, FSXDUR);
643 } else {
644 /* bit stream is MSB first with no delay */
645 /* DSP_B mode */
646 mcasp_set_bits(dev->base + DAVINCI_MCASP_RXFMT_REG, RXORD);
647 mcasp_set_bits(dev->base + DAVINCI_MCASP_AHCLKRCTL_REG,
648 AHCLKRE);
649 mcasp_set_reg(dev->base + DAVINCI_MCASP_RXTDM_REG, mask);
650
651 if ((dev->tdm_slots >= 2) && (dev->tdm_slots <= 32))
652 mcasp_mod_bits(dev->base + DAVINCI_MCASP_RXFMCTL_REG,
653 FSRMOD(dev->tdm_slots), FSRMOD(0x1FF));
654 else
655 printk(KERN_ERR "capture tdm slot %d not supported\n",
656 dev->tdm_slots);
657
658 mcasp_set_reg(dev->base + DAVINCI_MCASP_RXMASK_REG, 0xFFFFFFFF);
659 mcasp_clr_bits(dev->base + DAVINCI_MCASP_RXFMCTL_REG, FSRDUR);
660 }
661}
662
663/* S/PDIF */
664static void davinci_hw_dit_param(struct davinci_audio_dev *dev)
665{
666 /* Set the PDIR for Serialiser as output */
667 mcasp_set_bits(dev->base + DAVINCI_MCASP_PDIR_REG, AFSX);
668
669 /* TXMASK for 24 bits */
670 mcasp_set_reg(dev->base + DAVINCI_MCASP_TXMASK_REG, 0x00FFFFFF);
671
672 /* Set the TX format : 24 bit right rotation, 32 bit slot, Pad 0
673 and LSB first */
674 mcasp_set_bits(dev->base + DAVINCI_MCASP_TXFMT_REG,
675 TXROT(6) | TXSSZ(15));
676
677 /* Set TX frame synch : DIT Mode, 1 bit width, internal, rising edge */
678 mcasp_set_reg(dev->base + DAVINCI_MCASP_TXFMCTL_REG,
679 AFSXE | FSXMOD(0x180));
680
681 /* Set the TX tdm : for all the slots */
682 mcasp_set_reg(dev->base + DAVINCI_MCASP_TXTDM_REG, 0xFFFFFFFF);
683
684 /* Set the TX clock controls : div = 1 and internal */
685 mcasp_set_bits(dev->base + DAVINCI_MCASP_ACLKXCTL_REG,
686 ACLKXE | TX_ASYNC);
687
688 mcasp_clr_bits(dev->base + DAVINCI_MCASP_XEVTCTL_REG, TXDATADMADIS);
689
690 /* Only 44100 and 48000 are valid, both have the same setting */
691 mcasp_set_bits(dev->base + DAVINCI_MCASP_AHCLKXCTL_REG, AHCLKXDIV(3));
692
693 /* Enable the DIT */
694 mcasp_set_bits(dev->base + DAVINCI_MCASP_TXDITCTL_REG, DITEN);
695}
696
697static int davinci_mcasp_hw_params(struct snd_pcm_substream *substream,
698 struct snd_pcm_hw_params *params,
699 struct snd_soc_dai *cpu_dai)
700{
701 struct davinci_audio_dev *dev = cpu_dai->private_data;
702 struct davinci_pcm_dma_params *dma_params =
703 dev->dma_params[substream->stream];
704 int word_length;
705 u8 numevt;
706
707 davinci_hw_common_param(dev, substream->stream);
708 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
709 numevt = dev->txnumevt;
710 else
711 numevt = dev->rxnumevt;
712
713 if (!numevt)
714 numevt = 1;
715
716 if (dev->op_mode == DAVINCI_MCASP_DIT_MODE)
717 davinci_hw_dit_param(dev);
718 else
719 davinci_hw_param(dev, substream->stream);
720
721 switch (params_format(params)) {
722 case SNDRV_PCM_FORMAT_S8:
723 dma_params->data_type = 1;
724 word_length = DAVINCI_AUDIO_WORD_8;
725 break;
726
727 case SNDRV_PCM_FORMAT_S16_LE:
728 dma_params->data_type = 2;
729 word_length = DAVINCI_AUDIO_WORD_16;
730 break;
731
732 case SNDRV_PCM_FORMAT_S32_LE:
733 dma_params->data_type = 4;
734 word_length = DAVINCI_AUDIO_WORD_32;
735 break;
736
737 default:
738 printk(KERN_WARNING "davinci-mcasp: unsupported PCM format");
739 return -EINVAL;
740 }
741
742 if (dev->version == MCASP_VERSION_2) {
743 dma_params->data_type *= numevt;
744 dma_params->acnt = 4 * numevt;
745 } else
746 dma_params->acnt = dma_params->data_type;
747
748 davinci_config_channel_size(dev, word_length);
749
750 return 0;
751}
752
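Aside: on MCASP_VERSION_2 parts, hw_params() scales the DMA element so one event drains a whole FIFO burst: acnt becomes 4 bytes times numevt and data_type is scaled the same way, so the PCM layer's period arithmetic counts bursts rather than single samples; older McASPs keep acnt equal to the sample size. Illustrative numbers only:

#include <stdio.h>

int main(void)
{
	unsigned data_type = 2, numevt = 8;           /* S16_LE, 8 events/burst */
	unsigned v2_acnt = 4 * numevt;                /* 32-byte EDMA A-count */
	unsigned v2_data_type = data_type * numevt;   /* scaled stride/divisor */
	printf("v1: acnt=%u   v2: acnt=%u data_type=%u\n",
	       data_type, v2_acnt, v2_data_type);
	return 0;
}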
753static int davinci_mcasp_trigger(struct snd_pcm_substream *substream,
754 int cmd, struct snd_soc_dai *cpu_dai)
755{
756 struct snd_soc_pcm_runtime *rtd = substream->private_data;
757 struct davinci_audio_dev *dev = rtd->dai->cpu_dai->private_data;
758 int ret = 0;
759
760 switch (cmd) {
761 case SNDRV_PCM_TRIGGER_START:
762 case SNDRV_PCM_TRIGGER_RESUME:
763 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
764 davinci_mcasp_start(dev, substream->stream);
765 break;
766
767 case SNDRV_PCM_TRIGGER_STOP:
768 case SNDRV_PCM_TRIGGER_SUSPEND:
769 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
770 davinci_mcasp_stop(dev, substream->stream);
771 break;
772
773 default:
774 ret = -EINVAL;
775 }
776
777 return ret;
778}
779
780static struct snd_soc_dai_ops davinci_mcasp_dai_ops = {
781 .startup = davinci_mcasp_startup,
782 .trigger = davinci_mcasp_trigger,
783 .hw_params = davinci_mcasp_hw_params,
784 .set_fmt = davinci_mcasp_set_dai_fmt,
785
786};
787
788struct snd_soc_dai davinci_mcasp_dai[] = {
789 {
790 .name = "davinci-i2s",
791 .id = 0,
792 .playback = {
793 .channels_min = 2,
794 .channels_max = 2,
795 .rates = DAVINCI_MCASP_RATES,
796 .formats = SNDRV_PCM_FMTBIT_S8 |
797 SNDRV_PCM_FMTBIT_S16_LE |
798 SNDRV_PCM_FMTBIT_S32_LE,
799 },
800 .capture = {
801 .channels_min = 2,
802 .channels_max = 2,
803 .rates = DAVINCI_MCASP_RATES,
804 .formats = SNDRV_PCM_FMTBIT_S8 |
805 SNDRV_PCM_FMTBIT_S16_LE |
806 SNDRV_PCM_FMTBIT_S32_LE,
807 },
808 .ops = &davinci_mcasp_dai_ops,
809
810 },
811 {
812 .name = "davinci-dit",
813 .id = 1,
814 .playback = {
815 .channels_min = 1,
816 .channels_max = 384,
817 .rates = DAVINCI_MCASP_RATES,
818 .formats = SNDRV_PCM_FMTBIT_S16_LE,
819 },
820 .ops = &davinci_mcasp_dai_ops,
821 },
822
823};
824EXPORT_SYMBOL_GPL(davinci_mcasp_dai);
825
826static int davinci_mcasp_probe(struct platform_device *pdev)
827{
828 struct davinci_pcm_dma_params *dma_data;
829 struct resource *mem, *ioarea, *res;
830 struct snd_platform_data *pdata;
831 struct davinci_audio_dev *dev;
832 int count = 0;
833 int ret = 0;
834
835 dev = kzalloc(sizeof(struct davinci_audio_dev), GFP_KERNEL);
836 if (!dev)
837 return -ENOMEM;
838
839 dma_data = kzalloc(sizeof(struct davinci_pcm_dma_params) * 2,
840 GFP_KERNEL);
841 if (!dma_data) {
842 ret = -ENOMEM;
843 goto err_release_dev;
844 }
845
846 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
847 if (!mem) {
848 dev_err(&pdev->dev, "no mem resource?\n");
849 ret = -ENODEV;
850 goto err_release_data;
851 }
852
853 ioarea = request_mem_region(mem->start,
854 (mem->end - mem->start) + 1, pdev->name);
855 if (!ioarea) {
856 dev_err(&pdev->dev, "Audio region already claimed\n");
857 ret = -EBUSY;
858 goto err_release_data;
859 }
860
861 pdata = pdev->dev.platform_data;
862 dev->clk = clk_get(&pdev->dev, NULL);
863 if (IS_ERR(dev->clk)) {
864 ret = -ENODEV;
865 goto err_release_region;
866 }
867
868 clk_enable(dev->clk);
869
870 dev->base = (void __iomem *)IO_ADDRESS(mem->start);
871 dev->op_mode = pdata->op_mode;
872 dev->tdm_slots = pdata->tdm_slots;
873 dev->num_serializer = pdata->num_serializer;
874 dev->serial_dir = pdata->serial_dir;
875 dev->codec_fmt = pdata->codec_fmt;
876 dev->version = pdata->version;
877 dev->txnumevt = pdata->txnumevt;
878 dev->rxnumevt = pdata->rxnumevt;
879
880 dma_data[count].name = "I2S PCM Stereo out";
881 dma_data[count].eventq_no = pdata->eventq_no;
882 dma_data[count].dma_addr = (dma_addr_t) (pdata->tx_dma_offset +
883 io_v2p(dev->base));
884 dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK] = &dma_data[count];
885
886 /* first TX, then RX */
887 res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
888 if (!res) {
889 dev_err(&pdev->dev, "no DMA resource\n");
890 goto err_release_region;
891 }
892
893 dma_data[count].channel = res->start;
894 count++;
895 dma_data[count].name = "I2S PCM Stereo in";
896 dma_data[count].eventq_no = pdata->eventq_no;
897 dma_data[count].dma_addr = (dma_addr_t)(pdata->rx_dma_offset +
898 io_v2p(dev->base));
899 dev->dma_params[SNDRV_PCM_STREAM_CAPTURE] = &dma_data[count];
900
901 res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
902 if (!res) {
903 dev_err(&pdev->dev, "no DMA resource\n");
904 goto err_release_region;
905 }
906
907 dma_data[count].channel = res->start;
908 davinci_mcasp_dai[pdata->op_mode].private_data = dev;
909 davinci_mcasp_dai[pdata->op_mode].dev = &pdev->dev;
910 ret = snd_soc_register_dai(&davinci_mcasp_dai[pdata->op_mode]);
911
912 if (ret != 0)
913 goto err_release_region;
914 return 0;
915
916err_release_region:
917 release_mem_region(mem->start, (mem->end - mem->start) + 1);
918err_release_data:
919 kfree(dma_data);
920err_release_dev:
921 kfree(dev);
922
923 return ret;
924}
925
926static int davinci_mcasp_remove(struct platform_device *pdev)
927{
928 struct snd_platform_data *pdata = pdev->dev.platform_data;
929 struct davinci_pcm_dma_params *dma_data;
930 struct davinci_audio_dev *dev;
931 struct resource *mem;
932
933 snd_soc_unregister_dai(&davinci_mcasp_dai[pdata->op_mode]);
934 dev = davinci_mcasp_dai[pdata->op_mode].private_data;
935 clk_disable(dev->clk);
936 clk_put(dev->clk);
937 dev->clk = NULL;
938
939 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
940 release_mem_region(mem->start, (mem->end - mem->start) + 1);
941
942 dma_data = dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK];
943 kfree(dma_data);
944 kfree(dev);
945
946 return 0;
947}
948
949static struct platform_driver davinci_mcasp_driver = {
950 .probe = davinci_mcasp_probe,
951 .remove = davinci_mcasp_remove,
952 .driver = {
953 .name = "davinci-mcasp",
954 .owner = THIS_MODULE,
955 },
956};
957
958static int __init davinci_mcasp_init(void)
959{
960 return platform_driver_register(&davinci_mcasp_driver);
961}
962module_init(davinci_mcasp_init);
963
964static void __exit davinci_mcasp_exit(void)
965{
966 platform_driver_unregister(&davinci_mcasp_driver);
967}
968module_exit(davinci_mcasp_exit);
969
970MODULE_AUTHOR("Steve Chen");
971MODULE_DESCRIPTION("TI DAVINCI McASP SoC Interface");
972MODULE_LICENSE("GPL");
973
diff --git a/sound/soc/davinci/davinci-mcasp.h b/sound/soc/davinci/davinci-mcasp.h
new file mode 100644
index 000000000000..554354c1cc2f
--- /dev/null
+++ b/sound/soc/davinci/davinci-mcasp.h
@@ -0,0 +1,60 @@
1/*
2 * ALSA SoC McASP Audio Layer for TI DAVINCI processor
3 *
4 * MCASP related definitions
5 *
6 * Author: Nirmal Pandey <n-pandey@ti.com>,
7 * Suresh Rajashekara <suresh.r@ti.com>
8 * Steve Chen <schen@mvista.com>
9 *
10 * Copyright: (C) 2009 MontaVista Software, Inc., <source@mvista.com>
11 * Copyright: (C) 2009 Texas Instruments, India
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
16 */
17
18#ifndef DAVINCI_MCASP_H
19#define DAVINCI_MCASP_H
20
21#include <linux/io.h>
22#include <mach/asp.h>
23#include "davinci-pcm.h"
24
25extern struct snd_soc_dai davinci_mcasp_dai[];
26
27#define DAVINCI_MCASP_RATES SNDRV_PCM_RATE_8000_96000
28#define DAVINCI_MCASP_I2S_DAI 0
29#define DAVINCI_MCASP_DIT_DAI 1
30
31enum {
32 DAVINCI_AUDIO_WORD_8 = 0,
33 DAVINCI_AUDIO_WORD_12,
34 DAVINCI_AUDIO_WORD_16,
35 DAVINCI_AUDIO_WORD_20,
36 DAVINCI_AUDIO_WORD_24,
37 DAVINCI_AUDIO_WORD_32,
38 DAVINCI_AUDIO_WORD_28, /* This is only valid for McASP */
39};
40
41struct davinci_audio_dev {
42 void __iomem *base;
43 int sample_rate;
44 struct clk *clk;
45 struct davinci_pcm_dma_params *dma_params[2];
46 unsigned int codec_fmt;
47
48 /* McASP specific data */
49 int tdm_slots;
50 u8 op_mode;
51 u8 num_serializer;
52 u8 *serial_dir;
53 u8 version;
54
55 /* McASP FIFO related */
56 u8 txnumevt;
57 u8 rxnumevt;
58};
59
60#endif /* DAVINCI_MCASP_H */
diff --git a/sound/soc/davinci/davinci-pcm.c b/sound/soc/davinci/davinci-pcm.c
index a05996588489..091dacb78b4d 100644
--- a/sound/soc/davinci/davinci-pcm.c
+++ b/sound/soc/davinci/davinci-pcm.c
@@ -67,6 +67,7 @@ static void davinci_pcm_enqueue_dma(struct snd_pcm_substream *substream)
67 dma_addr_t src, dst; 67 dma_addr_t src, dst;
68 unsigned short src_bidx, dst_bidx; 68 unsigned short src_bidx, dst_bidx;
69 unsigned int data_type; 69 unsigned int data_type;
70 unsigned short acnt;
70 unsigned int count; 71 unsigned int count;
71 72
72 period_size = snd_pcm_lib_period_bytes(substream); 73 period_size = snd_pcm_lib_period_bytes(substream);
@@ -91,11 +92,12 @@ static void davinci_pcm_enqueue_dma(struct snd_pcm_substream *substream)
91 dst_bidx = data_type; 92 dst_bidx = data_type;
92 } 93 }
93 94
95 acnt = prtd->params->acnt;
94 edma_set_src(lch, src, INCR, W8BIT); 96 edma_set_src(lch, src, INCR, W8BIT);
95 edma_set_dest(lch, dst, INCR, W8BIT); 97 edma_set_dest(lch, dst, INCR, W8BIT);
96 edma_set_src_index(lch, src_bidx, 0); 98 edma_set_src_index(lch, src_bidx, 0);
97 edma_set_dest_index(lch, dst_bidx, 0); 99 edma_set_dest_index(lch, dst_bidx, 0);
98 edma_set_transfer_params(lch, data_type, count, 1, 0, ASYNC); 100 edma_set_transfer_params(lch, acnt, count, 1, 0, ASYNC);
99 101
100 prtd->period++; 102 prtd->period++;
101 if (unlikely(prtd->period >= runtime->periods)) 103 if (unlikely(prtd->period >= runtime->periods))
@@ -206,6 +208,7 @@ static int davinci_pcm_prepare(struct snd_pcm_substream *substream)
206 /* Copy self-linked parameter RAM entry into master channel */ 208 /* Copy self-linked parameter RAM entry into master channel */
207 edma_read_slot(prtd->slave_lch, &temp); 209 edma_read_slot(prtd->slave_lch, &temp);
208 edma_write_slot(prtd->master_lch, &temp); 210 edma_write_slot(prtd->master_lch, &temp);
211 davinci_pcm_enqueue_dma(substream);
209 212
210 return 0; 213 return 0;
211} 214}
@@ -243,6 +246,11 @@ static int davinci_pcm_open(struct snd_pcm_substream *substream)
243 int ret = 0; 246 int ret = 0;
244 247
245 snd_soc_set_runtime_hwparams(substream, &davinci_pcm_hardware); 248 snd_soc_set_runtime_hwparams(substream, &davinci_pcm_hardware);
249 /* ensure that buffer size is a multiple of period size */
250 ret = snd_pcm_hw_constraint_integer(runtime,
251 SNDRV_PCM_HW_PARAM_PERIODS);
252 if (ret < 0)
253 return ret;
246 254
247 prtd = kzalloc(sizeof(struct davinci_runtime_data), GFP_KERNEL); 255 prtd = kzalloc(sizeof(struct davinci_runtime_data), GFP_KERNEL);
248 if (prtd == NULL) 256 if (prtd == NULL)
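The snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS) call added above forces the period count to be a whole number, so the buffer is an exact multiple of the period size; davinci_pcm_enqueue_dma() relies on this when it steps the offset by one full period per interrupt. A small standalone sketch of why the constraint matters, using hypothetical sizes (illustration only):

#include <stdio.h>

/* Hypothetical illustration: stepping one period at a time stays inside
 * the ring buffer only when the buffer is an exact multiple of the period. */
static void walk(unsigned int buffer_bytes, unsigned int period_bytes)
{
	unsigned int offset = 0;

	for (int i = 0; i < 10; i++) {
		printf("period %d starts at %6u%s\n", i, offset,
		       offset + period_bytes > buffer_bytes ?
		       "  <-- would overrun the buffer" : "");
		offset = (offset + period_bytes) % buffer_bytes;
	}
}

int main(void)
{
	walk(8 * 4096, 4096);		/* integer period count: always in bounds */
	walk(8 * 4096 + 100, 4096);	/* non-integer: a later step overruns */
	return 0;
}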
diff --git a/sound/soc/davinci/davinci-pcm.h b/sound/soc/davinci/davinci-pcm.h
index 62cb4eb07e34..63d96253c73a 100644
--- a/sound/soc/davinci/davinci-pcm.h
+++ b/sound/soc/davinci/davinci-pcm.h
@@ -12,17 +12,20 @@
12#ifndef _DAVINCI_PCM_H 12#ifndef _DAVINCI_PCM_H
13#define _DAVINCI_PCM_H 13#define _DAVINCI_PCM_H
14 14
15#include <mach/edma.h>
16#include <mach/asp.h>
17
18
15struct davinci_pcm_dma_params { 19struct davinci_pcm_dma_params {
16 char *name; /* stream identifier */ 20 char *name; /* stream identifier */
17 int channel; /* sync dma channel ID */ 21 int channel; /* sync dma channel ID */
18 dma_addr_t dma_addr; /* device physical address for DMA */ 22 unsigned short acnt;
19 unsigned int data_type; /* xfer data type */ 23 dma_addr_t dma_addr; /* device physical address for DMA */
24 enum dma_event_q eventq_no; /* event queue number */
25 unsigned char data_type; /* xfer data type */
26 unsigned char convert_mono_stereo;
20}; 27};
21 28
22struct evm_snd_platform_data {
23 int tx_dma_ch;
24 int rx_dma_ch;
25};
26 29
27extern struct snd_soc_platform davinci_soc_platform; 30extern struct snd_soc_platform davinci_soc_platform;
28 31
diff --git a/sound/soc/fsl/mpc5200_dma.c b/sound/soc/fsl/mpc5200_dma.c
index f0a2d4071998..9ff62e3a9b1d 100644
--- a/sound/soc/fsl/mpc5200_dma.c
+++ b/sound/soc/fsl/mpc5200_dma.c
@@ -69,6 +69,23 @@ static void psc_dma_bcom_enqueue_next_buffer(struct psc_dma_stream *s)
69 69
70static void psc_dma_bcom_enqueue_tx(struct psc_dma_stream *s) 70static void psc_dma_bcom_enqueue_tx(struct psc_dma_stream *s)
71{ 71{
72 if (s->appl_ptr > s->runtime->control->appl_ptr) {
73 /*
74 * In this case s->runtime->control->appl_ptr has wrapped around.
75 * Play the data to the end of the boundary, then wrap our own
76 * appl_ptr back around.
77 */
78 while (s->appl_ptr < s->runtime->boundary) {
79 if (bcom_queue_full(s->bcom_task))
80 return;
81
82 s->appl_ptr += s->period_size;
83
84 psc_dma_bcom_enqueue_next_buffer(s);
85 }
86 s->appl_ptr -= s->runtime->boundary;
87 }
88
72 while (s->appl_ptr < s->runtime->control->appl_ptr) { 89 while (s->appl_ptr < s->runtime->control->appl_ptr) {
73 90
74 if (bcom_queue_full(s->bcom_task)) 91 if (bcom_queue_full(s->bcom_task))
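The block added above handles the case where ALSA's application pointer (s->runtime->control->appl_ptr) has already wrapped at runtime->boundary while the driver's shadow pointer s->appl_ptr has not: the driver keeps enqueuing periods up to the boundary and then subtracts the boundary so both pointers are back in the same window, after which the normal catch-up loop applies. A standalone sketch of that pointer arithmetic with hypothetical numbers (boundary, period and positions are assumptions, not values from the driver):

#include <stdio.h>

/* Hypothetical illustration of the appl_ptr wrap handling above: both
 * pointers advance monotonically and wrap at 'boundary'; when the
 * application pointer wraps first, catch up to the boundary and then
 * rebase the shadow pointer before the normal catch-up loop. */
int main(void)
{
	unsigned long boundary = 1UL << 20;		/* assumed wrap point */
	unsigned long period = 4096;			/* assumed period size */
	unsigned long appl = period;			/* ALSA pointer, already wrapped */
	unsigned long hw_appl = boundary - 2 * period;	/* driver's shadow pointer */

	if (hw_appl > appl) {
		while (hw_appl < boundary)
			hw_appl += period;		/* enqueue up to the boundary */
		hw_appl -= boundary;			/* wrap our own pointer */
	}
	while (hw_appl < appl)
		hw_appl += period;			/* normal catch-up path */

	printf("shadow pointer caught up at %lu (appl_ptr %lu)\n", hw_appl, appl);
	return 0;
}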
diff --git a/sound/soc/fsl/mpc5200_psc_ac97.c b/sound/soc/fsl/mpc5200_psc_ac97.c
index 7eb549985d49..c4ae3e096bb9 100644
--- a/sound/soc/fsl/mpc5200_psc_ac97.c
+++ b/sound/soc/fsl/mpc5200_psc_ac97.c
@@ -12,6 +12,7 @@
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/of_device.h> 13#include <linux/of_device.h>
14#include <linux/of_platform.h> 14#include <linux/of_platform.h>
15#include <linux/delay.h>
15 16
16#include <sound/pcm.h> 17#include <sound/pcm.h>
17#include <sound/pcm_params.h> 18#include <sound/pcm_params.h>
@@ -112,7 +113,7 @@ static void psc_ac97_cold_reset(struct snd_ac97 *ac97)
112 out_8(&regs->op1, MPC52xx_PSC_OP_RES); 113 out_8(&regs->op1, MPC52xx_PSC_OP_RES);
113 udelay(10); 114 udelay(10);
114 out_8(&regs->op0, MPC52xx_PSC_OP_RES); 115 out_8(&regs->op0, MPC52xx_PSC_OP_RES);
115 udelay(50); 116 msleep(1);
116 psc_ac97_warm_reset(ac97); 117 psc_ac97_warm_reset(ac97);
117} 118}
118 119
diff --git a/sound/soc/imx/Kconfig b/sound/soc/imx/Kconfig
new file mode 100644
index 000000000000..a700562e8692
--- /dev/null
+++ b/sound/soc/imx/Kconfig
@@ -0,0 +1,21 @@
1config SND_MX1_MX2_SOC
2	tristate "SoC Audio for Freescale i.MX1x and i.MX2x CPUs"
3 depends on ARCH_MX2 || ARCH_MX1
4 select SND_PCM
5 help
6 Say Y or M if you want to add support for codecs attached to
7 the MX1 or MX2 SSI interface.
8
9config SND_MXC_SOC_SSI
10 tristate
11
12config SND_SOC_MX27VIS_WM8974
13 tristate "SoC Audio support for MX27 - WM8974 Visstrim_sm10 board"
14 depends on SND_MX1_MX2_SOC && MACH_MX27 && MACH_IMX27_VISSTRIM_M10
15 select SND_MXC_SOC_SSI
16 select SND_SOC_WM8974
17 help
18 Say Y if you want to add support for SoC audio on Visstrim SM10
19 board with WM8974.
20
21
diff --git a/sound/soc/imx/Makefile b/sound/soc/imx/Makefile
new file mode 100644
index 000000000000..c2ffd2c8df5a
--- /dev/null
+++ b/sound/soc/imx/Makefile
@@ -0,0 +1,10 @@
1# i.MX Platform Support
2snd-soc-mx1_mx2-objs := mx1_mx2-pcm.o
3snd-soc-mxc-ssi-objs := mxc-ssi.o
4
5obj-$(CONFIG_SND_MX1_MX2_SOC) += snd-soc-mx1_mx2.o
6obj-$(CONFIG_SND_MXC_SOC_SSI) += snd-soc-mxc-ssi.o
7
8# i.MX Machine Support
9snd-soc-mx27vis-wm8974-objs := mx27vis_wm8974.o
10obj-$(CONFIG_SND_SOC_MX27VIS_WM8974) += snd-soc-mx27vis-wm8974.o
diff --git a/sound/soc/imx/mx1_mx2-pcm.c b/sound/soc/imx/mx1_mx2-pcm.c
new file mode 100644
index 000000000000..b83866529397
--- /dev/null
+++ b/sound/soc/imx/mx1_mx2-pcm.c
@@ -0,0 +1,488 @@
1/*
2 * mx1_mx2-pcm.c -- ALSA SoC interface for Freescale i.MX1x, i.MX2x CPUs
3 *
4 * Copyright 2009 Vista Silicon S.L.
5 * Author: Javier Martin
6 * javier.martin@vista-silicon.com
7 *
8 * Based on mxc-pcm.c by Liam Girdwood.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 *
15 */
16
17#include <linux/module.h>
18#include <linux/init.h>
19#include <linux/platform_device.h>
20#include <linux/slab.h>
21#include <linux/dma-mapping.h>
22#include <sound/core.h>
23#include <sound/pcm.h>
24#include <sound/pcm_params.h>
25#include <sound/soc.h>
26#include <asm/dma.h>
27#include <mach/hardware.h>
28#include <mach/dma-mx1-mx2.h>
29
30#include "mx1_mx2-pcm.h"
31
32
33static const struct snd_pcm_hardware mx1_mx2_pcm_hardware = {
34 .info = (SNDRV_PCM_INFO_INTERLEAVED |
35 SNDRV_PCM_INFO_BLOCK_TRANSFER |
36 SNDRV_PCM_INFO_MMAP |
37 SNDRV_PCM_INFO_MMAP_VALID),
38 .formats = SNDRV_PCM_FMTBIT_S16_LE,
39 .buffer_bytes_max = 32 * 1024,
40 .period_bytes_min = 64,
41 .period_bytes_max = 8 * 1024,
42 .periods_min = 2,
43 .periods_max = 255,
44 .fifo_size = 0,
45};
46
47struct mx1_mx2_runtime_data {
48 int dma_ch;
49 int active;
50 unsigned int period;
51 unsigned int periods;
52 int tx_spin;
53 spinlock_t dma_lock;
54 struct mx1_mx2_pcm_dma_params *dma_params;
55};
56
57
58/**
59 * This function stops the current dma transfer for playback
60 * and clears the dma pointers.
61 *
62 * @param substream pointer to the structure of the current stream.
63 *
64 */
65static int audio_stop_dma(struct snd_pcm_substream *substream)
66{
67 struct snd_pcm_runtime *runtime = substream->runtime;
68 struct mx1_mx2_runtime_data *prtd = runtime->private_data;
69 unsigned long flags;
70
71 spin_lock_irqsave(&prtd->dma_lock, flags);
72
73 pr_debug("%s\n", __func__);
74
75 prtd->active = 0;
76 prtd->period = 0;
77 prtd->periods = 0;
78
79 /* this stops the dma channel and clears the buffer ptrs */
80
81 imx_dma_disable(prtd->dma_ch);
82
83 spin_unlock_irqrestore(&prtd->dma_lock, flags);
84
85 return 0;
86}
87
88/**
89 * This function is called whenever a new audio block needs to be
90 * transferred to the codec. The function receives the address and the size
91 * of the new block and starts a new DMA transfer.
92 *
93 * @param substream pointer to the structure of the current stream.
94 *
95 */
96static int dma_new_period(struct snd_pcm_substream *substream)
97{
98 struct snd_pcm_runtime *runtime = substream->runtime;
99 struct mx1_mx2_runtime_data *prtd = runtime->private_data;
100 unsigned int dma_size;
101 unsigned int offset;
102 int ret = 0;
103 dma_addr_t mem_addr;
104 unsigned int dev_addr;
105
106 if (prtd->active) {
107 dma_size = frames_to_bytes(runtime, runtime->period_size);
108 offset = dma_size * prtd->period;
109
110 pr_debug("%s: period (%d) out of (%d)\n", __func__,
111 prtd->period,
112 runtime->periods);
113 pr_debug("period_size %d frames\n offset %d bytes\n",
114 (unsigned int)runtime->period_size,
115 offset);
116 pr_debug("dma_size %d bytes\n", dma_size);
117
118 snd_BUG_ON(dma_size > mx1_mx2_pcm_hardware.period_bytes_max);
119
120 mem_addr = (dma_addr_t)(runtime->dma_addr + offset);
121 dev_addr = prtd->dma_params->per_address;
122 pr_debug("%s: mem_addr is %x\n dev_addr is %x\n",
123 __func__, mem_addr, dev_addr);
124
125 ret = imx_dma_setup_single(prtd->dma_ch, mem_addr,
126 dma_size, dev_addr,
127 prtd->dma_params->transfer_type);
128 if (ret < 0) {
129 printk(KERN_ERR "Error %d configuring DMA\n", ret);
130 return ret;
131 }
132 imx_dma_enable(prtd->dma_ch);
133
134 pr_debug("%s: transfer enabled\nmem_addr = %x\n",
135 __func__, (unsigned int) mem_addr);
136 pr_debug("dev_addr = %x\ndma_size = %d\n",
137 (unsigned int) dev_addr, dma_size);
138
139 prtd->tx_spin = 1; /* FGA little trick to retrieve DMA pos */
140 prtd->period++;
141 prtd->period %= runtime->periods;
142 }
143 return ret;
144}
145
146
147/**
148 * This is a callback which will be called
149 * when a TX transfer finishes. The call occurs
150 * in interrupt context.
151 *
152 * @param data pointer to the structure of the current stream.
153 *
154 */
155static void audio_dma_irq(int channel, void *data)
156{
157 struct snd_pcm_substream *substream;
158 struct snd_pcm_runtime *runtime;
159 struct mx1_mx2_runtime_data *prtd;
160 unsigned int dma_size;
161 unsigned int previous_period;
162 unsigned int offset;
163
164 substream = data;
165 runtime = substream->runtime;
166 prtd = runtime->private_data;
167 previous_period = prtd->periods;
168 dma_size = frames_to_bytes(runtime, runtime->period_size);
169 offset = dma_size * previous_period;
170
171 prtd->tx_spin = 0;
172 prtd->periods++;
173 prtd->periods %= runtime->periods;
174
175 pr_debug("%s: irq per %d offset %x\n", __func__, prtd->periods, offset);
176
177 /*
178 * If we are getting a callback for an active stream then we inform
179 * the PCM middle layer we've finished a period
180 */
181 if (prtd->active)
182 snd_pcm_period_elapsed(substream);
183
184 /*
185	 * Trigger next DMA transfer
186 */
187 dma_new_period(substream);
188}
189
190/**
191 * This function configures the hardware to allow audio
192 * playback operations. It is called by the ALSA framework.
193 *
194 * @param substream pointer to the structure of the current stream.
195 *
196 * @return 0 on success, -1 otherwise.
197 */
198static int
199snd_mx1_mx2_prepare(struct snd_pcm_substream *substream)
200{
201 struct snd_pcm_runtime *runtime = substream->runtime;
202 struct mx1_mx2_runtime_data *prtd = runtime->private_data;
203
204 prtd->period = 0;
205 prtd->periods = 0;
206
207 return 0;
208}
209
210static int mx1_mx2_pcm_hw_params(struct snd_pcm_substream *substream,
211 struct snd_pcm_hw_params *hw_params)
212{
213 struct snd_pcm_runtime *runtime = substream->runtime;
214 int ret;
215
216 ret = snd_pcm_lib_malloc_pages(substream,
217 params_buffer_bytes(hw_params));
218 if (ret < 0) {
219		printk(KERN_ERR "%s: Error %d failed to malloc pcm pages\n",
220 __func__, ret);
221 return ret;
222 }
223
224 pr_debug("%s: snd_imx1_mx2_audio_hw_params runtime->dma_addr 0x(%x)\n",
225 __func__, (unsigned int)runtime->dma_addr);
226 pr_debug("%s: snd_imx1_mx2_audio_hw_params runtime->dma_area 0x(%x)\n",
227 __func__, (unsigned int)runtime->dma_area);
228 pr_debug("%s: snd_imx1_mx2_audio_hw_params runtime->dma_bytes 0x(%x)\n",
229 __func__, (unsigned int)runtime->dma_bytes);
230
231 return ret;
232}
233
234static int mx1_mx2_pcm_hw_free(struct snd_pcm_substream *substream)
235{
236 struct snd_pcm_runtime *runtime = substream->runtime;
237 struct mx1_mx2_runtime_data *prtd = runtime->private_data;
238
239 imx_dma_free(prtd->dma_ch);
240
241 snd_pcm_lib_free_pages(substream);
242
243 return 0;
244}
245
246static int mx1_mx2_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
247{
248 struct mx1_mx2_runtime_data *prtd = substream->runtime->private_data;
249 int ret = 0;
250
251 switch (cmd) {
252 case SNDRV_PCM_TRIGGER_START:
253 prtd->tx_spin = 0;
254 /* requested stream startup */
255 prtd->active = 1;
256 pr_debug("%s: starting dma_new_period\n", __func__);
257 ret = dma_new_period(substream);
258 break;
259 case SNDRV_PCM_TRIGGER_STOP:
260 /* requested stream shutdown */
261 pr_debug("%s: stopping dma transfer\n", __func__);
262 ret = audio_stop_dma(substream);
263 break;
264 default:
265 ret = -EINVAL;
266 break;
267 }
268
269 return ret;
270}
271
272static snd_pcm_uframes_t
273mx1_mx2_pcm_pointer(struct snd_pcm_substream *substream)
274{
275 struct snd_pcm_runtime *runtime = substream->runtime;
276 struct mx1_mx2_runtime_data *prtd = runtime->private_data;
277 unsigned int offset = 0;
278
279 /* tx_spin value is used here to check if a transfer is active */
280 if (prtd->tx_spin) {
281 offset = (runtime->period_size * (prtd->periods)) +
282 (runtime->period_size >> 1);
283 if (offset >= runtime->buffer_size)
284 offset = runtime->period_size >> 1;
285 } else {
286 offset = (runtime->period_size * (prtd->periods));
287 if (offset >= runtime->buffer_size)
288 offset = 0;
289 }
290 pr_debug("%s: pointer offset %x\n", __func__, offset);
291
292 return offset;
293}
294
295static int mx1_mx2_pcm_open(struct snd_pcm_substream *substream)
296{
297 struct snd_pcm_runtime *runtime = substream->runtime;
298 struct mx1_mx2_runtime_data *prtd;
299 struct snd_soc_pcm_runtime *rtd = substream->private_data;
300 struct mx1_mx2_pcm_dma_params *dma_data = rtd->dai->cpu_dai->dma_data;
301 int ret;
302
303 snd_soc_set_runtime_hwparams(substream, &mx1_mx2_pcm_hardware);
304
305 ret = snd_pcm_hw_constraint_integer(runtime,
306 SNDRV_PCM_HW_PARAM_PERIODS);
307 if (ret < 0)
308 return ret;
309
310 prtd = kzalloc(sizeof(struct mx1_mx2_runtime_data), GFP_KERNEL);
311 if (prtd == NULL) {
312 ret = -ENOMEM;
313 goto out;
314 }
315
316 runtime->private_data = prtd;
317
318 if (!dma_data)
319 return -ENODEV;
320
321 prtd->dma_params = dma_data;
322
323 pr_debug("%s: Requesting dma channel (%s)\n", __func__,
324 prtd->dma_params->name);
325 prtd->dma_ch = imx_dma_request_by_prio(prtd->dma_params->name,
326 DMA_PRIO_HIGH);
327 if (prtd->dma_ch < 0) {
328		printk(KERN_ERR "Error %d requesting dma channel\n", prtd->dma_ch);
329		return prtd->dma_ch;
330 }
331 imx_dma_config_burstlen(prtd->dma_ch,
332 prtd->dma_params->watermark_level);
333
334 ret = imx_dma_config_channel(prtd->dma_ch,
335 prtd->dma_params->per_config,
336 prtd->dma_params->mem_config,
337 prtd->dma_params->event_id, 0);
338
339 if (ret) {
340		printk(KERN_ERR "Error %d configuring dma channel %d\n",
341 ret, prtd->dma_ch);
342 return ret;
343 }
344
345 pr_debug("%s: Setting tx dma callback function\n", __func__);
346 ret = imx_dma_setup_handlers(prtd->dma_ch,
347 audio_dma_irq, NULL,
348 (void *)substream);
349 if (ret < 0) {
350 printk(KERN_ERR "Error %d setting dma callback function\n", ret);
351 return ret;
352 }
353 return 0;
354
355 out:
356 return ret;
357}
358
359static int mx1_mx2_pcm_close(struct snd_pcm_substream *substream)
360{
361 struct snd_pcm_runtime *runtime = substream->runtime;
362 struct mx1_mx2_runtime_data *prtd = runtime->private_data;
363
364 kfree(prtd);
365
366 return 0;
367}
368
369static int mx1_mx2_pcm_mmap(struct snd_pcm_substream *substream,
370 struct vm_area_struct *vma)
371{
372 struct snd_pcm_runtime *runtime = substream->runtime;
373 return dma_mmap_writecombine(substream->pcm->card->dev, vma,
374 runtime->dma_area,
375 runtime->dma_addr,
376 runtime->dma_bytes);
377}
378
379static struct snd_pcm_ops mx1_mx2_pcm_ops = {
380 .open = mx1_mx2_pcm_open,
381 .close = mx1_mx2_pcm_close,
382 .ioctl = snd_pcm_lib_ioctl,
383 .hw_params = mx1_mx2_pcm_hw_params,
384 .hw_free = mx1_mx2_pcm_hw_free,
385 .prepare = snd_mx1_mx2_prepare,
386 .trigger = mx1_mx2_pcm_trigger,
387 .pointer = mx1_mx2_pcm_pointer,
388 .mmap = mx1_mx2_pcm_mmap,
389};
390
391static u64 mx1_mx2_pcm_dmamask = 0xffffffff;
392
393static int mx1_mx2_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream)
394{
395 struct snd_pcm_substream *substream = pcm->streams[stream].substream;
396 struct snd_dma_buffer *buf = &substream->dma_buffer;
397 size_t size = mx1_mx2_pcm_hardware.buffer_bytes_max;
398 buf->dev.type = SNDRV_DMA_TYPE_DEV;
399 buf->dev.dev = pcm->card->dev;
400 buf->private_data = NULL;
401
402 /* Reserve uncached-buffered memory area for DMA */
403 buf->area = dma_alloc_writecombine(pcm->card->dev, size,
404 &buf->addr, GFP_KERNEL);
405
406 pr_debug("%s: preallocate_dma_buffer: area=%p, addr=%p, size=%d\n",
407 __func__, (void *) buf->area, (void *) buf->addr, size);
408
409 if (!buf->area)
410 return -ENOMEM;
411
412 buf->bytes = size;
413 return 0;
414}
415
416static void mx1_mx2_pcm_free_dma_buffers(struct snd_pcm *pcm)
417{
418 struct snd_pcm_substream *substream;
419 struct snd_dma_buffer *buf;
420 int stream;
421
422 for (stream = 0; stream < 2; stream++) {
423 substream = pcm->streams[stream].substream;
424 if (!substream)
425 continue;
426
427 buf = &substream->dma_buffer;
428 if (!buf->area)
429 continue;
430
431 dma_free_writecombine(pcm->card->dev, buf->bytes,
432 buf->area, buf->addr);
433 buf->area = NULL;
434 }
435}
436
437static int mx1_mx2_pcm_new(struct snd_card *card, struct snd_soc_dai *dai,
438 struct snd_pcm *pcm)
439{
440 int ret = 0;
441
442 if (!card->dev->dma_mask)
443 card->dev->dma_mask = &mx1_mx2_pcm_dmamask;
444 if (!card->dev->coherent_dma_mask)
445 card->dev->coherent_dma_mask = 0xffffffff;
446
447 if (dai->playback.channels_min) {
448 ret = mx1_mx2_pcm_preallocate_dma_buffer(pcm,
449 SNDRV_PCM_STREAM_PLAYBACK);
450 pr_debug("%s: preallocate playback buffer\n", __func__);
451 if (ret)
452 goto out;
453 }
454
455 if (dai->capture.channels_min) {
456 ret = mx1_mx2_pcm_preallocate_dma_buffer(pcm,
457 SNDRV_PCM_STREAM_CAPTURE);
458 pr_debug("%s: preallocate capture buffer\n", __func__);
459 if (ret)
460 goto out;
461 }
462 out:
463 return ret;
464}
465
466struct snd_soc_platform mx1_mx2_soc_platform = {
467 .name = "mx1_mx2-audio",
468 .pcm_ops = &mx1_mx2_pcm_ops,
469 .pcm_new = mx1_mx2_pcm_new,
470 .pcm_free = mx1_mx2_pcm_free_dma_buffers,
471};
472EXPORT_SYMBOL_GPL(mx1_mx2_soc_platform);
473
474static int __init mx1_mx2_soc_platform_init(void)
475{
476 return snd_soc_register_platform(&mx1_mx2_soc_platform);
477}
478module_init(mx1_mx2_soc_platform_init);
479
480static void __exit mx1_mx2_soc_platform_exit(void)
481{
482 snd_soc_unregister_platform(&mx1_mx2_soc_platform);
483}
484module_exit(mx1_mx2_soc_platform_exit);
485
486MODULE_AUTHOR("Javier Martin, javier.martin@vista-silicon.com");
487MODULE_DESCRIPTION("Freescale i.MX2x, i.MX1x PCM DMA module");
488MODULE_LICENSE("GPL");
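mx1_mx2_pcm_pointer() above reports the playback position as the number of completed periods, plus half a period while a DMA transfer is still in flight (the tx_spin flag), presumably because no finer-grained DMA progress is read back here. A sketch of that estimate in isolation, with hypothetical frame counts (illustration only):

#include <stdio.h>

/* Hypothetical illustration of the pointer estimate used above:
 * completed periods, plus half a period while a transfer is active,
 * wrapped so the result stays inside the buffer. */
static unsigned int estimate_pointer(unsigned int period_size,
				     unsigned int buffer_size,
				     unsigned int periods_done,
				     int transfer_active)
{
	unsigned int offset = period_size * periods_done;

	if (transfer_active) {
		offset += period_size >> 1;	/* assume we are mid-period */
		if (offset >= buffer_size)
			offset = period_size >> 1;
	} else if (offset >= buffer_size) {
		offset = 0;
	}
	return offset;
}

int main(void)
{
	/* 4 periods of 1024 frames, third period currently transferring */
	printf("%u\n", estimate_pointer(1024, 4096, 2, 1));	/* 2560 */
	printf("%u\n", estimate_pointer(1024, 4096, 4, 0));	/* wraps to 0 */
	return 0;
}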
diff --git a/sound/soc/imx/mx1_mx2-pcm.h b/sound/soc/imx/mx1_mx2-pcm.h
new file mode 100644
index 000000000000..2e528106570b
--- /dev/null
+++ b/sound/soc/imx/mx1_mx2-pcm.h
@@ -0,0 +1,26 @@
1/*
2 * mx1_mx2-pcm.h :- ASoC platform header for Freescale i.MX1x, i.MX2x
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef _MX1_MX2_PCM_H
10#define _MX1_MX2_PCM_H
11
12/* DMA information for mx1_mx2 platforms */
13struct mx1_mx2_pcm_dma_params {
14 char *name; /* stream identifier */
15 unsigned int transfer_type; /* READ or WRITE DMA transfer */
16 dma_addr_t per_address; /* physical address of SSI fifo */
17 int event_id; /* fixed DMA number for SSI fifo */
18 int watermark_level; /* SSI fifo watermark level */
19 int per_config; /* DMA Config flags for peripheral */
20 int mem_config; /* DMA Config flags for RAM */
21 };
22
23/* platform data */
24extern struct snd_soc_platform mx1_mx2_soc_platform;
25
26#endif
diff --git a/sound/soc/imx/mx27vis_wm8974.c b/sound/soc/imx/mx27vis_wm8974.c
new file mode 100644
index 000000000000..e4dcb539108a
--- /dev/null
+++ b/sound/soc/imx/mx27vis_wm8974.c
@@ -0,0 +1,317 @@
1/*
2 * mx27vis_wm8974.c -- SoC audio for mx27vis
3 *
4 * Copyright 2009 Vista Silicon S.L.
5 * Author: Javier Martin
6 * javier.martin@vista-silicon.com
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 */
14
15#include <linux/module.h>
16#include <linux/moduleparam.h>
17#include <linux/device.h>
18#include <linux/i2c.h>
19#include <sound/core.h>
20#include <sound/pcm.h>
21#include <sound/soc.h>
22#include <sound/soc-dapm.h>
23
24
25#include "../codecs/wm8974.h"
26#include "mx1_mx2-pcm.h"
27#include "mxc-ssi.h"
28#include <mach/gpio.h>
29#include <mach/iomux.h>
30
31#define IGNORED_ARG 0
32
33
34static struct snd_soc_card mx27vis;
35
36/**
37 * This function connects SSI1 (HPCR1) as slave to
38 * SSI1 external signals (PPCR1)
39 * As slave, HPCR1 must set TFSDIR and TCLKDIR as inputs from
40 * port 4
41 */
42void audmux_connect_1_4(void)
43{
44 pr_debug("AUDMUX: normal operation mode\n");
45 /* Reset HPCR1 and PPCR1 */
46
47 DAM_HPCR1 = 0x00000000;
48 DAM_PPCR1 = 0x00000000;
49
50 /* set to synchronous */
51 DAM_HPCR1 |= AUDMUX_HPCR_SYN;
52 DAM_PPCR1 |= AUDMUX_PPCR_SYN;
53
54
55 /* set Rx sources 1 <--> 4 */
56 DAM_HPCR1 |= AUDMUX_HPCR_RXDSEL(3); /* port 4 */
57 DAM_PPCR1 |= AUDMUX_PPCR_RXDSEL(0); /* port 1 */
58
59 /* set Tx frame and Clock direction and source 4 --> 1 output */
60 DAM_HPCR1 |= AUDMUX_HPCR_TFSDIR | AUDMUX_HPCR_TCLKDIR;
61 DAM_HPCR1 |= AUDMUX_HPCR_TFCSEL(3); /* TxDS and TxCclk from port 4 */
62
63 return;
64}
65
66static int mx27vis_hifi_hw_params(struct snd_pcm_substream *substream,
67 struct snd_pcm_hw_params *params)
68{
69 struct snd_soc_pcm_runtime *rtd = substream->private_data;
70 struct snd_soc_dai *codec_dai = rtd->dai->codec_dai;
71 struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
72 unsigned int pll_out = 0, bclk = 0, fmt = 0, mclk = 0;
73 int ret = 0;
74
75 /*
76 * The WM8974 is better at generating accurate audio clocks than the
77 * MX27 SSI controller, so we will use it as master when we can.
78 */
79 switch (params_rate(params)) {
80 case 8000:
81 fmt = SND_SOC_DAIFMT_CBM_CFM;
82 mclk = WM8974_MCLKDIV_12;
83 pll_out = 24576000;
84 break;
85 case 16000:
86 fmt = SND_SOC_DAIFMT_CBM_CFM;
87 pll_out = 12288000;
88 break;
89 case 48000:
90 fmt = SND_SOC_DAIFMT_CBM_CFM;
91 bclk = WM8974_BCLKDIV_4;
92 pll_out = 12288000;
93 break;
94 case 96000:
95 fmt = SND_SOC_DAIFMT_CBM_CFM;
96 bclk = WM8974_BCLKDIV_2;
97 pll_out = 12288000;
98 break;
99 case 11025:
100 fmt = SND_SOC_DAIFMT_CBM_CFM;
101 bclk = WM8974_BCLKDIV_16;
102 pll_out = 11289600;
103 break;
104 case 22050:
105 fmt = SND_SOC_DAIFMT_CBM_CFM;
106 bclk = WM8974_BCLKDIV_8;
107 pll_out = 11289600;
108 break;
109 case 44100:
110 fmt = SND_SOC_DAIFMT_CBM_CFM;
111 bclk = WM8974_BCLKDIV_4;
112 mclk = WM8974_MCLKDIV_2;
113 pll_out = 11289600;
114 break;
115 case 88200:
116 fmt = SND_SOC_DAIFMT_CBM_CFM;
117 bclk = WM8974_BCLKDIV_2;
118 pll_out = 11289600;
119 break;
120 }
121
122 /* set codec DAI configuration */
123 ret = codec_dai->ops->set_fmt(codec_dai,
124 SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_IF |
125 SND_SOC_DAIFMT_SYNC | fmt);
126 if (ret < 0) {
127 printk(KERN_ERR "Error from codec DAI configuration\n");
128 return ret;
129 }
130
131 /* set cpu DAI configuration */
132 ret = cpu_dai->ops->set_fmt(cpu_dai,
133 SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
134 SND_SOC_DAIFMT_SYNC | fmt);
135 if (ret < 0) {
136 printk(KERN_ERR "Error from cpu DAI configuration\n");
137 return ret;
138 }
139
140 /* Put DC field of STCCR to 1 (not zero) */
141 ret = cpu_dai->ops->set_tdm_slot(cpu_dai, 0, 2);
142
143 /* set the SSI system clock as input */
144 ret = cpu_dai->ops->set_sysclk(cpu_dai, IMX_SSP_SYS_CLK, 0,
145 SND_SOC_CLOCK_IN);
146 if (ret < 0) {
147 printk(KERN_ERR "Error when setting system SSI clk\n");
148 return ret;
149 }
150
151 /* set codec BCLK division for sample rate */
152 ret = codec_dai->ops->set_clkdiv(codec_dai, WM8974_BCLKDIV, bclk);
153 if (ret < 0) {
154 printk(KERN_ERR "Error when setting BCLK division\n");
155 return ret;
156 }
157
158
159 /* codec PLL input is 25 MHz */
160 ret = codec_dai->ops->set_pll(codec_dai, IGNORED_ARG,
161 25000000, pll_out);
162 if (ret < 0) {
163 printk(KERN_ERR "Error when setting PLL input\n");
164 return ret;
165 }
166
167	/* set codec MCLK division for sample rate */
168 ret = codec_dai->ops->set_clkdiv(codec_dai, WM8974_MCLKDIV, mclk);
169 if (ret < 0) {
170 printk(KERN_ERR "Error when setting MCLK division\n");
171 return ret;
172 }
173
174 return 0;
175}
176
177static int mx27vis_hifi_hw_free(struct snd_pcm_substream *substream)
178{
179 struct snd_soc_pcm_runtime *rtd = substream->private_data;
180 struct snd_soc_dai *codec_dai = rtd->dai->codec_dai;
181
182 /* disable the PLL */
183 return codec_dai->ops->set_pll(codec_dai, IGNORED_ARG, 0, 0);
184}
185
186/*
187 * mx27vis WM8974 HiFi DAI operations.
188 */
189static struct snd_soc_ops mx27vis_hifi_ops = {
190 .hw_params = mx27vis_hifi_hw_params,
191 .hw_free = mx27vis_hifi_hw_free,
192};
193
194
195static int mx27vis_suspend(struct platform_device *pdev, pm_message_t state)
196{
197 return 0;
198}
199
200static int mx27vis_resume(struct platform_device *pdev)
201{
202 return 0;
203}
204
205static int mx27vis_probe(struct platform_device *pdev)
206{
207 int ret = 0;
208
209 ret = get_ssi_clk(0, &pdev->dev);
210
211 if (ret < 0) {
212		printk(KERN_ERR "%s: can't get SSI clock\n", __func__);
213 return ret;
214 }
215
216
217 return 0;
218}
219
220static int mx27vis_remove(struct platform_device *pdev)
221{
222 put_ssi_clk(0);
223 return 0;
224}
225
226static struct snd_soc_dai_link mx27vis_dai[] = {
227{ /* Hifi Playback*/
228 .name = "WM8974",
229 .stream_name = "WM8974 HiFi",
230 .cpu_dai = &imx_ssi_pcm_dai[0],
231 .codec_dai = &wm8974_dai,
232 .ops = &mx27vis_hifi_ops,
233},
234};
235
236static struct snd_soc_card mx27vis = {
237 .name = "mx27vis",
238 .platform = &mx1_mx2_soc_platform,
239 .probe = mx27vis_probe,
240 .remove = mx27vis_remove,
241 .suspend_pre = mx27vis_suspend,
242 .resume_post = mx27vis_resume,
243 .dai_link = mx27vis_dai,
244 .num_links = ARRAY_SIZE(mx27vis_dai),
245};
246
247static struct snd_soc_device mx27vis_snd_devdata = {
248 .card = &mx27vis,
249 .codec_dev = &soc_codec_dev_wm8974,
250};
251
252static struct platform_device *mx27vis_snd_device;
253
254/* Temporary definition of board-specific behaviour */
255void gpio_ssi_active(int ssi_num)
256{
257 int ret = 0;
258
259 unsigned int ssi1_pins[] = {
260 PC20_PF_SSI1_FS,
261 PC21_PF_SSI1_RXD,
262 PC22_PF_SSI1_TXD,
263 PC23_PF_SSI1_CLK,
264 };
265 unsigned int ssi2_pins[] = {
266 PC24_PF_SSI2_FS,
267 PC25_PF_SSI2_RXD,
268 PC26_PF_SSI2_TXD,
269 PC27_PF_SSI2_CLK,
270 };
271 if (ssi_num == 0)
272 ret = mxc_gpio_setup_multiple_pins(ssi1_pins,
273			ARRAY_SIZE(ssi1_pins), "SSI1");
274 else
275 ret = mxc_gpio_setup_multiple_pins(ssi2_pins,
276			ARRAY_SIZE(ssi2_pins), "SSI2");
277 if (ret)
278 printk(KERN_ERR "Error requesting ssi %x pins\n", ssi_num);
279}
280
281
282static int __init mx27vis_init(void)
283{
284 int ret;
285
286 mx27vis_snd_device = platform_device_alloc("soc-audio", -1);
287 if (!mx27vis_snd_device)
288 return -ENOMEM;
289
290 platform_set_drvdata(mx27vis_snd_device, &mx27vis_snd_devdata);
291 mx27vis_snd_devdata.dev = &mx27vis_snd_device->dev;
292 ret = platform_device_add(mx27vis_snd_device);
293
294 if (ret) {
295 printk(KERN_ERR "ASoC: Platform device allocation failed\n");
296 platform_device_put(mx27vis_snd_device);
297 }
298
299 /* WM8974 uses SSI1 (HPCR1) via AUDMUX port 4 for audio (PPCR1) */
300 gpio_ssi_active(0);
301 audmux_connect_1_4();
302
303 return ret;
304}
305
306static void __exit mx27vis_exit(void)
307{
308 /* We should call some "ssi_gpio_inactive()" properly */
309}
310
311module_init(mx27vis_init);
312module_exit(mx27vis_exit);
313
314
315MODULE_AUTHOR("Javier Martin, javier.martin@vista-silicon.com");
316MODULE_DESCRIPTION("ALSA SoC WM8974 mx27vis");
317MODULE_LICENSE("GPL");
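The rate switch in mx27vis_hifi_hw_params() above targets one of two PLL outputs: 11.2896 MHz for the 11.025/22.05/44.1/88.2 kHz family and 12.288 MHz for 16/48/96 kHz (with 8 kHz using 24.576 MHz plus an MCLK divider), picking BCLK/MCLK dividers per rate. A condensed sketch of just that rate-to-PLL mapping (illustration only, not the driver's table):

#include <stdio.h>

/* Illustration of the two clock families used above: rates in the 8 kHz
 * family use a 12.288 MHz PLL output (24.576 MHz at 8 kHz itself), rates
 * in the 11.025 kHz family use 11.2896 MHz. The real handler also sets
 * the WM8974 BCLK/MCLK dividers for each rate. */
static unsigned int pll_out_for_rate(unsigned int rate)
{
	if (rate == 8000)
		return 24576000;	/* divided down by MCLKDIV_12 in the driver */
	if (rate % 8000 == 0)
		return 12288000;	/* 16k/48k/96k family */
	if (rate % 11025 == 0)
		return 11289600;	/* 11.025k/22.05k/44.1k/88.2k family */
	return 0;			/* not handled by the driver */
}

int main(void)
{
	unsigned int rates[] = { 8000, 16000, 44100, 48000, 88200, 96000 };

	for (unsigned int i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
		printf("%5u Hz -> PLL %u Hz\n", rates[i], pll_out_for_rate(rates[i]));
	return 0;
}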
diff --git a/sound/soc/imx/mxc-ssi.c b/sound/soc/imx/mxc-ssi.c
new file mode 100644
index 000000000000..3806ff2c0cd4
--- /dev/null
+++ b/sound/soc/imx/mxc-ssi.c
@@ -0,0 +1,868 @@
1/*
2 * mxc-ssi.c -- SSI driver for Freescale IMX
3 *
4 * Copyright 2006 Wolfson Microelectronics PLC.
5 * Author: Liam Girdwood
6 * liam.girdwood@wolfsonmicro.com or linux@wolfsonmicro.com
7 *
8 * Based on mxc-alsa-mc13783 (C) 2006 Freescale.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 *
15 * TODO:
16 * Need to rework SSI register defs when new defs go into mainline.
17 * Add support for TDM and FIFO 1.
18 * Add support for i.mx3x DMA interface.
19 *
20 */
21
22
23#include <linux/module.h>
24#include <linux/init.h>
25#include <linux/platform_device.h>
26#include <linux/slab.h>
27#include <linux/dma-mapping.h>
28#include <linux/clk.h>
29#include <sound/core.h>
30#include <sound/pcm.h>
31#include <sound/pcm_params.h>
32#include <sound/soc.h>
33#include <mach/dma-mx1-mx2.h>
34#include <asm/mach-types.h>
35
36#include "mxc-ssi.h"
37#include "mx1_mx2-pcm.h"
38
39#define SSI1_PORT 0
40#define SSI2_PORT 1
41
42static int ssi_active[2] = {0, 0};
43
44/* DMA information for mx1_mx2 platforms */
45static struct mx1_mx2_pcm_dma_params imx_ssi1_pcm_stereo_out0 = {
46 .name = "SSI1 PCM Stereo out 0",
47 .transfer_type = DMA_MODE_WRITE,
48 .per_address = SSI1_BASE_ADDR + STX0,
49 .event_id = DMA_REQ_SSI1_TX0,
50 .watermark_level = TXFIFO_WATERMARK,
51 .per_config = IMX_DMA_MEMSIZE_16 | IMX_DMA_TYPE_FIFO,
52 .mem_config = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR,
53};
54
55static struct mx1_mx2_pcm_dma_params imx_ssi1_pcm_stereo_out1 = {
56 .name = "SSI1 PCM Stereo out 1",
57 .transfer_type = DMA_MODE_WRITE,
58 .per_address = SSI1_BASE_ADDR + STX1,
59 .event_id = DMA_REQ_SSI1_TX1,
60 .watermark_level = TXFIFO_WATERMARK,
61 .per_config = IMX_DMA_MEMSIZE_16 | IMX_DMA_TYPE_FIFO,
62 .mem_config = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR,
63};
64
65static struct mx1_mx2_pcm_dma_params imx_ssi1_pcm_stereo_in0 = {
66 .name = "SSI1 PCM Stereo in 0",
67 .transfer_type = DMA_MODE_READ,
68 .per_address = SSI1_BASE_ADDR + SRX0,
69 .event_id = DMA_REQ_SSI1_RX0,
70 .watermark_level = RXFIFO_WATERMARK,
71 .per_config = IMX_DMA_MEMSIZE_16 | IMX_DMA_TYPE_FIFO,
72 .mem_config = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR,
73};
74
75static struct mx1_mx2_pcm_dma_params imx_ssi1_pcm_stereo_in1 = {
76 .name = "SSI1 PCM Stereo in 1",
77 .transfer_type = DMA_MODE_READ,
78 .per_address = SSI1_BASE_ADDR + SRX1,
79 .event_id = DMA_REQ_SSI1_RX1,
80 .watermark_level = RXFIFO_WATERMARK,
81 .per_config = IMX_DMA_MEMSIZE_16 | IMX_DMA_TYPE_FIFO,
82 .mem_config = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR,
83};
84
85static struct mx1_mx2_pcm_dma_params imx_ssi2_pcm_stereo_out0 = {
86 .name = "SSI2 PCM Stereo out 0",
87 .transfer_type = DMA_MODE_WRITE,
88 .per_address = SSI2_BASE_ADDR + STX0,
89 .event_id = DMA_REQ_SSI2_TX0,
90 .watermark_level = TXFIFO_WATERMARK,
91 .per_config = IMX_DMA_MEMSIZE_16 | IMX_DMA_TYPE_FIFO,
92 .mem_config = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR,
93};
94
95static struct mx1_mx2_pcm_dma_params imx_ssi2_pcm_stereo_out1 = {
96 .name = "SSI2 PCM Stereo out 1",
97 .transfer_type = DMA_MODE_WRITE,
98 .per_address = SSI2_BASE_ADDR + STX1,
99 .event_id = DMA_REQ_SSI2_TX1,
100 .watermark_level = TXFIFO_WATERMARK,
101 .per_config = IMX_DMA_MEMSIZE_16 | IMX_DMA_TYPE_FIFO,
102 .mem_config = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR,
103};
104
105static struct mx1_mx2_pcm_dma_params imx_ssi2_pcm_stereo_in0 = {
106 .name = "SSI2 PCM Stereo in 0",
107 .transfer_type = DMA_MODE_READ,
108 .per_address = SSI2_BASE_ADDR + SRX0,
109 .event_id = DMA_REQ_SSI2_RX0,
110 .watermark_level = RXFIFO_WATERMARK,
111 .per_config = IMX_DMA_MEMSIZE_16 | IMX_DMA_TYPE_FIFO,
112 .mem_config = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR,
113};
114
115static struct mx1_mx2_pcm_dma_params imx_ssi2_pcm_stereo_in1 = {
116 .name = "SSI2 PCM Stereo in 1",
117 .transfer_type = DMA_MODE_READ,
118 .per_address = SSI2_BASE_ADDR + SRX1,
119 .event_id = DMA_REQ_SSI2_RX1,
120 .watermark_level = RXFIFO_WATERMARK,
121 .per_config = IMX_DMA_MEMSIZE_16 | IMX_DMA_TYPE_FIFO,
122 .mem_config = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR,
123};
124
125static struct clk *ssi_clk0, *ssi_clk1;
126
127int get_ssi_clk(int ssi, struct device *dev)
128{
129 switch (ssi) {
130 case 0:
131 ssi_clk0 = clk_get(dev, "ssi1");
132 if (IS_ERR(ssi_clk0))
133 return PTR_ERR(ssi_clk0);
134 return 0;
135 case 1:
136 ssi_clk1 = clk_get(dev, "ssi2");
137 if (IS_ERR(ssi_clk1))
138 return PTR_ERR(ssi_clk1);
139 return 0;
140 default:
141 return -EINVAL;
142 }
143}
144EXPORT_SYMBOL(get_ssi_clk);
145
146void put_ssi_clk(int ssi)
147{
148 switch (ssi) {
149 case 0:
150 clk_put(ssi_clk0);
151 ssi_clk0 = NULL;
152 break;
153 case 1:
154 clk_put(ssi_clk1);
155 ssi_clk1 = NULL;
156 break;
157 }
158}
159EXPORT_SYMBOL(put_ssi_clk);
160
161/*
162 * SSI system clock configuration.
163 * Should only be called when port is inactive (i.e. SSIEN = 0).
164 */
165static int imx_ssi_set_dai_sysclk(struct snd_soc_dai *cpu_dai,
166 int clk_id, unsigned int freq, int dir)
167{
168 u32 scr;
169
170 if (cpu_dai->id == IMX_DAI_SSI0 || cpu_dai->id == IMX_DAI_SSI2) {
171 scr = SSI1_SCR;
172 pr_debug("%s: SCR for SSI1 is %x\n", __func__, scr);
173 } else {
174 scr = SSI2_SCR;
175 pr_debug("%s: SCR for SSI2 is %x\n", __func__, scr);
176 }
177
178 if (scr & SSI_SCR_SSIEN) {
179 printk(KERN_WARNING "Warning ssi already enabled\n");
180 return 0;
181 }
182
183 switch (clk_id) {
184 case IMX_SSP_SYS_CLK:
185 if (dir == SND_SOC_CLOCK_OUT) {
186 scr |= SSI_SCR_SYS_CLK_EN;
187			pr_debug("%s: sys clk is output\n", __func__);
188 } else {
189 scr &= ~SSI_SCR_SYS_CLK_EN;
190			pr_debug("%s: sys clk is input\n", __func__);
191 }
192 break;
193 default:
194 return -EINVAL;
195 }
196
197 if (cpu_dai->id == IMX_DAI_SSI0 || cpu_dai->id == IMX_DAI_SSI2) {
198 pr_debug("%s: writeback of SSI1_SCR\n", __func__);
199 SSI1_SCR = scr;
200 } else {
201 pr_debug("%s: writeback of SSI2_SCR\n", __func__);
202 SSI2_SCR = scr;
203 }
204
205 return 0;
206}
207
208/*
209 * SSI Clock dividers
210 * Should only be called when port is inactive (i.e. SSIEN = 0).
211 */
212static int imx_ssi_set_dai_clkdiv(struct snd_soc_dai *cpu_dai,
213 int div_id, int div)
214{
215 u32 stccr, srccr;
216
217 pr_debug("%s\n", __func__);
218 if (cpu_dai->id == IMX_DAI_SSI0 || cpu_dai->id == IMX_DAI_SSI2) {
219 if (SSI1_SCR & SSI_SCR_SSIEN)
220 return 0;
221 srccr = SSI1_STCCR;
222 stccr = SSI1_STCCR;
223 } else {
224 if (SSI2_SCR & SSI_SCR_SSIEN)
225 return 0;
226 srccr = SSI2_STCCR;
227 stccr = SSI2_STCCR;
228 }
229
230 switch (div_id) {
231 case IMX_SSI_TX_DIV_2:
232 stccr &= ~SSI_STCCR_DIV2;
233 stccr |= div;
234 break;
235 case IMX_SSI_TX_DIV_PSR:
236 stccr &= ~SSI_STCCR_PSR;
237 stccr |= div;
238 break;
239 case IMX_SSI_TX_DIV_PM:
240 stccr &= ~0xff;
241 stccr |= SSI_STCCR_PM(div);
242 break;
243 case IMX_SSI_RX_DIV_2:
244 stccr &= ~SSI_STCCR_DIV2;
245 stccr |= div;
246 break;
247 case IMX_SSI_RX_DIV_PSR:
248 stccr &= ~SSI_STCCR_PSR;
249 stccr |= div;
250 break;
251 case IMX_SSI_RX_DIV_PM:
252 stccr &= ~0xff;
253 stccr |= SSI_STCCR_PM(div);
254 break;
255 default:
256 return -EINVAL;
257 }
258
259 if (cpu_dai->id == IMX_DAI_SSI0 || cpu_dai->id == IMX_DAI_SSI2) {
260 SSI1_STCCR = stccr;
261 SSI1_SRCCR = srccr;
262 } else {
263 SSI2_STCCR = stccr;
264 SSI2_SRCCR = srccr;
265 }
266 return 0;
267}
268
269/*
270 * SSI Network Mode or TDM slots configuration.
271 * Should only be called when port is inactive (i.e. SSIEN = 0).
272 */
273static int imx_ssi_set_dai_tdm_slot(struct snd_soc_dai *cpu_dai,
274 unsigned int mask, int slots)
275{
276 u32 stmsk, srmsk, stccr;
277
278 if (cpu_dai->id == IMX_DAI_SSI0 || cpu_dai->id == IMX_DAI_SSI2) {
279 if (SSI1_SCR & SSI_SCR_SSIEN) {
280 printk(KERN_WARNING "Warning ssi already enabled\n");
281 return 0;
282 }
283 stccr = SSI1_STCCR;
284 } else {
285 if (SSI2_SCR & SSI_SCR_SSIEN) {
286 printk(KERN_WARNING "Warning ssi already enabled\n");
287 return 0;
288 }
289 stccr = SSI2_STCCR;
290 }
291
292 stmsk = srmsk = mask;
293 stccr &= ~SSI_STCCR_DC_MASK;
294 stccr |= SSI_STCCR_DC(slots - 1);
295
296 if (cpu_dai->id == IMX_DAI_SSI0 || cpu_dai->id == IMX_DAI_SSI2) {
297 SSI1_STMSK = stmsk;
298 SSI1_SRMSK = srmsk;
299 SSI1_SRCCR = SSI1_STCCR = stccr;
300 } else {
301 SSI2_STMSK = stmsk;
302 SSI2_SRMSK = srmsk;
303 SSI2_SRCCR = SSI2_STCCR = stccr;
304 }
305
306 return 0;
307}
308
309/*
310 * SSI DAI format configuration.
311 * Should only be called when port is inactive (i.e. SSIEN = 0).
312 * Note: We don't use the I2S modes but instead manually configure the
313 * SSI for I2S.
314 */
315static int imx_ssi_set_dai_fmt(struct snd_soc_dai *cpu_dai,
316 unsigned int fmt)
317{
318 u32 stcr = 0, srcr = 0, scr;
319
320 /*
321	 * This is done to avoid this function modifying
322	 * previously set values in stcr
323 */
324 stcr = SSI1_STCR;
325
326 if (cpu_dai->id == IMX_DAI_SSI0 || cpu_dai->id == IMX_DAI_SSI2)
327 scr = SSI1_SCR & ~(SSI_SCR_SYN | SSI_SCR_NET);
328 else
329 scr = SSI2_SCR & ~(SSI_SCR_SYN | SSI_SCR_NET);
330
331 if (scr & SSI_SCR_SSIEN) {
332 printk(KERN_WARNING "Warning ssi already enabled\n");
333 return 0;
334 }
335
336 /* DAI mode */
337 switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
338 case SND_SOC_DAIFMT_I2S:
339 /* data on rising edge of bclk, frame low 1clk before data */
340 stcr |= SSI_STCR_TFSI | SSI_STCR_TEFS | SSI_STCR_TXBIT0;
341 srcr |= SSI_SRCR_RFSI | SSI_SRCR_REFS | SSI_SRCR_RXBIT0;
342 break;
343 case SND_SOC_DAIFMT_LEFT_J:
344 /* data on rising edge of bclk, frame high with data */
345 stcr |= SSI_STCR_TXBIT0;
346 srcr |= SSI_SRCR_RXBIT0;
347 break;
348 case SND_SOC_DAIFMT_DSP_B:
349 /* data on rising edge of bclk, frame high with data */
350 stcr |= SSI_STCR_TFSL;
351 srcr |= SSI_SRCR_RFSL;
352 break;
353 case SND_SOC_DAIFMT_DSP_A:
354 /* data on rising edge of bclk, frame high 1clk before data */
355 stcr |= SSI_STCR_TFSL | SSI_STCR_TEFS;
356 srcr |= SSI_SRCR_RFSL | SSI_SRCR_REFS;
357 break;
358 }
359
360 /* DAI clock inversion */
361 switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
362 case SND_SOC_DAIFMT_IB_IF:
363 stcr |= SSI_STCR_TFSI;
364 stcr &= ~SSI_STCR_TSCKP;
365 srcr |= SSI_SRCR_RFSI;
366 srcr &= ~SSI_SRCR_RSCKP;
367 break;
368 case SND_SOC_DAIFMT_IB_NF:
369 stcr &= ~(SSI_STCR_TSCKP | SSI_STCR_TFSI);
370 srcr &= ~(SSI_SRCR_RSCKP | SSI_SRCR_RFSI);
371 break;
372 case SND_SOC_DAIFMT_NB_IF:
373 stcr |= SSI_STCR_TFSI | SSI_STCR_TSCKP;
374 srcr |= SSI_SRCR_RFSI | SSI_SRCR_RSCKP;
375 break;
376 case SND_SOC_DAIFMT_NB_NF:
377 stcr &= ~SSI_STCR_TFSI;
378 stcr |= SSI_STCR_TSCKP;
379 srcr &= ~SSI_SRCR_RFSI;
380 srcr |= SSI_SRCR_RSCKP;
381 break;
382 }
383
384 /* DAI clock master masks */
385 switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
386 case SND_SOC_DAIFMT_CBS_CFS:
387 stcr |= SSI_STCR_TFDIR | SSI_STCR_TXDIR;
388 srcr |= SSI_SRCR_RFDIR | SSI_SRCR_RXDIR;
389 break;
390 case SND_SOC_DAIFMT_CBM_CFS:
391 stcr |= SSI_STCR_TFDIR;
392 srcr |= SSI_SRCR_RFDIR;
393 break;
394 case SND_SOC_DAIFMT_CBS_CFM:
395 stcr |= SSI_STCR_TXDIR;
396 srcr |= SSI_SRCR_RXDIR;
397 break;
398 }
399
400 /* sync */
401 if (!(fmt & SND_SOC_DAIFMT_ASYNC))
402 scr |= SSI_SCR_SYN;
403
404 /* tdm - only for stereo atm */
405 if (fmt & SND_SOC_DAIFMT_TDM)
406 scr |= SSI_SCR_NET;
407
408 if (cpu_dai->id == IMX_DAI_SSI0 || cpu_dai->id == IMX_DAI_SSI2) {
409 SSI1_STCR = stcr;
410 SSI1_SRCR = srcr;
411 SSI1_SCR = scr;
412 } else {
413 SSI2_STCR = stcr;
414 SSI2_SRCR = srcr;
415 SSI2_SCR = scr;
416 }
417
418 return 0;
419}
420
421static int imx_ssi_startup(struct snd_pcm_substream *substream,
422 struct snd_soc_dai *dai)
423{
424 struct snd_soc_pcm_runtime *rtd = substream->private_data;
425 struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
426
427 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
428 /* set up TX DMA params */
429 switch (cpu_dai->id) {
430 case IMX_DAI_SSI0:
431 cpu_dai->dma_data = &imx_ssi1_pcm_stereo_out0;
432 break;
433 case IMX_DAI_SSI1:
434 cpu_dai->dma_data = &imx_ssi1_pcm_stereo_out1;
435 break;
436 case IMX_DAI_SSI2:
437 cpu_dai->dma_data = &imx_ssi2_pcm_stereo_out0;
438 break;
439 case IMX_DAI_SSI3:
440 cpu_dai->dma_data = &imx_ssi2_pcm_stereo_out1;
441 }
442 pr_debug("%s: (playback)\n", __func__);
443 } else {
444 /* set up RX DMA params */
445 switch (cpu_dai->id) {
446 case IMX_DAI_SSI0:
447 cpu_dai->dma_data = &imx_ssi1_pcm_stereo_in0;
448 break;
449 case IMX_DAI_SSI1:
450 cpu_dai->dma_data = &imx_ssi1_pcm_stereo_in1;
451 break;
452 case IMX_DAI_SSI2:
453 cpu_dai->dma_data = &imx_ssi2_pcm_stereo_in0;
454 break;
455 case IMX_DAI_SSI3:
456 cpu_dai->dma_data = &imx_ssi2_pcm_stereo_in1;
457 }
458 pr_debug("%s: (capture)\n", __func__);
459 }
460
461 /*
462	 * we can't really change any SSI values after SSI is enabled
463 * need to fix in software for max flexibility - lrg
464 */
465 if (cpu_dai->active) {
466 printk(KERN_WARNING "Warning ssi already enabled\n");
467 return 0;
468 }
469
470 /* reset the SSI port - Sect 45.4.4 */
471 if (cpu_dai->id == IMX_DAI_SSI0 || cpu_dai->id == IMX_DAI_SSI2) {
472
473 if (!ssi_clk0)
474 return -EINVAL;
475
476 if (ssi_active[SSI1_PORT]++) {
477 pr_debug("%s: exit before reset\n", __func__);
478 return 0;
479 }
480
481 /* SSI1 Reset */
482 SSI1_SCR = 0;
483
484 SSI1_SFCSR = SSI_SFCSR_RFWM1(RXFIFO_WATERMARK) |
485 SSI_SFCSR_RFWM0(RXFIFO_WATERMARK) |
486 SSI_SFCSR_TFWM1(TXFIFO_WATERMARK) |
487 SSI_SFCSR_TFWM0(TXFIFO_WATERMARK);
488 } else {
489
490 if (!ssi_clk1)
491 return -EINVAL;
492
493 if (ssi_active[SSI2_PORT]++) {
494 pr_debug("%s: exit before reset\n", __func__);
495 return 0;
496 }
497
498 /* SSI2 Reset */
499 SSI2_SCR = 0;
500
501 SSI2_SFCSR = SSI_SFCSR_RFWM1(RXFIFO_WATERMARK) |
502 SSI_SFCSR_RFWM0(RXFIFO_WATERMARK) |
503 SSI_SFCSR_TFWM1(TXFIFO_WATERMARK) |
504 SSI_SFCSR_TFWM0(TXFIFO_WATERMARK);
505 }
506
507 return 0;
508}
509
510int imx_ssi_hw_tx_params(struct snd_pcm_substream *substream,
511 struct snd_pcm_hw_params *params)
512{
513 struct snd_soc_pcm_runtime *rtd = substream->private_data;
514 struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
515 u32 stccr, stcr, sier;
516
517 pr_debug("%s\n", __func__);
518
519 if (cpu_dai->id == IMX_DAI_SSI0 || cpu_dai->id == IMX_DAI_SSI2) {
520 stccr = SSI1_STCCR & ~SSI_STCCR_WL_MASK;
521 stcr = SSI1_STCR;
522 sier = SSI1_SIER;
523 } else {
524 stccr = SSI2_STCCR & ~SSI_STCCR_WL_MASK;
525 stcr = SSI2_STCR;
526 sier = SSI2_SIER;
527 }
528
529 /* DAI data (word) size */
530 switch (params_format(params)) {
531 case SNDRV_PCM_FORMAT_S16_LE:
532 stccr |= SSI_STCCR_WL(16);
533 break;
534 case SNDRV_PCM_FORMAT_S20_3LE:
535 stccr |= SSI_STCCR_WL(20);
536 break;
537 case SNDRV_PCM_FORMAT_S24_LE:
538 stccr |= SSI_STCCR_WL(24);
539 break;
540 }
541
542 /* enable interrupts */
543 if (cpu_dai->id == IMX_DAI_SSI0 || cpu_dai->id == IMX_DAI_SSI2)
544 stcr |= SSI_STCR_TFEN0;
545 else
546 stcr |= SSI_STCR_TFEN1;
547 sier |= SSI_SIER_TDMAE;
548
549 if (cpu_dai->id == IMX_DAI_SSI0 || cpu_dai->id == IMX_DAI_SSI2) {
550 SSI1_STCR = stcr;
551 SSI1_STCCR = stccr;
552 SSI1_SIER = sier;
553 } else {
554 SSI2_STCR = stcr;
555 SSI2_STCCR = stccr;
556 SSI2_SIER = sier;
557 }
558
559 return 0;
560}
561
562int imx_ssi_hw_rx_params(struct snd_pcm_substream *substream,
563 struct snd_pcm_hw_params *params)
564{
565 struct snd_soc_pcm_runtime *rtd = substream->private_data;
566 struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
567 u32 srccr, srcr, sier;
568
569 pr_debug("%s\n", __func__);
570
571 if (cpu_dai->id == IMX_DAI_SSI0 || cpu_dai->id == IMX_DAI_SSI2) {
572 srccr = SSI1_SRCCR & ~SSI_SRCCR_WL_MASK;
573 srcr = SSI1_SRCR;
574 sier = SSI1_SIER;
575 } else {
576 srccr = SSI2_SRCCR & ~SSI_SRCCR_WL_MASK;
577 srcr = SSI2_SRCR;
578 sier = SSI2_SIER;
579 }
580
581 /* DAI data (word) size */
582 switch (params_format(params)) {
583 case SNDRV_PCM_FORMAT_S16_LE:
584 srccr |= SSI_SRCCR_WL(16);
585 break;
586 case SNDRV_PCM_FORMAT_S20_3LE:
587 srccr |= SSI_SRCCR_WL(20);
588 break;
589 case SNDRV_PCM_FORMAT_S24_LE:
590 srccr |= SSI_SRCCR_WL(24);
591 break;
592 }
593
594 /* enable interrupts */
595 if (cpu_dai->id == IMX_DAI_SSI0 || cpu_dai->id == IMX_DAI_SSI2)
596 srcr |= SSI_SRCR_RFEN0;
597 else
598 srcr |= SSI_SRCR_RFEN1;
599 sier |= SSI_SIER_RDMAE;
600
601 if (cpu_dai->id == IMX_DAI_SSI0 || cpu_dai->id == IMX_DAI_SSI2) {
602 SSI1_SRCR = srcr;
603 SSI1_SRCCR = srccr;
604 SSI1_SIER = sier;
605 } else {
606 SSI2_SRCR = srcr;
607 SSI2_SRCCR = srccr;
608 SSI2_SIER = sier;
609 }
610
611 return 0;
612}
613
614/*
615 * Should only be called when port is inactive (i.e. SSIEN = 0),
616 * although can be called multiple times by upper layers.
617 */
618int imx_ssi_hw_params(struct snd_pcm_substream *substream,
619 struct snd_pcm_hw_params *params,
620 struct snd_soc_dai *dai)
621{
622 struct snd_soc_pcm_runtime *rtd = substream->private_data;
623 struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
624
625 int ret;
626
627	/* can't change any parameters when SSI is running */
628 if (cpu_dai->id == IMX_DAI_SSI0 || cpu_dai->id == IMX_DAI_SSI2) {
629 if (SSI1_SCR & SSI_SCR_SSIEN) {
630 printk(KERN_WARNING "Warning ssi already enabled\n");
631 return 0;
632 }
633 } else {
634 if (SSI2_SCR & SSI_SCR_SSIEN) {
635 printk(KERN_WARNING "Warning ssi already enabled\n");
636 return 0;
637 }
638 }
639
640 /*
641 * Configure both tx and rx params with the same settings. This is
642	 * really a hardware restriction because SSI must be disabled until
643 * we can change those values. If there is an active audio stream in
644 * one direction, enabling the other direction with different
645 * settings would mean disturbing the running one.
646 */
647 ret = imx_ssi_hw_tx_params(substream, params);
648 if (ret < 0)
649 return ret;
650 return imx_ssi_hw_rx_params(substream, params);
651}
652
653int imx_ssi_prepare(struct snd_pcm_substream *substream,
654 struct snd_soc_dai *dai)
655{
656 struct snd_soc_pcm_runtime *rtd = substream->private_data;
657 struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
658 int ret;
659
660 pr_debug("%s\n", __func__);
661
662 /* Enable clks here to follow SSI recommended init sequence */
663 if (cpu_dai->id == IMX_DAI_SSI0 || cpu_dai->id == IMX_DAI_SSI2) {
664 ret = clk_enable(ssi_clk0);
665 if (ret < 0)
666 printk(KERN_ERR "Unable to enable ssi_clk0\n");
667 } else {
668 ret = clk_enable(ssi_clk1);
669 if (ret < 0)
670 printk(KERN_ERR "Unable to enable ssi_clk1\n");
671 }
672
673 return 0;
674}
675
676static int imx_ssi_trigger(struct snd_pcm_substream *substream, int cmd,
677 struct snd_soc_dai *dai)
678{
679 struct snd_soc_pcm_runtime *rtd = substream->private_data;
680 struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
681 u32 scr;
682
683 if (cpu_dai->id == IMX_DAI_SSI0 || cpu_dai->id == IMX_DAI_SSI2)
684 scr = SSI1_SCR;
685 else
686 scr = SSI2_SCR;
687
688 switch (cmd) {
689 case SNDRV_PCM_TRIGGER_START:
690 case SNDRV_PCM_TRIGGER_RESUME:
691 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
692 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
693 scr |= SSI_SCR_TE | SSI_SCR_SSIEN;
694 else
695 scr |= SSI_SCR_RE | SSI_SCR_SSIEN;
696 break;
697 case SNDRV_PCM_TRIGGER_SUSPEND:
698 case SNDRV_PCM_TRIGGER_STOP:
699 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
700 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
701 scr &= ~SSI_SCR_TE;
702 else
703 scr &= ~SSI_SCR_RE;
704 break;
705 default:
706 return -EINVAL;
707 }
708
709 if (cpu_dai->id == IMX_DAI_SSI0 || cpu_dai->id == IMX_DAI_SSI2)
710 SSI1_SCR = scr;
711 else
712 SSI2_SCR = scr;
713
714 return 0;
715}
716
717static void imx_ssi_shutdown(struct snd_pcm_substream *substream,
718 struct snd_soc_dai *dai)
719{
720 struct snd_soc_pcm_runtime *rtd = substream->private_data;
721 struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
722
723 /* shutdown SSI if neither Tx or Rx is active */
724 if (!cpu_dai->active) {
725
726 if (cpu_dai->id == IMX_DAI_SSI0 ||
727 cpu_dai->id == IMX_DAI_SSI2) {
728
729 if (--ssi_active[SSI1_PORT] > 1)
730 return;
731
732 SSI1_SCR = 0;
733 clk_disable(ssi_clk0);
734 } else {
735 if (--ssi_active[SSI2_PORT])
736 return;
737 SSI2_SCR = 0;
738 clk_disable(ssi_clk1);
739 }
740 }
741}
742
743#ifdef CONFIG_PM
744static int imx_ssi_suspend(struct platform_device *dev,
745 struct snd_soc_dai *dai)
746{
747 return 0;
748}
749
750static int imx_ssi_resume(struct platform_device *pdev,
751 struct snd_soc_dai *dai)
752{
753 return 0;
754}
755
756#else
757#define imx_ssi_suspend NULL
758#define imx_ssi_resume NULL
759#endif
760
761#define IMX_SSI_RATES \
762 (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 | \
763 SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_22050 | \
764 SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | \
765 SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 | \
766 SNDRV_PCM_RATE_96000)
767
768#define IMX_SSI_BITS \
769 (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
770 SNDRV_PCM_FMTBIT_S24_LE)
771
772static struct snd_soc_dai_ops imx_ssi_pcm_dai_ops = {
773 .startup = imx_ssi_startup,
774 .shutdown = imx_ssi_shutdown,
775 .trigger = imx_ssi_trigger,
776 .prepare = imx_ssi_prepare,
777 .hw_params = imx_ssi_hw_params,
778 .set_sysclk = imx_ssi_set_dai_sysclk,
779 .set_clkdiv = imx_ssi_set_dai_clkdiv,
780 .set_fmt = imx_ssi_set_dai_fmt,
781 .set_tdm_slot = imx_ssi_set_dai_tdm_slot,
782};
783
784struct snd_soc_dai imx_ssi_pcm_dai[] = {
785{
786 .name = "imx-i2s-1-0",
787 .id = IMX_DAI_SSI0,
788 .suspend = imx_ssi_suspend,
789 .resume = imx_ssi_resume,
790 .playback = {
791 .channels_min = 1,
792 .channels_max = 2,
793 .formats = IMX_SSI_BITS,
794 .rates = IMX_SSI_RATES,},
795 .capture = {
796 .channels_min = 1,
797 .channels_max = 2,
798 .formats = IMX_SSI_BITS,
799 .rates = IMX_SSI_RATES,},
800 .ops = &imx_ssi_pcm_dai_ops,
801},
802{
803 .name = "imx-i2s-2-0",
804 .id = IMX_DAI_SSI1,
805 .playback = {
806 .channels_min = 1,
807 .channels_max = 2,
808 .formats = IMX_SSI_BITS,
809 .rates = IMX_SSI_RATES,},
810 .capture = {
811 .channels_min = 1,
812 .channels_max = 2,
813 .formats = IMX_SSI_BITS,
814 .rates = IMX_SSI_RATES,},
815 .ops = &imx_ssi_pcm_dai_ops,
816},
817{
818 .name = "imx-i2s-1-1",
819 .id = IMX_DAI_SSI2,
820 .suspend = imx_ssi_suspend,
821 .resume = imx_ssi_resume,
822 .playback = {
823 .channels_min = 1,
824 .channels_max = 2,
825 .formats = IMX_SSI_BITS,
826 .rates = IMX_SSI_RATES,},
827 .capture = {
828 .channels_min = 1,
829 .channels_max = 2,
830 .formats = IMX_SSI_BITS,
831 .rates = IMX_SSI_RATES,},
832 .ops = &imx_ssi_pcm_dai_ops,
833},
834{
835 .name = "imx-i2s-2-1",
836 .id = IMX_DAI_SSI3,
837 .playback = {
838 .channels_min = 1,
839 .channels_max = 2,
840 .formats = IMX_SSI_BITS,
841 .rates = IMX_SSI_RATES,},
842 .capture = {
843 .channels_min = 1,
844 .channels_max = 2,
845 .formats = IMX_SSI_BITS,
846 .rates = IMX_SSI_RATES,},
847 .ops = &imx_ssi_pcm_dai_ops,
848},
849};
850EXPORT_SYMBOL_GPL(imx_ssi_pcm_dai);
851
852static int __init imx_ssi_init(void)
853{
854 return snd_soc_register_dais(imx_ssi_pcm_dai,
855 ARRAY_SIZE(imx_ssi_pcm_dai));
856}
857
858static void __exit imx_ssi_exit(void)
859{
860 snd_soc_unregister_dais(imx_ssi_pcm_dai,
861 ARRAY_SIZE(imx_ssi_pcm_dai));
862}
863
864module_init(imx_ssi_init);
865module_exit(imx_ssi_exit);
866MODULE_AUTHOR("Liam Girdwood, liam.girdwood@wolfsonmicro.com");
867MODULE_DESCRIPTION("i.MX ASoC I2S driver");
868MODULE_LICENSE("GPL");
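imx_ssi_hw_tx_params() and imx_ssi_hw_rx_params() above program the word length through the SSI_STCCR_WL()/SSI_SRCCR_WL() macros defined in the header below, which encode the sample width as ((bits - 2) >> 1) in bits 16:13 of STCCR/SRCCR. A quick standalone check of what that encoding produces for the three formats the driver accepts (values follow directly from the macro definition):

#include <stdio.h>

/* The WL field encoding used by SSI_STCCR_WL()/SSI_SRCCR_WL() below:
 * ((bits - 2) >> 1), placed in bits 16:13 of STCCR/SRCCR. */
#define SSI_WL(x)	((((x) - 2) >> 1) << 13)
#define SSI_WL_MASK	(0xf << 13)

int main(void)
{
	unsigned int bits[] = { 16, 20, 24 };	/* S16_LE, S20_3LE, S24_LE */

	for (unsigned int i = 0; i < 3; i++)
		printf("%2u-bit words -> WL field 0x%05x (mask 0x%05x)\n",
		       bits[i], SSI_WL(bits[i]), SSI_WL_MASK);
	return 0;
}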
diff --git a/sound/soc/imx/mxc-ssi.h b/sound/soc/imx/mxc-ssi.h
new file mode 100644
index 000000000000..12bbdc9c7ecd
--- /dev/null
+++ b/sound/soc/imx/mxc-ssi.h
@@ -0,0 +1,238 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License version 2 as
4 * published by the Free Software Foundation.
5 */
6
7#ifndef _IMX_SSI_H
8#define _IMX_SSI_H
9
10#include <mach/hardware.h>
11
12/* SSI regs definition - MOVE to /arch/arm/plat-mxc/include/mach/ when stable */
13#define SSI1_IO_BASE_ADDR IO_ADDRESS(SSI1_BASE_ADDR)
14#define SSI2_IO_BASE_ADDR IO_ADDRESS(SSI2_BASE_ADDR)
15
16#define STX0 0x00
17#define STX1 0x04
18#define SRX0 0x08
19#define SRX1 0x0c
20#define SCR 0x10
21#define SISR 0x14
22#define SIER 0x18
23#define STCR 0x1c
24#define SRCR 0x20
25#define STCCR 0x24
26#define SRCCR 0x28
27#define SFCSR 0x2c
28#define STR 0x30
29#define SOR 0x34
30#define SACNT 0x38
31#define SACADD 0x3c
32#define SACDAT 0x40
33#define SATAG 0x44
34#define STMSK 0x48
35#define SRMSK 0x4c
36
37#define SSI1_STX0 (*((volatile u32 *)(SSI1_IO_BASE_ADDR + STX0)))
38#define SSI1_STX1 (*((volatile u32 *)(SSI1_IO_BASE_ADDR + STX1)))
39#define SSI1_SRX0 (*((volatile u32 *)(SSI1_IO_BASE_ADDR + SRX0)))
40#define SSI1_SRX1 (*((volatile u32 *)(SSI1_IO_BASE_ADDR + SRX1)))
41#define SSI1_SCR (*((volatile u32 *)(SSI1_IO_BASE_ADDR + SCR)))
42#define SSI1_SISR (*((volatile u32 *)(SSI1_IO_BASE_ADDR + SISR)))
43#define SSI1_SIER (*((volatile u32 *)(SSI1_IO_BASE_ADDR + SIER)))
44#define SSI1_STCR (*((volatile u32 *)(SSI1_IO_BASE_ADDR + STCR)))
45#define SSI1_SRCR (*((volatile u32 *)(SSI1_IO_BASE_ADDR + SRCR)))
46#define SSI1_STCCR (*((volatile u32 *)(SSI1_IO_BASE_ADDR + STCCR)))
47#define SSI1_SRCCR (*((volatile u32 *)(SSI1_IO_BASE_ADDR + SRCCR)))
48#define SSI1_SFCSR (*((volatile u32 *)(SSI1_IO_BASE_ADDR + SFCSR)))
49#define SSI1_STR (*((volatile u32 *)(SSI1_IO_BASE_ADDR + STR)))
50#define SSI1_SOR (*((volatile u32 *)(SSI1_IO_BASE_ADDR + SOR)))
51#define SSI1_SACNT (*((volatile u32 *)(SSI1_IO_BASE_ADDR + SACNT)))
52#define SSI1_SACADD (*((volatile u32 *)(SSI1_IO_BASE_ADDR + SACADD)))
53#define SSI1_SACDAT (*((volatile u32 *)(SSI1_IO_BASE_ADDR + SACDAT)))
54#define SSI1_SATAG (*((volatile u32 *)(SSI1_IO_BASE_ADDR + SATAG)))
55#define SSI1_STMSK (*((volatile u32 *)(SSI1_IO_BASE_ADDR + STMSK)))
56#define SSI1_SRMSK (*((volatile u32 *)(SSI1_IO_BASE_ADDR + SRMSK)))
57
58
59#define SSI2_STX0 (*((volatile u32 *)(SSI2_IO_BASE_ADDR + STX0)))
60#define SSI2_STX1 (*((volatile u32 *)(SSI2_IO_BASE_ADDR + STX1)))
61#define SSI2_SRX0 (*((volatile u32 *)(SSI2_IO_BASE_ADDR + SRX0)))
62#define SSI2_SRX1 (*((volatile u32 *)(SSI2_IO_BASE_ADDR + SRX1)))
63#define SSI2_SCR (*((volatile u32 *)(SSI2_IO_BASE_ADDR + SCR)))
64#define SSI2_SISR (*((volatile u32 *)(SSI2_IO_BASE_ADDR + SISR)))
65#define SSI2_SIER (*((volatile u32 *)(SSI2_IO_BASE_ADDR + SIER)))
66#define SSI2_STCR (*((volatile u32 *)(SSI2_IO_BASE_ADDR + STCR)))
67#define SSI2_SRCR (*((volatile u32 *)(SSI2_IO_BASE_ADDR + SRCR)))
68#define SSI2_STCCR (*((volatile u32 *)(SSI2_IO_BASE_ADDR + STCCR)))
69#define SSI2_SRCCR (*((volatile u32 *)(SSI2_IO_BASE_ADDR + SRCCR)))
70#define SSI2_SFCSR (*((volatile u32 *)(SSI2_IO_BASE_ADDR + SFCSR)))
71#define SSI2_STR (*((volatile u32 *)(SSI2_IO_BASE_ADDR + STR)))
72#define SSI2_SOR (*((volatile u32 *)(SSI2_IO_BASE_ADDR + SOR)))
73#define SSI2_SACNT (*((volatile u32 *)(SSI2_IO_BASE_ADDR + SACNT)))
74#define SSI2_SACADD (*((volatile u32 *)(SSI2_IO_BASE_ADDR + SACADD)))
75#define SSI2_SACDAT (*((volatile u32 *)(SSI2_IO_BASE_ADDR + SACDAT)))
76#define SSI2_SATAG (*((volatile u32 *)(SSI2_IO_BASE_ADDR + SATAG)))
77#define SSI2_STMSK (*((volatile u32 *)(SSI2_IO_BASE_ADDR + STMSK)))
78#define SSI2_SRMSK (*((volatile u32 *)(SSI2_IO_BASE_ADDR + SRMSK)))
79
80#define SSI_SCR_CLK_IST (1 << 9)
81#define SSI_SCR_TCH_EN (1 << 8)
82#define SSI_SCR_SYS_CLK_EN (1 << 7)
83#define SSI_SCR_I2S_MODE_NORM (0 << 5)
84#define SSI_SCR_I2S_MODE_MSTR (1 << 5)
85#define SSI_SCR_I2S_MODE_SLAVE (2 << 5)
86#define SSI_SCR_SYN (1 << 4)
87#define SSI_SCR_NET (1 << 3)
88#define SSI_SCR_RE (1 << 2)
89#define SSI_SCR_TE (1 << 1)
90#define SSI_SCR_SSIEN (1 << 0)
91
92#define SSI_SISR_CMDAU (1 << 18)
93#define SSI_SISR_CMDDU (1 << 17)
94#define SSI_SISR_RXT (1 << 16)
95#define SSI_SISR_RDR1 (1 << 15)
96#define SSI_SISR_RDR0 (1 << 14)
97#define SSI_SISR_TDE1 (1 << 13)
98#define SSI_SISR_TDE0 (1 << 12)
99#define SSI_SISR_ROE1 (1 << 11)
100#define SSI_SISR_ROE0 (1 << 10)
101#define SSI_SISR_TUE1 (1 << 9)
102#define SSI_SISR_TUE0 (1 << 8)
103#define SSI_SISR_TFS (1 << 7)
104#define SSI_SISR_RFS (1 << 6)
105#define SSI_SISR_TLS (1 << 5)
106#define SSI_SISR_RLS (1 << 4)
107#define SSI_SISR_RFF1 (1 << 3)
108#define SSI_SISR_RFF0 (1 << 2)
109#define SSI_SISR_TFE1 (1 << 1)
110#define SSI_SISR_TFE0 (1 << 0)
111
112#define SSI_SIER_RDMAE (1 << 22)
113#define SSI_SIER_RIE (1 << 21)
114#define SSI_SIER_TDMAE (1 << 20)
115#define SSI_SIER_TIE (1 << 19)
116#define SSI_SIER_CMDAU_EN (1 << 18)
117#define SSI_SIER_CMDDU_EN (1 << 17)
118#define SSI_SIER_RXT_EN (1 << 16)
119#define SSI_SIER_RDR1_EN (1 << 15)
120#define SSI_SIER_RDR0_EN (1 << 14)
121#define SSI_SIER_TDE1_EN (1 << 13)
122#define SSI_SIER_TDE0_EN (1 << 12)
123#define SSI_SIER_ROE1_EN (1 << 11)
124#define SSI_SIER_ROE0_EN (1 << 10)
125#define SSI_SIER_TUE1_EN (1 << 9)
126#define SSI_SIER_TUE0_EN (1 << 8)
127#define SSI_SIER_TFS_EN (1 << 7)
128#define SSI_SIER_RFS_EN (1 << 6)
129#define SSI_SIER_TLS_EN (1 << 5)
130#define SSI_SIER_RLS_EN (1 << 4)
131#define SSI_SIER_RFF1_EN (1 << 3)
132#define SSI_SIER_RFF0_EN (1 << 2)
133#define SSI_SIER_TFE1_EN (1 << 1)
134#define SSI_SIER_TFE0_EN (1 << 0)
135
136#define SSI_STCR_TXBIT0 (1 << 9)
137#define SSI_STCR_TFEN1 (1 << 8)
138#define SSI_STCR_TFEN0 (1 << 7)
139#define SSI_STCR_TFDIR (1 << 6)
140#define SSI_STCR_TXDIR (1 << 5)
141#define SSI_STCR_TSHFD (1 << 4)
142#define SSI_STCR_TSCKP (1 << 3)
143#define SSI_STCR_TFSI (1 << 2)
144#define SSI_STCR_TFSL (1 << 1)
145#define SSI_STCR_TEFS (1 << 0)
146
147#define SSI_SRCR_RXBIT0 (1 << 9)
148#define SSI_SRCR_RFEN1 (1 << 8)
149#define SSI_SRCR_RFEN0 (1 << 7)
150#define SSI_SRCR_RFDIR (1 << 6)
151#define SSI_SRCR_RXDIR (1 << 5)
152#define SSI_SRCR_RSHFD (1 << 4)
153#define SSI_SRCR_RSCKP (1 << 3)
154#define SSI_SRCR_RFSI (1 << 2)
155#define SSI_SRCR_RFSL (1 << 1)
156#define SSI_SRCR_REFS (1 << 0)
157
158#define SSI_STCCR_DIV2 (1 << 18)
159#define SSI_STCCR_PSR (1 << 15)
160#define SSI_STCCR_WL(x) ((((x) - 2) >> 1) << 13)
161#define SSI_STCCR_DC(x) (((x) & 0x1f) << 8)
162#define SSI_STCCR_PM(x) (((x) & 0xff) << 0)
163#define SSI_STCCR_WL_MASK (0xf << 13)
164#define SSI_STCCR_DC_MASK (0x1f << 8)
165#define SSI_STCCR_PM_MASK (0xff << 0)
166
167#define SSI_SRCCR_DIV2 (1 << 18)
168#define SSI_SRCCR_PSR (1 << 15)
169#define SSI_SRCCR_WL(x) ((((x) - 2) >> 1) << 13)
170#define SSI_SRCCR_DC(x) (((x) & 0x1f) << 8)
171#define SSI_SRCCR_PM(x) (((x) & 0xff) << 0)
172#define SSI_SRCCR_WL_MASK (0xf << 13)
173#define SSI_SRCCR_DC_MASK (0x1f << 8)
174#define SSI_SRCCR_PM_MASK (0xff << 0)
175
176
177#define SSI_SFCSR_RFCNT1(x) (((x) & 0xf) << 28)
178#define SSI_SFCSR_TFCNT1(x) (((x) & 0xf) << 24)
179#define SSI_SFCSR_RFWM1(x) (((x) & 0xf) << 20)
180#define SSI_SFCSR_TFWM1(x) (((x) & 0xf) << 16)
181#define SSI_SFCSR_RFCNT0(x) (((x) & 0xf) << 12)
182#define SSI_SFCSR_TFCNT0(x) (((x) & 0xf) << 8)
183#define SSI_SFCSR_RFWM0(x) (((x) & 0xf) << 4)
184#define SSI_SFCSR_TFWM0(x) (((x) & 0xf) << 0)
185
186#define SSI_STR_TEST (1 << 15)
187#define SSI_STR_RCK2TCK (1 << 14)
188#define SSI_STR_RFS2TFS (1 << 13)
189#define SSI_STR_RXSTATE(x) (((x) & 0xf) << 8)
190#define SSI_STR_TXD2RXD (1 << 7)
191#define SSI_STR_TCK2RCK (1 << 6)
192#define SSI_STR_TFS2RFS (1 << 5)
193#define SSI_STR_TXSTATE(x) (((x) & 0xf) << 0)
194
195#define SSI_SOR_CLKOFF (1 << 6)
196#define SSI_SOR_RX_CLR (1 << 5)
197#define SSI_SOR_TX_CLR (1 << 4)
198#define SSI_SOR_INIT (1 << 3)
199#define SSI_SOR_WAIT(x) (((x) & 0x3) << 1)
200#define SSI_SOR_SYNRST (1 << 0)
201
202#define SSI_SACNT_FRDIV(x) (((x) & 0x3f) << 5)
 203#define SSI_SACNT_WR (1 << 4)
 204#define SSI_SACNT_RD (1 << 3)
 205#define SSI_SACNT_TIF (1 << 2)
 206#define SSI_SACNT_FV (1 << 1)
 207#define SSI_SACNT_AC97EN (1 << 0)
208
 209/* Watermarks for FIFOs */
210#define TXFIFO_WATERMARK 0x4
211#define RXFIFO_WATERMARK 0x4
212
 213/* i.MX DAI SSI IDs */
214#define IMX_DAI_SSI0 0 /* SSI1 FIFO 0 */
215#define IMX_DAI_SSI1 1 /* SSI1 FIFO 1 */
216#define IMX_DAI_SSI2 2 /* SSI2 FIFO 0 */
217#define IMX_DAI_SSI3 3 /* SSI2 FIFO 1 */
218
219/* SSI clock sources */
220#define IMX_SSP_SYS_CLK 0
221
222/* SSI audio dividers */
223#define IMX_SSI_TX_DIV_2 0
224#define IMX_SSI_TX_DIV_PSR 1
225#define IMX_SSI_TX_DIV_PM 2
226#define IMX_SSI_RX_DIV_2 3
227#define IMX_SSI_RX_DIV_PSR 4
228#define IMX_SSI_RX_DIV_PM 5
229
230
231/* SSI Div 2 */
232#define IMX_SSI_DIV_2_OFF (~SSI_STCCR_DIV2)
233#define IMX_SSI_DIV_2_ON SSI_STCCR_DIV2
234
235extern struct snd_soc_dai imx_ssi_pcm_dai[4];
236extern int get_ssi_clk(int ssi, struct device *dev);
237extern void put_ssi_clk(int ssi);
238#endif
diff --git a/sound/soc/omap/Kconfig b/sound/soc/omap/Kconfig
index b771238662b6..2dee9839be86 100644
--- a/sound/soc/omap/Kconfig
+++ b/sound/soc/omap/Kconfig
@@ -15,6 +15,14 @@ config SND_OMAP_SOC_N810
15 help 15 help
16 Say Y if you want to add support for SoC audio on Nokia N810. 16 Say Y if you want to add support for SoC audio on Nokia N810.
17 17
18config SND_OMAP_SOC_AMS_DELTA
19 tristate "SoC Audio support for Amstrad E3 (Delta) videophone"
20 depends on SND_OMAP_SOC && MACH_AMS_DELTA
21 select SND_OMAP_SOC_MCBSP
22 select SND_SOC_CX20442
23 help
24 Say Y if you want to add support for SoC audio on Amstrad Delta.
25
18config SND_OMAP_SOC_OSK5912 26config SND_OMAP_SOC_OSK5912
19 tristate "SoC Audio support for omap osk5912" 27 tristate "SoC Audio support for omap osk5912"
20 depends on SND_OMAP_SOC && MACH_OMAP_OSK && I2C 28 depends on SND_OMAP_SOC && MACH_OMAP_OSK && I2C
@@ -72,4 +80,11 @@ config SND_OMAP_SOC_OMAP3_BEAGLE
72 help 80 help
73 Say Y if you want to add support for SoC audio on the Beagleboard. 81 Say Y if you want to add support for SoC audio on the Beagleboard.
74 82
83config SND_OMAP_SOC_ZOOM2
84 tristate "SoC Audio support for Zoom2"
85 depends on TWL4030_CORE && SND_OMAP_SOC && MACH_OMAP_ZOOM2
86 select SND_OMAP_SOC_MCBSP
87 select SND_SOC_TWL4030
88 help
 89	  Say Y if you want to add support for SoC audio on the Zoom2 board.
75 90
diff --git a/sound/soc/omap/Makefile b/sound/soc/omap/Makefile
index a37f49862389..02d69471dcb5 100644
--- a/sound/soc/omap/Makefile
+++ b/sound/soc/omap/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_SND_OMAP_SOC_MCBSP) += snd-soc-omap-mcbsp.o
7 7
8# OMAP Machine Support 8# OMAP Machine Support
9snd-soc-n810-objs := n810.o 9snd-soc-n810-objs := n810.o
10snd-soc-ams-delta-objs := ams-delta.o
10snd-soc-osk5912-objs := osk5912.o 11snd-soc-osk5912-objs := osk5912.o
11snd-soc-overo-objs := overo.o 12snd-soc-overo-objs := overo.o
12snd-soc-omap2evm-objs := omap2evm.o 13snd-soc-omap2evm-objs := omap2evm.o
@@ -14,8 +15,10 @@ snd-soc-omap3evm-objs := omap3evm.o
14snd-soc-sdp3430-objs := sdp3430.o 15snd-soc-sdp3430-objs := sdp3430.o
15snd-soc-omap3pandora-objs := omap3pandora.o 16snd-soc-omap3pandora-objs := omap3pandora.o
16snd-soc-omap3beagle-objs := omap3beagle.o 17snd-soc-omap3beagle-objs := omap3beagle.o
18snd-soc-zoom2-objs := zoom2.o
17 19
18obj-$(CONFIG_SND_OMAP_SOC_N810) += snd-soc-n810.o 20obj-$(CONFIG_SND_OMAP_SOC_N810) += snd-soc-n810.o
21obj-$(CONFIG_SND_OMAP_SOC_AMS_DELTA) += snd-soc-ams-delta.o
19obj-$(CONFIG_SND_OMAP_SOC_OSK5912) += snd-soc-osk5912.o 22obj-$(CONFIG_SND_OMAP_SOC_OSK5912) += snd-soc-osk5912.o
20obj-$(CONFIG_SND_OMAP_SOC_OVERO) += snd-soc-overo.o 23obj-$(CONFIG_SND_OMAP_SOC_OVERO) += snd-soc-overo.o
21obj-$(CONFIG_MACH_OMAP2EVM) += snd-soc-omap2evm.o 24obj-$(CONFIG_MACH_OMAP2EVM) += snd-soc-omap2evm.o
@@ -23,3 +26,4 @@ obj-$(CONFIG_MACH_OMAP3EVM) += snd-soc-omap3evm.o
23obj-$(CONFIG_SND_OMAP_SOC_SDP3430) += snd-soc-sdp3430.o 26obj-$(CONFIG_SND_OMAP_SOC_SDP3430) += snd-soc-sdp3430.o
24obj-$(CONFIG_SND_OMAP_SOC_OMAP3_PANDORA) += snd-soc-omap3pandora.o 27obj-$(CONFIG_SND_OMAP_SOC_OMAP3_PANDORA) += snd-soc-omap3pandora.o
25obj-$(CONFIG_SND_OMAP_SOC_OMAP3_BEAGLE) += snd-soc-omap3beagle.o 28obj-$(CONFIG_SND_OMAP_SOC_OMAP3_BEAGLE) += snd-soc-omap3beagle.o
29obj-$(CONFIG_SND_OMAP_SOC_ZOOM2) += snd-soc-zoom2.o
diff --git a/sound/soc/omap/ams-delta.c b/sound/soc/omap/ams-delta.c
new file mode 100644
index 000000000000..5a5166ac7279
--- /dev/null
+++ b/sound/soc/omap/ams-delta.c
@@ -0,0 +1,646 @@
1/*
2 * ams-delta.c -- SoC audio for Amstrad E3 (Delta) videophone
3 *
4 * Copyright (C) 2009 Janusz Krzysztofik <jkrzyszt@tis.icnet.pl>
5 *
6 * Initially based on sound/soc/omap/osk5912.x
7 * Copyright (C) 2008 Mistral Solutions
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
21 * 02110-1301 USA
22 *
23 */
24
25#include <linux/gpio.h>
26#include <linux/spinlock.h>
27#include <linux/tty.h>
28
29#include <sound/soc-dapm.h>
30#include <sound/jack.h>
31
32#include <asm/mach-types.h>
33
34#include <mach/board-ams-delta.h>
35#include <mach/mcbsp.h>
36
37#include "omap-mcbsp.h"
38#include "omap-pcm.h"
39#include "../codecs/cx20442.h"
40
41
42/* Board specific DAPM widgets */
43 const struct snd_soc_dapm_widget ams_delta_dapm_widgets[] = {
44 /* Handset */
45 SND_SOC_DAPM_MIC("Mouthpiece", NULL),
46 SND_SOC_DAPM_HP("Earpiece", NULL),
47 /* Handsfree/Speakerphone */
48 SND_SOC_DAPM_MIC("Microphone", NULL),
49 SND_SOC_DAPM_SPK("Speaker", NULL),
50};
51
52/* How they are connected to codec pins */
53static const struct snd_soc_dapm_route ams_delta_audio_map[] = {
54 {"TELIN", NULL, "Mouthpiece"},
55 {"Earpiece", NULL, "TELOUT"},
56
57 {"MIC", NULL, "Microphone"},
58 {"Speaker", NULL, "SPKOUT"},
59};
60
61/*
62 * Controls, functional after the modem line discipline is activated.
63 */
64
65/* Virtual switch: audio input/output constellations */
66static const char *ams_delta_audio_mode[] =
67 {"Mixed", "Handset", "Handsfree", "Speakerphone"};
68
69/* Selection <-> pin translation */
70#define AMS_DELTA_MOUTHPIECE 0
71#define AMS_DELTA_EARPIECE 1
72#define AMS_DELTA_MICROPHONE 2
73#define AMS_DELTA_SPEAKER 3
74#define AMS_DELTA_AGC 4
75
76#define AMS_DELTA_MIXED ((1 << AMS_DELTA_EARPIECE) | \
77 (1 << AMS_DELTA_MICROPHONE))
78#define AMS_DELTA_HANDSET ((1 << AMS_DELTA_MOUTHPIECE) | \
79 (1 << AMS_DELTA_EARPIECE))
80#define AMS_DELTA_HANDSFREE ((1 << AMS_DELTA_MICROPHONE) | \
81 (1 << AMS_DELTA_SPEAKER))
82#define AMS_DELTA_SPEAKERPHONE (AMS_DELTA_HANDSFREE | (1 << AMS_DELTA_AGC))
83
84unsigned short ams_delta_audio_mode_pins[] = {
85 AMS_DELTA_MIXED,
86 AMS_DELTA_HANDSET,
87 AMS_DELTA_HANDSFREE,
88 AMS_DELTA_SPEAKERPHONE,
89};
90
91static unsigned short ams_delta_audio_agc;
92
93static int ams_delta_set_audio_mode(struct snd_kcontrol *kcontrol,
94 struct snd_ctl_elem_value *ucontrol)
95{
96 struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
97 struct soc_enum *control = (struct soc_enum *)kcontrol->private_value;
98 unsigned short pins;
99 int pin, changed = 0;
100
101 /* Refuse any mode changes if we are not able to control the codec. */
102 if (!codec->control_data)
103 return -EUNATCH;
104
105 if (ucontrol->value.enumerated.item[0] >= control->max)
106 return -EINVAL;
107
108 mutex_lock(&codec->mutex);
109
110 /* Translate selection to bitmap */
111 pins = ams_delta_audio_mode_pins[ucontrol->value.enumerated.item[0]];
112
 113	/* Set up pins according to the corresponding bits if changed */
114 pin = !!(pins & (1 << AMS_DELTA_MOUTHPIECE));
115 if (pin != snd_soc_dapm_get_pin_status(codec, "Mouthpiece")) {
116 changed = 1;
117 if (pin)
118 snd_soc_dapm_enable_pin(codec, "Mouthpiece");
119 else
120 snd_soc_dapm_disable_pin(codec, "Mouthpiece");
121 }
122 pin = !!(pins & (1 << AMS_DELTA_EARPIECE));
123 if (pin != snd_soc_dapm_get_pin_status(codec, "Earpiece")) {
124 changed = 1;
125 if (pin)
126 snd_soc_dapm_enable_pin(codec, "Earpiece");
127 else
128 snd_soc_dapm_disable_pin(codec, "Earpiece");
129 }
130 pin = !!(pins & (1 << AMS_DELTA_MICROPHONE));
131 if (pin != snd_soc_dapm_get_pin_status(codec, "Microphone")) {
132 changed = 1;
133 if (pin)
134 snd_soc_dapm_enable_pin(codec, "Microphone");
135 else
136 snd_soc_dapm_disable_pin(codec, "Microphone");
137 }
138 pin = !!(pins & (1 << AMS_DELTA_SPEAKER));
139 if (pin != snd_soc_dapm_get_pin_status(codec, "Speaker")) {
140 changed = 1;
141 if (pin)
142 snd_soc_dapm_enable_pin(codec, "Speaker");
143 else
144 snd_soc_dapm_disable_pin(codec, "Speaker");
145 }
146 pin = !!(pins & (1 << AMS_DELTA_AGC));
147 if (pin != ams_delta_audio_agc) {
148 ams_delta_audio_agc = pin;
149 changed = 1;
150 if (pin)
151 snd_soc_dapm_enable_pin(codec, "AGCIN");
152 else
153 snd_soc_dapm_disable_pin(codec, "AGCIN");
154 }
155 if (changed)
156 snd_soc_dapm_sync(codec);
157
158 mutex_unlock(&codec->mutex);
159
160 return changed;
161}
162
163static int ams_delta_get_audio_mode(struct snd_kcontrol *kcontrol,
164 struct snd_ctl_elem_value *ucontrol)
165{
166 struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
167 unsigned short pins, mode;
168
169 pins = ((snd_soc_dapm_get_pin_status(codec, "Mouthpiece") <<
170 AMS_DELTA_MOUTHPIECE) |
171 (snd_soc_dapm_get_pin_status(codec, "Earpiece") <<
172 AMS_DELTA_EARPIECE));
173 if (pins)
174 pins |= (snd_soc_dapm_get_pin_status(codec, "Microphone") <<
175 AMS_DELTA_MICROPHONE);
176 else
177 pins = ((snd_soc_dapm_get_pin_status(codec, "Microphone") <<
178 AMS_DELTA_MICROPHONE) |
179 (snd_soc_dapm_get_pin_status(codec, "Speaker") <<
180 AMS_DELTA_SPEAKER) |
181 (ams_delta_audio_agc << AMS_DELTA_AGC));
182
183 for (mode = 0; mode < ARRAY_SIZE(ams_delta_audio_mode); mode++)
184 if (pins == ams_delta_audio_mode_pins[mode])
185 break;
186
187 if (mode >= ARRAY_SIZE(ams_delta_audio_mode))
188 return -EINVAL;
189
190 ucontrol->value.enumerated.item[0] = mode;
191
192 return 0;
193}
194
195static const struct soc_enum ams_delta_audio_enum[] = {
196 SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(ams_delta_audio_mode),
197 ams_delta_audio_mode),
198};
199
200static const struct snd_kcontrol_new ams_delta_audio_controls[] = {
201 SOC_ENUM_EXT("Audio Mode", ams_delta_audio_enum[0],
202 ams_delta_get_audio_mode, ams_delta_set_audio_mode),
203};
204
205/* Hook switch */
206static struct snd_soc_jack ams_delta_hook_switch;
207static struct snd_soc_jack_gpio ams_delta_hook_switch_gpios[] = {
208 {
209 .gpio = 4,
210 .name = "hook_switch",
211 .report = SND_JACK_HEADSET,
212 .invert = 1,
213 .debounce_time = 150,
214 }
215};
216
217/* After we are able to control the codec over the modem,
218 * the hook switch can be used for dynamic DAPM reconfiguration. */
219static struct snd_soc_jack_pin ams_delta_hook_switch_pins[] = {
220 /* Handset */
221 {
222 .pin = "Mouthpiece",
223 .mask = SND_JACK_MICROPHONE,
224 },
225 {
226 .pin = "Earpiece",
227 .mask = SND_JACK_HEADPHONE,
228 },
229 /* Handsfree */
230 {
231 .pin = "Microphone",
232 .mask = SND_JACK_MICROPHONE,
233 .invert = 1,
234 },
235 {
236 .pin = "Speaker",
237 .mask = SND_JACK_HEADPHONE,
238 .invert = 1,
239 },
240};
241
242
243/*
 244 * Modem line discipline, required for making the above controls functional.
 245 * Activated from userspace with ldattach, possibly invoked from a udev rule.
246 */
247
 248/* To actually apply any modem-controlled configuration changes to the codec,
 249 * we must connect the codec DAI pins to the modem for a moment. Be careful not
 250 * to interfere with our digital mute function, which shares the same hardware. */
251static struct timer_list cx81801_timer;
252static bool cx81801_cmd_pending;
253static bool ams_delta_muted;
254static DEFINE_SPINLOCK(ams_delta_lock);
255
256static void cx81801_timeout(unsigned long data)
257{
258 int muted;
259
260 spin_lock(&ams_delta_lock);
261 cx81801_cmd_pending = 0;
262 muted = ams_delta_muted;
263 spin_unlock(&ams_delta_lock);
264
 265	/* Reconnect the codec DAI from the modem back to the CPU DAI
 266	 * only if digital mute is still off */
267 if (!muted)
268 ams_delta_latch2_write(AMS_DELTA_LATCH2_MODEM_CODEC, 0);
269}
270
271/* Line discipline .open() */
272static int cx81801_open(struct tty_struct *tty)
273{
274 return v253_ops.open(tty);
275}
276
277/* Line discipline .close() */
278static void cx81801_close(struct tty_struct *tty)
279{
280 struct snd_soc_codec *codec = tty->disc_data;
281
282 del_timer_sync(&cx81801_timer);
283
284 v253_ops.close(tty);
285
286 /* Prevent the hook switch from further changing the DAPM pins */
287 INIT_LIST_HEAD(&ams_delta_hook_switch.pins);
288
289 /* Revert back to default audio input/output constellation */
290 snd_soc_dapm_disable_pin(codec, "Mouthpiece");
291 snd_soc_dapm_enable_pin(codec, "Earpiece");
292 snd_soc_dapm_enable_pin(codec, "Microphone");
293 snd_soc_dapm_disable_pin(codec, "Speaker");
294 snd_soc_dapm_disable_pin(codec, "AGCIN");
295 snd_soc_dapm_sync(codec);
296}
297
298/* Line discipline .hangup() */
299static int cx81801_hangup(struct tty_struct *tty)
300{
301 cx81801_close(tty);
302 return 0;
303}
304
 305/* Line discipline .receive_buf() */
306static void cx81801_receive(struct tty_struct *tty,
307 const unsigned char *cp, char *fp, int count)
308{
309 struct snd_soc_codec *codec = tty->disc_data;
310 const unsigned char *c;
311 int apply, ret;
312
313 if (!codec->control_data) {
314 /* First modem response, complete setup procedure */
315
316 /* Initialize timer used for config pulse generation */
317 setup_timer(&cx81801_timer, cx81801_timeout, 0);
318
319 v253_ops.receive_buf(tty, cp, fp, count);
320
321 /* Link hook switch to DAPM pins */
322 ret = snd_soc_jack_add_pins(&ams_delta_hook_switch,
323 ARRAY_SIZE(ams_delta_hook_switch_pins),
324 ams_delta_hook_switch_pins);
325 if (ret)
326 dev_warn(codec->socdev->card->dev,
327 "Failed to link hook switch to DAPM pins, "
328 "will continue with hook switch unlinked.\n");
329
330 return;
331 }
332
333 v253_ops.receive_buf(tty, cp, fp, count);
334
335 for (c = &cp[count - 1]; c >= cp; c--) {
336 if (*c != '\r')
337 continue;
338 /* Complete modem response received, apply config to codec */
339
340 spin_lock_bh(&ams_delta_lock);
341 mod_timer(&cx81801_timer, jiffies + msecs_to_jiffies(150));
342 apply = !ams_delta_muted && !cx81801_cmd_pending;
343 cx81801_cmd_pending = 1;
344 spin_unlock_bh(&ams_delta_lock);
345
346 /* Apply config pulse by connecting the codec to the modem
347 * if not already done */
348 if (apply)
349 ams_delta_latch2_write(AMS_DELTA_LATCH2_MODEM_CODEC,
350 AMS_DELTA_LATCH2_MODEM_CODEC);
351 break;
352 }
353}
354
355/* Line discipline .write_wakeup() */
356static void cx81801_wakeup(struct tty_struct *tty)
357{
358 v253_ops.write_wakeup(tty);
359}
360
361static struct tty_ldisc_ops cx81801_ops = {
362 .magic = TTY_LDISC_MAGIC,
363 .name = "cx81801",
364 .owner = THIS_MODULE,
365 .open = cx81801_open,
366 .close = cx81801_close,
367 .hangup = cx81801_hangup,
368 .receive_buf = cx81801_receive,
369 .write_wakeup = cx81801_wakeup,
370};
371
372
373/*
 374 * Even if not very useful, the sound card can still work without any of the
 375 * above functionality activated. You can still control its audio input/output
 376 * constellation and speakerphone gain from userspace by issuing AT commands
377 * over the modem port.
378 */
379
380static int ams_delta_hw_params(struct snd_pcm_substream *substream,
381 struct snd_pcm_hw_params *params)
382{
383 struct snd_soc_pcm_runtime *rtd = substream->private_data;
384
385 /* Set cpu DAI configuration */
386 return snd_soc_dai_set_fmt(rtd->dai->cpu_dai,
387 SND_SOC_DAIFMT_DSP_A |
388 SND_SOC_DAIFMT_NB_NF |
389 SND_SOC_DAIFMT_CBM_CFM);
390}
391
392static struct snd_soc_ops ams_delta_ops = {
393 .hw_params = ams_delta_hw_params,
394};
395
396
397/* Board specific codec bias level control */
398static int ams_delta_set_bias_level(struct snd_soc_card *card,
399 enum snd_soc_bias_level level)
400{
401 struct snd_soc_codec *codec = card->codec;
402
403 switch (level) {
404 case SND_SOC_BIAS_ON:
405 case SND_SOC_BIAS_PREPARE:
406 case SND_SOC_BIAS_STANDBY:
407 if (codec->bias_level == SND_SOC_BIAS_OFF)
408 ams_delta_latch2_write(AMS_DELTA_LATCH2_MODEM_NRESET,
409 AMS_DELTA_LATCH2_MODEM_NRESET);
410 break;
411 case SND_SOC_BIAS_OFF:
412 if (codec->bias_level != SND_SOC_BIAS_OFF)
413 ams_delta_latch2_write(AMS_DELTA_LATCH2_MODEM_NRESET,
414 0);
415 }
416 codec->bias_level = level;
417
418 return 0;
419}
420
421/* Digital mute implemented using modem/CPU multiplexer.
422 * Shares hardware with codec config pulse generation */
423static bool ams_delta_muted = 1;
424
425static int ams_delta_digital_mute(struct snd_soc_dai *dai, int mute)
426{
427 int apply;
428
429 if (ams_delta_muted == mute)
430 return 0;
431
432 spin_lock_bh(&ams_delta_lock);
433 ams_delta_muted = mute;
434 apply = !cx81801_cmd_pending;
435 spin_unlock_bh(&ams_delta_lock);
436
437 if (apply)
438 ams_delta_latch2_write(AMS_DELTA_LATCH2_MODEM_CODEC,
439 mute ? AMS_DELTA_LATCH2_MODEM_CODEC : 0);
440 return 0;
441}
442
443/* Our codec DAI probably doesn't have its own .ops structure */
444static struct snd_soc_dai_ops ams_delta_dai_ops = {
445 .digital_mute = ams_delta_digital_mute,
446};
447
448/* Will be used if the codec ever has its own digital_mute function */
449static int ams_delta_startup(struct snd_pcm_substream *substream)
450{
451 return ams_delta_digital_mute(NULL, 0);
452}
453
454static void ams_delta_shutdown(struct snd_pcm_substream *substream)
455{
456 ams_delta_digital_mute(NULL, 1);
457}
458
459
460/*
461 * Card initialization
462 */
463
464static int ams_delta_cx20442_init(struct snd_soc_codec *codec)
465{
466 struct snd_soc_dai *codec_dai = codec->dai;
467 struct snd_soc_card *card = codec->socdev->card;
468 int ret;
469 /* Codec is ready, now add/activate board specific controls */
470
471 /* Set up digital mute if not provided by the codec */
472 if (!codec_dai->ops) {
473 codec_dai->ops = &ams_delta_dai_ops;
474 } else if (!codec_dai->ops->digital_mute) {
475 codec_dai->ops->digital_mute = ams_delta_digital_mute;
476 } else {
477 ams_delta_ops.startup = ams_delta_startup;
478 ams_delta_ops.shutdown = ams_delta_shutdown;
479 }
480
481 /* Set codec bias level */
482 ams_delta_set_bias_level(card, SND_SOC_BIAS_STANDBY);
483
484 /* Add hook switch - can be used to control the codec from userspace
485 * even if line discipline fails */
486 ret = snd_soc_jack_new(card, "hook_switch",
487 SND_JACK_HEADSET, &ams_delta_hook_switch);
488 if (ret)
489 dev_warn(card->dev,
490 "Failed to allocate resources for hook switch, "
491 "will continue without one.\n");
492 else {
493 ret = snd_soc_jack_add_gpios(&ams_delta_hook_switch,
494 ARRAY_SIZE(ams_delta_hook_switch_gpios),
495 ams_delta_hook_switch_gpios);
496 if (ret)
497 dev_warn(card->dev,
498 "Failed to set up hook switch GPIO line, "
499 "will continue with hook switch inactive.\n");
500 }
501
 502	/* Register optional line discipline for over-the-modem control */
503 ret = tty_register_ldisc(N_V253, &cx81801_ops);
504 if (ret) {
505 dev_warn(card->dev,
506 "Failed to register line discipline, "
507 "will continue without any controls.\n");
508 return 0;
509 }
510
511 /* Add board specific DAPM widgets and routes */
512 ret = snd_soc_dapm_new_controls(codec, ams_delta_dapm_widgets,
513 ARRAY_SIZE(ams_delta_dapm_widgets));
514 if (ret) {
515 dev_warn(card->dev,
516 "Failed to register DAPM controls, "
517 "will continue without any.\n");
518 return 0;
519 }
520
521 ret = snd_soc_dapm_add_routes(codec, ams_delta_audio_map,
522 ARRAY_SIZE(ams_delta_audio_map));
523 if (ret) {
524 dev_warn(card->dev,
525 "Failed to set up DAPM routes, "
526 "will continue with codec default map.\n");
527 return 0;
528 }
529
530 /* Set up initial pin constellation */
531 snd_soc_dapm_disable_pin(codec, "Mouthpiece");
532 snd_soc_dapm_enable_pin(codec, "Earpiece");
533 snd_soc_dapm_enable_pin(codec, "Microphone");
534 snd_soc_dapm_disable_pin(codec, "Speaker");
535 snd_soc_dapm_disable_pin(codec, "AGCIN");
536 snd_soc_dapm_disable_pin(codec, "AGCOUT");
537 snd_soc_dapm_sync(codec);
538
539 /* Add virtual switch */
540 ret = snd_soc_add_controls(codec, ams_delta_audio_controls,
541 ARRAY_SIZE(ams_delta_audio_controls));
542 if (ret)
543 dev_warn(card->dev,
544 "Failed to register audio mode control, "
545 "will continue without it.\n");
546
547 return 0;
548}
549
550/* DAI glue - connects codec <--> CPU */
551static struct snd_soc_dai_link ams_delta_dai_link = {
552 .name = "CX20442",
553 .stream_name = "CX20442",
554 .cpu_dai = &omap_mcbsp_dai[0],
555 .codec_dai = &cx20442_dai,
556 .init = ams_delta_cx20442_init,
557 .ops = &ams_delta_ops,
558};
559
560/* Audio card driver */
561static struct snd_soc_card ams_delta_audio_card = {
562 .name = "AMS_DELTA",
563 .platform = &omap_soc_platform,
564 .dai_link = &ams_delta_dai_link,
565 .num_links = 1,
566 .set_bias_level = ams_delta_set_bias_level,
567};
568
569/* Audio subsystem */
570static struct snd_soc_device ams_delta_snd_soc_device = {
571 .card = &ams_delta_audio_card,
572 .codec_dev = &cx20442_codec_dev,
573};
574
575/* Module init/exit */
576static struct platform_device *ams_delta_audio_platform_device;
577static struct platform_device *cx20442_platform_device;
578
579static int __init ams_delta_module_init(void)
580{
581 int ret;
582
583 if (!(machine_is_ams_delta()))
584 return -ENODEV;
585
586 ams_delta_audio_platform_device =
587 platform_device_alloc("soc-audio", -1);
588 if (!ams_delta_audio_platform_device)
589 return -ENOMEM;
590
591 platform_set_drvdata(ams_delta_audio_platform_device,
592 &ams_delta_snd_soc_device);
593 ams_delta_snd_soc_device.dev = &ams_delta_audio_platform_device->dev;
594 *(unsigned int *)ams_delta_dai_link.cpu_dai->private_data = OMAP_MCBSP1;
595
596 ret = platform_device_add(ams_delta_audio_platform_device);
597 if (ret)
598 goto err;
599
600 /*
601 * Codec platform device could be registered from elsewhere (board?),
602 * but I do it here as it makes sense only if used with the card.
603 */
604 cx20442_platform_device = platform_device_register_simple("cx20442",
605 -1, NULL, 0);
606 return 0;
607err:
608 platform_device_put(ams_delta_audio_platform_device);
609 return ret;
610}
611module_init(ams_delta_module_init);
612
613static void __exit ams_delta_module_exit(void)
614{
615 struct snd_soc_codec *codec;
616 struct tty_struct *tty;
617
618 if (ams_delta_audio_card.codec) {
619 codec = ams_delta_audio_card.codec;
620
621 if (codec->control_data) {
622 tty = codec->control_data;
623
624 tty_hangup(tty);
625 }
626 }
627
628 if (tty_unregister_ldisc(N_V253) != 0)
629 dev_warn(&ams_delta_audio_platform_device->dev,
630 "failed to unregister V253 line discipline\n");
631
632 snd_soc_jack_free_gpios(&ams_delta_hook_switch,
633 ARRAY_SIZE(ams_delta_hook_switch_gpios),
634 ams_delta_hook_switch_gpios);
635
636 /* Keep modem power on */
637 ams_delta_set_bias_level(&ams_delta_audio_card, SND_SOC_BIAS_STANDBY);
638
639 platform_device_unregister(cx20442_platform_device);
640 platform_device_unregister(ams_delta_audio_platform_device);
641}
642module_exit(ams_delta_module_exit);
643
644MODULE_AUTHOR("Janusz Krzysztofik <jkrzyszt@tis.icnet.pl>");
645MODULE_DESCRIPTION("ALSA SoC driver for Amstrad E3 (Delta) videophone");
646MODULE_LICENSE("GPL");
diff --git a/sound/soc/omap/n810.c b/sound/soc/omap/n810.c
index b60b1dfbc435..0a505938e42b 100644
--- a/sound/soc/omap/n810.c
+++ b/sound/soc/omap/n810.c
@@ -22,6 +22,7 @@
22 */ 22 */
23 23
24#include <linux/clk.h> 24#include <linux/clk.h>
25#include <linux/i2c.h>
25#include <linux/platform_device.h> 26#include <linux/platform_device.h>
26#include <sound/core.h> 27#include <sound/core.h>
27#include <sound/pcm.h> 28#include <sound/pcm.h>
@@ -322,8 +323,6 @@ static struct snd_soc_card snd_soc_n810 = {
322 323
323/* Audio private data */ 324/* Audio private data */
324static struct aic3x_setup_data n810_aic33_setup = { 325static struct aic3x_setup_data n810_aic33_setup = {
325 .i2c_bus = 2,
326 .i2c_address = 0x18,
327 .gpio_func[0] = AIC3X_GPIO1_FUNC_DISABLED, 326 .gpio_func[0] = AIC3X_GPIO1_FUNC_DISABLED,
328 .gpio_func[1] = AIC3X_GPIO2_FUNC_DIGITAL_MIC_INPUT, 327 .gpio_func[1] = AIC3X_GPIO2_FUNC_DIGITAL_MIC_INPUT,
329}; 328};
@@ -337,6 +336,13 @@ static struct snd_soc_device n810_snd_devdata = {
337 336
338static struct platform_device *n810_snd_device; 337static struct platform_device *n810_snd_device;
339 338
339/* Temporary I2C device creation until this can be moved into the machine
 340 * support file.
 341 */
342static struct i2c_board_info i2c_device[] = {
343 { I2C_BOARD_INFO("tlv320aic3x", 0x1b), }
344};
345
340static int __init n810_soc_init(void) 346static int __init n810_soc_init(void)
341{ 347{
342 int err; 348 int err;
@@ -345,6 +351,8 @@ static int __init n810_soc_init(void)
345 if (!(machine_is_nokia_n810() || machine_is_nokia_n810_wimax())) 351 if (!(machine_is_nokia_n810() || machine_is_nokia_n810_wimax()))
346 return -ENODEV; 352 return -ENODEV;
347 353
354 i2c_register_board_info(1, i2c_device, ARRAY_SIZE(i2c_device));
355
348 n810_snd_device = platform_device_alloc("soc-audio", -1); 356 n810_snd_device = platform_device_alloc("soc-audio", -1);
349 if (!n810_snd_device) 357 if (!n810_snd_device)
350 return -ENOMEM; 358 return -ENOMEM;
diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c
index a5d46a7b196a..3341f49402ca 100644
--- a/sound/soc/omap/omap-mcbsp.c
+++ b/sound/soc/omap/omap-mcbsp.c
@@ -139,27 +139,67 @@ static const unsigned long omap34xx_mcbsp_port[][2] = {
139static const unsigned long omap34xx_mcbsp_port[][2] = {}; 139static const unsigned long omap34xx_mcbsp_port[][2] = {};
140#endif 140#endif
141 141
142static void omap_mcbsp_set_threshold(struct snd_pcm_substream *substream)
143{
144 struct snd_soc_pcm_runtime *rtd = substream->private_data;
145 struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
146 struct omap_mcbsp_data *mcbsp_data = to_mcbsp(cpu_dai->private_data);
147 int dma_op_mode = omap_mcbsp_get_dma_op_mode(mcbsp_data->bus_id);
148 int samples;
149
150 /* TODO: Currently, MODE_ELEMENT == MODE_FRAME */
151 if (dma_op_mode == MCBSP_DMA_MODE_THRESHOLD)
152 samples = snd_pcm_lib_period_bytes(substream) >> 1;
153 else
154 samples = 1;
155
156 /* Configure McBSP internal buffer usage */
157 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
158 omap_mcbsp_set_tx_threshold(mcbsp_data->bus_id, samples - 1);
159 else
160 omap_mcbsp_set_rx_threshold(mcbsp_data->bus_id, samples - 1);
161}
162
142static int omap_mcbsp_dai_startup(struct snd_pcm_substream *substream, 163static int omap_mcbsp_dai_startup(struct snd_pcm_substream *substream,
143 struct snd_soc_dai *dai) 164 struct snd_soc_dai *dai)
144{ 165{
145 struct snd_soc_pcm_runtime *rtd = substream->private_data; 166 struct snd_soc_pcm_runtime *rtd = substream->private_data;
146 struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai; 167 struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
147 struct omap_mcbsp_data *mcbsp_data = to_mcbsp(cpu_dai->private_data); 168 struct omap_mcbsp_data *mcbsp_data = to_mcbsp(cpu_dai->private_data);
169 int bus_id = mcbsp_data->bus_id;
148 int err = 0; 170 int err = 0;
149 171
150 if (cpu_is_omap343x() && mcbsp_data->bus_id == 1) { 172 if (!cpu_dai->active)
173 err = omap_mcbsp_request(bus_id);
174
175 if (cpu_is_omap343x()) {
176 int dma_op_mode = omap_mcbsp_get_dma_op_mode(bus_id);
177 int max_period;
178
151 /* 179 /*
152 * McBSP2 in OMAP3 has 1024 * 32-bit internal audio buffer. 180 * McBSP2 in OMAP3 has 1024 * 32-bit internal audio buffer.
153 * Set constraint for minimum buffer size to the same than FIFO 181 * Set constraint for minimum buffer size to the same than FIFO
154 * size in order to avoid underruns in playback startup because 182 * size in order to avoid underruns in playback startup because
155 * HW is keeping the DMA request active until FIFO is filled. 183 * HW is keeping the DMA request active until FIFO is filled.
156 */ 184 */
157 snd_pcm_hw_constraint_minmax(substream->runtime, 185 if (bus_id == 1)
158 SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 4096, UINT_MAX); 186 snd_pcm_hw_constraint_minmax(substream->runtime,
159 } 187 SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
188 4096, UINT_MAX);
160 189
161 if (!cpu_dai->active) 190 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
162 err = omap_mcbsp_request(mcbsp_data->bus_id); 191 max_period = omap_mcbsp_get_max_tx_threshold(bus_id);
192 else
193 max_period = omap_mcbsp_get_max_rx_threshold(bus_id);
194
195 max_period++;
196 max_period <<= 1;
197
198 if (dma_op_mode == MCBSP_DMA_MODE_THRESHOLD)
199 snd_pcm_hw_constraint_minmax(substream->runtime,
200 SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
201 32, max_period);
202 }
163 203
164 return err; 204 return err;
165} 205}
@@ -183,21 +223,21 @@ static int omap_mcbsp_dai_trigger(struct snd_pcm_substream *substream, int cmd,
183 struct snd_soc_pcm_runtime *rtd = substream->private_data; 223 struct snd_soc_pcm_runtime *rtd = substream->private_data;
184 struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai; 224 struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
185 struct omap_mcbsp_data *mcbsp_data = to_mcbsp(cpu_dai->private_data); 225 struct omap_mcbsp_data *mcbsp_data = to_mcbsp(cpu_dai->private_data);
186 int err = 0; 226 int err = 0, play = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
187 227
188 switch (cmd) { 228 switch (cmd) {
189 case SNDRV_PCM_TRIGGER_START: 229 case SNDRV_PCM_TRIGGER_START:
190 case SNDRV_PCM_TRIGGER_RESUME: 230 case SNDRV_PCM_TRIGGER_RESUME:
191 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: 231 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
192 if (!mcbsp_data->active++) 232 mcbsp_data->active++;
193 omap_mcbsp_start(mcbsp_data->bus_id); 233 omap_mcbsp_start(mcbsp_data->bus_id, play, !play);
194 break; 234 break;
195 235
196 case SNDRV_PCM_TRIGGER_STOP: 236 case SNDRV_PCM_TRIGGER_STOP:
197 case SNDRV_PCM_TRIGGER_SUSPEND: 237 case SNDRV_PCM_TRIGGER_SUSPEND:
198 case SNDRV_PCM_TRIGGER_PAUSE_PUSH: 238 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
199 if (!--mcbsp_data->active) 239 omap_mcbsp_stop(mcbsp_data->bus_id, play, !play);
200 omap_mcbsp_stop(mcbsp_data->bus_id); 240 mcbsp_data->active--;
201 break; 241 break;
202 default: 242 default:
203 err = -EINVAL; 243 err = -EINVAL;
@@ -215,7 +255,7 @@ static int omap_mcbsp_dai_hw_params(struct snd_pcm_substream *substream,
215 struct omap_mcbsp_data *mcbsp_data = to_mcbsp(cpu_dai->private_data); 255 struct omap_mcbsp_data *mcbsp_data = to_mcbsp(cpu_dai->private_data);
216 struct omap_mcbsp_reg_cfg *regs = &mcbsp_data->regs; 256 struct omap_mcbsp_reg_cfg *regs = &mcbsp_data->regs;
217 int dma, bus_id = mcbsp_data->bus_id, id = cpu_dai->id; 257 int dma, bus_id = mcbsp_data->bus_id, id = cpu_dai->id;
218 int wlen, channels, wpf; 258 int wlen, channels, wpf, sync_mode = OMAP_DMA_SYNC_ELEMENT;
219 unsigned long port; 259 unsigned long port;
220 unsigned int format; 260 unsigned int format;
221 261
@@ -231,6 +271,12 @@ static int omap_mcbsp_dai_hw_params(struct snd_pcm_substream *substream,
231 } else if (cpu_is_omap343x()) { 271 } else if (cpu_is_omap343x()) {
232 dma = omap24xx_dma_reqs[bus_id][substream->stream]; 272 dma = omap24xx_dma_reqs[bus_id][substream->stream];
233 port = omap34xx_mcbsp_port[bus_id][substream->stream]; 273 port = omap34xx_mcbsp_port[bus_id][substream->stream];
274 omap_mcbsp_dai_dma_params[id][substream->stream].set_threshold =
275 omap_mcbsp_set_threshold;
276 /* TODO: Currently, MODE_ELEMENT == MODE_FRAME */
277 if (omap_mcbsp_get_dma_op_mode(bus_id) ==
278 MCBSP_DMA_MODE_THRESHOLD)
279 sync_mode = OMAP_DMA_SYNC_FRAME;
234 } else { 280 } else {
235 return -ENODEV; 281 return -ENODEV;
236 } 282 }
@@ -238,6 +284,7 @@ static int omap_mcbsp_dai_hw_params(struct snd_pcm_substream *substream,
238 substream->stream ? "Audio Capture" : "Audio Playback"; 284 substream->stream ? "Audio Capture" : "Audio Playback";
239 omap_mcbsp_dai_dma_params[id][substream->stream].dma_req = dma; 285 omap_mcbsp_dai_dma_params[id][substream->stream].dma_req = dma;
240 omap_mcbsp_dai_dma_params[id][substream->stream].port_addr = port; 286 omap_mcbsp_dai_dma_params[id][substream->stream].port_addr = port;
287 omap_mcbsp_dai_dma_params[id][substream->stream].sync_mode = sync_mode;
241 cpu_dai->dma_data = &omap_mcbsp_dai_dma_params[id][substream->stream]; 288 cpu_dai->dma_data = &omap_mcbsp_dai_dma_params[id][substream->stream];
242 289
243 if (mcbsp_data->configured) { 290 if (mcbsp_data->configured) {
@@ -321,11 +368,14 @@ static int omap_mcbsp_dai_set_dai_fmt(struct snd_soc_dai *cpu_dai,
321 /* Generic McBSP register settings */ 368 /* Generic McBSP register settings */
322 regs->spcr2 |= XINTM(3) | FREE; 369 regs->spcr2 |= XINTM(3) | FREE;
323 regs->spcr1 |= RINTM(3); 370 regs->spcr1 |= RINTM(3);
324 regs->rcr2 |= RFIG; 371 /* RFIG and XFIG are not defined in 34xx */
325 regs->xcr2 |= XFIG; 372 if (!cpu_is_omap34xx()) {
373 regs->rcr2 |= RFIG;
374 regs->xcr2 |= XFIG;
375 }
326 if (cpu_is_omap2430() || cpu_is_omap34xx()) { 376 if (cpu_is_omap2430() || cpu_is_omap34xx()) {
327 regs->xccr = DXENDLY(1) | XDMAEN; 377 regs->xccr = DXENDLY(1) | XDMAEN | XDISABLE;
328 regs->rccr = RFULL_CYCLE | RDMAEN; 378 regs->rccr = RFULL_CYCLE | RDMAEN | RDISABLE;
329 } 379 }
330 380
331 switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { 381 switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
@@ -462,6 +512,40 @@ static int omap_mcbsp_dai_set_clks_src(struct omap_mcbsp_data *mcbsp_data,
462 return 0; 512 return 0;
463} 513}
464 514
515static int omap_mcbsp_dai_set_rcvr_src(struct omap_mcbsp_data *mcbsp_data,
516 int clk_id)
517{
518 int sel_bit, set = 0;
519 u16 reg = OMAP2_CONTROL_DEVCONF0;
520
521 if (cpu_class_is_omap1())
522 return -EINVAL; /* TODO: Can this be implemented for OMAP1? */
523 if (mcbsp_data->bus_id != 0)
524 return -EINVAL;
525
526 switch (clk_id) {
527 case OMAP_MCBSP_CLKR_SRC_CLKX:
 528		set = 1; /* fall through */
529 case OMAP_MCBSP_CLKR_SRC_CLKR:
530 sel_bit = 3;
531 break;
532 case OMAP_MCBSP_FSR_SRC_FSX:
 533		set = 1; /* fall through */
534 case OMAP_MCBSP_FSR_SRC_FSR:
535 sel_bit = 4;
536 break;
537 default:
538 return -EINVAL;
539 }
540
541 if (set)
542 omap_ctrl_writel(omap_ctrl_readl(reg) | (1 << sel_bit), reg);
543 else
544 omap_ctrl_writel(omap_ctrl_readl(reg) & ~(1 << sel_bit), reg);
545
546 return 0;
547}
548
465static int omap_mcbsp_dai_set_dai_sysclk(struct snd_soc_dai *cpu_dai, 549static int omap_mcbsp_dai_set_dai_sysclk(struct snd_soc_dai *cpu_dai,
466 int clk_id, unsigned int freq, 550 int clk_id, unsigned int freq,
467 int dir) 551 int dir)
@@ -484,6 +568,13 @@ static int omap_mcbsp_dai_set_dai_sysclk(struct snd_soc_dai *cpu_dai,
484 case OMAP_MCBSP_SYSCLK_CLKR_EXT: 568 case OMAP_MCBSP_SYSCLK_CLKR_EXT:
485 regs->pcr0 |= SCLKME; 569 regs->pcr0 |= SCLKME;
486 break; 570 break;
571
572 case OMAP_MCBSP_CLKR_SRC_CLKR:
573 case OMAP_MCBSP_CLKR_SRC_CLKX:
574 case OMAP_MCBSP_FSR_SRC_FSR:
575 case OMAP_MCBSP_FSR_SRC_FSX:
576 err = omap_mcbsp_dai_set_rcvr_src(mcbsp_data, clk_id);
577 break;
487 default: 578 default:
488 err = -ENODEV; 579 err = -ENODEV;
489 } 580 }
diff --git a/sound/soc/omap/omap-mcbsp.h b/sound/soc/omap/omap-mcbsp.h
index c8147aace813..647d2f981ab0 100644
--- a/sound/soc/omap/omap-mcbsp.h
+++ b/sound/soc/omap/omap-mcbsp.h
@@ -32,6 +32,10 @@ enum omap_mcbsp_clksrg_clk {
32 OMAP_MCBSP_SYSCLK_CLK, /* Internal ICLK */ 32 OMAP_MCBSP_SYSCLK_CLK, /* Internal ICLK */
33 OMAP_MCBSP_SYSCLK_CLKX_EXT, /* External CLKX pin */ 33 OMAP_MCBSP_SYSCLK_CLKX_EXT, /* External CLKX pin */
34 OMAP_MCBSP_SYSCLK_CLKR_EXT, /* External CLKR pin */ 34 OMAP_MCBSP_SYSCLK_CLKR_EXT, /* External CLKR pin */
35 OMAP_MCBSP_CLKR_SRC_CLKR, /* CLKR from CLKR pin */
36 OMAP_MCBSP_CLKR_SRC_CLKX, /* CLKR from CLKX pin */
37 OMAP_MCBSP_FSR_SRC_FSR, /* FSR from FSR pin */
38 OMAP_MCBSP_FSR_SRC_FSX, /* FSR from FSX pin */
35}; 39};
36 40
37/* McBSP dividers */ 41/* McBSP dividers */
diff --git a/sound/soc/omap/omap-pcm.c b/sound/soc/omap/omap-pcm.c
index 84a1950880eb..5735945788bf 100644
--- a/sound/soc/omap/omap-pcm.c
+++ b/sound/soc/omap/omap-pcm.c
@@ -59,16 +59,31 @@ static void omap_pcm_dma_irq(int ch, u16 stat, void *data)
59 struct omap_runtime_data *prtd = runtime->private_data; 59 struct omap_runtime_data *prtd = runtime->private_data;
60 unsigned long flags; 60 unsigned long flags;
61 61
62 if (cpu_is_omap1510()) { 62 if ((cpu_is_omap1510()) &&
63 (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)) {
63 /* 64 /*
64 * OMAP1510 doesn't support DMA chaining so have to restart 65 * OMAP1510 doesn't fully support DMA progress counter
65 * the transfer after all periods are transferred 66 * and there is no software emulation implemented yet,
67 * so have to maintain our own playback progress counter
68 * that can be used by omap_pcm_pointer() instead.
66 */ 69 */
67 spin_lock_irqsave(&prtd->lock, flags); 70 spin_lock_irqsave(&prtd->lock, flags);
71 if ((stat == OMAP_DMA_LAST_IRQ) &&
72 (prtd->period_index == runtime->periods - 1)) {
73 /* we are in sync, do nothing */
74 spin_unlock_irqrestore(&prtd->lock, flags);
75 return;
76 }
68 if (prtd->period_index >= 0) { 77 if (prtd->period_index >= 0) {
69 if (++prtd->period_index == runtime->periods) { 78 if (stat & OMAP_DMA_BLOCK_IRQ) {
79 /* end of buffer reached, loop back */
80 prtd->period_index = 0;
81 } else if (stat & OMAP_DMA_LAST_IRQ) {
82 /* update the counter for the last period */
83 prtd->period_index = runtime->periods - 1;
84 } else if (++prtd->period_index >= runtime->periods) {
85 /* end of buffer missed? loop back */
70 prtd->period_index = 0; 86 prtd->period_index = 0;
71 omap_start_dma(prtd->dma_ch);
72 } 87 }
73 } 88 }
74 spin_unlock_irqrestore(&prtd->lock, flags); 89 spin_unlock_irqrestore(&prtd->lock, flags);
@@ -100,7 +115,7 @@ static int omap_pcm_hw_params(struct snd_pcm_substream *substream,
100 prtd->dma_data = dma_data; 115 prtd->dma_data = dma_data;
101 err = omap_request_dma(dma_data->dma_req, dma_data->name, 116 err = omap_request_dma(dma_data->dma_req, dma_data->name,
102 omap_pcm_dma_irq, substream, &prtd->dma_ch); 117 omap_pcm_dma_irq, substream, &prtd->dma_ch);
103 if (!err && !cpu_is_omap1510()) { 118 if (!err) {
104 /* 119 /*
105 * Link channel with itself so DMA doesn't need any 120 * Link channel with itself so DMA doesn't need any
106 * reprogramming while looping the buffer 121 * reprogramming while looping the buffer
@@ -119,8 +134,7 @@ static int omap_pcm_hw_free(struct snd_pcm_substream *substream)
119 if (prtd->dma_data == NULL) 134 if (prtd->dma_data == NULL)
120 return 0; 135 return 0;
121 136
122 if (!cpu_is_omap1510()) 137 omap_dma_unlink_lch(prtd->dma_ch, prtd->dma_ch);
123 omap_dma_unlink_lch(prtd->dma_ch, prtd->dma_ch);
124 omap_free_dma(prtd->dma_ch); 138 omap_free_dma(prtd->dma_ch);
125 prtd->dma_data = NULL; 139 prtd->dma_data = NULL;
126 140
@@ -148,7 +162,7 @@ static int omap_pcm_prepare(struct snd_pcm_substream *substream)
148 */ 162 */
149 dma_params.data_type = OMAP_DMA_DATA_TYPE_S16; 163 dma_params.data_type = OMAP_DMA_DATA_TYPE_S16;
150 dma_params.trigger = dma_data->dma_req; 164 dma_params.trigger = dma_data->dma_req;
151 dma_params.sync_mode = OMAP_DMA_SYNC_ELEMENT; 165 dma_params.sync_mode = dma_data->sync_mode;
152 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { 166 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
153 dma_params.src_amode = OMAP_DMA_AMODE_POST_INC; 167 dma_params.src_amode = OMAP_DMA_AMODE_POST_INC;
154 dma_params.dst_amode = OMAP_DMA_AMODE_CONSTANT; 168 dma_params.dst_amode = OMAP_DMA_AMODE_CONSTANT;
@@ -174,7 +188,15 @@ static int omap_pcm_prepare(struct snd_pcm_substream *substream)
174 dma_params.frame_count = runtime->periods; 188 dma_params.frame_count = runtime->periods;
175 omap_set_dma_params(prtd->dma_ch, &dma_params); 189 omap_set_dma_params(prtd->dma_ch, &dma_params);
176 190
177 omap_enable_dma_irq(prtd->dma_ch, OMAP_DMA_FRAME_IRQ); 191 if ((cpu_is_omap1510()) &&
192 (substream->stream == SNDRV_PCM_STREAM_PLAYBACK))
193 omap_enable_dma_irq(prtd->dma_ch, OMAP_DMA_FRAME_IRQ |
194 OMAP_DMA_LAST_IRQ | OMAP_DMA_BLOCK_IRQ);
195 else
196 omap_enable_dma_irq(prtd->dma_ch, OMAP_DMA_FRAME_IRQ);
197
198 omap_set_dma_src_burst_mode(prtd->dma_ch, OMAP_DMA_DATA_BURST_16);
199 omap_set_dma_dest_burst_mode(prtd->dma_ch, OMAP_DMA_DATA_BURST_16);
178 200
179 return 0; 201 return 0;
180} 202}
@@ -183,6 +205,7 @@ static int omap_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
183{ 205{
184 struct snd_pcm_runtime *runtime = substream->runtime; 206 struct snd_pcm_runtime *runtime = substream->runtime;
185 struct omap_runtime_data *prtd = runtime->private_data; 207 struct omap_runtime_data *prtd = runtime->private_data;
208 struct omap_pcm_dma_data *dma_data = prtd->dma_data;
186 unsigned long flags; 209 unsigned long flags;
187 int ret = 0; 210 int ret = 0;
188 211
@@ -192,6 +215,10 @@ static int omap_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
192 case SNDRV_PCM_TRIGGER_RESUME: 215 case SNDRV_PCM_TRIGGER_RESUME:
193 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: 216 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
194 prtd->period_index = 0; 217 prtd->period_index = 0;
218 /* Configure McBSP internal buffer usage */
219 if (dma_data->set_threshold)
220 dma_data->set_threshold(substream);
221
195 omap_start_dma(prtd->dma_ch); 222 omap_start_dma(prtd->dma_ch);
196 break; 223 break;
197 224
@@ -288,7 +315,7 @@ static struct snd_pcm_ops omap_pcm_ops = {
288 .mmap = omap_pcm_mmap, 315 .mmap = omap_pcm_mmap,
289}; 316};
290 317
291static u64 omap_pcm_dmamask = DMA_BIT_MASK(32); 318static u64 omap_pcm_dmamask = DMA_BIT_MASK(64);
292 319
293static int omap_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, 320static int omap_pcm_preallocate_dma_buffer(struct snd_pcm *pcm,
294 int stream) 321 int stream)
@@ -330,7 +357,7 @@ static void omap_pcm_free_dma_buffers(struct snd_pcm *pcm)
330 } 357 }
331} 358}
332 359
333int omap_pcm_new(struct snd_card *card, struct snd_soc_dai *dai, 360static int omap_pcm_new(struct snd_card *card, struct snd_soc_dai *dai,
334 struct snd_pcm *pcm) 361 struct snd_pcm *pcm)
335{ 362{
336 int ret = 0; 363 int ret = 0;
@@ -338,7 +365,7 @@ int omap_pcm_new(struct snd_card *card, struct snd_soc_dai *dai,
338 if (!card->dev->dma_mask) 365 if (!card->dev->dma_mask)
339 card->dev->dma_mask = &omap_pcm_dmamask; 366 card->dev->dma_mask = &omap_pcm_dmamask;
340 if (!card->dev->coherent_dma_mask) 367 if (!card->dev->coherent_dma_mask)
341 card->dev->coherent_dma_mask = DMA_BIT_MASK(32); 368 card->dev->coherent_dma_mask = DMA_BIT_MASK(64);
342 369
343 if (dai->playback.channels_min) { 370 if (dai->playback.channels_min) {
344 ret = omap_pcm_preallocate_dma_buffer(pcm, 371 ret = omap_pcm_preallocate_dma_buffer(pcm,
diff --git a/sound/soc/omap/omap-pcm.h b/sound/soc/omap/omap-pcm.h
index 8d9d26916b05..38a821dd4118 100644
--- a/sound/soc/omap/omap-pcm.h
+++ b/sound/soc/omap/omap-pcm.h
@@ -29,6 +29,8 @@ struct omap_pcm_dma_data {
29 char *name; /* stream identifier */ 29 char *name; /* stream identifier */
30 int dma_req; /* DMA request line */ 30 int dma_req; /* DMA request line */
31 unsigned long port_addr; /* transmit/receive register */ 31 unsigned long port_addr; /* transmit/receive register */
32 int sync_mode; /* DMA sync mode */
33 void (*set_threshold)(struct snd_pcm_substream *substream);
32}; 34};
33 35
34extern struct snd_soc_platform omap_soc_platform; 36extern struct snd_soc_platform omap_soc_platform;
diff --git a/sound/soc/omap/sdp3430.c b/sound/soc/omap/sdp3430.c
index b719e5db4f57..4a3f62d1f295 100644
--- a/sound/soc/omap/sdp3430.c
+++ b/sound/soc/omap/sdp3430.c
@@ -24,6 +24,7 @@
24 24
25#include <linux/clk.h> 25#include <linux/clk.h>
26#include <linux/platform_device.h> 26#include <linux/platform_device.h>
27#include <linux/i2c/twl4030.h>
27#include <sound/core.h> 28#include <sound/core.h>
28#include <sound/pcm.h> 29#include <sound/pcm.h>
29#include <sound/soc.h> 30#include <sound/soc.h>
@@ -39,6 +40,11 @@
39#include "omap-pcm.h" 40#include "omap-pcm.h"
40#include "../codecs/twl4030.h" 41#include "../codecs/twl4030.h"
41 42
43/* TWL4030 PMBR1 Register */
44#define TWL4030_INTBR_PMBR1 0x0D
45/* TWL4030 PMBR1 Register GPIO6 mux bit */
46#define TWL4030_GPIO6_PWM0_MUTE(value) (value << 2)
47
42static struct snd_soc_card snd_soc_sdp3430; 48static struct snd_soc_card snd_soc_sdp3430;
43 49
44static int sdp3430_hw_params(struct snd_pcm_substream *substream, 50static int sdp3430_hw_params(struct snd_pcm_substream *substream,
@@ -96,7 +102,7 @@ static int sdp3430_hw_voice_params(struct snd_pcm_substream *substream,
96 ret = snd_soc_dai_set_fmt(codec_dai, 102 ret = snd_soc_dai_set_fmt(codec_dai,
97 SND_SOC_DAIFMT_DSP_A | 103 SND_SOC_DAIFMT_DSP_A |
98 SND_SOC_DAIFMT_IB_NF | 104 SND_SOC_DAIFMT_IB_NF |
99 SND_SOC_DAIFMT_CBS_CFM); 105 SND_SOC_DAIFMT_CBM_CFM);
100 if (ret) { 106 if (ret) {
101 printk(KERN_ERR "can't set codec DAI configuration\n"); 107 printk(KERN_ERR "can't set codec DAI configuration\n");
102 return ret; 108 return ret;
@@ -280,6 +286,7 @@ static struct snd_soc_card snd_soc_sdp3430 = {
280static struct twl4030_setup_data twl4030_setup = { 286static struct twl4030_setup_data twl4030_setup = {
281 .ramp_delay_value = 3, 287 .ramp_delay_value = 3,
282 .sysclk = 26000, 288 .sysclk = 26000,
289 .hs_extmute = 1,
283}; 290};
284 291
285/* Audio subsystem */ 292/* Audio subsystem */
@@ -294,6 +301,7 @@ static struct platform_device *sdp3430_snd_device;
294static int __init sdp3430_soc_init(void) 301static int __init sdp3430_soc_init(void)
295{ 302{
296 int ret; 303 int ret;
304 u8 pin_mux;
297 305
298 if (!machine_is_omap_3430sdp()) { 306 if (!machine_is_omap_3430sdp()) {
299 pr_debug("Not SDP3430!\n"); 307 pr_debug("Not SDP3430!\n");
@@ -312,6 +320,14 @@ static int __init sdp3430_soc_init(void)
312 *(unsigned int *)sdp3430_dai[0].cpu_dai->private_data = 1; /* McBSP2 */ 320 *(unsigned int *)sdp3430_dai[0].cpu_dai->private_data = 1; /* McBSP2 */
313 *(unsigned int *)sdp3430_dai[1].cpu_dai->private_data = 2; /* McBSP3 */ 321 *(unsigned int *)sdp3430_dai[1].cpu_dai->private_data = 2; /* McBSP3 */
314 322
323 /* Set TWL4030 GPIO6 as EXTMUTE signal */
324 twl4030_i2c_read_u8(TWL4030_MODULE_INTBR, &pin_mux,
325 TWL4030_INTBR_PMBR1);
326 pin_mux &= ~TWL4030_GPIO6_PWM0_MUTE(0x03);
327 pin_mux |= TWL4030_GPIO6_PWM0_MUTE(0x02);
328 twl4030_i2c_write_u8(TWL4030_MODULE_INTBR, pin_mux,
329 TWL4030_INTBR_PMBR1);
330
315 ret = platform_device_add(sdp3430_snd_device); 331 ret = platform_device_add(sdp3430_snd_device);
316 if (ret) 332 if (ret)
317 goto err1; 333 goto err1;
diff --git a/sound/soc/omap/zoom2.c b/sound/soc/omap/zoom2.c
new file mode 100644
index 000000000000..f90b45f56220
--- /dev/null
+++ b/sound/soc/omap/zoom2.c
@@ -0,0 +1,314 @@
1/*
2 * zoom2.c -- SoC audio for Zoom2
3 *
4 * Author: Misael Lopez Cruz <x0052729@ti.com>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
18 * 02110-1301 USA
19 *
20 */
21
22#include <linux/clk.h>
23#include <linux/platform_device.h>
24#include <sound/core.h>
25#include <sound/pcm.h>
26#include <sound/soc.h>
27#include <sound/soc-dapm.h>
28
29#include <asm/mach-types.h>
30#include <mach/hardware.h>
31#include <mach/gpio.h>
32#include <mach/mcbsp.h>
33
34#include "omap-mcbsp.h"
35#include "omap-pcm.h"
36#include "../codecs/twl4030.h"
37
38#define ZOOM2_HEADSET_MUX_GPIO (OMAP_MAX_GPIO_LINES + 15)
39#define ZOOM2_HEADSET_EXTMUTE_GPIO 153
40
41static int zoom2_hw_params(struct snd_pcm_substream *substream,
42 struct snd_pcm_hw_params *params)
43{
44 struct snd_soc_pcm_runtime *rtd = substream->private_data;
45 struct snd_soc_dai *codec_dai = rtd->dai->codec_dai;
46 struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
47 int ret;
48
49 /* Set codec DAI configuration */
50 ret = snd_soc_dai_set_fmt(codec_dai,
51 SND_SOC_DAIFMT_I2S |
52 SND_SOC_DAIFMT_NB_NF |
53 SND_SOC_DAIFMT_CBM_CFM);
54 if (ret < 0) {
55 printk(KERN_ERR "can't set codec DAI configuration\n");
56 return ret;
57 }
58
59 /* Set cpu DAI configuration */
60 ret = snd_soc_dai_set_fmt(cpu_dai,
61 SND_SOC_DAIFMT_I2S |
62 SND_SOC_DAIFMT_NB_NF |
63 SND_SOC_DAIFMT_CBM_CFM);
64 if (ret < 0) {
65 printk(KERN_ERR "can't set cpu DAI configuration\n");
66 return ret;
67 }
68
69 /* Set the codec system clock for DAC and ADC */
70 ret = snd_soc_dai_set_sysclk(codec_dai, 0, 26000000,
71 SND_SOC_CLOCK_IN);
72 if (ret < 0) {
73 printk(KERN_ERR "can't set codec system clock\n");
74 return ret;
75 }
76
77 return 0;
78}
79
80static struct snd_soc_ops zoom2_ops = {
81 .hw_params = zoom2_hw_params,
82};
83
84static int zoom2_hw_voice_params(struct snd_pcm_substream *substream,
85 struct snd_pcm_hw_params *params)
86{
87 struct snd_soc_pcm_runtime *rtd = substream->private_data;
88 struct snd_soc_dai *codec_dai = rtd->dai->codec_dai;
89 struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
90 int ret;
91
92 /* Set codec DAI configuration */
93 ret = snd_soc_dai_set_fmt(codec_dai,
94 SND_SOC_DAIFMT_DSP_A |
95 SND_SOC_DAIFMT_IB_NF |
96 SND_SOC_DAIFMT_CBM_CFM);
97 if (ret) {
98 printk(KERN_ERR "can't set codec DAI configuration\n");
99 return ret;
100 }
101
102 /* Set cpu DAI configuration */
103 ret = snd_soc_dai_set_fmt(cpu_dai,
104 SND_SOC_DAIFMT_DSP_A |
105 SND_SOC_DAIFMT_IB_NF |
106 SND_SOC_DAIFMT_CBM_CFM);
107 if (ret < 0) {
108 printk(KERN_ERR "can't set cpu DAI configuration\n");
109 return ret;
110 }
111
112 /* Set the codec system clock for DAC and ADC */
113 ret = snd_soc_dai_set_sysclk(codec_dai, 0, 26000000,
114 SND_SOC_CLOCK_IN);
115 if (ret < 0) {
116 printk(KERN_ERR "can't set codec system clock\n");
117 return ret;
118 }
119
120 return 0;
121}
122
123static struct snd_soc_ops zoom2_voice_ops = {
124 .hw_params = zoom2_hw_voice_params,
125};
126
127/* Zoom2 machine DAPM */
128static const struct snd_soc_dapm_widget zoom2_twl4030_dapm_widgets[] = {
129 SND_SOC_DAPM_MIC("Ext Mic", NULL),
130 SND_SOC_DAPM_SPK("Ext Spk", NULL),
131 SND_SOC_DAPM_MIC("Headset Mic", NULL),
132 SND_SOC_DAPM_HP("Headset Stereophone", NULL),
133 SND_SOC_DAPM_LINE("Aux In", NULL),
134};
135
136static const struct snd_soc_dapm_route audio_map[] = {
137 /* External Mics: MAINMIC, SUBMIC with bias*/
138 {"MAINMIC", NULL, "Mic Bias 1"},
139 {"SUBMIC", NULL, "Mic Bias 2"},
140 {"Mic Bias 1", NULL, "Ext Mic"},
141 {"Mic Bias 2", NULL, "Ext Mic"},
142
143 /* External Speakers: HFL, HFR */
144 {"Ext Spk", NULL, "HFL"},
145 {"Ext Spk", NULL, "HFR"},
146
147 /* Headset Stereophone: HSOL, HSOR */
148 {"Headset Stereophone", NULL, "HSOL"},
149 {"Headset Stereophone", NULL, "HSOR"},
150
151 /* Headset Mic: HSMIC with bias */
152 {"HSMIC", NULL, "Headset Mic Bias"},
153 {"Headset Mic Bias", NULL, "Headset Mic"},
154
155 /* Aux In: AUXL, AUXR */
156 {"Aux In", NULL, "AUXL"},
157 {"Aux In", NULL, "AUXR"},
158};
159
160static int zoom2_twl4030_init(struct snd_soc_codec *codec)
161{
162 int ret;
163
164 /* Add Zoom2 specific widgets */
165 ret = snd_soc_dapm_new_controls(codec, zoom2_twl4030_dapm_widgets,
166 ARRAY_SIZE(zoom2_twl4030_dapm_widgets));
167 if (ret)
168 return ret;
169
170 /* Set up Zoom2 specific audio path audio_map */
171 snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
172
173 /* Zoom2 connected pins */
174 snd_soc_dapm_enable_pin(codec, "Ext Mic");
175 snd_soc_dapm_enable_pin(codec, "Ext Spk");
176 snd_soc_dapm_enable_pin(codec, "Headset Mic");
177 snd_soc_dapm_enable_pin(codec, "Headset Stereophone");
178 snd_soc_dapm_enable_pin(codec, "Aux In");
179
180 /* TWL4030 not connected pins */
181 snd_soc_dapm_nc_pin(codec, "CARKITMIC");
182 snd_soc_dapm_nc_pin(codec, "DIGIMIC0");
183 snd_soc_dapm_nc_pin(codec, "DIGIMIC1");
184
185 snd_soc_dapm_nc_pin(codec, "OUTL");
186 snd_soc_dapm_nc_pin(codec, "OUTR");
187 snd_soc_dapm_nc_pin(codec, "EARPIECE");
188 snd_soc_dapm_nc_pin(codec, "PREDRIVEL");
189 snd_soc_dapm_nc_pin(codec, "PREDRIVER");
190 snd_soc_dapm_nc_pin(codec, "CARKITL");
191 snd_soc_dapm_nc_pin(codec, "CARKITR");
192
193 ret = snd_soc_dapm_sync(codec);
194
195 return ret;
196}
197
198static int zoom2_twl4030_voice_init(struct snd_soc_codec *codec)
199{
200 unsigned short reg;
201
202 /* Enable voice interface */
203 reg = codec->read(codec, TWL4030_REG_VOICE_IF);
204 reg |= TWL4030_VIF_DIN_EN | TWL4030_VIF_DOUT_EN | TWL4030_VIF_EN;
205 codec->write(codec, TWL4030_REG_VOICE_IF, reg);
206
207 return 0;
208}
209
210/* Digital audio interface glue - connects codec <--> CPU */
211static struct snd_soc_dai_link zoom2_dai[] = {
212 {
213 .name = "TWL4030 I2S",
214 .stream_name = "TWL4030 Audio",
215 .cpu_dai = &omap_mcbsp_dai[0],
216 .codec_dai = &twl4030_dai[TWL4030_DAI_HIFI],
217 .init = zoom2_twl4030_init,
218 .ops = &zoom2_ops,
219 },
220 {
221 .name = "TWL4030 PCM",
222 .stream_name = "TWL4030 Voice",
223 .cpu_dai = &omap_mcbsp_dai[1],
224 .codec_dai = &twl4030_dai[TWL4030_DAI_VOICE],
225 .init = zoom2_twl4030_voice_init,
226 .ops = &zoom2_voice_ops,
227 },
228};
229
230/* Audio machine driver */
231static struct snd_soc_card snd_soc_zoom2 = {
232 .name = "Zoom2",
233 .platform = &omap_soc_platform,
234 .dai_link = zoom2_dai,
235 .num_links = ARRAY_SIZE(zoom2_dai),
236};
237
238/* EXTMUTE callback function */
239void zoom2_set_hs_extmute(int mute)
240{
241 gpio_set_value(ZOOM2_HEADSET_EXTMUTE_GPIO, mute);
242}
243
244/* twl4030 setup */
245static struct twl4030_setup_data twl4030_setup = {
246 .ramp_delay_value = 3, /* 161 ms */
247 .sysclk = 26000,
248 .hs_extmute = 1,
249 .set_hs_extmute = zoom2_set_hs_extmute,
250};
251
252/* Audio subsystem */
253static struct snd_soc_device zoom2_snd_devdata = {
254 .card = &snd_soc_zoom2,
255 .codec_dev = &soc_codec_dev_twl4030,
256 .codec_data = &twl4030_setup,
257};
258
259static struct platform_device *zoom2_snd_device;
260
261static int __init zoom2_soc_init(void)
262{
263 int ret;
264
265 if (!machine_is_omap_zoom2()) {
266 pr_debug("Not Zoom2!\n");
267 return -ENODEV;
268 }
269 printk(KERN_INFO "Zoom2 SoC init\n");
270
271 zoom2_snd_device = platform_device_alloc("soc-audio", -1);
272 if (!zoom2_snd_device) {
273 printk(KERN_ERR "Platform device allocation failed\n");
274 return -ENOMEM;
275 }
276
277 platform_set_drvdata(zoom2_snd_device, &zoom2_snd_devdata);
278 zoom2_snd_devdata.dev = &zoom2_snd_device->dev;
279 *(unsigned int *)zoom2_dai[0].cpu_dai->private_data = 1; /* McBSP2 */
280 *(unsigned int *)zoom2_dai[1].cpu_dai->private_data = 2; /* McBSP3 */
281
282 ret = platform_device_add(zoom2_snd_device);
283 if (ret)
284 goto err1;
285
286 BUG_ON(gpio_request(ZOOM2_HEADSET_MUX_GPIO, "hs_mux") < 0);
287 gpio_direction_output(ZOOM2_HEADSET_MUX_GPIO, 0);
288
289 BUG_ON(gpio_request(ZOOM2_HEADSET_EXTMUTE_GPIO, "ext_mute") < 0);
290 gpio_direction_output(ZOOM2_HEADSET_EXTMUTE_GPIO, 0);
291
292 return 0;
293
294err1:
295 printk(KERN_ERR "Unable to add platform device\n");
296 platform_device_put(zoom2_snd_device);
297
298 return ret;
299}
300module_init(zoom2_soc_init);
301
302static void __exit zoom2_soc_exit(void)
303{
304 gpio_free(ZOOM2_HEADSET_MUX_GPIO);
305 gpio_free(ZOOM2_HEADSET_EXTMUTE_GPIO);
306
307 platform_device_unregister(zoom2_snd_device);
308}
309module_exit(zoom2_soc_exit);
310
311MODULE_AUTHOR("Misael Lopez Cruz <x0052729@ti.com>");
312MODULE_DESCRIPTION("ALSA SoC Zoom2");
313MODULE_LICENSE("GPL");
314
diff --git a/sound/soc/pxa/magician.c b/sound/soc/pxa/magician.c
index 326955dea36c..9f7c61e23daf 100644
--- a/sound/soc/pxa/magician.c
+++ b/sound/soc/pxa/magician.c
@@ -20,12 +20,14 @@
20#include <linux/platform_device.h> 20#include <linux/platform_device.h>
21#include <linux/delay.h> 21#include <linux/delay.h>
22#include <linux/gpio.h> 22#include <linux/gpio.h>
23#include <linux/i2c.h>
23 24
24#include <sound/core.h> 25#include <sound/core.h>
25#include <sound/pcm.h> 26#include <sound/pcm.h>
26#include <sound/pcm_params.h> 27#include <sound/pcm_params.h>
27#include <sound/soc.h> 28#include <sound/soc.h>
28#include <sound/soc-dapm.h> 29#include <sound/soc-dapm.h>
30#include <sound/uda1380.h>
29 31
30#include <mach/magician.h> 32#include <mach/magician.h>
31#include <asm/mach-types.h> 33#include <asm/mach-types.h>
@@ -188,7 +190,7 @@ static int magician_playback_hw_params(struct snd_pcm_substream *substream,
188 if (ret < 0) 190 if (ret < 0)
189 return ret; 191 return ret;
190 192
191 ret = snd_soc_dai_set_tdm_slot(cpu_dai, 1, 1); 193 ret = snd_soc_dai_set_tdm_slot(cpu_dai, 1, 0, 1, width);
192 if (ret < 0) 194 if (ret < 0)
193 return ret; 195 return ret;
194 196
@@ -447,34 +449,47 @@ static struct snd_soc_card snd_soc_card_magician = {
447 .platform = &pxa2xx_soc_platform, 449 .platform = &pxa2xx_soc_platform,
448}; 450};
449 451
450/* magician audio private data */
451static struct uda1380_setup_data magician_uda1380_setup = {
452 .i2c_address = 0x18,
453 .dac_clk = UDA1380_DAC_CLK_WSPLL,
454};
455
456/* magician audio subsystem */ 452/* magician audio subsystem */
457static struct snd_soc_device magician_snd_devdata = { 453static struct snd_soc_device magician_snd_devdata = {
458 .card = &snd_soc_card_magician, 454 .card = &snd_soc_card_magician,
459 .codec_dev = &soc_codec_dev_uda1380, 455 .codec_dev = &soc_codec_dev_uda1380,
460 .codec_data = &magician_uda1380_setup,
461}; 456};
462 457
463static struct platform_device *magician_snd_device; 458static struct platform_device *magician_snd_device;
464 459
460/*
461 * FIXME: move into magician board file once merged into the pxa tree
462 */
463static struct uda1380_platform_data uda1380_info = {
464 .gpio_power = EGPIO_MAGICIAN_CODEC_POWER,
465 .gpio_reset = EGPIO_MAGICIAN_CODEC_RESET,
466 .dac_clk = UDA1380_DAC_CLK_WSPLL,
467};
468
469static struct i2c_board_info i2c_board_info[] = {
470 {
471 I2C_BOARD_INFO("uda1380", 0x18),
472 .platform_data = &uda1380_info,
473 },
474};
475
465static int __init magician_init(void) 476static int __init magician_init(void)
466{ 477{
467 int ret; 478 int ret;
479 struct i2c_adapter *adapter;
480 struct i2c_client *client;
468 481
469 if (!machine_is_magician()) 482 if (!machine_is_magician())
470 return -ENODEV; 483 return -ENODEV;
471 484
472 ret = gpio_request(EGPIO_MAGICIAN_CODEC_POWER, "CODEC_POWER"); 485 adapter = i2c_get_adapter(0);
473 if (ret) 486 if (!adapter)
474 goto err_request_power; 487 return -ENODEV;
475 ret = gpio_request(EGPIO_MAGICIAN_CODEC_RESET, "CODEC_RESET"); 488 client = i2c_new_device(adapter, i2c_board_info);
476 if (ret) 489 i2c_put_adapter(adapter);
477 goto err_request_reset; 490 if (!client)
491 return -ENODEV;
492
478 ret = gpio_request(EGPIO_MAGICIAN_SPK_POWER, "SPK_POWER"); 493 ret = gpio_request(EGPIO_MAGICIAN_SPK_POWER, "SPK_POWER");
479 if (ret) 494 if (ret)
480 goto err_request_spk; 495 goto err_request_spk;
@@ -491,14 +506,8 @@ static int __init magician_init(void)
491 if (ret) 506 if (ret)
492 goto err_request_in_sel1; 507 goto err_request_in_sel1;
493 508
494 gpio_set_value(EGPIO_MAGICIAN_CODEC_POWER, 1);
495 gpio_set_value(EGPIO_MAGICIAN_IN_SEL0, 0); 509 gpio_set_value(EGPIO_MAGICIAN_IN_SEL0, 0);
496 510
497 /* we may need to have the clock running here - pH5 */
498 gpio_set_value(EGPIO_MAGICIAN_CODEC_RESET, 1);
499 udelay(5);
500 gpio_set_value(EGPIO_MAGICIAN_CODEC_RESET, 0);
501
502 magician_snd_device = platform_device_alloc("soc-audio", -1); 511 magician_snd_device = platform_device_alloc("soc-audio", -1);
503 if (!magician_snd_device) { 512 if (!magician_snd_device) {
504 ret = -ENOMEM; 513 ret = -ENOMEM;
@@ -526,10 +535,6 @@ err_request_mic:
526err_request_ep: 535err_request_ep:
527 gpio_free(EGPIO_MAGICIAN_SPK_POWER); 536 gpio_free(EGPIO_MAGICIAN_SPK_POWER);
528err_request_spk: 537err_request_spk:
529 gpio_free(EGPIO_MAGICIAN_CODEC_RESET);
530err_request_reset:
531 gpio_free(EGPIO_MAGICIAN_CODEC_POWER);
532err_request_power:
533 return ret; 538 return ret;
534} 539}
535 540
@@ -540,15 +545,12 @@ static void __exit magician_exit(void)
540 gpio_set_value(EGPIO_MAGICIAN_SPK_POWER, 0); 545 gpio_set_value(EGPIO_MAGICIAN_SPK_POWER, 0);
541 gpio_set_value(EGPIO_MAGICIAN_EP_POWER, 0); 546 gpio_set_value(EGPIO_MAGICIAN_EP_POWER, 0);
542 gpio_set_value(EGPIO_MAGICIAN_MIC_POWER, 0); 547 gpio_set_value(EGPIO_MAGICIAN_MIC_POWER, 0);
543 gpio_set_value(EGPIO_MAGICIAN_CODEC_POWER, 0);
544 548
545 gpio_free(EGPIO_MAGICIAN_IN_SEL1); 549 gpio_free(EGPIO_MAGICIAN_IN_SEL1);
546 gpio_free(EGPIO_MAGICIAN_IN_SEL0); 550 gpio_free(EGPIO_MAGICIAN_IN_SEL0);
547 gpio_free(EGPIO_MAGICIAN_MIC_POWER); 551 gpio_free(EGPIO_MAGICIAN_MIC_POWER);
548 gpio_free(EGPIO_MAGICIAN_EP_POWER); 552 gpio_free(EGPIO_MAGICIAN_EP_POWER);
549 gpio_free(EGPIO_MAGICIAN_SPK_POWER); 553 gpio_free(EGPIO_MAGICIAN_SPK_POWER);
550 gpio_free(EGPIO_MAGICIAN_CODEC_RESET);
551 gpio_free(EGPIO_MAGICIAN_CODEC_POWER);
552} 554}
553 555
554module_init(magician_init); 556module_init(magician_init);
diff --git a/sound/soc/pxa/palm27x.c b/sound/soc/pxa/palm27x.c
index e6102fda0a7f..1f96e3227be5 100644
--- a/sound/soc/pxa/palm27x.c
+++ b/sound/soc/pxa/palm27x.c
@@ -17,13 +17,12 @@
17#include <linux/moduleparam.h> 17#include <linux/moduleparam.h>
18#include <linux/device.h> 18#include <linux/device.h>
19#include <linux/gpio.h> 19#include <linux/gpio.h>
20#include <linux/interrupt.h>
21#include <linux/irq.h>
22 20
23#include <sound/core.h> 21#include <sound/core.h>
24#include <sound/pcm.h> 22#include <sound/pcm.h>
25#include <sound/soc.h> 23#include <sound/soc.h>
26#include <sound/soc-dapm.h> 24#include <sound/soc-dapm.h>
25#include <sound/jack.h>
27 26
28#include <asm/mach-types.h> 27#include <asm/mach-types.h>
29#include <mach/audio.h> 28#include <mach/audio.h>
@@ -33,90 +32,31 @@
33#include "pxa2xx-pcm.h" 32#include "pxa2xx-pcm.h"
34#include "pxa2xx-ac97.h" 33#include "pxa2xx-ac97.h"
35 34
36static int palm27x_jack_func = 1; 35static struct snd_soc_jack hs_jack;
37static int palm27x_spk_func = 1;
38static int palm27x_ep_gpio = -1;
39 36
40static void palm27x_ext_control(struct snd_soc_codec *codec) 37/* Headphones jack detection DAPM pins */
41{ 38static struct snd_soc_jack_pin hs_jack_pins[] = {
42 if (!palm27x_spk_func) 39 {
43 snd_soc_dapm_enable_pin(codec, "Speaker"); 40 .pin = "Headphone Jack",
44 else 41 .mask = SND_JACK_HEADPHONE,
45 snd_soc_dapm_disable_pin(codec, "Speaker"); 42 },
46
47 if (!palm27x_jack_func)
48 snd_soc_dapm_enable_pin(codec, "Headphone Jack");
49 else
50 snd_soc_dapm_disable_pin(codec, "Headphone Jack");
51
52 snd_soc_dapm_sync(codec);
53}
54
55static int palm27x_startup(struct snd_pcm_substream *substream)
56{
57 struct snd_soc_pcm_runtime *rtd = substream->private_data;
58 struct snd_soc_codec *codec = rtd->socdev->card->codec;
59
60 /* check the jack status at stream startup */
61 palm27x_ext_control(codec);
62 return 0;
63}
64
65static struct snd_soc_ops palm27x_ops = {
66 .startup = palm27x_startup,
67}; 43};
68 44
69static irqreturn_t palm27x_interrupt(int irq, void *v) 45/* Headphones jack detection gpios */
70{ 46static struct snd_soc_jack_gpio hs_jack_gpios[] = {
71 palm27x_spk_func = gpio_get_value(palm27x_ep_gpio); 47 [0] = {
72 palm27x_jack_func = !palm27x_spk_func; 48 /* gpio is set on per-platform basis */
73 return IRQ_HANDLED; 49 .name = "hp-gpio",
74} 50 .report = SND_JACK_HEADPHONE,
75 51 .debounce_time = 200,
76static int palm27x_get_jack(struct snd_kcontrol *kcontrol, 52 },
77 struct snd_ctl_elem_value *ucontrol) 53};
78{
79 ucontrol->value.integer.value[0] = palm27x_jack_func;
80 return 0;
81}
82
83static int palm27x_set_jack(struct snd_kcontrol *kcontrol,
84 struct snd_ctl_elem_value *ucontrol)
85{
86 struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
87
88 if (palm27x_jack_func == ucontrol->value.integer.value[0])
89 return 0;
90
91 palm27x_jack_func = ucontrol->value.integer.value[0];
92 palm27x_ext_control(codec);
93 return 1;
94}
95
96static int palm27x_get_spk(struct snd_kcontrol *kcontrol,
97 struct snd_ctl_elem_value *ucontrol)
98{
99 ucontrol->value.integer.value[0] = palm27x_spk_func;
100 return 0;
101}
102
103static int palm27x_set_spk(struct snd_kcontrol *kcontrol,
104 struct snd_ctl_elem_value *ucontrol)
105{
106 struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
107
108 if (palm27x_spk_func == ucontrol->value.integer.value[0])
109 return 0;
110
111 palm27x_spk_func = ucontrol->value.integer.value[0];
112 palm27x_ext_control(codec);
113 return 1;
114}
115 54
116/* PalmTX machine dapm widgets */ 55/* Palm27x machine dapm widgets */
117static const struct snd_soc_dapm_widget palm27x_dapm_widgets[] = { 56static const struct snd_soc_dapm_widget palm27x_dapm_widgets[] = {
118 SND_SOC_DAPM_HP("Headphone Jack", NULL), 57 SND_SOC_DAPM_HP("Headphone Jack", NULL),
119 SND_SOC_DAPM_SPK("Speaker", NULL), 58 SND_SOC_DAPM_SPK("Ext. Speaker", NULL),
59 SND_SOC_DAPM_MIC("Ext. Microphone", NULL),
120}; 60};
121 61
122/* PalmTX audio map */ 62/* PalmTX audio map */
@@ -126,46 +66,66 @@ static const struct snd_soc_dapm_route audio_map[] = {
126 {"Headphone Jack", NULL, "HPOUTR"}, 66 {"Headphone Jack", NULL, "HPOUTR"},
127 67
128 /* ext speaker connected to ROUT2, LOUT2 */ 68 /* ext speaker connected to ROUT2, LOUT2 */
129 {"Speaker", NULL, "LOUT2"}, 69 {"Ext. Speaker", NULL, "LOUT2"},
130 {"Speaker", NULL, "ROUT2"}, 70 {"Ext. Speaker", NULL, "ROUT2"},
131};
132 71
133static const char *jack_function[] = {"Headphone", "Off"}; 72 /* mic connected to MIC1 */
134static const char *spk_function[] = {"On", "Off"}; 73 {"Ext. Microphone", NULL, "MIC1"},
135static const struct soc_enum palm27x_enum[] = {
136 SOC_ENUM_SINGLE_EXT(2, jack_function),
137 SOC_ENUM_SINGLE_EXT(2, spk_function),
138}; 74};
139 75
140static const struct snd_kcontrol_new palm27x_controls[] = { 76static struct snd_soc_card palm27x_asoc;
141 SOC_ENUM_EXT("Jack Function", palm27x_enum[0], palm27x_get_jack,
142 palm27x_set_jack),
143 SOC_ENUM_EXT("Speaker Function", palm27x_enum[1], palm27x_get_spk,
144 palm27x_set_spk),
145};
146 77
147static int palm27x_ac97_init(struct snd_soc_codec *codec) 78static int palm27x_ac97_init(struct snd_soc_codec *codec)
148{ 79{
149 int err; 80 int err;
150 81
82 /* add palm27x specific widgets */
83 err = snd_soc_dapm_new_controls(codec, palm27x_dapm_widgets,
84 ARRAY_SIZE(palm27x_dapm_widgets));
85 if (err)
86 return err;
87
88 /* set up palm27x specific audio path audio_map */
89 err = snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
90 if (err)
91 return err;
92
93 /* connected pins */
94 if (machine_is_palmld())
95 snd_soc_dapm_enable_pin(codec, "MIC1");
96 snd_soc_dapm_enable_pin(codec, "HPOUTL");
97 snd_soc_dapm_enable_pin(codec, "HPOUTR");
98 snd_soc_dapm_enable_pin(codec, "LOUT2");
99 snd_soc_dapm_enable_pin(codec, "ROUT2");
100
101 /* not connected pins */
151 snd_soc_dapm_nc_pin(codec, "OUT3"); 102 snd_soc_dapm_nc_pin(codec, "OUT3");
152 snd_soc_dapm_nc_pin(codec, "MONOOUT"); 103 snd_soc_dapm_nc_pin(codec, "MONOOUT");
104 snd_soc_dapm_nc_pin(codec, "LINEINL");
105 snd_soc_dapm_nc_pin(codec, "LINEINR");
106 snd_soc_dapm_nc_pin(codec, "PCBEEP");
107 snd_soc_dapm_nc_pin(codec, "PHONE");
108 snd_soc_dapm_nc_pin(codec, "MIC2");
109
110 err = snd_soc_dapm_sync(codec);
111 if (err)
112 return err;
153 113
154 /* add palm27x specific controls */ 114 /* Jack detection API stuff */
155 err = snd_soc_add_controls(codec, palm27x_controls, 115 err = snd_soc_jack_new(&palm27x_asoc, "Headphone Jack",
156 ARRAY_SIZE(palm27x_controls)); 116 SND_JACK_HEADPHONE, &hs_jack);
157 if (err < 0) 117 if (err)
158 return err; 118 return err;
159 119
160 /* add palm27x specific widgets */ 120 err = snd_soc_jack_add_pins(&hs_jack, ARRAY_SIZE(hs_jack_pins),
161 snd_soc_dapm_new_controls(codec, palm27x_dapm_widgets, 121 hs_jack_pins);
162 ARRAY_SIZE(palm27x_dapm_widgets)); 122 if (err)
123 return err;
163 124
164 /* set up palm27x specific audio path audio_map */ 125 err = snd_soc_jack_add_gpios(&hs_jack, ARRAY_SIZE(hs_jack_gpios),
165 snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map)); 126 hs_jack_gpios);
166 127
167 snd_soc_dapm_sync(codec); 128 return err;
168 return 0;
169} 129}
170 130
171static struct snd_soc_dai_link palm27x_dai[] = { 131static struct snd_soc_dai_link palm27x_dai[] = {
@@ -175,14 +135,12 @@ static struct snd_soc_dai_link palm27x_dai[] = {
175 .cpu_dai = &pxa_ac97_dai[PXA2XX_DAI_AC97_HIFI], 135 .cpu_dai = &pxa_ac97_dai[PXA2XX_DAI_AC97_HIFI],
176 .codec_dai = &wm9712_dai[WM9712_DAI_AC97_HIFI], 136 .codec_dai = &wm9712_dai[WM9712_DAI_AC97_HIFI],
177 .init = palm27x_ac97_init, 137 .init = palm27x_ac97_init,
178 .ops = &palm27x_ops,
179}, 138},
180{ 139{
181 .name = "AC97 Aux", 140 .name = "AC97 Aux",
182 .stream_name = "AC97 Aux", 141 .stream_name = "AC97 Aux",
183 .cpu_dai = &pxa_ac97_dai[PXA2XX_DAI_AC97_AUX], 142 .cpu_dai = &pxa_ac97_dai[PXA2XX_DAI_AC97_AUX],
184 .codec_dai = &wm9712_dai[WM9712_DAI_AC97_AUX], 143 .codec_dai = &wm9712_dai[WM9712_DAI_AC97_AUX],
185 .ops = &palm27x_ops,
186}, 144},
187}; 145};
188 146
@@ -208,27 +166,17 @@ static int palm27x_asoc_probe(struct platform_device *pdev)
208 machine_is_palmld() || machine_is_palmte2())) 166 machine_is_palmld() || machine_is_palmte2()))
209 return -ENODEV; 167 return -ENODEV;
210 168
211 if (pdev->dev.platform_data) 169 if (!pdev->dev.platform_data) {
212 palm27x_ep_gpio = ((struct palm27x_asoc_info *) 170 dev_err(&pdev->dev, "please supply platform_data\n");
213 (pdev->dev.platform_data))->jack_gpio; 171 return -ENODEV;
214 172 }
215 ret = gpio_request(palm27x_ep_gpio, "Headphone Jack");
216 if (ret)
217 return ret;
218 ret = gpio_direction_input(palm27x_ep_gpio);
219 if (ret)
220 goto err_alloc;
221 173
222 if (request_irq(gpio_to_irq(palm27x_ep_gpio), palm27x_interrupt, 174 hs_jack_gpios[0].gpio = ((struct palm27x_asoc_info *)
223 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, 175 (pdev->dev.platform_data))->jack_gpio;
224 "Headphone jack", NULL))
225 goto err_alloc;
226 176
227 palm27x_snd_device = platform_device_alloc("soc-audio", -1); 177 palm27x_snd_device = platform_device_alloc("soc-audio", -1);
228 if (!palm27x_snd_device) { 178 if (!palm27x_snd_device)
229 ret = -ENOMEM; 179 return -ENOMEM;
230 goto err_dev;
231 }
232 180
233 platform_set_drvdata(palm27x_snd_device, &palm27x_snd_devdata); 181 platform_set_drvdata(palm27x_snd_device, &palm27x_snd_devdata);
234 palm27x_snd_devdata.dev = &palm27x_snd_device->dev; 182 palm27x_snd_devdata.dev = &palm27x_snd_device->dev;
@@ -241,18 +189,12 @@ static int palm27x_asoc_probe(struct platform_device *pdev)
241 189
242put_device: 190put_device:
243 platform_device_put(palm27x_snd_device); 191 platform_device_put(palm27x_snd_device);
244err_dev:
245 free_irq(gpio_to_irq(palm27x_ep_gpio), NULL);
246err_alloc:
247 gpio_free(palm27x_ep_gpio);
248 192
249 return ret; 193 return ret;
250} 194}
251 195
252static int __devexit palm27x_asoc_remove(struct platform_device *pdev) 196static int __devexit palm27x_asoc_remove(struct platform_device *pdev)
253{ 197{
254 free_irq(gpio_to_irq(palm27x_ep_gpio), NULL);
255 gpio_free(palm27x_ep_gpio);
256 platform_device_unregister(palm27x_snd_device); 198 platform_device_unregister(palm27x_snd_device);
257 return 0; 199 return 0;
258} 200}
diff --git a/sound/soc/pxa/pxa-ssp.c b/sound/soc/pxa/pxa-ssp.c
index 19c45409d94c..5b9ed6464789 100644
--- a/sound/soc/pxa/pxa-ssp.c
+++ b/sound/soc/pxa/pxa-ssp.c
@@ -375,21 +375,34 @@ static int pxa_ssp_set_dai_pll(struct snd_soc_dai *cpu_dai,
375 * Set the active slots in TDM/Network mode 375 * Set the active slots in TDM/Network mode
376 */ 376 */
377static int pxa_ssp_set_dai_tdm_slot(struct snd_soc_dai *cpu_dai, 377static int pxa_ssp_set_dai_tdm_slot(struct snd_soc_dai *cpu_dai,
378 unsigned int mask, int slots) 378 unsigned int tx_mask, unsigned int rx_mask, int slots, int slot_width)
379{ 379{
380 struct ssp_priv *priv = cpu_dai->private_data; 380 struct ssp_priv *priv = cpu_dai->private_data;
381 struct ssp_device *ssp = priv->dev.ssp; 381 struct ssp_device *ssp = priv->dev.ssp;
382 u32 sscr0; 382 u32 sscr0;
383 383
384 sscr0 = ssp_read_reg(ssp, SSCR0) & ~SSCR0_SlotsPerFrm(7); 384 sscr0 = ssp_read_reg(ssp, SSCR0);
385 sscr0 &= ~(SSCR0_MOD | SSCR0_SlotsPerFrm(8) | SSCR0_EDSS | SSCR0_DSS);
386
387 /* set slot width */
388 if (slot_width > 16)
389 sscr0 |= SSCR0_EDSS | SSCR0_DataSize(slot_width - 16);
390 else
391 sscr0 |= SSCR0_DataSize(slot_width);
392
393 if (slots > 1) {
394 /* enable network mode */
395 sscr0 |= SSCR0_MOD;
385 396
386 /* set number of active slots */ 397 /* set number of active slots */
387 sscr0 |= SSCR0_SlotsPerFrm(slots); 398 sscr0 |= SSCR0_SlotsPerFrm(slots);
399
400 /* set active slot mask */
401 ssp_write_reg(ssp, SSTSA, tx_mask);
402 ssp_write_reg(ssp, SSRSA, rx_mask);
403 }
388 ssp_write_reg(ssp, SSCR0, sscr0); 404 ssp_write_reg(ssp, SSCR0, sscr0);
389 405
390 /* set active slot mask */
391 ssp_write_reg(ssp, SSTSA, mask);
392 ssp_write_reg(ssp, SSRSA, mask);
393 return 0; 406 return 0;
394} 407}
395 408
@@ -457,31 +470,27 @@ static int pxa_ssp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
457 return -EINVAL; 470 return -EINVAL;
458 } 471 }
459 472
460 ssp_write_reg(ssp, SSCR0, sscr0); 473 switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
461 ssp_write_reg(ssp, SSCR1, sscr1); 474 case SND_SOC_DAIFMT_NB_NF:
462 ssp_write_reg(ssp, SSPSP, sspsp); 475 sspsp |= SSPSP_SFRMP;
476 break;
477 case SND_SOC_DAIFMT_NB_IF:
478 break;
479 case SND_SOC_DAIFMT_IB_IF:
480 sspsp |= SSPSP_SCMODE(2);
481 break;
482 case SND_SOC_DAIFMT_IB_NF:
483 sspsp |= SSPSP_SCMODE(2) | SSPSP_SFRMP;
484 break;
485 default:
486 return -EINVAL;
487 }
463 488
464 switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { 489 switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
465 case SND_SOC_DAIFMT_I2S: 490 case SND_SOC_DAIFMT_I2S:
466 sscr0 |= SSCR0_PSP; 491 sscr0 |= SSCR0_PSP;
467 sscr1 |= SSCR1_RWOT | SSCR1_TRAIL; 492 sscr1 |= SSCR1_RWOT | SSCR1_TRAIL;
468
469 /* See hw_params() */ 493 /* See hw_params() */
470 switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
471 case SND_SOC_DAIFMT_NB_NF:
472 sspsp |= SSPSP_SFRMP;
473 break;
474 case SND_SOC_DAIFMT_NB_IF:
475 break;
476 case SND_SOC_DAIFMT_IB_IF:
477 sspsp |= SSPSP_SCMODE(2);
478 break;
479 case SND_SOC_DAIFMT_IB_NF:
480 sspsp |= SSPSP_SCMODE(2) | SSPSP_SFRMP;
481 break;
482 default:
483 return -EINVAL;
484 }
485 break; 494 break;
486 495
487 case SND_SOC_DAIFMT_DSP_A: 496 case SND_SOC_DAIFMT_DSP_A:
@@ -489,22 +498,6 @@ static int pxa_ssp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
489 case SND_SOC_DAIFMT_DSP_B: 498 case SND_SOC_DAIFMT_DSP_B:
490 sscr0 |= SSCR0_MOD | SSCR0_PSP; 499 sscr0 |= SSCR0_MOD | SSCR0_PSP;
491 sscr1 |= SSCR1_TRAIL | SSCR1_RWOT; 500 sscr1 |= SSCR1_TRAIL | SSCR1_RWOT;
492
493 switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
494 case SND_SOC_DAIFMT_NB_NF:
495 sspsp |= SSPSP_SFRMP;
496 break;
497 case SND_SOC_DAIFMT_NB_IF:
498 break;
499 case SND_SOC_DAIFMT_IB_IF:
500 sspsp |= SSPSP_SCMODE(2);
501 break;
502 case SND_SOC_DAIFMT_IB_NF:
503 sspsp |= SSPSP_SCMODE(2) | SSPSP_SFRMP;
504 break;
505 default:
506 return -EINVAL;
507 }
508 break; 501 break;
509 502
510 default: 503 default:
diff --git a/sound/soc/pxa/pxa2xx-ac97.c b/sound/soc/pxa/pxa2xx-ac97.c
index d9c94d71fa61..e9ae7b3a7e00 100644
--- a/sound/soc/pxa/pxa2xx-ac97.c
+++ b/sound/soc/pxa/pxa2xx-ac97.c
@@ -22,6 +22,7 @@
22#include <mach/hardware.h> 22#include <mach/hardware.h>
23#include <mach/regs-ac97.h> 23#include <mach/regs-ac97.h>
24#include <mach/dma.h> 24#include <mach/dma.h>
25#include <mach/audio.h>
25 26
26#include "pxa2xx-pcm.h" 27#include "pxa2xx-pcm.h"
27#include "pxa2xx-ac97.h" 28#include "pxa2xx-ac97.h"
@@ -241,9 +242,18 @@ EXPORT_SYMBOL_GPL(soc_ac97_ops);
241static int __devinit pxa2xx_ac97_dev_probe(struct platform_device *pdev) 242static int __devinit pxa2xx_ac97_dev_probe(struct platform_device *pdev)
242{ 243{
243 int i; 244 int i;
245 pxa2xx_audio_ops_t *pdata = pdev->dev.platform_data;
244 246
245 for (i = 0; i < ARRAY_SIZE(pxa_ac97_dai); i++) 247 if (pdev->id >= 0) {
248 dev_err(&pdev->dev, "PXA2xx has only one AC97 port.\n");
249 return -ENXIO;
250 }
251
252 for (i = 0; i < ARRAY_SIZE(pxa_ac97_dai); i++) {
246 pxa_ac97_dai[i].dev = &pdev->dev; 253 pxa_ac97_dai[i].dev = &pdev->dev;
254 if (pdata && pdata->codec_pdata[0])
255 pxa_ac97_dai[i].ac97_pdata = pdata->codec_pdata[0];
256 }
247 257
248 /* Punt most of the init to the SoC probe; we may need the machine 258 /* Punt most of the init to the SoC probe; we may need the machine
249 * driver to do interesting things with the clocking to get us up 259 * driver to do interesting things with the clocking to get us up
diff --git a/sound/soc/s3c24xx/Kconfig b/sound/soc/s3c24xx/Kconfig
index df494d1e346f..923428fc1adb 100644
--- a/sound/soc/s3c24xx/Kconfig
+++ b/sound/soc/s3c24xx/Kconfig
@@ -1,6 +1,7 @@
1config SND_S3C24XX_SOC 1config SND_S3C24XX_SOC
2 tristate "SoC Audio for the Samsung S3CXXXX chips" 2 tristate "SoC Audio for the Samsung S3CXXXX chips"
3 depends on ARCH_S3C2410 3 depends on ARCH_S3C2410 || ARCH_S3C64XX
4 select S3C64XX_DMA if ARCH_S3C64XX
4 help 5 help
5 Say Y or M if you want to add support for codecs attached to 6 Say Y or M if you want to add support for codecs attached to
6 the S3C24XX AC97 or I2S interfaces. You will also need to 7 the S3C24XX AC97 or I2S interfaces. You will also need to
@@ -38,6 +39,15 @@ config SND_S3C24XX_SOC_NEO1973_WM8753
38 Say Y if you want to add support for SoC audio on smdk2440 39 Say Y if you want to add support for SoC audio on smdk2440
39 with the WM8753. 40 with the WM8753.
40 41
42config SND_S3C24XX_SOC_NEO1973_GTA02_WM8753
43 tristate "Audio support for the Openmoko Neo FreeRunner (GTA02)"
44 depends on SND_S3C24XX_SOC && MACH_NEO1973_GTA02
45 select SND_S3C24XX_SOC_I2S
46 select SND_SOC_WM8753
47 help
48 This driver provides audio support for the Openmoko Neo FreeRunner
49 smartphone.
50
41config SND_S3C24XX_SOC_JIVE_WM8750 51config SND_S3C24XX_SOC_JIVE_WM8750
42 tristate "SoC I2S Audio support for Jive" 52 tristate "SoC I2S Audio support for Jive"
43 depends on SND_S3C24XX_SOC && MACH_JIVE 53 depends on SND_S3C24XX_SOC && MACH_JIVE
@@ -57,7 +67,7 @@ config SND_S3C24XX_SOC_SMDK2443_WM9710
57 67
58config SND_S3C24XX_SOC_LN2440SBC_ALC650 68config SND_S3C24XX_SOC_LN2440SBC_ALC650
59 tristate "SoC AC97 Audio support for LN2440SBC - ALC650" 69 tristate "SoC AC97 Audio support for LN2440SBC - ALC650"
60 depends on SND_S3C24XX_SOC 70 depends on SND_S3C24XX_SOC && ARCH_S3C2410
61 select SND_S3C2443_SOC_AC97 71 select SND_S3C2443_SOC_AC97
62 select SND_SOC_AC97_CODEC 72 select SND_SOC_AC97_CODEC
63 help 73 help
@@ -66,7 +76,26 @@ config SND_S3C24XX_SOC_LN2440SBC_ALC650
66 76
67config SND_S3C24XX_SOC_S3C24XX_UDA134X 77config SND_S3C24XX_SOC_S3C24XX_UDA134X
68 tristate "SoC I2S Audio support UDA134X wired to a S3C24XX" 78 tristate "SoC I2S Audio support UDA134X wired to a S3C24XX"
69 depends on SND_S3C24XX_SOC 79 depends on SND_S3C24XX_SOC && ARCH_S3C2410
70 select SND_S3C24XX_SOC_I2S 80 select SND_S3C24XX_SOC_I2S
71 select SND_SOC_L3 81 select SND_SOC_L3
72 select SND_SOC_UDA134X 82 select SND_SOC_UDA134X
83
84config SND_S3C24XX_SOC_SIMTEC
85 tristate
86 help
 87 Internal node for common S3C24XX/Simtec support
88
89config SND_S3C24XX_SOC_SIMTEC_TLV320AIC23
90 tristate "SoC I2S Audio support for TLV320AIC23 on Simtec boards"
91 depends on SND_S3C24XX_SOC && ARCH_S3C2410
92 select SND_S3C24XX_SOC_I2S
93 select SND_SOC_TLV320AIC23
94 select SND_S3C24XX_SOC_SIMTEC
95
96config SND_S3C24XX_SOC_SIMTEC_HERMES
97 tristate "SoC I2S Audio support for Simtec Hermes board"
98 depends on SND_S3C24XX_SOC && ARCH_S3C2410
99 select SND_S3C24XX_SOC_I2S
100 select SND_SOC_TLV320AIC3X
101 select SND_S3C24XX_SOC_SIMTEC
diff --git a/sound/soc/s3c24xx/Makefile b/sound/soc/s3c24xx/Makefile
index 07a93a2ebe5f..99f5a7dd3fc6 100644
--- a/sound/soc/s3c24xx/Makefile
+++ b/sound/soc/s3c24xx/Makefile
@@ -16,12 +16,21 @@ obj-$(CONFIG_SND_S3C_I2SV2_SOC) += snd-soc-s3c-i2s-v2.o
16# S3C24XX Machine Support 16# S3C24XX Machine Support
17snd-soc-jive-wm8750-objs := jive_wm8750.o 17snd-soc-jive-wm8750-objs := jive_wm8750.o
18snd-soc-neo1973-wm8753-objs := neo1973_wm8753.o 18snd-soc-neo1973-wm8753-objs := neo1973_wm8753.o
19snd-soc-neo1973-gta02-wm8753-objs := neo1973_gta02_wm8753.o
19snd-soc-smdk2443-wm9710-objs := smdk2443_wm9710.o 20snd-soc-smdk2443-wm9710-objs := smdk2443_wm9710.o
20snd-soc-ln2440sbc-alc650-objs := ln2440sbc_alc650.o 21snd-soc-ln2440sbc-alc650-objs := ln2440sbc_alc650.o
21snd-soc-s3c24xx-uda134x-objs := s3c24xx_uda134x.o 22snd-soc-s3c24xx-uda134x-objs := s3c24xx_uda134x.o
23snd-soc-s3c24xx-simtec-objs := s3c24xx_simtec.o
24snd-soc-s3c24xx-simtec-hermes-objs := s3c24xx_simtec_hermes.o
25snd-soc-s3c24xx-simtec-tlv320aic23-objs := s3c24xx_simtec_tlv320aic23.o
22 26
23obj-$(CONFIG_SND_S3C24XX_SOC_JIVE_WM8750) += snd-soc-jive-wm8750.o 27obj-$(CONFIG_SND_S3C24XX_SOC_JIVE_WM8750) += snd-soc-jive-wm8750.o
24obj-$(CONFIG_SND_S3C24XX_SOC_NEO1973_WM8753) += snd-soc-neo1973-wm8753.o 28obj-$(CONFIG_SND_S3C24XX_SOC_NEO1973_WM8753) += snd-soc-neo1973-wm8753.o
29obj-$(CONFIG_SND_S3C24XX_SOC_NEO1973_GTA02_WM8753) += snd-soc-neo1973-gta02-wm8753.o
25obj-$(CONFIG_SND_S3C24XX_SOC_SMDK2443_WM9710) += snd-soc-smdk2443-wm9710.o 30obj-$(CONFIG_SND_S3C24XX_SOC_SMDK2443_WM9710) += snd-soc-smdk2443-wm9710.o
26obj-$(CONFIG_SND_S3C24XX_SOC_LN2440SBC_ALC650) += snd-soc-ln2440sbc-alc650.o 31obj-$(CONFIG_SND_S3C24XX_SOC_LN2440SBC_ALC650) += snd-soc-ln2440sbc-alc650.o
27obj-$(CONFIG_SND_S3C24XX_SOC_S3C24XX_UDA134X) += snd-soc-s3c24xx-uda134x.o 32obj-$(CONFIG_SND_S3C24XX_SOC_S3C24XX_UDA134X) += snd-soc-s3c24xx-uda134x.o
33obj-$(CONFIG_SND_S3C24XX_SOC_SIMTEC) += snd-soc-s3c24xx-simtec.o
34obj-$(CONFIG_SND_S3C24XX_SOC_SIMTEC_HERMES) += snd-soc-s3c24xx-simtec-hermes.o
35obj-$(CONFIG_SND_S3C24XX_SOC_SIMTEC_TLV320AIC23) += snd-soc-s3c24xx-simtec-tlv320aic23.o
36
diff --git a/sound/soc/s3c24xx/neo1973_gta02_wm8753.c b/sound/soc/s3c24xx/neo1973_gta02_wm8753.c
new file mode 100644
index 000000000000..0c52e36ddd87
--- /dev/null
+++ b/sound/soc/s3c24xx/neo1973_gta02_wm8753.c
@@ -0,0 +1,498 @@
1/*
 2 * neo1973_gta02_wm8753.c -- SoC audio for Openmoko FreeRunner (GTA02)
3 *
4 * Copyright 2007 Openmoko Inc
5 * Author: Graeme Gregory <graeme@openmoko.org>
6 * Copyright 2007 Wolfson Microelectronics PLC.
7 * Author: Graeme Gregory <linux@wolfsonmicro.com>
8 * Copyright 2009 Wolfson Microelectronics
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 */
15
16#include <linux/module.h>
17#include <linux/moduleparam.h>
18#include <linux/timer.h>
19#include <linux/interrupt.h>
20#include <linux/platform_device.h>
21#include <linux/gpio.h>
22#include <sound/core.h>
23#include <sound/pcm.h>
24#include <sound/soc.h>
25#include <sound/soc-dapm.h>
26
27#include <asm/mach-types.h>
28
29#include <plat/regs-iis.h>
30
31#include <mach/regs-clock.h>
32#include <asm/io.h>
33#include <mach/gta02.h>
34#include "../codecs/wm8753.h"
35#include "s3c24xx-pcm.h"
36#include "s3c24xx-i2s.h"
37
38static struct snd_soc_card neo1973_gta02;
39
40static int neo1973_gta02_hifi_hw_params(struct snd_pcm_substream *substream,
41 struct snd_pcm_hw_params *params)
42{
43 struct snd_soc_pcm_runtime *rtd = substream->private_data;
44 struct snd_soc_dai *codec_dai = rtd->dai->codec_dai;
45 struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
46 unsigned int pll_out = 0, bclk = 0;
47 int ret = 0;
48 unsigned long iis_clkrate;
49
50 iis_clkrate = s3c24xx_i2s_get_clockrate();
51
52 switch (params_rate(params)) {
53 case 8000:
54 case 16000:
55 pll_out = 12288000;
56 break;
57 case 48000:
58 bclk = WM8753_BCLK_DIV_4;
59 pll_out = 12288000;
60 break;
61 case 96000:
62 bclk = WM8753_BCLK_DIV_2;
63 pll_out = 12288000;
64 break;
65 case 11025:
66 bclk = WM8753_BCLK_DIV_16;
67 pll_out = 11289600;
68 break;
69 case 22050:
70 bclk = WM8753_BCLK_DIV_8;
71 pll_out = 11289600;
72 break;
73 case 44100:
74 bclk = WM8753_BCLK_DIV_4;
75 pll_out = 11289600;
76 break;
77 case 88200:
78 bclk = WM8753_BCLK_DIV_2;
79 pll_out = 11289600;
80 break;
81 }
82
83 /* set codec DAI configuration */
84 ret = snd_soc_dai_set_fmt(codec_dai,
85 SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
86 SND_SOC_DAIFMT_CBM_CFM);
87 if (ret < 0)
88 return ret;
89
90 /* set cpu DAI configuration */
91 ret = snd_soc_dai_set_fmt(cpu_dai,
92 SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
93 SND_SOC_DAIFMT_CBM_CFM);
94 if (ret < 0)
95 return ret;
96
97 /* set the codec system clock for DAC and ADC */
98 ret = snd_soc_dai_set_sysclk(codec_dai, WM8753_MCLK, pll_out,
99 SND_SOC_CLOCK_IN);
100 if (ret < 0)
101 return ret;
102
103 /* set MCLK division for sample rate */
104 ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_MCLK,
105 S3C2410_IISMOD_32FS);
106 if (ret < 0)
107 return ret;
108
109 /* set codec BCLK division for sample rate */
110 ret = snd_soc_dai_set_clkdiv(codec_dai,
111 WM8753_BCLKDIV, bclk);
112 if (ret < 0)
113 return ret;
114
115 /* set prescaler division for sample rate */
116 ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_PRESCALER,
117 S3C24XX_PRESCALE(4, 4));
118 if (ret < 0)
119 return ret;
120
121 /* codec PLL input is PCLK/4 */
122 ret = snd_soc_dai_set_pll(codec_dai, WM8753_PLL1,
123 iis_clkrate / 4, pll_out);
124 if (ret < 0)
125 return ret;
126
127 return 0;
128}
129
130static int neo1973_gta02_hifi_hw_free(struct snd_pcm_substream *substream)
131{
132 struct snd_soc_pcm_runtime *rtd = substream->private_data;
133 struct snd_soc_dai *codec_dai = rtd->dai->codec_dai;
134
135 /* disable the PLL */
136 return snd_soc_dai_set_pll(codec_dai, WM8753_PLL1, 0, 0);
137}
138
139/*
 140 * Neo1973 WM8753 HiFi DAI operations.
141 */
142static struct snd_soc_ops neo1973_gta02_hifi_ops = {
143 .hw_params = neo1973_gta02_hifi_hw_params,
144 .hw_free = neo1973_gta02_hifi_hw_free,
145};
146
147static int neo1973_gta02_voice_hw_params(
148 struct snd_pcm_substream *substream,
149 struct snd_pcm_hw_params *params)
150{
151 struct snd_soc_pcm_runtime *rtd = substream->private_data;
152 struct snd_soc_dai *codec_dai = rtd->dai->codec_dai;
153 unsigned int pcmdiv = 0;
154 int ret = 0;
155 unsigned long iis_clkrate;
156
157 iis_clkrate = s3c24xx_i2s_get_clockrate();
158
159 if (params_rate(params) != 8000)
160 return -EINVAL;
161 if (params_channels(params) != 1)
162 return -EINVAL;
163
164 pcmdiv = WM8753_PCM_DIV_6; /* 2.048 MHz */
165
166 /* todo: gg check mode (DSP_B) against CSR datasheet */
167 /* set codec DAI configuration */
168 ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_DSP_B |
169 SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS);
170 if (ret < 0)
171 return ret;
172
173 /* set the codec system clock for DAC and ADC */
174 ret = snd_soc_dai_set_sysclk(codec_dai, WM8753_PCMCLK,
175 12288000, SND_SOC_CLOCK_IN);
176 if (ret < 0)
177 return ret;
178
179 /* set codec PCM division for sample rate */
180 ret = snd_soc_dai_set_clkdiv(codec_dai, WM8753_PCMDIV,
181 pcmdiv);
182 if (ret < 0)
183 return ret;
184
 185 /* configure and enable PLL for 12.288 MHz output */
186 ret = snd_soc_dai_set_pll(codec_dai, WM8753_PLL2,
187 iis_clkrate / 4, 12288000);
188 if (ret < 0)
189 return ret;
190
191 return 0;
192}
193
194static int neo1973_gta02_voice_hw_free(struct snd_pcm_substream *substream)
195{
196 struct snd_soc_pcm_runtime *rtd = substream->private_data;
197 struct snd_soc_dai *codec_dai = rtd->dai->codec_dai;
198
199 /* disable the PLL */
200 return snd_soc_dai_set_pll(codec_dai, WM8753_PLL2, 0, 0);
201}
202
203static struct snd_soc_ops neo1973_gta02_voice_ops = {
204 .hw_params = neo1973_gta02_voice_hw_params,
205 .hw_free = neo1973_gta02_voice_hw_free,
206};
207
208#define LM4853_AMP 1
209#define LM4853_SPK 2
210
211static u8 lm4853_state;
212
 213/* This has no effect; it exists only to maintain compatibility with
214 * existing ALSA state files.
215 */
216static int lm4853_set_state(struct snd_kcontrol *kcontrol,
217 struct snd_ctl_elem_value *ucontrol)
218{
219 int val = ucontrol->value.integer.value[0];
220
221 if (val)
222 lm4853_state |= LM4853_AMP;
223 else
224 lm4853_state &= ~LM4853_AMP;
225
226 return 0;
227}
228
229static int lm4853_get_state(struct snd_kcontrol *kcontrol,
230 struct snd_ctl_elem_value *ucontrol)
231{
232 ucontrol->value.integer.value[0] = lm4853_state & LM4853_AMP;
233
234 return 0;
235}
236
237static int lm4853_set_spk(struct snd_kcontrol *kcontrol,
238 struct snd_ctl_elem_value *ucontrol)
239{
240 int val = ucontrol->value.integer.value[0];
241
242 if (val) {
243 lm4853_state |= LM4853_SPK;
244 gpio_set_value(GTA02_GPIO_HP_IN, 0);
245 } else {
246 lm4853_state &= ~LM4853_SPK;
247 gpio_set_value(GTA02_GPIO_HP_IN, 1);
248 }
249
250 return 0;
251}
252
253static int lm4853_get_spk(struct snd_kcontrol *kcontrol,
254 struct snd_ctl_elem_value *ucontrol)
255{
256 ucontrol->value.integer.value[0] = (lm4853_state & LM4853_SPK) >> 1;
257
258 return 0;
259}
260
261static int lm4853_event(struct snd_soc_dapm_widget *w,
262 struct snd_kcontrol *k,
263 int event)
264{
 265 gpio_set_value(GTA02_GPIO_AMP_SHUT, SND_SOC_DAPM_EVENT_OFF(event));
266
267 return 0;
268}
269
270static const struct snd_soc_dapm_widget wm8753_dapm_widgets[] = {
271 SND_SOC_DAPM_SPK("Stereo Out", lm4853_event),
272 SND_SOC_DAPM_LINE("GSM Line Out", NULL),
273 SND_SOC_DAPM_LINE("GSM Line In", NULL),
274 SND_SOC_DAPM_MIC("Headset Mic", NULL),
275 SND_SOC_DAPM_MIC("Handset Mic", NULL),
276 SND_SOC_DAPM_SPK("Handset Spk", NULL),
277};
278
279
 280/* example machine audio map connections */
281static const struct snd_soc_dapm_route audio_map[] = {
282
283 /* Connections to the lm4853 amp */
284 {"Stereo Out", NULL, "LOUT1"},
285 {"Stereo Out", NULL, "ROUT1"},
286
287 /* Connections to the GSM Module */
288 {"GSM Line Out", NULL, "MONO1"},
289 {"GSM Line Out", NULL, "MONO2"},
290 {"RXP", NULL, "GSM Line In"},
291 {"RXN", NULL, "GSM Line In"},
292
293 /* Connections to Headset */
294 {"MIC1", NULL, "Mic Bias"},
295 {"Mic Bias", NULL, "Headset Mic"},
296
297 /* Call Mic */
298 {"MIC2", NULL, "Mic Bias"},
299 {"MIC2N", NULL, "Mic Bias"},
300 {"Mic Bias", NULL, "Handset Mic"},
301
302 /* Call Speaker */
303 {"Handset Spk", NULL, "LOUT2"},
304 {"Handset Spk", NULL, "ROUT2"},
305
306 /* Connect the ALC pins */
307 {"ACIN", NULL, "ACOP"},
308};
309
310static const struct snd_kcontrol_new wm8753_neo1973_gta02_controls[] = {
311 SOC_DAPM_PIN_SWITCH("Stereo Out"),
312 SOC_DAPM_PIN_SWITCH("GSM Line Out"),
313 SOC_DAPM_PIN_SWITCH("GSM Line In"),
314 SOC_DAPM_PIN_SWITCH("Headset Mic"),
315 SOC_DAPM_PIN_SWITCH("Handset Mic"),
316 SOC_DAPM_PIN_SWITCH("Handset Spk"),
317
 318 /* This has no effect; it exists only to maintain compatibility with
319 * existing ALSA state files.
320 */
321 SOC_SINGLE_EXT("Amp State Switch", 6, 0, 1, 0,
322 lm4853_get_state,
323 lm4853_set_state),
324 SOC_SINGLE_EXT("Amp Spk Switch", 7, 0, 1, 0,
325 lm4853_get_spk,
326 lm4853_set_spk),
327};
328
329/*
330 * This is an example machine initialisation for a wm8753 connected to a
331 * neo1973 GTA02.
332 */
333static int neo1973_gta02_wm8753_init(struct snd_soc_codec *codec)
334{
335 int err;
336
337 /* set up NC codec pins */
338 snd_soc_dapm_nc_pin(codec, "OUT3");
339 snd_soc_dapm_nc_pin(codec, "OUT4");
340 snd_soc_dapm_nc_pin(codec, "LINE1");
341 snd_soc_dapm_nc_pin(codec, "LINE2");
342
343 /* Add neo1973 gta02 specific widgets */
344 snd_soc_dapm_new_controls(codec, wm8753_dapm_widgets,
345 ARRAY_SIZE(wm8753_dapm_widgets));
346
347 /* add neo1973 gta02 specific controls */
348 err = snd_soc_add_controls(codec, wm8753_neo1973_gta02_controls,
349 ARRAY_SIZE(wm8753_neo1973_gta02_controls));
350
351 if (err < 0)
352 return err;
353
354 /* set up neo1973 gta02 specific audio path audio_map */
355 snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
356
357 /* set endpoints to default off mode */
358 snd_soc_dapm_disable_pin(codec, "Stereo Out");
359 snd_soc_dapm_disable_pin(codec, "GSM Line Out");
360 snd_soc_dapm_disable_pin(codec, "GSM Line In");
361 snd_soc_dapm_disable_pin(codec, "Headset Mic");
362 snd_soc_dapm_disable_pin(codec, "Handset Mic");
363 snd_soc_dapm_disable_pin(codec, "Handset Spk");
364
365 snd_soc_dapm_sync(codec);
366
367 return 0;
368}
369
370/*
371 * BT Codec DAI
372 */
373static struct snd_soc_dai bt_dai = {
374 .name = "Bluetooth",
375 .id = 0,
376 .playback = {
377 .channels_min = 1,
378 .channels_max = 1,
379 .rates = SNDRV_PCM_RATE_8000,
380 .formats = SNDRV_PCM_FMTBIT_S16_LE,},
381 .capture = {
382 .channels_min = 1,
383 .channels_max = 1,
384 .rates = SNDRV_PCM_RATE_8000,
385 .formats = SNDRV_PCM_FMTBIT_S16_LE,},
386};
387
388static struct snd_soc_dai_link neo1973_gta02_dai[] = {
 389{ /* HiFi Playback - for simultaneous use with voice below */
390 .name = "WM8753",
391 .stream_name = "WM8753 HiFi",
392 .cpu_dai = &s3c24xx_i2s_dai,
393 .codec_dai = &wm8753_dai[WM8753_DAI_HIFI],
394 .init = neo1973_gta02_wm8753_init,
395 .ops = &neo1973_gta02_hifi_ops,
396},
397{ /* Voice via BT */
398 .name = "Bluetooth",
399 .stream_name = "Voice",
400 .cpu_dai = &bt_dai,
401 .codec_dai = &wm8753_dai[WM8753_DAI_VOICE],
402 .ops = &neo1973_gta02_voice_ops,
403},
404};
405
406static struct snd_soc_card neo1973_gta02 = {
407 .name = "neo1973-gta02",
408 .platform = &s3c24xx_soc_platform,
409 .dai_link = neo1973_gta02_dai,
410 .num_links = ARRAY_SIZE(neo1973_gta02_dai),
411};
412
413static struct snd_soc_device neo1973_gta02_snd_devdata = {
414 .card = &neo1973_gta02,
415 .codec_dev = &soc_codec_dev_wm8753,
416};
417
418static struct platform_device *neo1973_gta02_snd_device;
419
420static int __init neo1973_gta02_init(void)
421{
422 int ret;
423
424 if (!machine_is_neo1973_gta02()) {
425 printk(KERN_INFO
426 "Only GTA02 is supported by this ASoC driver\n");
427 return -ENODEV;
428 }
429
430 /* register bluetooth DAI here */
431 ret = snd_soc_register_dai(&bt_dai);
432 if (ret)
433 return ret;
434
435 neo1973_gta02_snd_device = platform_device_alloc("soc-audio", -1);
436 if (!neo1973_gta02_snd_device)
437 return -ENOMEM;
438
439 platform_set_drvdata(neo1973_gta02_snd_device,
440 &neo1973_gta02_snd_devdata);
441 neo1973_gta02_snd_devdata.dev = &neo1973_gta02_snd_device->dev;
442 ret = platform_device_add(neo1973_gta02_snd_device);
443
444 if (ret) {
445 platform_device_put(neo1973_gta02_snd_device);
446 return ret;
447 }
448
449 /* Initialise GPIOs used by amp */
450 ret = gpio_request(GTA02_GPIO_HP_IN, "GTA02_HP_IN");
451 if (ret) {
452 pr_err("gta02_wm8753: Failed to register GPIO %d\n", GTA02_GPIO_HP_IN);
453 goto err_unregister_device;
454 }
455
 456 ret = gpio_direction_output(GTA02_GPIO_HP_IN, 1);
457 if (ret) {
458 pr_err("gta02_wm8753: Failed to configure GPIO %d\n", GTA02_GPIO_HP_IN);
459 goto err_free_gpio_hp_in;
460 }
461
462 ret = gpio_request(GTA02_GPIO_AMP_SHUT, "GTA02_AMP_SHUT");
463 if (ret) {
464 pr_err("gta02_wm8753: Failed to register GPIO %d\n", GTA02_GPIO_AMP_SHUT);
465 goto err_free_gpio_hp_in;
466 }
467
468 ret = gpio_direction_output(GTA02_GPIO_AMP_SHUT, 1);
469 if (ret) {
470 pr_err("gta02_wm8753: Failed to configure GPIO %d\n", GTA02_GPIO_AMP_SHUT);
471 goto err_free_gpio_amp_shut;
472 }
473
474 return 0;
475
476err_free_gpio_amp_shut:
477 gpio_free(GTA02_GPIO_AMP_SHUT);
478err_free_gpio_hp_in:
479 gpio_free(GTA02_GPIO_HP_IN);
480err_unregister_device:
481 platform_device_unregister(neo1973_gta02_snd_device);
482 return ret;
483}
484module_init(neo1973_gta02_init);
485
486static void __exit neo1973_gta02_exit(void)
487{
488 snd_soc_unregister_dai(&bt_dai);
489 platform_device_unregister(neo1973_gta02_snd_device);
490 gpio_free(GTA02_GPIO_HP_IN);
491 gpio_free(GTA02_GPIO_AMP_SHUT);
492}
493module_exit(neo1973_gta02_exit);
494
495/* Module information */
496MODULE_AUTHOR("Graeme Gregory, graeme@openmoko.org");
497MODULE_DESCRIPTION("ALSA SoC WM8753 Neo1973 GTA02");
498MODULE_LICENSE("GPL");
diff --git a/sound/soc/s3c24xx/s3c-i2s-v2.c b/sound/soc/s3c24xx/s3c-i2s-v2.c
index 1a283170ca92..aa7af0b8d421 100644
--- a/sound/soc/s3c24xx/s3c-i2s-v2.c
+++ b/sound/soc/s3c24xx/s3c-i2s-v2.c
@@ -36,6 +36,7 @@
36#include <mach/dma.h> 36#include <mach/dma.h>
37 37
38#include "s3c-i2s-v2.h" 38#include "s3c-i2s-v2.h"
39#include "s3c24xx-pcm.h"
39 40
40#undef S3C_IIS_V2_SUPPORTED 41#undef S3C_IIS_V2_SUPPORTED
41 42
@@ -357,19 +358,19 @@ static int s3c2412_i2s_hw_params(struct snd_pcm_substream *substream,
357#endif 358#endif
358 359
359#ifdef CONFIG_PLAT_S3C64XX 360#ifdef CONFIG_PLAT_S3C64XX
360 iismod &= ~0x606; 361 iismod &= ~(S3C64XX_IISMOD_BLC_MASK | S3C2412_IISMOD_BCLK_MASK);
361 /* Sample size */ 362 /* Sample size */
362 switch (params_format(params)) { 363 switch (params_format(params)) {
363 case SNDRV_PCM_FORMAT_S8: 364 case SNDRV_PCM_FORMAT_S8:
364 /* 8 bit sample, 16fs BCLK */ 365 /* 8 bit sample, 16fs BCLK */
365 iismod |= 0x2004; 366 iismod |= (S3C64XX_IISMOD_BLC_8BIT | S3C2412_IISMOD_BCLK_16FS);
366 break; 367 break;
367 case SNDRV_PCM_FORMAT_S16_LE: 368 case SNDRV_PCM_FORMAT_S16_LE:
368 /* 16 bit sample, 32fs BCLK */ 369 /* 16 bit sample, 32fs BCLK */
369 break; 370 break;
370 case SNDRV_PCM_FORMAT_S24_LE: 371 case SNDRV_PCM_FORMAT_S24_LE:
371 /* 24 bit sample, 48fs BCLK */ 372 /* 24 bit sample, 48fs BCLK */
372 iismod |= 0x4002; 373 iismod |= (S3C64XX_IISMOD_BLC_24BIT | S3C2412_IISMOD_BCLK_48FS);
373 break; 374 break;
374 } 375 }
375#endif 376#endif
@@ -387,6 +388,8 @@ static int s3c2412_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
387 int capture = (substream->stream == SNDRV_PCM_STREAM_CAPTURE); 388 int capture = (substream->stream == SNDRV_PCM_STREAM_CAPTURE);
388 unsigned long irqs; 389 unsigned long irqs;
389 int ret = 0; 390 int ret = 0;
391 int channel = ((struct s3c24xx_pcm_dma_params *)
392 rtd->dai->cpu_dai->dma_data)->channel;
390 393
391 pr_debug("Entered %s\n", __func__); 394 pr_debug("Entered %s\n", __func__);
392 395
@@ -416,6 +419,14 @@ static int s3c2412_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
416 s3c2412_snd_txctrl(i2s, 1); 419 s3c2412_snd_txctrl(i2s, 1);
417 420
418 local_irq_restore(irqs); 421 local_irq_restore(irqs);
422
423 /*
 424 * Load the next buffer to DMA to meet the requirement
425 * of the auto reload mechanism of S3C24XX.
426 * This call won't bother S3C64XX.
427 */
428 s3c2410_dma_ctrl(channel, S3C2410_DMAOP_STARTED);
429
419 break; 430 break;
420 431
421 case SNDRV_PCM_TRIGGER_STOP: 432 case SNDRV_PCM_TRIGGER_STOP:
diff --git a/sound/soc/s3c24xx/s3c2443-ac97.c b/sound/soc/s3c24xx/s3c2443-ac97.c
index 3f03d5ddfacd..fc1beb0930b9 100644
--- a/sound/soc/s3c24xx/s3c2443-ac97.c
+++ b/sound/soc/s3c24xx/s3c2443-ac97.c
@@ -47,7 +47,7 @@ static struct s3c24xx_ac97_info s3c24xx_ac97;
47 47
48static DECLARE_COMPLETION(ac97_completion); 48static DECLARE_COMPLETION(ac97_completion);
49static u32 codec_ready; 49static u32 codec_ready;
50static DECLARE_MUTEX(ac97_mutex); 50static DEFINE_MUTEX(ac97_mutex);
51 51
52static unsigned short s3c2443_ac97_read(struct snd_ac97 *ac97, 52static unsigned short s3c2443_ac97_read(struct snd_ac97 *ac97,
53 unsigned short reg) 53 unsigned short reg)
@@ -56,7 +56,7 @@ static unsigned short s3c2443_ac97_read(struct snd_ac97 *ac97,
56 u32 ac_codec_cmd; 56 u32 ac_codec_cmd;
57 u32 stat, addr, data; 57 u32 stat, addr, data;
58 58
59 down(&ac97_mutex); 59 mutex_lock(&ac97_mutex);
60 60
61 codec_ready = S3C_AC97_GLBSTAT_CODECREADY; 61 codec_ready = S3C_AC97_GLBSTAT_CODECREADY;
62 ac_codec_cmd = readl(s3c24xx_ac97.regs + S3C_AC97_CODEC_CMD); 62 ac_codec_cmd = readl(s3c24xx_ac97.regs + S3C_AC97_CODEC_CMD);
@@ -79,7 +79,7 @@ static unsigned short s3c2443_ac97_read(struct snd_ac97 *ac97,
79 printk(KERN_ERR "s3c24xx-ac97: req addr = %02x," 79 printk(KERN_ERR "s3c24xx-ac97: req addr = %02x,"
80 " rep addr = %02x\n", reg, addr); 80 " rep addr = %02x\n", reg, addr);
81 81
82 up(&ac97_mutex); 82 mutex_unlock(&ac97_mutex);
83 83
84 return (unsigned short)data; 84 return (unsigned short)data;
85} 85}
@@ -90,7 +90,7 @@ static void s3c2443_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
90 u32 ac_glbctrl; 90 u32 ac_glbctrl;
91 u32 ac_codec_cmd; 91 u32 ac_codec_cmd;
92 92
93 down(&ac97_mutex); 93 mutex_lock(&ac97_mutex);
94 94
95 codec_ready = S3C_AC97_GLBSTAT_CODECREADY; 95 codec_ready = S3C_AC97_GLBSTAT_CODECREADY;
96 ac_codec_cmd = readl(s3c24xx_ac97.regs + S3C_AC97_CODEC_CMD); 96 ac_codec_cmd = readl(s3c24xx_ac97.regs + S3C_AC97_CODEC_CMD);
@@ -109,7 +109,7 @@ static void s3c2443_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
109 ac_codec_cmd |= S3C_AC97_CODEC_CMD_READ; 109 ac_codec_cmd |= S3C_AC97_CODEC_CMD_READ;
110 writel(ac_codec_cmd, s3c24xx_ac97.regs + S3C_AC97_CODEC_CMD); 110 writel(ac_codec_cmd, s3c24xx_ac97.regs + S3C_AC97_CODEC_CMD);
111 111
112 up(&ac97_mutex); 112 mutex_unlock(&ac97_mutex);
113 113
114} 114}
115 115
@@ -290,6 +290,9 @@ static int s3c2443_ac97_trigger(struct snd_pcm_substream *substream, int cmd,
290 struct snd_soc_dai *dai) 290 struct snd_soc_dai *dai)
291{ 291{
292 u32 ac_glbctrl; 292 u32 ac_glbctrl;
293 struct snd_soc_pcm_runtime *rtd = substream->private_data;
294 int channel = ((struct s3c24xx_pcm_dma_params *)
295 rtd->dai->cpu_dai->dma_data)->channel;
293 296
294 ac_glbctrl = readl(s3c24xx_ac97.regs + S3C_AC97_GLBCTRL); 297 ac_glbctrl = readl(s3c24xx_ac97.regs + S3C_AC97_GLBCTRL);
295 switch (cmd) { 298 switch (cmd) {
@@ -312,6 +315,8 @@ static int s3c2443_ac97_trigger(struct snd_pcm_substream *substream, int cmd,
312 } 315 }
313 writel(ac_glbctrl, s3c24xx_ac97.regs + S3C_AC97_GLBCTRL); 316 writel(ac_glbctrl, s3c24xx_ac97.regs + S3C_AC97_GLBCTRL);
314 317
318 s3c2410_dma_ctrl(channel, S3C2410_DMAOP_STARTED);
319
315 return 0; 320 return 0;
316} 321}
317 322
@@ -334,6 +339,9 @@ static int s3c2443_ac97_mic_trigger(struct snd_pcm_substream *substream,
334 int cmd, struct snd_soc_dai *dai) 339 int cmd, struct snd_soc_dai *dai)
335{ 340{
336 u32 ac_glbctrl; 341 u32 ac_glbctrl;
342 struct snd_soc_pcm_runtime *rtd = substream->private_data;
343 int channel = ((struct s3c24xx_pcm_dma_params *)
344 rtd->dai->cpu_dai->dma_data)->channel;
337 345
338 ac_glbctrl = readl(s3c24xx_ac97.regs + S3C_AC97_GLBCTRL); 346 ac_glbctrl = readl(s3c24xx_ac97.regs + S3C_AC97_GLBCTRL);
339 switch (cmd) { 347 switch (cmd) {
@@ -349,6 +357,8 @@ static int s3c2443_ac97_mic_trigger(struct snd_pcm_substream *substream,
349 } 357 }
350 writel(ac_glbctrl, s3c24xx_ac97.regs + S3C_AC97_GLBCTRL); 358 writel(ac_glbctrl, s3c24xx_ac97.regs + S3C_AC97_GLBCTRL);
351 359
360 s3c2410_dma_ctrl(channel, S3C2410_DMAOP_STARTED);
361
352 return 0; 362 return 0;
353} 363}
354 364
diff --git a/sound/soc/s3c24xx/s3c24xx-i2s.c b/sound/soc/s3c24xx/s3c24xx-i2s.c
index 556e35f0ab73..40e2c4790f0d 100644
--- a/sound/soc/s3c24xx/s3c24xx-i2s.c
+++ b/sound/soc/s3c24xx/s3c24xx-i2s.c
@@ -279,6 +279,9 @@ static int s3c24xx_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
279 struct snd_soc_dai *dai) 279 struct snd_soc_dai *dai)
280{ 280{
281 int ret = 0; 281 int ret = 0;
282 struct snd_soc_pcm_runtime *rtd = substream->private_data;
283 int channel = ((struct s3c24xx_pcm_dma_params *)
284 rtd->dai->cpu_dai->dma_data)->channel;
282 285
283 pr_debug("Entered %s\n", __func__); 286 pr_debug("Entered %s\n", __func__);
284 287
@@ -296,6 +299,8 @@ static int s3c24xx_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
296 s3c24xx_snd_rxctrl(1); 299 s3c24xx_snd_rxctrl(1);
297 else 300 else
298 s3c24xx_snd_txctrl(1); 301 s3c24xx_snd_txctrl(1);
302
303 s3c2410_dma_ctrl(channel, S3C2410_DMAOP_STARTED);
299 break; 304 break;
300 case SNDRV_PCM_TRIGGER_STOP: 305 case SNDRV_PCM_TRIGGER_STOP:
301 case SNDRV_PCM_TRIGGER_SUSPEND: 306 case SNDRV_PCM_TRIGGER_SUSPEND:
diff --git a/sound/soc/s3c24xx/s3c24xx-pcm.c b/sound/soc/s3c24xx/s3c24xx-pcm.c
index eecfa5eba06b..5cbbdc80fde3 100644
--- a/sound/soc/s3c24xx/s3c24xx-pcm.c
+++ b/sound/soc/s3c24xx/s3c24xx-pcm.c
@@ -255,7 +255,6 @@ static int s3c24xx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
255 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: 255 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
256 prtd->state |= ST_RUNNING; 256 prtd->state |= ST_RUNNING;
257 s3c2410_dma_ctrl(prtd->params->channel, S3C2410_DMAOP_START); 257 s3c2410_dma_ctrl(prtd->params->channel, S3C2410_DMAOP_START);
258 s3c2410_dma_ctrl(prtd->params->channel, S3C2410_DMAOP_STARTED);
259 break; 258 break;
260 259
261 case SNDRV_PCM_TRIGGER_STOP: 260 case SNDRV_PCM_TRIGGER_STOP:
@@ -318,6 +317,7 @@ static int s3c24xx_pcm_open(struct snd_pcm_substream *substream)
318 317
319 pr_debug("Entered %s\n", __func__); 318 pr_debug("Entered %s\n", __func__);
320 319
320 snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
321 snd_soc_set_runtime_hwparams(substream, &s3c24xx_pcm_hardware); 321 snd_soc_set_runtime_hwparams(substream, &s3c24xx_pcm_hardware);
322 322
323 prtd = kzalloc(sizeof(struct s3c24xx_runtime_data), GFP_KERNEL); 323 prtd = kzalloc(sizeof(struct s3c24xx_runtime_data), GFP_KERNEL);
diff --git a/sound/soc/s3c24xx/s3c24xx_simtec.c b/sound/soc/s3c24xx/s3c24xx_simtec.c
new file mode 100644
index 000000000000..1966e0d5652d
--- /dev/null
+++ b/sound/soc/s3c24xx/s3c24xx_simtec.c
@@ -0,0 +1,394 @@
1/* sound/soc/s3c24xx/s3c24xx_simtec.c
2 *
3 * Copyright 2009 Simtec Electronics
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8*/
9
10#include <linux/module.h>
11#include <linux/moduleparam.h>
12#include <linux/platform_device.h>
13#include <linux/gpio.h>
14#include <linux/clk.h>
15#include <linux/i2c.h>
16
17#include <sound/core.h>
18#include <sound/pcm.h>
19#include <sound/soc.h>
20#include <sound/soc-dapm.h>
21
22#include <plat/audio-simtec.h>
23
24#include "s3c24xx-pcm.h"
25#include "s3c24xx-i2s.h"
26#include "s3c24xx_simtec.h"
27
28static struct s3c24xx_audio_simtec_pdata *pdata;
29static struct clk *xtal_clk;
30
31static int spk_gain;
32static int spk_unmute;
33
34/**
35 * speaker_gain_get - read the speaker gain setting.
36 * @kcontrol: The control for the speaker gain.
37 * @ucontrol: The value that needs to be updated.
38 *
39 * Read the value for the AMP gain control.
40 */
41static int speaker_gain_get(struct snd_kcontrol *kcontrol,
42 struct snd_ctl_elem_value *ucontrol)
43{
44 ucontrol->value.integer.value[0] = spk_gain;
45 return 0;
46}
47
48/**
49 * speaker_gain_set - set the value of the speaker amp gain
50 * @value: The value to write.
51 */
52static void speaker_gain_set(int value)
53{
54 gpio_set_value_cansleep(pdata->amp_gain[0], value & 1);
55 gpio_set_value_cansleep(pdata->amp_gain[1], value >> 1);
56}
57
58/**
59 * speaker_gain_put - set the speaker gain setting.
60 * @kcontrol: The control for the speaker gain.
61 * @ucontrol: The value that needs to be set.
62 *
63 * Set the value of the speaker gain from the specified
64 * @ucontrol setting.
65 *
66 * Note, if the speaker amp is muted, then we do not set a gain value
 67 * as at least one of the fitted ICs will try to power up even
68 * if the main control is set to off.
69 */
70static int speaker_gain_put(struct snd_kcontrol *kcontrol,
71 struct snd_ctl_elem_value *ucontrol)
72{
73 int value = ucontrol->value.integer.value[0];
74
75 spk_gain = value;
76
77 if (!spk_unmute)
78 speaker_gain_set(value);
79
80 return 0;
81}
82
83static const struct snd_kcontrol_new amp_gain_controls[] = {
84 SOC_SINGLE_EXT("Speaker Gain", 0, 0, 3, 0,
85 speaker_gain_get, speaker_gain_put),
86};
87
88/**
89 * spk_unmute_state - set the unmute state of the speaker
 90 * @to: zero to mute, non-zero to unmute.
91 */
92static void spk_unmute_state(int to)
93{
94 pr_debug("%s: to=%d\n", __func__, to);
95
96 spk_unmute = to;
97 gpio_set_value(pdata->amp_gpio, to);
98
 99 /* if we're unmuting, also re-set the gain */
100 if (to && pdata->amp_gain[0] > 0)
101 speaker_gain_set(spk_gain);
102}
103
104/**
105 * speaker_unmute_get - read the speaker unmute setting.
106 * @kcontrol: The control for the speaker gain.
107 * @ucontrol: The value that needs to be updated.
108 *
109 * Read the value for the AMP gain control.
110 */
111static int speaker_unmute_get(struct snd_kcontrol *kcontrol,
112 struct snd_ctl_elem_value *ucontrol)
113{
114 ucontrol->value.integer.value[0] = spk_unmute;
115 return 0;
116}
117
118/**
119 * speaker_unmute_put - set the speaker unmute setting.
 120 * @kcontrol: The control for the speaker unmute switch.
121 * @ucontrol: The value that needs to be set.
122 *
123 * Set the speaker unmute state from the specified
124 * @ucontrol setting.
125 */
126static int speaker_unmute_put(struct snd_kcontrol *kcontrol,
127 struct snd_ctl_elem_value *ucontrol)
128{
129 spk_unmute_state(ucontrol->value.integer.value[0]);
130 return 0;
131}
132
133/* This is added as a manual control as the speaker amps create clicks
134 * when their power state is changed, which are far more noticeable than
135 * anything produced by the CODEC itself.
136 */
137static const struct snd_kcontrol_new amp_unmute_controls[] = {
138 SOC_SINGLE_EXT("Speaker Switch", 0, 0, 1, 0,
139 speaker_unmute_get, speaker_unmute_put),
140};
141
142void simtec_audio_init(struct snd_soc_codec *codec)
143{
144 if (pdata->amp_gpio > 0) {
145 pr_debug("%s: adding amp routes\n", __func__);
146
147 snd_soc_add_controls(codec, amp_unmute_controls,
148 ARRAY_SIZE(amp_unmute_controls));
149 }
150
151 if (pdata->amp_gain[0] > 0) {
152 pr_debug("%s: adding amp controls\n", __func__);
153 snd_soc_add_controls(codec, amp_gain_controls,
154 ARRAY_SIZE(amp_gain_controls));
155 }
156}
157EXPORT_SYMBOL_GPL(simtec_audio_init);
158
159#define CODEC_CLOCK 12000000
160
161/**
162 * simtec_hw_params - update hardware parameters
163 * @substream: The audio substream instance.
164 * @params: The parameters requested.
165 *
166 * Update the codec data routing and configuration settings
167 * from the supplied data.
168 */
169static int simtec_hw_params(struct snd_pcm_substream *substream,
170 struct snd_pcm_hw_params *params)
171{
172 struct snd_soc_pcm_runtime *rtd = substream->private_data;
173 struct snd_soc_dai *codec_dai = rtd->dai->codec_dai;
174 struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
175 int ret;
176
177 /* Set the CODEC as the bus clock master, I2S */
178 ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S |
179 SND_SOC_DAIFMT_NB_NF |
180 SND_SOC_DAIFMT_CBM_CFM);
181 if (ret) {
182 pr_err("%s: failed to set cpu dai format\n", __func__);
183 return ret;
184 }
185
186 /* Set the CODEC as the bus clock master */
187 ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
188 SND_SOC_DAIFMT_NB_NF |
189 SND_SOC_DAIFMT_CBM_CFM);
190 if (ret) {
191 pr_err("%s: failed to set codec dai format\n", __func__);
192 return ret;
193 }
194
195 ret = snd_soc_dai_set_sysclk(codec_dai, 0,
196 CODEC_CLOCK, SND_SOC_CLOCK_IN);
197 if (ret) {
198 pr_err("%s: failed to set codec sysclk\n", __func__);
199 return ret;
200 }
201
202 if (pdata->use_mpllin) {
203 ret = snd_soc_dai_set_sysclk(cpu_dai, S3C24XX_CLKSRC_MPLL,
204 0, SND_SOC_CLOCK_OUT);
205
206 if (ret) {
207 pr_err("%s: failed to set MPLLin as clksrc\n",
208 __func__);
209 return ret;
210 }
211 }
212
213 if (pdata->output_cdclk) {
214 int cdclk_scale;
215
216 cdclk_scale = clk_get_rate(xtal_clk) / CODEC_CLOCK;
217 cdclk_scale--;
218
219 ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_PRESCALER,
220 cdclk_scale);
221 }
222
223 return 0;
224}
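/*
 * The CDCLK prescaler programmed above follows from the crystal rate;
 * for example (rates illustrative, not taken from a real board), a
 * 24MHz xtal with the 12MHz CODEC_CLOCK gives
 * 24000000 / 12000000 - 1 = 1, i.e. a divide-by-two prescaler.
 */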
225
226static int simtec_call_startup(struct s3c24xx_audio_simtec_pdata *pd)
227{
228 /* call any board-supplied startup code; this currently only
229 * covers the BAST/VR1000, which have a CPLD in the way of the
230 * LRCLK */
231 if (pd->startup)
232 pd->startup();
233
234 return 0;
235}
236
237static struct snd_soc_ops simtec_snd_ops = {
238 .hw_params = simtec_hw_params,
239};
240
241/**
242 * attach_gpio_amp - get and configure the necessary gpios
243 * @dev: The device we're probing.
244 * @pd: The platform data supplied by the board.
245 *
246 * If there is a GPIO based amplifier attached to the board, claim
247 * the necessary GPIO lines for it, and set default values.
248 */
249static int attach_gpio_amp(struct device *dev,
250 struct s3c24xx_audio_simtec_pdata *pd)
251{
252 int ret;
253
254 /* attach gpio amp gain (if any) */
255 if (pdata->amp_gain[0] > 0) {
256 ret = gpio_request(pd->amp_gain[0], "gpio-amp-gain0");
257 if (ret) {
258 dev_err(dev, "cannot get amp gpio gain0\n");
259 return ret;
260 }
261
262 ret = gpio_request(pd->amp_gain[1], "gpio-amp-gain1");
263 if (ret) {
264 dev_err(dev, "cannot get amp gpio gain1\n");
265 gpio_free(pdata->amp_gain[0]);
266 return ret;
267 }
268
269 gpio_direction_output(pd->amp_gain[0], 0);
270 gpio_direction_output(pd->amp_gain[1], 0);
271 }
272
273 /* note, currently we assume GPA0 isn't a valid amp GPIO */
274 if (pdata->amp_gpio > 0) {
275 ret = gpio_request(pd->amp_gpio, "gpio-amp");
276 if (ret) {
277 dev_err(dev, "cannot get amp gpio %d (%d)\n",
278 pd->amp_gpio, ret);
279 goto err_amp;
280 }
281
282 /* set the amp off at startup */
283 spk_unmute_state(0);
284 }
285
286 return 0;
287
288err_amp:
289 if (pd->amp_gain[0] > 0) {
290 gpio_free(pd->amp_gain[0]);
291 gpio_free(pd->amp_gain[1]);
292 }
293
294 return ret;
295}
296
297static void detach_gpio_amp(struct s3c24xx_audio_simtec_pdata *pd)
298{
299 if (pd->amp_gain[0] > 0) {
300 gpio_free(pd->amp_gain[0]);
301 gpio_free(pd->amp_gain[1]);
302 }
303
304 if (pd->amp_gpio > 0)
305 gpio_free(pd->amp_gpio);
306}
307
308#ifdef CONFIG_PM
309int simtec_audio_resume(struct device *dev)
310{
311 simtec_call_startup(pdata);
312 return 0;
313}
314
315struct dev_pm_ops simtec_audio_pmops = {
316 .resume = simtec_audio_resume,
317};
318EXPORT_SYMBOL_GPL(simtec_audio_pmops);
319#endif
320
321int __devinit simtec_audio_core_probe(struct platform_device *pdev,
322 struct snd_soc_device *socdev)
323{
324 struct platform_device *snd_dev;
325 int ret;
326
327 socdev->card->dai_link->ops = &simtec_snd_ops;
328
329 pdata = pdev->dev.platform_data;
330 if (!pdata) {
331 dev_err(&pdev->dev, "no platform data supplied\n");
332 return -EINVAL;
333 }
334
335 simtec_call_startup(pdata);
336
337 xtal_clk = clk_get(&pdev->dev, "xtal");
338 if (IS_ERR(xtal_clk)) {
339 dev_err(&pdev->dev, "could not get xtal clock\n");
340 return -EINVAL;
341 }
342
343 dev_info(&pdev->dev, "xtal rate is %ld\n", clk_get_rate(xtal_clk));
344
345 ret = attach_gpio_amp(&pdev->dev, pdata);
346 if (ret)
347 goto err_clk;
348
349 snd_dev = platform_device_alloc("soc-audio", -1);
350 if (!snd_dev) {
351 dev_err(&pdev->dev, "failed to alloc soc-audio device\n");
352 ret = -ENOMEM;
353 goto err_gpio;
354 }
355
356 platform_set_drvdata(snd_dev, socdev);
357 socdev->dev = &snd_dev->dev;
358
359 ret = platform_device_add(snd_dev);
360 if (ret) {
361 dev_err(&pdev->dev, "failed to add soc-audio dev\n");
362 goto err_pdev;
363 }
364
365 platform_set_drvdata(pdev, snd_dev);
366 return 0;
367
368err_pdev:
369 platform_device_put(snd_dev);
370
371err_gpio:
372 detach_gpio_amp(pdata);
373
374err_clk:
375 clk_put(xtal_clk);
376 return ret;
377}
378EXPORT_SYMBOL_GPL(simtec_audio_core_probe);
379
380int __devexit simtec_audio_remove(struct platform_device *pdev)
381{
382 struct platform_device *snd_dev = platform_get_drvdata(pdev);
383
384 platform_device_unregister(snd_dev);
385
386 detach_gpio_amp(pdata);
387 clk_put(xtal_clk);
388 return 0;
389}
390EXPORT_SYMBOL_GPL(simtec_audio_remove);
391
392MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
393MODULE_DESCRIPTION("ALSA SoC Simtec Audio common support");
394MODULE_LICENSE("GPL");
diff --git a/sound/soc/s3c24xx/s3c24xx_simtec.h b/sound/soc/s3c24xx/s3c24xx_simtec.h
new file mode 100644
index 000000000000..2714203af161
--- /dev/null
+++ b/sound/soc/s3c24xx/s3c24xx_simtec.h
@@ -0,0 +1,22 @@
1/* sound/soc/s3c24xx/s3c24xx_simtec.h
2 *
3 * Copyright 2009 Simtec Electronics
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8*/
9
10extern void simtec_audio_init(struct snd_soc_codec *codec);
11
12extern int simtec_audio_core_probe(struct platform_device *pdev,
13 struct snd_soc_device *socdev);
14
15extern int simtec_audio_remove(struct platform_device *pdev);
16
17#ifdef CONFIG_PM
18extern struct dev_pm_ops simtec_audio_pmops;
19#define simtec_audio_pm &simtec_audio_pmops
20#else
21#define simtec_audio_pm NULL
22#endif
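A board wanting to use this support is expected to hand a struct s3c24xx_audio_simtec_pdata to the platform device. The fragment below is a minimal sketch of what that might look like; the GPIO numbers and the startup hook are illustrative assumptions, not taken from any real board file:

	static void illustrative_audio_startup(void)
	{
		/* board-specific CPLD/LRCLK setup would go here */
	}

	static struct s3c24xx_audio_simtec_pdata illustrative_audio_pdata = {
		.use_mpllin	= 0,
		.output_cdclk	= 1,
		.amp_gpio	= 42,		/* hypothetical GPIO number */
		.amp_gain	= { 43, 44 },	/* hypothetical GPIO numbers */
		.startup	= illustrative_audio_startup,
	};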
diff --git a/sound/soc/s3c24xx/s3c24xx_simtec_hermes.c b/sound/soc/s3c24xx/s3c24xx_simtec_hermes.c
new file mode 100644
index 000000000000..8346bd96eaf5
--- /dev/null
+++ b/sound/soc/s3c24xx/s3c24xx_simtec_hermes.c
@@ -0,0 +1,153 @@
1/* sound/soc/s3c24xx/s3c24xx_simtec_hermes.c
2 *
3 * Copyright 2009 Simtec Electronics
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8*/
9
10#include <linux/module.h>
11#include <linux/clk.h>
12#include <linux/platform_device.h>
13
14#include <sound/core.h>
15#include <sound/pcm.h>
16#include <sound/soc.h>
17#include <sound/soc-dapm.h>
18
19#include <plat/audio-simtec.h>
20
21#include "s3c24xx-pcm.h"
22#include "s3c24xx-i2s.h"
23#include "s3c24xx_simtec.h"
24
25#include "../codecs/tlv320aic3x.h"
26
27static const struct snd_soc_dapm_widget dapm_widgets[] = {
28 SND_SOC_DAPM_LINE("GSM Out", NULL),
29 SND_SOC_DAPM_LINE("GSM In", NULL),
30 SND_SOC_DAPM_LINE("Line In", NULL),
31 SND_SOC_DAPM_LINE("Line Out", NULL),
32 SND_SOC_DAPM_LINE("ZV", NULL),
33 SND_SOC_DAPM_MIC("Mic Jack", NULL),
34 SND_SOC_DAPM_HP("Headphone Jack", NULL),
35};
36
37static const struct snd_soc_dapm_route base_map[] = {
38 /* Headphone connected to HP{L,R}OUT and HP{L,R}COM */
39
40 { "Headphone Jack", NULL, "HPLOUT" },
41 { "Headphone Jack", NULL, "HPLCOM" },
42 { "Headphone Jack", NULL, "HPROUT" },
43 { "Headphone Jack", NULL, "HPRCOM" },
44
45 /* ZV connected to Line1 */
46
47 { "LINE1L", NULL, "ZV" },
48 { "LINE1R", NULL, "ZV" },
49
50 /* Line In connected to Line2 */
51
52 { "LINE2L", NULL, "Line In" },
53 { "LINE2R", NULL, "Line In" },
54
55 /* Microphone connected to MIC3R and MIC_BIAS */
56
57 { "MIC3L", NULL, "Mic Jack" },
58
59 /* GSM connected to MONO_LOUT and MIC3L (in) */
60
61 { "GSM Out", NULL, "MONO_LOUT" },
62 { "MIC3L", NULL, "GSM In" },
63
64 /* Speaker is connected to LINEOUT{LN,LP,RN,RP}; however, we are
65 * not using the DAPM to power it up and down as it makes
66 * a click when powering up. */
67};
68
69/**
70 * simtec_hermes_init - initialise and add controls
71 * @codec: The codec instance to attach to.
72 *
73 * Attach our controls and configure the necessary codec
74 * mappings for our sound card instance.
75*/
76static int simtec_hermes_init(struct snd_soc_codec *codec)
77{
78 snd_soc_dapm_new_controls(codec, dapm_widgets,
79 ARRAY_SIZE(dapm_widgets));
80
81 snd_soc_dapm_add_routes(codec, base_map, ARRAY_SIZE(base_map));
82
83 snd_soc_dapm_enable_pin(codec, "Headphone Jack");
84 snd_soc_dapm_enable_pin(codec, "Line In");
85 snd_soc_dapm_enable_pin(codec, "Line Out");
86 snd_soc_dapm_enable_pin(codec, "Mic Jack");
87
88 simtec_audio_init(codec);
89 snd_soc_dapm_sync(codec);
90
91 return 0;
92}
93
94static struct aic3x_setup_data codec_setup = {
95};
96
97static struct snd_soc_dai_link simtec_dai_aic33 = {
98 .name = "tlv320aic33",
99 .stream_name = "TLV320AIC33",
100 .cpu_dai = &s3c24xx_i2s_dai,
101 .codec_dai = &aic3x_dai,
102 .init = simtec_hermes_init,
103};
104
105/* simtec audio machine driver */
106static struct snd_soc_card snd_soc_machine_simtec_aic33 = {
107 .name = "Simtec-Hermes",
108 .platform = &s3c24xx_soc_platform,
109 .dai_link = &simtec_dai_aic33,
110 .num_links = 1,
111};
112
113/* simtec audio subsystem */
114static struct snd_soc_device simtec_snd_devdata_aic33 = {
115 .card = &snd_soc_machine_simtec_aic33,
116 .codec_dev = &soc_codec_dev_aic3x,
117 .codec_data = &codec_setup,
118};
119
120static int __devinit simtec_audio_hermes_probe(struct platform_device *pd)
121{
122 dev_info(&pd->dev, "probing....\n");
123 return simtec_audio_core_probe(pd, &simtec_snd_devdata_aic33);
124}
125
126static struct platform_driver simtec_audio_hermes_platdrv = {
127 .driver = {
128 .owner = THIS_MODULE,
129 .name = "s3c24xx-simtec-hermes-snd",
130 .pm = simtec_audio_pm,
131 },
132 .probe = simtec_audio_hermes_probe,
133 .remove = __devexit_p(simtec_audio_remove),
134};
135
136MODULE_ALIAS("platform:s3c24xx-simtec-hermes-snd");
137
138static int __init simtec_hermes_modinit(void)
139{
140 return platform_driver_register(&simtec_audio_hermes_platdrv);
141}
142
143static void __exit simtec_hermes_modexit(void)
144{
145 platform_driver_unregister(&simtec_audio_hermes_platdrv);
146}
147
148module_init(simtec_hermes_modinit);
149module_exit(simtec_hermes_modexit);
150
151MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
152MODULE_DESCRIPTION("ALSA SoC Simtec Audio support");
153MODULE_LICENSE("GPL");
diff --git a/sound/soc/s3c24xx/s3c24xx_simtec_tlv320aic23.c b/sound/soc/s3c24xx/s3c24xx_simtec_tlv320aic23.c
new file mode 100644
index 000000000000..25797e096175
--- /dev/null
+++ b/sound/soc/s3c24xx/s3c24xx_simtec_tlv320aic23.c
@@ -0,0 +1,137 @@
1/* sound/soc/s3c24xx/s3c24xx_simtec_tlv320aic23.c
2 *
3 * Copyright 2009 Simtec Electronics
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8*/
9
10#include <linux/module.h>
11#include <linux/clk.h>
12#include <linux/platform_device.h>
13
14#include <sound/core.h>
15#include <sound/pcm.h>
16#include <sound/soc.h>
17#include <sound/soc-dapm.h>
18
19#include <plat/audio-simtec.h>
20
21#include "s3c24xx-pcm.h"
22#include "s3c24xx-i2s.h"
23#include "s3c24xx_simtec.h"
24
25#include "../codecs/tlv320aic23.h"
26
27/* supported machines:
28 *
29 * Machine Connections AMP
30 * ------- ----------- ---
31 * BAST MIC, HPOUT, LOUT, LIN TPA2001D1 (HPOUTL,R) (gain hardwired)
32 * VR1000 HPOUT, LIN None
33 * VR2000 LIN, LOUT, MIC, HP LM4871 (HPOUTL,R)
34 * DePicture LIN, LOUT, MIC, HP LM4871 (HPOUTL,R)
35 * Anubis LIN, LOUT, MIC, HP TPA2001D1 (HPOUTL,R)
36 */
37
38static const struct snd_soc_dapm_widget dapm_widgets[] = {
39 SND_SOC_DAPM_HP("Headphone Jack", NULL),
40 SND_SOC_DAPM_LINE("Line In", NULL),
41 SND_SOC_DAPM_LINE("Line Out", NULL),
42 SND_SOC_DAPM_MIC("Mic Jack", NULL),
43};
44
45static const struct snd_soc_dapm_route base_map[] = {
46 { "Headphone Jack", NULL, "LHPOUT"},
47 { "Headphone Jack", NULL, "RHPOUT"},
48
49 { "Line Out", NULL, "LOUT" },
50 { "Line Out", NULL, "ROUT" },
51
52 { "LLINEIN", NULL, "Line In"},
53 { "RLINEIN", NULL, "Line In"},
54
55 { "MICIN", NULL, "Mic Jack"},
56};
57
58/**
59 * simtec_tlv320aic23_init - initialise and add controls
60 * @codec: The codec instance to attach to.
61 *
62 * Attach our controls and configure the necessary codec
63 * mappings for our sound card instance.
64*/
65static int simtec_tlv320aic23_init(struct snd_soc_codec *codec)
66{
67 snd_soc_dapm_new_controls(codec, dapm_widgets,
68 ARRAY_SIZE(dapm_widgets));
69
70 snd_soc_dapm_add_routes(codec, base_map, ARRAY_SIZE(base_map));
71
72 snd_soc_dapm_enable_pin(codec, "Headphone Jack");
73 snd_soc_dapm_enable_pin(codec, "Line In");
74 snd_soc_dapm_enable_pin(codec, "Line Out");
75 snd_soc_dapm_enable_pin(codec, "Mic Jack");
76
77 simtec_audio_init(codec);
78 snd_soc_dapm_sync(codec);
79
80 return 0;
81}
82
83static struct snd_soc_dai_link simtec_dai_aic23 = {
84 .name = "tlv320aic23",
85 .stream_name = "TLV320AIC23",
86 .cpu_dai = &s3c24xx_i2s_dai,
87 .codec_dai = &tlv320aic23_dai,
88 .init = simtec_tlv320aic23_init,
89};
90
91/* simtec audio machine driver */
92static struct snd_soc_card snd_soc_machine_simtec_aic23 = {
93 .name = "Simtec",
94 .platform = &s3c24xx_soc_platform,
95 .dai_link = &simtec_dai_aic23,
96 .num_links = 1,
97};
98
99/* simtec audio subsystem */
100static struct snd_soc_device simtec_snd_devdata_aic23 = {
101 .card = &snd_soc_machine_simtec_aic23,
102 .codec_dev = &soc_codec_dev_tlv320aic23,
103};
104
105static int __devinit simtec_audio_tlv320aic23_probe(struct platform_device *pd)
106{
107 return simtec_audio_core_probe(pd, &simtec_snd_devdata_aic23);
108}
109
110static struct platform_driver simtec_audio_tlv320aic23_platdrv = {
111 .driver = {
112 .owner = THIS_MODULE,
113 .name = "s3c24xx-simtec-tlv320aic23",
114 .pm = simtec_audio_pm,
115 },
116 .probe = simtec_audio_tlv320aic23_probe,
117 .remove = __devexit_p(simtec_audio_remove),
118};
119
120MODULE_ALIAS("platform:s3c24xx-simtec-tlv320aic23");
121
122static int __init simtec_tlv320aic23_modinit(void)
123{
124 return platform_driver_register(&simtec_audio_tlv320aic23_platdrv);
125}
126
127static void __exit simtec_tlv320aic23_modexit(void)
128{
129 platform_driver_unregister(&simtec_audio_tlv320aic23_platdrv);
130}
131
132module_init(simtec_tlv320aic23_modinit);
133module_exit(simtec_tlv320aic23_modexit);
134
135MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
136MODULE_DESCRIPTION("ALSA SoC Simtec Audio support");
137MODULE_LICENSE("GPL");
diff --git a/sound/soc/s6000/s6105-ipcam.c b/sound/soc/s6000/s6105-ipcam.c
index b5f95f9781c1..c1b40ac22c05 100644
--- a/sound/soc/s6000/s6105-ipcam.c
+++ b/sound/soc/s6000/s6105-ipcam.c
@@ -14,6 +14,7 @@
14#include <linux/timer.h> 14#include <linux/timer.h>
15#include <linux/interrupt.h> 15#include <linux/interrupt.h>
16#include <linux/platform_device.h> 16#include <linux/platform_device.h>
17#include <linux/i2c.h>
17#include <sound/core.h> 18#include <sound/core.h>
18#include <sound/pcm.h> 19#include <sound/pcm.h>
19#include <sound/soc.h> 20#include <sound/soc.h>
@@ -189,8 +190,6 @@ static struct snd_soc_card snd_soc_card_s6105 = {
189 190
190/* s6105 audio private data */ 191/* s6105 audio private data */
191static struct aic3x_setup_data s6105_aic3x_setup = { 192static struct aic3x_setup_data s6105_aic3x_setup = {
192 .i2c_bus = 0,
193 .i2c_address = 0x18,
194}; 193};
195 194
196/* s6105 audio subsystem */ 195/* s6105 audio subsystem */
@@ -211,10 +210,19 @@ static struct s6000_snd_platform_data __initdata s6105_snd_data = {
211 210
212static struct platform_device *s6105_snd_device; 211static struct platform_device *s6105_snd_device;
213 212
213/* temporary i2c device creation until this can be moved into the machine
214 * support file.
215*/
216static struct i2c_board_info i2c_device[] = {
217 { I2C_BOARD_INFO("tlv320aic33", 0x18), }
218};
219
214static int __init s6105_init(void) 220static int __init s6105_init(void)
215{ 221{
216 int ret; 222 int ret;
217 223
224 i2c_register_board_info(0, i2c_device, ARRAY_SIZE(i2c_device));
225
218 s6105_snd_device = platform_device_alloc("soc-audio", -1); 226 s6105_snd_device = platform_device_alloc("soc-audio", -1);
219 if (!s6105_snd_device) 227 if (!s6105_snd_device)
220 return -ENOMEM; 228 return -ENOMEM;
diff --git a/sound/soc/sh/Kconfig b/sound/soc/sh/Kconfig
index 54bd604012af..9154b4363db3 100644
--- a/sound/soc/sh/Kconfig
+++ b/sound/soc/sh/Kconfig
@@ -20,7 +20,12 @@ config SND_SOC_SH4_HAC
20config SND_SOC_SH4_SSI 20config SND_SOC_SH4_SSI
21 tristate 21 tristate
22 22
23 23config SND_SOC_SH4_FSI
24 tristate "SH4 FSI support"
25 depends on CPU_SUBTYPE_SH7724
26 select SH_DMA
27 help
28 This option enables FSI sound support
24 29
25## 30##
26## Boards 31## Boards
@@ -35,4 +40,12 @@ config SND_SH7760_AC97
35 This option enables generic sound support for the first 40 This option enables generic sound support for the first
36 AC97 unit of the SH7760. 41 AC97 unit of the SH7760.
37 42
43config SND_FSI_AK4642
44 bool "FSI-AK4642 sound support"
45 depends on SND_SOC_SH4_FSI
46 select SND_SOC_AK4642
47 help
48 This option enables generic sound support for the
49 FSI - AK4642 unit
50
38endmenu 51endmenu
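With the new symbols in place, a board defconfig wanting FSI audio would typically carry something along these lines (illustrative fragment, assuming the rest of ALSA SoC is already enabled):

	CONFIG_SND_SOC_SH4_FSI=y
	CONFIG_SND_FSI_AK4642=y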
diff --git a/sound/soc/sh/Makefile b/sound/soc/sh/Makefile
index a8e8ab81cc6a..a6997872f24e 100644
--- a/sound/soc/sh/Makefile
+++ b/sound/soc/sh/Makefile
@@ -5,10 +5,14 @@ obj-$(CONFIG_SND_SOC_PCM_SH7760) += snd-soc-dma-sh7760.o
5## audio units found on some SH-4 5## audio units found on some SH-4
6snd-soc-hac-objs := hac.o 6snd-soc-hac-objs := hac.o
7snd-soc-ssi-objs := ssi.o 7snd-soc-ssi-objs := ssi.o
8snd-soc-fsi-objs := fsi.o
8obj-$(CONFIG_SND_SOC_SH4_HAC) += snd-soc-hac.o 9obj-$(CONFIG_SND_SOC_SH4_HAC) += snd-soc-hac.o
9obj-$(CONFIG_SND_SOC_SH4_SSI) += snd-soc-ssi.o 10obj-$(CONFIG_SND_SOC_SH4_SSI) += snd-soc-ssi.o
11obj-$(CONFIG_SND_SOC_SH4_FSI) += snd-soc-fsi.o
10 12
11## boards 13## boards
12snd-soc-sh7760-ac97-objs := sh7760-ac97.o 14snd-soc-sh7760-ac97-objs := sh7760-ac97.o
15snd-soc-fsi-ak4642-objs := fsi-ak4642.o
13 16
14obj-$(CONFIG_SND_SH7760_AC97) += snd-soc-sh7760-ac97.o 17obj-$(CONFIG_SND_SH7760_AC97) += snd-soc-sh7760-ac97.o
18obj-$(CONFIG_SND_FSI_AK4642) += snd-soc-fsi-ak4642.o
diff --git a/sound/soc/sh/fsi-ak4642.c b/sound/soc/sh/fsi-ak4642.c
new file mode 100644
index 000000000000..c7af09729c6e
--- /dev/null
+++ b/sound/soc/sh/fsi-ak4642.c
@@ -0,0 +1,107 @@
1/*
2 * FSI-AK464x sound support for ms7724se
3 *
4 * Copyright (C) 2009 Renesas Solutions Corp.
5 * Kuninori Morimoto <morimoto.kuninori@renesas.com>
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details.
10 */
11
12#include <linux/module.h>
13#include <linux/moduleparam.h>
14#include <linux/platform_device.h>
15#include <linux/i2c.h>
16#include <linux/io.h>
17#include <sound/core.h>
18#include <sound/pcm.h>
19#include <sound/soc.h>
20#include <sound/soc-dapm.h>
21
22#include <sound/sh_fsi.h>
23#include <../sound/soc/codecs/ak4642.h>
24
25static struct snd_soc_dai_link fsi_dai_link = {
26 .name = "AK4642",
27 .stream_name = "AK4642",
28 .cpu_dai = &fsi_soc_dai[0], /* fsi */
29 .codec_dai = &ak4642_dai,
30 .ops = NULL,
31};
32
33static struct snd_soc_card fsi_soc_card = {
34 .name = "FSI",
35 .platform = &fsi_soc_platform,
36 .dai_link = &fsi_dai_link,
37 .num_links = 1,
38};
39
40static struct snd_soc_device fsi_snd_devdata = {
41 .card = &fsi_soc_card,
42 .codec_dev = &soc_codec_dev_ak4642,
43};
44
45#define AK4642_BUS 0
46#define AK4642_ADR 0x12
47static int ak4642_add_i2c_device(void)
48{
49 struct i2c_board_info info;
50 struct i2c_adapter *adapter;
51 struct i2c_client *client;
52
53 memset(&info, 0, sizeof(struct i2c_board_info));
54 info.addr = AK4642_ADR;
55 strlcpy(info.type, "ak4642", I2C_NAME_SIZE);
56
57 adapter = i2c_get_adapter(AK4642_BUS);
58 if (!adapter) {
59 printk(KERN_DEBUG "can't get i2c adapter\n");
60 return -ENODEV;
61 }
62
63 client = i2c_new_device(adapter, &info);
64 i2c_put_adapter(adapter);
65 if (!client) {
66 printk(KERN_DEBUG "can't add i2c device\n");
67 return -ENODEV;
68 }
69
70 return 0;
71}
72
73static struct platform_device *fsi_snd_device;
74
75static int __init fsi_ak4642_init(void)
76{
77 int ret = -ENOMEM;
78
79 ak4642_add_i2c_device();
80
81 fsi_snd_device = platform_device_alloc("soc-audio", -1);
82 if (!fsi_snd_device)
83 goto out;
84
85 platform_set_drvdata(fsi_snd_device,
86 &fsi_snd_devdata);
87 fsi_snd_devdata.dev = &fsi_snd_device->dev;
88 ret = platform_device_add(fsi_snd_device);
89
90 if (ret)
91 platform_device_put(fsi_snd_device);
92
93out:
94 return ret;
95}
96
97static void __exit fsi_ak4642_exit(void)
98{
99 platform_device_unregister(fsi_snd_device);
100}
101
102module_init(fsi_ak4642_init);
103module_exit(fsi_ak4642_exit);
104
105MODULE_LICENSE("GPL");
106MODULE_DESCRIPTION("Generic SH4 FSI-AK4642 sound card");
107MODULE_AUTHOR("Kuninori Morimoto <morimoto.kuninori@renesas.com>");
diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c
new file mode 100644
index 000000000000..44123248b630
--- /dev/null
+++ b/sound/soc/sh/fsi.c
@@ -0,0 +1,1004 @@
1/*
2 * Fifo-attached Serial Interface (FSI) support for SH7724
3 *
4 * Copyright (C) 2009 Renesas Solutions Corp.
5 * Kuninori Morimoto <morimoto.kuninori@renesas.com>
6 *
7 * Based on ssi.c
8 * Copyright (c) 2007 Manuel Lauss <mano@roarinelk.homelinux.net>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */
14
15#include <linux/init.h>
16#include <linux/module.h>
17#include <linux/platform_device.h>
18#include <linux/delay.h>
19#include <linux/list.h>
20#include <linux/clk.h>
21#include <linux/io.h>
22#include <sound/core.h>
23#include <sound/pcm.h>
24#include <sound/initval.h>
25#include <sound/soc.h>
26#include <sound/pcm_params.h>
27#include <sound/sh_fsi.h>
28#include <asm/atomic.h>
29#include <asm/dma.h>
30#include <asm/dma-sh.h>
31
32#define DO_FMT 0x0000
33#define DOFF_CTL 0x0004
34#define DOFF_ST 0x0008
35#define DI_FMT 0x000C
36#define DIFF_CTL 0x0010
37#define DIFF_ST 0x0014
38#define CKG1 0x0018
39#define CKG2 0x001C
40#define DIDT 0x0020
41#define DODT 0x0024
42#define MUTE_ST 0x0028
43#define REG_END MUTE_ST
44
45#define INT_ST 0x0200
46#define IEMSK 0x0204
47#define IMSK 0x0208
48#define MUTE 0x020C
49#define CLK_RST 0x0210
50#define SOFT_RST 0x0214
51#define MREG_START INT_ST
52#define MREG_END SOFT_RST
53
54/* DO_FMT */
55/* DI_FMT */
56#define CR_FMT(param) ((param) << 4)
57# define CR_MONO 0x0
58# define CR_MONO_D 0x1
59# define CR_PCM 0x2
60# define CR_I2S 0x3
61# define CR_TDM 0x4
62# define CR_TDM_D 0x5
63
64/* DOFF_CTL */
65/* DIFF_CTL */
66#define IRQ_HALF 0x00100000
67#define FIFO_CLR 0x00000001
68
69/* DOFF_ST */
70#define ERR_OVER 0x00000010
71#define ERR_UNDER 0x00000001
72
73/* CLK_RST */
74#define B_CLK 0x00000010
75#define A_CLK 0x00000001
76
77/* INT_ST */
78#define INT_B_IN (1 << 12)
79#define INT_B_OUT (1 << 8)
80#define INT_A_IN (1 << 4)
81#define INT_A_OUT (1 << 0)
82
83#define FSI_RATES SNDRV_PCM_RATE_8000_96000
84
85#define FSI_FMTS (SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S16_LE)
86
87/************************************************************************
88
89
90 struct
91
92
93************************************************************************/
94struct fsi_priv {
95 void __iomem *base;
96 struct snd_pcm_substream *substream;
97
98 int fifo_max;
99 int chan;
100 int dma_chan;
101
102 int byte_offset;
103 int period_len;
104 int buffer_len;
105 int periods;
106};
107
108struct fsi_master {
109 void __iomem *base;
110 int irq;
111 struct clk *clk;
112 struct fsi_priv fsia;
113 struct fsi_priv fsib;
114 struct sh_fsi_platform_info *info;
115};
116
117static struct fsi_master *master;
118
119/************************************************************************
120
121
122 basic read write function
123
124
125************************************************************************/
126static int __fsi_reg_write(u32 reg, u32 data)
127{
128 /* valid data area is 24bit */
129 data &= 0x00ffffff;
130
131 return ctrl_outl(data, reg);
132}
133
134static u32 __fsi_reg_read(u32 reg)
135{
136 return ctrl_inl(reg);
137}
138
139static int __fsi_reg_mask_set(u32 reg, u32 mask, u32 data)
140{
141 u32 val = __fsi_reg_read(reg);
142
143 val &= ~mask;
144 val |= data & mask;
145
146 return __fsi_reg_write(reg, val);
147}
148
149static int fsi_reg_write(struct fsi_priv *fsi, u32 reg, u32 data)
150{
151 if (reg > REG_END)
152 return -1;
153
154 return __fsi_reg_write((u32)(fsi->base + reg), data);
155}
156
157static u32 fsi_reg_read(struct fsi_priv *fsi, u32 reg)
158{
159 if (reg > REG_END)
160 return 0;
161
162 return __fsi_reg_read((u32)(fsi->base + reg));
163}
164
165static int fsi_reg_mask_set(struct fsi_priv *fsi, u32 reg, u32 mask, u32 data)
166{
167 if (reg > REG_END)
168 return -1;
169
170 return __fsi_reg_mask_set((u32)(fsi->base + reg), mask, data);
171}
172
173static int fsi_master_write(u32 reg, u32 data)
174{
175 if ((reg < MREG_START) ||
176 (reg > MREG_END))
177 return -1;
178
179 return __fsi_reg_write((u32)(master->base + reg), data);
180}
181
182static u32 fsi_master_read(u32 reg)
183{
184 if ((reg < MREG_START) ||
185 (reg > MREG_END))
186 return 0;
187
188 return __fsi_reg_read((u32)(master->base + reg));
189}
190
191static int fsi_master_mask_set(u32 reg, u32 mask, u32 data)
192{
193 if ((reg < MREG_START) ||
194 (reg > MREG_END))
195 return -1;
196
197 return __fsi_reg_mask_set((u32)(master->base + reg), mask, data);
198}
199
200/************************************************************************
201
202
203 basic function
204
205
206************************************************************************/
207static struct fsi_priv *fsi_get(struct snd_pcm_substream *substream)
208{
209 struct snd_soc_pcm_runtime *rtd;
210 struct fsi_priv *fsi = NULL;
211
212 if (!substream || !master)
213 return NULL;
214
215 rtd = substream->private_data;
216 switch (rtd->dai->cpu_dai->id) {
217 case 0:
218 fsi = &master->fsia;
219 break;
220 case 1:
221 fsi = &master->fsib;
222 break;
223 }
224
225 return fsi;
226}
227
228static int fsi_is_port_a(struct fsi_priv *fsi)
229{
230 /* return
231 * 1 : port a
232 * 0 : port b
233 */
234
235 if (fsi == &master->fsia)
236 return 1;
237
238 return 0;
239}
240
241static u32 fsi_get_info_flags(struct fsi_priv *fsi)
242{
243 int is_porta = fsi_is_port_a(fsi);
244
245 return is_porta ? master->info->porta_flags :
246 master->info->portb_flags;
247}
248
249static int fsi_is_master_mode(struct fsi_priv *fsi, int is_play)
250{
251 u32 mode;
252 u32 flags = fsi_get_info_flags(fsi);
253
254 mode = is_play ? SH_FSI_OUT_SLAVE_MODE : SH_FSI_IN_SLAVE_MODE;
255
256 /* return
257 * 1 : master mode
258 * 0 : slave mode
259 */
260
261 return (mode & flags) != mode;
262}
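/*
 * For example (flags purely illustrative), a board whose codec drives
 * the bit/LR clocks for playback on port A would set
 *
 *	.porta_flags = SH_FSI_OUT_SLAVE_MODE | ...
 *
 * in its struct sh_fsi_platform_info; fsi_is_master_mode() then
 * returns 0 for the playback direction on that port.
 */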
263
264static u32 fsi_port_ab_io_bit(struct fsi_priv *fsi, int is_play)
265{
266 int is_porta = fsi_is_port_a(fsi);
267 u32 data;
268
269 if (is_porta)
270 data = is_play ? (1 << 0) : (1 << 4);
271 else
272 data = is_play ? (1 << 8) : (1 << 12);
273
274 return data;
275}
276
277static void fsi_stream_push(struct fsi_priv *fsi,
278 struct snd_pcm_substream *substream,
279 u32 buffer_len,
280 u32 period_len)
281{
282 fsi->substream = substream;
283 fsi->buffer_len = buffer_len;
284 fsi->period_len = period_len;
285 fsi->byte_offset = 0;
286 fsi->periods = 0;
287}
288
289static void fsi_stream_pop(struct fsi_priv *fsi)
290{
291 fsi->substream = NULL;
292 fsi->buffer_len = 0;
293 fsi->period_len = 0;
294 fsi->byte_offset = 0;
295 fsi->periods = 0;
296}
297
298static int fsi_get_fifo_residue(struct fsi_priv *fsi, int is_play)
299{
300 u32 status;
301 u32 reg = is_play ? DOFF_ST : DIFF_ST;
302 int residue;
303
304 status = fsi_reg_read(fsi, reg);
305 residue = 0x1ff & (status >> 8);
306 residue *= fsi->chan;
307
308 return residue;
309}
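/*
 * Bits [16:8] of DOFF_ST/DIFF_ST hold the per-channel FIFO occupancy;
 * for example (value illustrative), status = 0x00002000 on a stereo
 * stream gives 0x1ff & (0x2000 >> 8) = 32 samples per channel, so a
 * residue of 64 samples.
 */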
310
311static int fsi_get_residue(struct fsi_priv *fsi, int is_play)
312{
313 int residue;
314 int width;
315 struct snd_pcm_runtime *runtime;
316
317 runtime = fsi->substream->runtime;
318
319 /* get 1 channel data width */
320 width = frames_to_bytes(runtime, 1) / fsi->chan;
321
322 if (2 == width)
323 residue = fsi_get_fifo_residue(fsi, is_play);
324 else
325 residue = get_dma_residue(fsi->dma_chan);
326
327 return residue;
328}
329
330/************************************************************************
331
332
333 basic dma function
334
335
336************************************************************************/
337#define PORTA_DMA 0
338#define PORTB_DMA 1
339
340static int fsi_get_dma_chan(void)
341{
342 if (0 != request_dma(PORTA_DMA, "fsia"))
343 return -EIO;
344
345 if (0 != request_dma(PORTB_DMA, "fsib")) {
346 free_dma(PORTA_DMA);
347 return -EIO;
348 }
349
350 master->fsia.dma_chan = PORTA_DMA;
351 master->fsib.dma_chan = PORTB_DMA;
352
353 return 0;
354}
355
356static void fsi_free_dma_chan(void)
357{
358 dma_wait_for_completion(PORTA_DMA);
359 dma_wait_for_completion(PORTB_DMA);
360 free_dma(PORTA_DMA);
361 free_dma(PORTB_DMA);
362
363 master->fsia.dma_chan = -1;
364 master->fsib.dma_chan = -1;
365}
366
367/************************************************************************
368
369
370 ctrl function
371
372
373************************************************************************/
374static void fsi_irq_enable(struct fsi_priv *fsi, int is_play)
375{
376 u32 data = fsi_port_ab_io_bit(fsi, is_play);
377
378 fsi_master_mask_set(IMSK, data, data);
379 fsi_master_mask_set(IEMSK, data, data);
380}
381
382static void fsi_irq_disable(struct fsi_priv *fsi, int is_play)
383{
384 u32 data = fsi_port_ab_io_bit(fsi, is_play);
385
386 fsi_master_mask_set(IMSK, data, 0);
387 fsi_master_mask_set(IEMSK, data, 0);
388}
389
390static void fsi_clk_ctrl(struct fsi_priv *fsi, int enable)
391{
392 u32 val = fsi_is_port_a(fsi) ? (1 << 0) : (1 << 4);
393
394 if (enable)
395 fsi_master_mask_set(CLK_RST, val, val);
396 else
397 fsi_master_mask_set(CLK_RST, val, 0);
398}
399
400static void fsi_irq_init(struct fsi_priv *fsi, int is_play)
401{
402 u32 data;
403 u32 ctrl;
404
405 data = fsi_port_ab_io_bit(fsi, is_play);
406 ctrl = is_play ? DOFF_CTL : DIFF_CTL;
407
408 /* set IMSK */
409 fsi_irq_disable(fsi, is_play);
410
411 /* set interrupt generation factor */
412 fsi_reg_write(fsi, ctrl, IRQ_HALF);
413
414 /* clear FIFO */
415 fsi_reg_mask_set(fsi, ctrl, FIFO_CLR, FIFO_CLR);
416
417 /* clear interrupt factor */
418 fsi_master_mask_set(INT_ST, data, 0);
419}
420
421static void fsi_soft_all_reset(void)
422{
423 u32 status = fsi_master_read(SOFT_RST);
424
425 /* port AB reset */
426 status &= 0x000000ff;
427 fsi_master_write(SOFT_RST, status);
428 mdelay(10);
429
430 /* soft reset */
431 status &= 0x000000f0;
432 fsi_master_write(SOFT_RST, status);
433 status |= 0x00000001;
434 fsi_master_write(SOFT_RST, status);
435 mdelay(10);
436}
437
438static void fsi_16data_push(struct fsi_priv *fsi,
439 struct snd_pcm_runtime *runtime,
440 int send)
441{
442 u16 *dma_start;
443 u32 snd;
444 int i;
445
446 /* get dma start position for FSI */
447 dma_start = (u16 *)runtime->dma_area;
448 dma_start += fsi->byte_offset / 2;
449
450 /*
451 * soft dma:
452 * the FSI cannot use the DMAC for 16-bit samples, so push by PIO
453 */
454 for (i = 0; i < send; i++) {
455 snd = (u32)dma_start[i];
456 fsi_reg_write(fsi, DODT, snd << 8);
457 }
458}
459
460static void fsi_32data_push(struct fsi_priv *fsi,
461 struct snd_pcm_runtime *runtime,
462 int send)
463{
464 u32 *dma_start;
465
466 /* get dma start position for FSI */
467 dma_start = (u32 *)runtime->dma_area;
468 dma_start += fsi->byte_offset / 4;
469
470 dma_wait_for_completion(fsi->dma_chan);
471 dma_configure_channel(fsi->dma_chan, (SM_INC|0x400|TS_32|TM_BUR));
472 dma_write(fsi->dma_chan, (u32)dma_start,
473 (u32)(fsi->base + DODT), send * 4);
474}
475
476/* playback interrupt */
477static int fsi_data_push(struct fsi_priv *fsi)
478{
479 struct snd_pcm_runtime *runtime;
480 struct snd_pcm_substream *substream = NULL;
481 int send;
482 int fifo_free;
483 int width;
484
485 if (!fsi ||
486 !fsi->substream ||
487 !fsi->substream->runtime)
488 return -EINVAL;
489
490 runtime = fsi->substream->runtime;
491
492 /* The FSI FIFO has a limited size,
493 * so this driver cannot send a whole period's worth of data at a time
494 */
495 if (fsi->byte_offset >=
496 fsi->period_len * (fsi->periods + 1)) {
497
498 substream = fsi->substream;
499 fsi->periods = (fsi->periods + 1) % runtime->periods;
500
501 if (0 == fsi->periods)
502 fsi->byte_offset = 0;
503 }
504
505 /* get 1 channel data width */
506 width = frames_to_bytes(runtime, 1) / fsi->chan;
507
508 /* get send size for alsa */
509 send = (fsi->buffer_len - fsi->byte_offset) / width;
510
511 /* get FIFO free size */
512 fifo_free = (fsi->fifo_max * fsi->chan) - fsi_get_fifo_residue(fsi, 1);
513
514 /* size check */
515 if (fifo_free < send)
516 send = fifo_free;
517
518 if (2 == width)
519 fsi_16data_push(fsi, runtime, send);
520 else if (4 == width)
521 fsi_32data_push(fsi, runtime, send);
522 else
523 return -EINVAL;
524
525 fsi->byte_offset += send * width;
526
527 fsi_irq_enable(fsi, 1);
528
529 if (substream)
530 snd_pcm_period_elapsed(substream);
531
532 return 0;
533}
534
535static irqreturn_t fsi_interrupt(int irq, void *data)
536{
537 u32 status = fsi_master_read(SOFT_RST) & ~0x00000010;
538 u32 int_st = fsi_master_read(INT_ST);
539
540 /* clear irq status */
541 fsi_master_write(SOFT_RST, status);
542 fsi_master_write(SOFT_RST, status | 0x00000010);
543
544 if (int_st & INT_A_OUT)
545 fsi_data_push(&master->fsia);
546 if (int_st & INT_B_OUT)
547 fsi_data_push(&master->fsib);
548
549 fsi_master_write(INT_ST, 0x00000000);
550
551 return IRQ_HANDLED;
552}
553
554/************************************************************************
555
556
557 dai ops
558
559
560************************************************************************/
561static int fsi_dai_startup(struct snd_pcm_substream *substream,
562 struct snd_soc_dai *dai)
563{
564 struct fsi_priv *fsi = fsi_get(substream);
565 const char *msg;
566 u32 flags = fsi_get_info_flags(fsi);
567 u32 fmt;
568 u32 reg;
569 u32 data;
570 int is_play = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
571 int is_master;
572 int ret = 0;
573
574 clk_enable(master->clk);
575
576 /* CKG1 */
577 data = is_play ? (1 << 0) : (1 << 4);
578 is_master = fsi_is_master_mode(fsi, is_play);
579 if (is_master)
580 fsi_reg_mask_set(fsi, CKG1, data, data);
581 else
582 fsi_reg_mask_set(fsi, CKG1, data, 0);
583
584 /* clock inversion (CKG2) */
585 data = 0;
586 switch (SH_FSI_INVERSION_MASK & flags) {
587 case SH_FSI_LRM_INV:
588 data = 1 << 12;
589 break;
590 case SH_FSI_BRM_INV:
591 data = 1 << 8;
592 break;
593 case SH_FSI_LRS_INV:
594 data = 1 << 4;
595 break;
596 case SH_FSI_BRS_INV:
597 data = 1 << 0;
598 break;
599 }
600 fsi_reg_write(fsi, CKG2, data);
601
602 /* do fmt, di fmt */
603 data = 0;
604 reg = is_play ? DO_FMT : DI_FMT;
605 fmt = is_play ? SH_FSI_GET_OFMT(flags) : SH_FSI_GET_IFMT(flags);
606 switch (fmt) {
607 case SH_FSI_FMT_MONO:
608 msg = "MONO";
609 data = CR_FMT(CR_MONO);
610 fsi->chan = 1;
611 break;
612 case SH_FSI_FMT_MONO_DELAY:
613 msg = "MONO Delay";
614 data = CR_FMT(CR_MONO_D);
615 fsi->chan = 1;
616 break;
617 case SH_FSI_FMT_PCM:
618 msg = "PCM";
619 data = CR_FMT(CR_PCM);
620 fsi->chan = 2;
621 break;
622 case SH_FSI_FMT_I2S:
623 msg = "I2S";
624 data = CR_FMT(CR_I2S);
625 fsi->chan = 2;
626 break;
627 case SH_FSI_FMT_TDM:
628 msg = "TDM";
629 data = CR_FMT(CR_TDM) | (fsi->chan - 1);
630 fsi->chan = is_play ?
631 SH_FSI_GET_CH_O(flags) : SH_FSI_GET_CH_I(flags);
632 break;
633 case SH_FSI_FMT_TDM_DELAY:
634 msg = "TDM Delay";
635 data = CR_FMT(CR_TDM_D) | (fsi->chan - 1);
636 fsi->chan = is_play ?
637 SH_FSI_GET_CH_O(flags) : SH_FSI_GET_CH_I(flags);
638 break;
639 default:
640 dev_err(dai->dev, "unknown format.\n");
641 return -EINVAL;
642 }
643
644 switch (fsi->chan) {
645 case 1:
646 fsi->fifo_max = 256;
647 break;
648 case 2:
649 fsi->fifo_max = 128;
650 break;
651 case 3:
652 case 4:
653 fsi->fifo_max = 64;
654 break;
655 case 5:
656 case 6:
657 case 7:
658 case 8:
659 fsi->fifo_max = 32;
660 break;
661 default:
662 dev_err(dai->dev, "channel size error.\n");
663 return -EINVAL;
664 }
665
666 fsi_reg_write(fsi, reg, data);
667 dev_dbg(dai->dev, "using %s format, %d channels, DMAC %d\n",
668 msg, fsi->chan, fsi->dma_chan);
669
670 /*
671 * clear clk reset if master mode
672 */
673 if (is_master)
674 fsi_clk_ctrl(fsi, 1);
675
676 /* irq setting */
677 fsi_irq_init(fsi, is_play);
678
679 return ret;
680}
681
682static void fsi_dai_shutdown(struct snd_pcm_substream *substream,
683 struct snd_soc_dai *dai)
684{
685 struct fsi_priv *fsi = fsi_get(substream);
686 int is_play = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
687
688 fsi_irq_disable(fsi, is_play);
689 fsi_clk_ctrl(fsi, 0);
690
691 clk_disable(master->clk);
692}
693
694static int fsi_dai_trigger(struct snd_pcm_substream *substream, int cmd,
695 struct snd_soc_dai *dai)
696{
697 struct fsi_priv *fsi = fsi_get(substream);
698 struct snd_pcm_runtime *runtime = substream->runtime;
699 int is_play = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
700 int ret = 0;
701
702 /* capture not supported */
703 if (!is_play)
704 return -ENODEV;
705
706 switch (cmd) {
707 case SNDRV_PCM_TRIGGER_START:
708 fsi_stream_push(fsi, substream,
709 frames_to_bytes(runtime, runtime->buffer_size),
710 frames_to_bytes(runtime, runtime->period_size));
711 ret = fsi_data_push(fsi);
712 break;
713 case SNDRV_PCM_TRIGGER_STOP:
714 fsi_irq_disable(fsi, is_play);
715 fsi_stream_pop(fsi);
716 break;
717 }
718
719 return ret;
720}
721
722static struct snd_soc_dai_ops fsi_dai_ops = {
723 .startup = fsi_dai_startup,
724 .shutdown = fsi_dai_shutdown,
725 .trigger = fsi_dai_trigger,
726};
727
728/************************************************************************
729
730
731 pcm ops
732
733
734************************************************************************/
735static struct snd_pcm_hardware fsi_pcm_hardware = {
736 .info = SNDRV_PCM_INFO_INTERLEAVED |
737 SNDRV_PCM_INFO_MMAP |
738 SNDRV_PCM_INFO_MMAP_VALID |
739 SNDRV_PCM_INFO_PAUSE,
740 .formats = FSI_FMTS,
741 .rates = FSI_RATES,
742 .rate_min = 8000,
743 .rate_max = 192000,
744 .channels_min = 1,
745 .channels_max = 2,
746 .buffer_bytes_max = 64 * 1024,
747 .period_bytes_min = 32,
748 .period_bytes_max = 8192,
749 .periods_min = 1,
750 .periods_max = 32,
751 .fifo_size = 256,
752};
753
754static int fsi_pcm_open(struct snd_pcm_substream *substream)
755{
756 struct snd_pcm_runtime *runtime = substream->runtime;
757 int ret = 0;
758
759 snd_soc_set_runtime_hwparams(substream, &fsi_pcm_hardware);
760
761 ret = snd_pcm_hw_constraint_integer(runtime,
762 SNDRV_PCM_HW_PARAM_PERIODS);
763
764 return ret;
765}
766
767static int fsi_hw_params(struct snd_pcm_substream *substream,
768 struct snd_pcm_hw_params *hw_params)
769{
770 return snd_pcm_lib_malloc_pages(substream,
771 params_buffer_bytes(hw_params));
772}
773
774static int fsi_hw_free(struct snd_pcm_substream *substream)
775{
776 return snd_pcm_lib_free_pages(substream);
777}
778
779static snd_pcm_uframes_t fsi_pointer(struct snd_pcm_substream *substream)
780{
781 struct snd_pcm_runtime *runtime = substream->runtime;
782 struct fsi_priv *fsi = fsi_get(substream);
783 int is_play = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
784 long location;
785
786 location = (fsi->byte_offset - 1) - fsi_get_residue(fsi, is_play);
787 if (location < 0)
788 location = 0;
789
790 return bytes_to_frames(runtime, location);
791}
792
793static struct snd_pcm_ops fsi_pcm_ops = {
794 .open = fsi_pcm_open,
795 .ioctl = snd_pcm_lib_ioctl,
796 .hw_params = fsi_hw_params,
797 .hw_free = fsi_hw_free,
798 .pointer = fsi_pointer,
799};
800
801/************************************************************************
802
803
804 snd_soc_platform
805
806
807************************************************************************/
808#define PREALLOC_BUFFER (32 * 1024)
809#define PREALLOC_BUFFER_MAX (32 * 1024)
810
811static void fsi_pcm_free(struct snd_pcm *pcm)
812{
813 snd_pcm_lib_preallocate_free_for_all(pcm);
814}
815
816static int fsi_pcm_new(struct snd_card *card,
817 struct snd_soc_dai *dai,
818 struct snd_pcm *pcm)
819{
820 /*
821 * don't use SNDRV_DMA_TYPE_DEV, since it will oops the SH kernel
822 * in MMAP mode (i.e. aplay -M)
823 */
824 return snd_pcm_lib_preallocate_pages_for_all(
825 pcm,
826 SNDRV_DMA_TYPE_CONTINUOUS,
827 snd_dma_continuous_data(GFP_KERNEL),
828 PREALLOC_BUFFER, PREALLOC_BUFFER_MAX);
829}
830
831/************************************************************************
832
833
834 alsa struct
835
836
837************************************************************************/
838struct snd_soc_dai fsi_soc_dai[] = {
839 {
840 .name = "FSIA",
841 .id = 0,
842 .playback = {
843 .rates = FSI_RATES,
844 .formats = FSI_FMTS,
845 .channels_min = 1,
846 .channels_max = 8,
847 },
848 /* capture not supported */
849 .ops = &fsi_dai_ops,
850 },
851 {
852 .name = "FSIB",
853 .id = 1,
854 .playback = {
855 .rates = FSI_RATES,
856 .formats = FSI_FMTS,
857 .channels_min = 1,
858 .channels_max = 8,
859 },
860 /* capture not supported */
861 .ops = &fsi_dai_ops,
862 },
863};
864EXPORT_SYMBOL_GPL(fsi_soc_dai);
865
866struct snd_soc_platform fsi_soc_platform = {
867 .name = "fsi-pcm",
868 .pcm_ops = &fsi_pcm_ops,
869 .pcm_new = fsi_pcm_new,
870 .pcm_free = fsi_pcm_free,
871};
872EXPORT_SYMBOL_GPL(fsi_soc_platform);
873
874/************************************************************************
875
876
877 platform function
878
879
880************************************************************************/
881static int fsi_probe(struct platform_device *pdev)
882{
883 struct resource *res;
884 char clk_name[8];
885 unsigned int irq;
886 int ret;
887
888 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
889 irq = platform_get_irq(pdev, 0);
890 if (!res || !irq) {
891 dev_err(&pdev->dev, "Not enough FSI platform resources.\n");
892 ret = -ENODEV;
893 goto exit;
894 }
895
896 master = kzalloc(sizeof(*master), GFP_KERNEL);
897 if (!master) {
898 dev_err(&pdev->dev, "Could not allocate master\n");
899 ret = -ENOMEM;
900 goto exit;
901 }
902
903 master->base = ioremap_nocache(res->start, resource_size(res));
904 if (!master->base) {
905 ret = -ENXIO;
906 dev_err(&pdev->dev, "Unable to ioremap FSI registers.\n");
907 goto exit_kfree;
908 }
909
910 master->irq = irq;
911 master->info = pdev->dev.platform_data;
912 master->fsia.base = master->base;
913 master->fsib.base = master->base + 0x40;
914
915 master->fsia.dma_chan = -1;
916 master->fsib.dma_chan = -1;
917
918 ret = fsi_get_dma_chan();
919 if (ret < 0) {
920 dev_err(&pdev->dev, "cannot get dma channels\n");
921 goto exit_iounmap;
922 }
923
924 /* FSI is based on SPU mstp */
925 snprintf(clk_name, sizeof(clk_name), "spu%d", pdev->id);
926 master->clk = clk_get(NULL, clk_name);
927 if (IS_ERR(master->clk)) {
928 dev_err(&pdev->dev, "cannot get %s mstp\n", clk_name);
929 ret = -EIO;
930 goto exit_free_dma;
931 }
932
933 fsi_soc_dai[0].dev = &pdev->dev;
934 fsi_soc_dai[1].dev = &pdev->dev;
935
936 fsi_soft_all_reset();
937
938 ret = request_irq(irq, &fsi_interrupt, IRQF_DISABLED, "fsi", master);
939 if (ret) {
940 dev_err(&pdev->dev, "irq request err\n");
941 goto exit_free_dma;
942 }
943
944 ret = snd_soc_register_platform(&fsi_soc_platform);
945 if (ret < 0) {
946 dev_err(&pdev->dev, "cannot register snd soc platform\n");
947 goto exit_free_irq;
948 }
949
950 return snd_soc_register_dais(fsi_soc_dai, ARRAY_SIZE(fsi_soc_dai));
951
952exit_free_irq:
953 free_irq(irq, master);
954exit_free_dma:
955 fsi_free_dma_chan();
956exit_iounmap:
957 iounmap(master->base);
958exit_kfree:
959 kfree(master);
960 master = NULL;
961exit:
962 return ret;
963}
964
965static int fsi_remove(struct platform_device *pdev)
966{
967 snd_soc_unregister_dais(fsi_soc_dai, ARRAY_SIZE(fsi_soc_dai));
968 snd_soc_unregister_platform(&fsi_soc_platform);
969
970 clk_put(master->clk);
971
972 fsi_free_dma_chan();
973
974 free_irq(master->irq, master);
975
976 iounmap(master->base);
977 kfree(master);
978 master = NULL;
979 return 0;
980}
981
982static struct platform_driver fsi_driver = {
983 .driver = {
984 .name = "sh_fsi",
985 },
986 .probe = fsi_probe,
987 .remove = fsi_remove,
988};
989
990static int __init fsi_mobile_init(void)
991{
992 return platform_driver_register(&fsi_driver);
993}
994
995static void __exit fsi_mobile_exit(void)
996{
997 platform_driver_unregister(&fsi_driver);
998}
999module_init(fsi_mobile_init);
1000module_exit(fsi_mobile_exit);
1001
1002MODULE_LICENSE("GPL");
1003MODULE_DESCRIPTION("SuperH onchip FSI audio driver");
1004MODULE_AUTHOR("Kuninori Morimoto <morimoto.kuninori@renesas.com>");
diff --git a/sound/soc/soc-cache.c b/sound/soc/soc-cache.c
new file mode 100644
index 000000000000..c8ceddc2a26c
--- /dev/null
+++ b/sound/soc/soc-cache.c
@@ -0,0 +1,218 @@
1/*
2 * soc-cache.c -- ASoC register cache helpers
3 *
4 * Copyright 2009 Wolfson Microelectronics PLC.
5 *
6 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 */
13
14#include <linux/i2c.h>
15#include <linux/spi/spi.h>
16#include <sound/soc.h>
17
18static unsigned int snd_soc_7_9_read(struct snd_soc_codec *codec,
19 unsigned int reg)
20{
21 u16 *cache = codec->reg_cache;
22 if (reg >= codec->reg_cache_size)
23 return -1;
24 return cache[reg];
25}
26
27static int snd_soc_7_9_write(struct snd_soc_codec *codec, unsigned int reg,
28 unsigned int value)
29{
30 u16 *cache = codec->reg_cache;
31 u8 data[2];
32 int ret;
33
34 BUG_ON(codec->volatile_register);
35
36 data[0] = (reg << 1) | ((value >> 8) & 0x0001);
37 data[1] = value & 0x00ff;
38
39 if (reg < codec->reg_cache_size)
40 cache[reg] = value;
41 ret = codec->hw_write(codec->control_data, data, 2);
42 if (ret == 2)
43 return 0;
44 if (ret < 0)
45 return ret;
46 else
47 return -EIO;
48}
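/*
 * Worked example of the 7/9 packing above (register and value purely
 * illustrative): reg = 0x05, value = 0x1a3 gives
 *
 *	data[0] = (0x05 << 1) | ((0x1a3 >> 8) & 1) = 0x0b
 *	data[1] = 0x1a3 & 0xff                     = 0xa3
 *
 * i.e. the 7-bit register index and the top bit of the 9-bit value
 * share the first byte on the wire.
 */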
49
50#if defined(CONFIG_SPI_MASTER)
51static int snd_soc_7_9_spi_write(void *control_data, const char *data,
52 int len)
53{
54 struct spi_device *spi = control_data;
55 struct spi_transfer t;
56 struct spi_message m;
57 u8 msg[2];
58
59 if (len <= 0)
60 return 0;
61
62 msg[0] = data[0];
63 msg[1] = data[1];
64
65 spi_message_init(&m);
66 memset(&t, 0, (sizeof t));
67
68 t.tx_buf = &msg[0];
69 t.len = len;
70
71 spi_message_add_tail(&t, &m);
72 spi_sync(spi, &m);
73
74 return len;
75}
76#else
77#define snd_soc_7_9_spi_write NULL
78#endif
79
80static int snd_soc_8_16_write(struct snd_soc_codec *codec, unsigned int reg,
81 unsigned int value)
82{
83 u16 *reg_cache = codec->reg_cache;
84 u8 data[3];
85
86 data[0] = reg;
87 data[1] = (value >> 8) & 0xff;
88 data[2] = value & 0xff;
89
90 if (!snd_soc_codec_volatile_register(codec, reg))
91 reg_cache[reg] = value;
92
93 if (codec->hw_write(codec->control_data, data, 3) == 3)
94 return 0;
95 else
96 return -EIO;
97}
98
99static unsigned int snd_soc_8_16_read(struct snd_soc_codec *codec,
100 unsigned int reg)
101{
102 u16 *cache = codec->reg_cache;
103
104 if (reg >= codec->reg_cache_size ||
105 snd_soc_codec_volatile_register(codec, reg))
106 return codec->hw_read(codec, reg);
107 else
108 return cache[reg];
109}
110
111#if defined(CONFIG_I2C) || (defined(CONFIG_I2C_MODULE) && defined(MODULE))
112static unsigned int snd_soc_8_16_read_i2c(struct snd_soc_codec *codec,
113 unsigned int r)
114{
115 struct i2c_msg xfer[2];
116 u8 reg = r;
117 u16 data;
118 int ret;
119 struct i2c_client *client = codec->control_data;
120
121 /* Write register */
122 xfer[0].addr = client->addr;
123 xfer[0].flags = 0;
124 xfer[0].len = 1;
125 xfer[0].buf = &reg;
126
127 /* Read data */
128 xfer[1].addr = client->addr;
129 xfer[1].flags = I2C_M_RD;
130 xfer[1].len = 2;
131 xfer[1].buf = (u8 *)&data;
132
133 ret = i2c_transfer(client->adapter, xfer, 2);
134 if (ret != 2) {
135 dev_err(&client->dev, "i2c_transfer() returned %d\n", ret);
136 return 0;
137 }
138
139 return (data >> 8) | ((data & 0xff) << 8);
140}
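/*
 * The final swap converts the big-endian value delivered by the codec
 * into register order; for example (value illustrative), wire bytes
 * 0x12 0x34 land in the u16 as 0x3412 on a little-endian host, and the
 * swap returns the register value 0x1234.
 */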
141#else
142#define snd_soc_8_16_read_i2c NULL
143#endif
144
145static struct {
146 int addr_bits;
147 int data_bits;
148 int (*write)(struct snd_soc_codec *codec, unsigned int, unsigned int);
149 int (*spi_write)(void *, const char *, int);
150 unsigned int (*read)(struct snd_soc_codec *, unsigned int);
151 unsigned int (*i2c_read)(struct snd_soc_codec *, unsigned int);
152} io_types[] = {
153 { 7, 9, snd_soc_7_9_write, snd_soc_7_9_spi_write, snd_soc_7_9_read },
154 { 8, 16, snd_soc_8_16_write, NULL, snd_soc_8_16_read,
155 snd_soc_8_16_read_i2c },
156};
157
158/**
159 * snd_soc_codec_set_cache_io: Set up standard I/O functions.
160 *
161 * @codec: CODEC to configure.
163 * @addr_bits: Number of bits of register address data.
164 * @data_bits: Number of bits of data per register.
165 * @control: Control bus used.
166 *
167 * Register formats are frequently shared between many I2C and SPI
168 * devices. In order to promote code reuse the ASoC core provides
169 * some standard implementations of CODEC read and write operations
170 * which can be set up using this function.
171 *
172 * The caller is responsible for allocating and initialising the
173 * actual cache.
174 *
175 * Note that at present this code cannot be used by CODECs with
176 * volatile registers.
177 */
178int snd_soc_codec_set_cache_io(struct snd_soc_codec *codec,
179 int addr_bits, int data_bits,
180 enum snd_soc_control_type control)
181{
182 int i;
183
184 for (i = 0; i < ARRAY_SIZE(io_types); i++)
185 if (io_types[i].addr_bits == addr_bits &&
186 io_types[i].data_bits == data_bits)
187 break;
188 if (i == ARRAY_SIZE(io_types)) {
189 printk(KERN_ERR
190 "No I/O functions for %d bit address %d bit data\n",
191 addr_bits, data_bits);
192 return -EINVAL;
193 }
194
195 codec->write = io_types[i].write;
196 codec->read = io_types[i].read;
197
198 switch (control) {
199 case SND_SOC_CUSTOM:
200 break;
201
202 case SND_SOC_I2C:
203#if defined(CONFIG_I2C) || (defined(CONFIG_I2C_MODULE) && defined(MODULE))
204 codec->hw_write = (hw_write_t)i2c_master_send;
205#endif
206 if (io_types[i].i2c_read)
207 codec->hw_read = io_types[i].i2c_read;
208 break;
209
210 case SND_SOC_SPI:
211 if (io_types[i].spi_write)
212 codec->hw_write = io_types[i].spi_write;
213 break;
214 }
215
216 return 0;
217}
218EXPORT_SYMBOL_GPL(snd_soc_codec_set_cache_io);
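A codec driver is expected to hook this up from its probe path; the fragment below is a sketch only (the surrounding probe function and its error handling are assumed, not part of this patch):

	/* e.g. a codec with 7-bit register addresses and 9-bit values on I2C */
	ret = snd_soc_codec_set_cache_io(codec, 7, 9, SND_SOC_I2C);
	if (ret < 0) {
		dev_err(codec->dev, "failed to set cache I/O: %d\n", ret);
		return ret;
	}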
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 1d70829464ef..7ff04ad2a97e 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -28,6 +28,7 @@
28#include <linux/bitops.h> 28#include <linux/bitops.h>
29#include <linux/debugfs.h> 29#include <linux/debugfs.h>
30#include <linux/platform_device.h> 30#include <linux/platform_device.h>
31#include <sound/ac97_codec.h>
31#include <sound/core.h> 32#include <sound/core.h>
32#include <sound/pcm.h> 33#include <sound/pcm.h>
33#include <sound/pcm_params.h> 34#include <sound/pcm_params.h>
@@ -619,8 +620,9 @@ static struct snd_pcm_ops soc_pcm_ops = {
619 620
620#ifdef CONFIG_PM 621#ifdef CONFIG_PM
621/* powers down audio subsystem for suspend */ 622/* powers down audio subsystem for suspend */
622static int soc_suspend(struct platform_device *pdev, pm_message_t state) 623static int soc_suspend(struct device *dev)
623{ 624{
625 struct platform_device *pdev = to_platform_device(dev);
624 struct snd_soc_device *socdev = platform_get_drvdata(pdev); 626 struct snd_soc_device *socdev = platform_get_drvdata(pdev);
625 struct snd_soc_card *card = socdev->card; 627 struct snd_soc_card *card = socdev->card;
626 struct snd_soc_platform *platform = card->platform; 628 struct snd_soc_platform *platform = card->platform;
@@ -656,7 +658,7 @@ static int soc_suspend(struct platform_device *pdev, pm_message_t state)
656 snd_pcm_suspend_all(card->dai_link[i].pcm); 658 snd_pcm_suspend_all(card->dai_link[i].pcm);
657 659
658 if (card->suspend_pre) 660 if (card->suspend_pre)
659 card->suspend_pre(pdev, state); 661 card->suspend_pre(pdev, PMSG_SUSPEND);
660 662
661 for (i = 0; i < card->num_links; i++) { 663 for (i = 0; i < card->num_links; i++) {
662 struct snd_soc_dai *cpu_dai = card->dai_link[i].cpu_dai; 664 struct snd_soc_dai *cpu_dai = card->dai_link[i].cpu_dai;
@@ -682,7 +684,7 @@ static int soc_suspend(struct platform_device *pdev, pm_message_t state)
682 } 684 }
683 685
684 if (codec_dev->suspend) 686 if (codec_dev->suspend)
685 codec_dev->suspend(pdev, state); 687 codec_dev->suspend(pdev, PMSG_SUSPEND);
686 688
687 for (i = 0; i < card->num_links; i++) { 689 for (i = 0; i < card->num_links; i++) {
688 struct snd_soc_dai *cpu_dai = card->dai_link[i].cpu_dai; 690 struct snd_soc_dai *cpu_dai = card->dai_link[i].cpu_dai;
@@ -691,7 +693,7 @@ static int soc_suspend(struct platform_device *pdev, pm_message_t state)
691 } 693 }
692 694
693 if (card->suspend_post) 695 if (card->suspend_post)
694 card->suspend_post(pdev, state); 696 card->suspend_post(pdev, PMSG_SUSPEND);
695 697
696 return 0; 698 return 0;
697} 699}
@@ -765,8 +767,9 @@ static void soc_resume_deferred(struct work_struct *work)
765} 767}
766 768
767/* powers up audio subsystem after a suspend */ 769/* powers up audio subsystem after a suspend */
768static int soc_resume(struct platform_device *pdev) 770static int soc_resume(struct device *dev)
769{ 771{
772 struct platform_device *pdev = to_platform_device(dev);
770 struct snd_soc_device *socdev = platform_get_drvdata(pdev); 773 struct snd_soc_device *socdev = platform_get_drvdata(pdev);
771 struct snd_soc_card *card = socdev->card; 774 struct snd_soc_card *card = socdev->card;
772 struct snd_soc_dai *cpu_dai = card->dai_link[0].cpu_dai; 775 struct snd_soc_dai *cpu_dai = card->dai_link[0].cpu_dai;
@@ -788,6 +791,44 @@ static int soc_resume(struct platform_device *pdev)
788 return 0; 791 return 0;
789} 792}
790 793
794/**
795 * snd_soc_suspend_device: Notify core of device suspend
796 *
797 * @dev: Device being suspended.
798 *
799 * In order to ensure that the entire audio subsystem is suspended in a
800 * coordinated fashion ASoC devices should suspend themselves when
801 * called by ASoC. When the standard kernel suspend process asks the
802 * device to suspend it should call this function to initiate a suspend
803 * of the entire ASoC card.
804 *
805 * Note: currently this function is stubbed out.
806 */
807int snd_soc_suspend_device(struct device *dev)
808{
809 return 0;
810}
811EXPORT_SYMBOL_GPL(snd_soc_suspend_device);
812
813/**
814 * snd_soc_resume_device: Notify core of device resume
815 *
816 * @dev: Device being resumed.
817 *
818 * In order to ensure that the entire audio subsystem is resumed in a
 819 * coordinated fashion, ASoC devices should resume themselves when called
820 * by ASoC. When the standard kernel resume process asks the device
821 * to resume it should call this function. Once all the components of
822 * the card have notified that they are ready to be resumed the card
823 * will be resumed.
824 *
 825 * Note: Currently this function is stubbed out.
826 */
827int snd_soc_resume_device(struct device *dev)
828{
829 return 0;
830}
831EXPORT_SYMBOL_GPL(snd_soc_resume_device);
791#else 832#else
792#define soc_suspend NULL 833#define soc_suspend NULL
793#define soc_resume NULL 834#define soc_resume NULL
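As a rough sketch of how these notification hooks are meant to be used once they gain a real implementation (the my_codec_* names and the dev_pm_ops wiring below are illustrative assumptions, not part of this patch; both helpers currently just return 0), a component driver would forward its own PM callbacks to the ASoC core:

	#include <linux/pm.h>
	#include <sound/soc.h>

	/* Hypothetical component driver; only snd_soc_suspend_device() and
	 * snd_soc_resume_device() above are real, and both are still stubs. */
	static int my_codec_suspend(struct device *dev)
	{
		/* let the ASoC core coordinate suspend of the whole card */
		return snd_soc_suspend_device(dev);
	}

	static int my_codec_resume(struct device *dev)
	{
		/* the core resumes the card once every component has checked in */
		return snd_soc_resume_device(dev);
	}

	static const struct dev_pm_ops my_codec_pm_ops = {
		.suspend = my_codec_suspend,
		.resume  = my_codec_resume,
	};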
@@ -981,16 +1022,39 @@ static int soc_remove(struct platform_device *pdev)
981 return 0; 1022 return 0;
982} 1023}
983 1024
1025static int soc_poweroff(struct device *dev)
1026{
1027 struct platform_device *pdev = to_platform_device(dev);
1028 struct snd_soc_device *socdev = platform_get_drvdata(pdev);
1029 struct snd_soc_card *card = socdev->card;
1030
1031 if (!card->instantiated)
1032 return 0;
1033
1034 /* Flush out pmdown_time work - we actually do want to run it
1035 * now, we're shutting down so no imminent restart. */
1036 run_delayed_work(&card->delayed_work);
1037
1038 snd_soc_dapm_shutdown(socdev);
1039
1040 return 0;
1041}
1042
1043static struct dev_pm_ops soc_pm_ops = {
1044 .suspend = soc_suspend,
1045 .resume = soc_resume,
1046 .poweroff = soc_poweroff,
1047};
1048
984/* ASoC platform driver */ 1049/* ASoC platform driver */
985static struct platform_driver soc_driver = { 1050static struct platform_driver soc_driver = {
986 .driver = { 1051 .driver = {
987 .name = "soc-audio", 1052 .name = "soc-audio",
988 .owner = THIS_MODULE, 1053 .owner = THIS_MODULE,
1054 .pm = &soc_pm_ops,
989 }, 1055 },
990 .probe = soc_probe, 1056 .probe = soc_probe,
991 .remove = soc_remove, 1057 .remove = soc_remove,
992 .suspend = soc_suspend,
993 .resume = soc_resume,
994}; 1058};
995 1059
996/* create a new pcm */ 1060/* create a new pcm */
@@ -1062,6 +1126,23 @@ static int soc_new_pcm(struct snd_soc_device *socdev,
1062 return ret; 1126 return ret;
1063} 1127}
1064 1128
1129/**
1130 * snd_soc_codec_volatile_register: Report if a register is volatile.
1131 *
1132 * @codec: CODEC to query.
1133 * @reg: Register to query.
1134 *
 1135 * Boolean function indicating if a CODEC register is volatile.
1136 */
1137int snd_soc_codec_volatile_register(struct snd_soc_codec *codec, int reg)
1138{
1139 if (codec->volatile_register)
1140 return codec->volatile_register(reg);
1141 else
1142 return 0;
1143}
1144EXPORT_SYMBOL_GPL(snd_soc_codec_volatile_register);
1145
1065/* codec register dump */ 1146/* codec register dump */
1066static ssize_t soc_codec_reg_show(struct snd_soc_codec *codec, char *buf) 1147static ssize_t soc_codec_reg_show(struct snd_soc_codec *codec, char *buf)
1067{ 1148{
@@ -1075,6 +1156,9 @@ static ssize_t soc_codec_reg_show(struct snd_soc_codec *codec, char *buf)
1075 1156
1076 count += sprintf(buf, "%s registers\n", codec->name); 1157 count += sprintf(buf, "%s registers\n", codec->name);
1077 for (i = 0; i < codec->reg_cache_size; i += step) { 1158 for (i = 0; i < codec->reg_cache_size; i += step) {
1159 if (codec->readable_register && !codec->readable_register(i))
1160 continue;
1161
1078 count += sprintf(buf + count, "%2x: ", i); 1162 count += sprintf(buf + count, "%2x: ", i);
1079 if (count >= PAGE_SIZE - 1) 1163 if (count >= PAGE_SIZE - 1)
1080 break; 1164 break;
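As an illustration of the two callbacks consulted here (register numbers and the my_codec_* names are invented; the codec->volatile_register and codec->readable_register hooks are taken from the code above, while the exact callback signatures are assumed), a codec driver might publish its register layout like this:

	#include <sound/soc.h>

	/* Sketch only: registers 0x00/0x01 stand in for status registers
	 * whose contents change behind the register cache. */
	static int my_codec_volatile_register(unsigned int reg)
	{
		switch (reg) {
		case 0x00:
		case 0x01:
			return 1;	/* never trust the cached value */
		default:
			return 0;
		}
	}

	static int my_codec_readable_register(unsigned int reg)
	{
		return reg <= 0x7f;	/* up to the last documented register */
	}

	/* during probe */
	codec->volatile_register = my_codec_volatile_register;
	codec->readable_register = my_codec_readable_register;

With that in place snd_soc_codec_volatile_register() reports the volatile registers and the register dump above skips anything the codec declares unreadable.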
@@ -1183,10 +1267,18 @@ static void soc_init_codec_debugfs(struct snd_soc_codec *codec)
1183 if (!codec->debugfs_pop_time) 1267 if (!codec->debugfs_pop_time)
1184 printk(KERN_WARNING 1268 printk(KERN_WARNING
1185 "Failed to create pop time debugfs file\n"); 1269 "Failed to create pop time debugfs file\n");
1270
1271 codec->debugfs_dapm = debugfs_create_dir("dapm", debugfs_root);
1272 if (!codec->debugfs_dapm)
1273 printk(KERN_WARNING
1274 "Failed to create DAPM debugfs directory\n");
1275
1276 snd_soc_dapm_debugfs_init(codec);
1186} 1277}
1187 1278
1188static void soc_cleanup_codec_debugfs(struct snd_soc_codec *codec) 1279static void soc_cleanup_codec_debugfs(struct snd_soc_codec *codec)
1189{ 1280{
1281 debugfs_remove_recursive(codec->debugfs_dapm);
1190 debugfs_remove(codec->debugfs_pop_time); 1282 debugfs_remove(codec->debugfs_pop_time);
1191 debugfs_remove(codec->debugfs_reg); 1283 debugfs_remove(codec->debugfs_reg);
1192} 1284}
@@ -1264,10 +1356,10 @@ EXPORT_SYMBOL_GPL(snd_soc_free_ac97_codec);
1264 * Returns 1 for change else 0. 1356 * Returns 1 for change else 0.
1265 */ 1357 */
1266int snd_soc_update_bits(struct snd_soc_codec *codec, unsigned short reg, 1358int snd_soc_update_bits(struct snd_soc_codec *codec, unsigned short reg,
1267 unsigned short mask, unsigned short value) 1359 unsigned int mask, unsigned int value)
1268{ 1360{
1269 int change; 1361 int change;
1270 unsigned short old, new; 1362 unsigned int old, new;
1271 1363
1272 mutex_lock(&io_mutex); 1364 mutex_lock(&io_mutex);
1273 old = snd_soc_read(codec, reg); 1365 old = snd_soc_read(codec, reg);
@@ -1294,10 +1386,10 @@ EXPORT_SYMBOL_GPL(snd_soc_update_bits);
1294 * Returns 1 for change else 0. 1386 * Returns 1 for change else 0.
1295 */ 1387 */
1296int snd_soc_test_bits(struct snd_soc_codec *codec, unsigned short reg, 1388int snd_soc_test_bits(struct snd_soc_codec *codec, unsigned short reg,
1297 unsigned short mask, unsigned short value) 1389 unsigned int mask, unsigned int value)
1298{ 1390{
1299 int change; 1391 int change;
1300 unsigned short old, new; 1392 unsigned int old, new;
1301 1393
1302 mutex_lock(&io_mutex); 1394 mutex_lock(&io_mutex);
1303 old = snd_soc_read(codec, reg); 1395 old = snd_soc_read(codec, reg);
@@ -1381,8 +1473,11 @@ int snd_soc_init_card(struct snd_soc_device *socdev)
1381 continue; 1473 continue;
1382 } 1474 }
1383 } 1475 }
1384 if (card->dai_link[i].codec_dai->ac97_control) 1476 if (card->dai_link[i].codec_dai->ac97_control) {
1385 ac97 = 1; 1477 ac97 = 1;
1478 snd_ac97_dev_add_pdata(codec->ac97,
1479 card->dai_link[i].cpu_dai->ac97_pdata);
1480 }
1386 } 1481 }
1387 snprintf(codec->card->shortname, sizeof(codec->card->shortname), 1482 snprintf(codec->card->shortname, sizeof(codec->card->shortname),
1388 "%s", card->name); 1483 "%s", card->name);
@@ -1586,7 +1681,7 @@ int snd_soc_get_enum_double(struct snd_kcontrol *kcontrol,
1586{ 1681{
1587 struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); 1682 struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
1588 struct soc_enum *e = (struct soc_enum *)kcontrol->private_value; 1683 struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
1589 unsigned short val, bitmask; 1684 unsigned int val, bitmask;
1590 1685
1591 for (bitmask = 1; bitmask < e->max; bitmask <<= 1) 1686 for (bitmask = 1; bitmask < e->max; bitmask <<= 1)
1592 ; 1687 ;
@@ -1615,8 +1710,8 @@ int snd_soc_put_enum_double(struct snd_kcontrol *kcontrol,
1615{ 1710{
1616 struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); 1711 struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
1617 struct soc_enum *e = (struct soc_enum *)kcontrol->private_value; 1712 struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
1618 unsigned short val; 1713 unsigned int val;
1619 unsigned short mask, bitmask; 1714 unsigned int mask, bitmask;
1620 1715
1621 for (bitmask = 1; bitmask < e->max; bitmask <<= 1) 1716 for (bitmask = 1; bitmask < e->max; bitmask <<= 1)
1622 ; 1717 ;
@@ -1652,7 +1747,7 @@ int snd_soc_get_value_enum_double(struct snd_kcontrol *kcontrol,
1652{ 1747{
1653 struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); 1748 struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
1654 struct soc_enum *e = (struct soc_enum *)kcontrol->private_value; 1749 struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
1655 unsigned short reg_val, val, mux; 1750 unsigned int reg_val, val, mux;
1656 1751
1657 reg_val = snd_soc_read(codec, e->reg); 1752 reg_val = snd_soc_read(codec, e->reg);
1658 val = (reg_val >> e->shift_l) & e->mask; 1753 val = (reg_val >> e->shift_l) & e->mask;
@@ -1691,8 +1786,8 @@ int snd_soc_put_value_enum_double(struct snd_kcontrol *kcontrol,
1691{ 1786{
1692 struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); 1787 struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
1693 struct soc_enum *e = (struct soc_enum *)kcontrol->private_value; 1788 struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
1694 unsigned short val; 1789 unsigned int val;
1695 unsigned short mask; 1790 unsigned int mask;
1696 1791
1697 if (ucontrol->value.enumerated.item[0] > e->max - 1) 1792 if (ucontrol->value.enumerated.item[0] > e->max - 1)
1698 return -EINVAL; 1793 return -EINVAL;
@@ -1852,7 +1947,7 @@ int snd_soc_put_volsw(struct snd_kcontrol *kcontrol,
1852 int max = mc->max; 1947 int max = mc->max;
1853 unsigned int mask = (1 << fls(max)) - 1; 1948 unsigned int mask = (1 << fls(max)) - 1;
1854 unsigned int invert = mc->invert; 1949 unsigned int invert = mc->invert;
1855 unsigned short val, val2, val_mask; 1950 unsigned int val, val2, val_mask;
1856 1951
1857 val = (ucontrol->value.integer.value[0] & mask); 1952 val = (ucontrol->value.integer.value[0] & mask);
1858 if (invert) 1953 if (invert)
@@ -1918,7 +2013,7 @@ int snd_soc_get_volsw_2r(struct snd_kcontrol *kcontrol,
1918 unsigned int reg2 = mc->rreg; 2013 unsigned int reg2 = mc->rreg;
1919 unsigned int shift = mc->shift; 2014 unsigned int shift = mc->shift;
1920 int max = mc->max; 2015 int max = mc->max;
1921 unsigned int mask = (1<<fls(max))-1; 2016 unsigned int mask = (1 << fls(max)) - 1;
1922 unsigned int invert = mc->invert; 2017 unsigned int invert = mc->invert;
1923 2018
1924 ucontrol->value.integer.value[0] = 2019 ucontrol->value.integer.value[0] =
@@ -1958,7 +2053,7 @@ int snd_soc_put_volsw_2r(struct snd_kcontrol *kcontrol,
1958 unsigned int mask = (1 << fls(max)) - 1; 2053 unsigned int mask = (1 << fls(max)) - 1;
1959 unsigned int invert = mc->invert; 2054 unsigned int invert = mc->invert;
1960 int err; 2055 int err;
1961 unsigned short val, val2, val_mask; 2056 unsigned int val, val2, val_mask;
1962 2057
1963 val_mask = mask << shift; 2058 val_mask = mask << shift;
1964 val = (ucontrol->value.integer.value[0] & mask); 2059 val = (ucontrol->value.integer.value[0] & mask);
@@ -2050,7 +2145,7 @@ int snd_soc_put_volsw_s8(struct snd_kcontrol *kcontrol,
2050 struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); 2145 struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
2051 unsigned int reg = mc->reg; 2146 unsigned int reg = mc->reg;
2052 int min = mc->min; 2147 int min = mc->min;
2053 unsigned short val; 2148 unsigned int val;
2054 2149
2055 val = (ucontrol->value.integer.value[0]+min) & 0xff; 2150 val = (ucontrol->value.integer.value[0]+min) & 0xff;
2056 val |= ((ucontrol->value.integer.value[1]+min) & 0xff) << 8; 2151 val |= ((ucontrol->value.integer.value[1]+min) & 0xff) << 8;
@@ -2136,17 +2231,20 @@ EXPORT_SYMBOL_GPL(snd_soc_dai_set_fmt);
2136/** 2231/**
2137 * snd_soc_dai_set_tdm_slot - configure DAI TDM. 2232 * snd_soc_dai_set_tdm_slot - configure DAI TDM.
2138 * @dai: DAI 2233 * @dai: DAI
2139 * @mask: DAI specific mask representing used slots. 2234 * @tx_mask: bitmask representing active TX slots.
2235 * @rx_mask: bitmask representing active RX slots.
2140 * @slots: Number of slots in use. 2236 * @slots: Number of slots in use.
2237 * @slot_width: Width in bits for each slot.
2141 * 2238 *
2142 * Configures a DAI for TDM operation. Both mask and slots are codec and DAI 2239 * Configures a DAI for TDM operation. Both mask and slots are codec and DAI
2143 * specific. 2240 * specific.
2144 */ 2241 */
2145int snd_soc_dai_set_tdm_slot(struct snd_soc_dai *dai, 2242int snd_soc_dai_set_tdm_slot(struct snd_soc_dai *dai,
2146 unsigned int mask, int slots) 2243 unsigned int tx_mask, unsigned int rx_mask, int slots, int slot_width)
2147{ 2244{
2148 if (dai->ops && dai->ops->set_tdm_slot) 2245 if (dai->ops && dai->ops->set_tdm_slot)
2149 return dai->ops->set_tdm_slot(dai, mask, slots); 2246 return dai->ops->set_tdm_slot(dai, tx_mask, rx_mask,
2247 slots, slot_width);
2150 else 2248 else
2151 return -EINVAL; 2249 return -EINVAL;
2152} 2250}
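A usage sketch against the new signature (the slot layout, masks and DAI variables are illustrative; only snd_soc_dai_set_tdm_slot() itself comes from the patch): a machine driver setting up a 4-slot, 16-bit TDM link with the first two slots active in both directions would call:

	/* 4 slots of 16 bits each; slots 0 and 1 (mask 0x3) carry audio
	 * for both transmit and receive on this hypothetical board. */
	ret = snd_soc_dai_set_tdm_slot(cpu_dai, 0x3, 0x3, 4, 16);
	if (ret < 0)
		return ret;

	ret = snd_soc_dai_set_tdm_slot(codec_dai, 0x3, 0x3, 4, 16);
	if (ret < 0)
		return ret;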
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 21c69074aa17..0d8b08ef8731 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -37,6 +37,7 @@
37#include <linux/bitops.h> 37#include <linux/bitops.h>
38#include <linux/platform_device.h> 38#include <linux/platform_device.h>
39#include <linux/jiffies.h> 39#include <linux/jiffies.h>
40#include <linux/debugfs.h>
40#include <sound/core.h> 41#include <sound/core.h>
41#include <sound/pcm.h> 42#include <sound/pcm.h>
42#include <sound/pcm_params.h> 43#include <sound/pcm_params.h>
@@ -52,19 +53,41 @@
52 53
53/* dapm power sequences - make this per codec in the future */ 54/* dapm power sequences - make this per codec in the future */
54static int dapm_up_seq[] = { 55static int dapm_up_seq[] = {
55 snd_soc_dapm_pre, snd_soc_dapm_supply, snd_soc_dapm_micbias, 56 [snd_soc_dapm_pre] = 0,
56 snd_soc_dapm_mic, snd_soc_dapm_mux, snd_soc_dapm_value_mux, 57 [snd_soc_dapm_supply] = 1,
57 snd_soc_dapm_dac, snd_soc_dapm_mixer, snd_soc_dapm_mixer_named_ctl, 58 [snd_soc_dapm_micbias] = 2,
58 snd_soc_dapm_pga, snd_soc_dapm_adc, snd_soc_dapm_hp, snd_soc_dapm_spk, 59 [snd_soc_dapm_aif_in] = 3,
59 snd_soc_dapm_post 60 [snd_soc_dapm_aif_out] = 3,
61 [snd_soc_dapm_mic] = 4,
62 [snd_soc_dapm_mux] = 5,
63 [snd_soc_dapm_value_mux] = 5,
64 [snd_soc_dapm_dac] = 6,
65 [snd_soc_dapm_mixer] = 7,
66 [snd_soc_dapm_mixer_named_ctl] = 7,
67 [snd_soc_dapm_pga] = 8,
68 [snd_soc_dapm_adc] = 9,
69 [snd_soc_dapm_hp] = 10,
70 [snd_soc_dapm_spk] = 10,
71 [snd_soc_dapm_post] = 11,
60}; 72};
61 73
62static int dapm_down_seq[] = { 74static int dapm_down_seq[] = {
63 snd_soc_dapm_pre, snd_soc_dapm_adc, snd_soc_dapm_hp, snd_soc_dapm_spk, 75 [snd_soc_dapm_pre] = 0,
64 snd_soc_dapm_pga, snd_soc_dapm_mixer_named_ctl, snd_soc_dapm_mixer, 76 [snd_soc_dapm_adc] = 1,
65 snd_soc_dapm_dac, snd_soc_dapm_mic, snd_soc_dapm_micbias, 77 [snd_soc_dapm_hp] = 2,
66 snd_soc_dapm_mux, snd_soc_dapm_value_mux, snd_soc_dapm_supply, 78 [snd_soc_dapm_spk] = 2,
67 snd_soc_dapm_post 79 [snd_soc_dapm_pga] = 4,
80 [snd_soc_dapm_mixer_named_ctl] = 5,
81 [snd_soc_dapm_mixer] = 5,
82 [snd_soc_dapm_dac] = 6,
83 [snd_soc_dapm_mic] = 7,
84 [snd_soc_dapm_micbias] = 8,
85 [snd_soc_dapm_mux] = 9,
86 [snd_soc_dapm_value_mux] = 9,
87 [snd_soc_dapm_aif_in] = 10,
88 [snd_soc_dapm_aif_out] = 10,
89 [snd_soc_dapm_supply] = 11,
90 [snd_soc_dapm_post] = 12,
68}; 91};
69 92
70static void pop_wait(u32 pop_time) 93static void pop_wait(u32 pop_time)
@@ -130,8 +153,12 @@ static int snd_soc_dapm_set_bias_level(struct snd_soc_device *socdev,
130 153
131 if (card->set_bias_level) 154 if (card->set_bias_level)
132 ret = card->set_bias_level(card, level); 155 ret = card->set_bias_level(card, level);
133 if (ret == 0 && codec->set_bias_level) 156 if (ret == 0) {
134 ret = codec->set_bias_level(codec, level); 157 if (codec->set_bias_level)
158 ret = codec->set_bias_level(codec, level);
159 else
160 codec->bias_level = level;
161 }
135 162
136 return ret; 163 return ret;
137} 164}
@@ -206,6 +233,8 @@ static void dapm_set_path_status(struct snd_soc_dapm_widget *w,
206 case snd_soc_dapm_micbias: 233 case snd_soc_dapm_micbias:
207 case snd_soc_dapm_vmid: 234 case snd_soc_dapm_vmid:
208 case snd_soc_dapm_supply: 235 case snd_soc_dapm_supply:
236 case snd_soc_dapm_aif_in:
237 case snd_soc_dapm_aif_out:
209 p->connect = 1; 238 p->connect = 1;
210 break; 239 break;
211 /* does effect routing - dynamically connected */ 240 /* does effect routing - dynamically connected */
@@ -268,7 +297,7 @@ static int dapm_connect_mixer(struct snd_soc_codec *codec,
268static int dapm_update_bits(struct snd_soc_dapm_widget *widget) 297static int dapm_update_bits(struct snd_soc_dapm_widget *widget)
269{ 298{
270 int change, power; 299 int change, power;
271 unsigned short old, new; 300 unsigned int old, new;
272 struct snd_soc_codec *codec = widget->codec; 301 struct snd_soc_codec *codec = widget->codec;
273 302
274 /* check for valid widgets */ 303 /* check for valid widgets */
@@ -479,8 +508,14 @@ static int is_connected_output_ep(struct snd_soc_dapm_widget *widget)
479 if (widget->id == snd_soc_dapm_supply) 508 if (widget->id == snd_soc_dapm_supply)
480 return 0; 509 return 0;
481 510
482 if (widget->id == snd_soc_dapm_adc && widget->active) 511 switch (widget->id) {
483 return 1; 512 case snd_soc_dapm_adc:
513 case snd_soc_dapm_aif_out:
514 if (widget->active)
515 return 1;
516 default:
517 break;
518 }
484 519
485 if (widget->connected) { 520 if (widget->connected) {
486 /* connected pin ? */ 521 /* connected pin ? */
@@ -519,8 +554,14 @@ static int is_connected_input_ep(struct snd_soc_dapm_widget *widget)
519 return 0; 554 return 0;
520 555
521 /* active stream ? */ 556 /* active stream ? */
522 if (widget->id == snd_soc_dapm_dac && widget->active) 557 switch (widget->id) {
523 return 1; 558 case snd_soc_dapm_dac:
559 case snd_soc_dapm_aif_in:
560 if (widget->active)
561 return 1;
562 default:
563 break;
564 }
524 565
525 if (widget->connected) { 566 if (widget->connected) {
526 /* connected pin ? */ 567 /* connected pin ? */
@@ -689,53 +730,211 @@ static int dapm_supply_check_power(struct snd_soc_dapm_widget *w)
689 return power; 730 return power;
690} 731}
691 732
692/* 733static int dapm_seq_compare(struct snd_soc_dapm_widget *a,
693 * Scan a single DAPM widget for a complete audio path and update the 734 struct snd_soc_dapm_widget *b,
694 * power status appropriately. 735 int sort[])
695 */
696static int dapm_power_widget(struct snd_soc_codec *codec, int event,
697 struct snd_soc_dapm_widget *w)
698{ 736{
699 int ret; 737 if (sort[a->id] != sort[b->id])
738 return sort[a->id] - sort[b->id];
739 if (a->reg != b->reg)
740 return a->reg - b->reg;
700 741
701 switch (w->id) { 742 return 0;
702 case snd_soc_dapm_pre: 743}
703 if (!w->event)
704 return 0;
705 744
706 if (event == SND_SOC_DAPM_STREAM_START) { 745/* Insert a widget in order into a DAPM power sequence. */
707 ret = w->event(w, 746static void dapm_seq_insert(struct snd_soc_dapm_widget *new_widget,
708 NULL, SND_SOC_DAPM_PRE_PMU); 747 struct list_head *list,
748 int sort[])
749{
750 struct snd_soc_dapm_widget *w;
751
752 list_for_each_entry(w, list, power_list)
753 if (dapm_seq_compare(new_widget, w, sort) < 0) {
754 list_add_tail(&new_widget->power_list, &w->power_list);
755 return;
756 }
757
758 list_add_tail(&new_widget->power_list, list);
759}
760
761/* Apply the coalesced changes from a DAPM sequence */
762static void dapm_seq_run_coalesced(struct snd_soc_codec *codec,
763 struct list_head *pending)
764{
765 struct snd_soc_dapm_widget *w;
766 int reg, power, ret;
767 unsigned int value = 0;
768 unsigned int mask = 0;
769 unsigned int cur_mask;
770
771 reg = list_first_entry(pending, struct snd_soc_dapm_widget,
772 power_list)->reg;
773
774 list_for_each_entry(w, pending, power_list) {
775 cur_mask = 1 << w->shift;
776 BUG_ON(reg != w->reg);
777
778 if (w->invert)
779 power = !w->power;
780 else
781 power = w->power;
782
783 mask |= cur_mask;
784 if (power)
785 value |= cur_mask;
786
787 pop_dbg(codec->pop_time,
788 "pop test : Queue %s: reg=0x%x, 0x%x/0x%x\n",
789 w->name, reg, value, mask);
790
791 /* power up pre event */
792 if (w->power && w->event &&
793 (w->event_flags & SND_SOC_DAPM_PRE_PMU)) {
794 pop_dbg(codec->pop_time, "pop test : %s PRE_PMU\n",
795 w->name);
796 ret = w->event(w, NULL, SND_SOC_DAPM_PRE_PMU);
709 if (ret < 0) 797 if (ret < 0)
710 return ret; 798 pr_err("%s: pre event failed: %d\n",
711 } else if (event == SND_SOC_DAPM_STREAM_STOP) { 799 w->name, ret);
712 ret = w->event(w, 800 }
713 NULL, SND_SOC_DAPM_PRE_PMD); 801
802 /* power down pre event */
803 if (!w->power && w->event &&
804 (w->event_flags & SND_SOC_DAPM_PRE_PMD)) {
805 pop_dbg(codec->pop_time, "pop test : %s PRE_PMD\n",
806 w->name);
807 ret = w->event(w, NULL, SND_SOC_DAPM_PRE_PMD);
714 if (ret < 0) 808 if (ret < 0)
715 return ret; 809 pr_err("%s: pre event failed: %d\n",
810 w->name, ret);
716 } 811 }
717 return 0;
718 812
719 case snd_soc_dapm_post: 813 /* Lower PGA volume to reduce pops */
720 if (!w->event) 814 if (w->id == snd_soc_dapm_pga && !w->power)
721 return 0; 815 dapm_set_pga(w, w->power);
816 }
722 817
723 if (event == SND_SOC_DAPM_STREAM_START) { 818 if (reg >= 0) {
819 pop_dbg(codec->pop_time,
820 "pop test : Applying 0x%x/0x%x to %x in %dms\n",
821 value, mask, reg, codec->pop_time);
822 pop_wait(codec->pop_time);
823 snd_soc_update_bits(codec, reg, mask, value);
824 }
825
826 list_for_each_entry(w, pending, power_list) {
827 /* Raise PGA volume to reduce pops */
828 if (w->id == snd_soc_dapm_pga && w->power)
829 dapm_set_pga(w, w->power);
830
831 /* power up post event */
832 if (w->power && w->event &&
833 (w->event_flags & SND_SOC_DAPM_POST_PMU)) {
834 pop_dbg(codec->pop_time, "pop test : %s POST_PMU\n",
835 w->name);
724 ret = w->event(w, 836 ret = w->event(w,
725 NULL, SND_SOC_DAPM_POST_PMU); 837 NULL, SND_SOC_DAPM_POST_PMU);
726 if (ret < 0) 838 if (ret < 0)
727 return ret; 839 pr_err("%s: post event failed: %d\n",
728 } else if (event == SND_SOC_DAPM_STREAM_STOP) { 840 w->name, ret);
729 ret = w->event(w, 841 }
730 NULL, SND_SOC_DAPM_POST_PMD); 842
843 /* power down post event */
844 if (!w->power && w->event &&
845 (w->event_flags & SND_SOC_DAPM_POST_PMD)) {
846 pop_dbg(codec->pop_time, "pop test : %s POST_PMD\n",
847 w->name);
848 ret = w->event(w, NULL, SND_SOC_DAPM_POST_PMD);
731 if (ret < 0) 849 if (ret < 0)
732 return ret; 850 pr_err("%s: post event failed: %d\n",
851 w->name, ret);
733 } 852 }
734 return 0; 853 }
854}
735 855
736 default: 856/* Apply a DAPM power sequence.
737 return dapm_generic_apply_power(w); 857 *
858 * We walk over a pre-sorted list of widgets to apply power to. In
 859 * order to minimise the number of writes to the device required,
860 * multiple widgets will be updated in a single write where possible.
861 * Currently anything that requires more than a single write is not
862 * handled.
863 */
864static void dapm_seq_run(struct snd_soc_codec *codec, struct list_head *list,
865 int event, int sort[])
866{
867 struct snd_soc_dapm_widget *w, *n;
868 LIST_HEAD(pending);
869 int cur_sort = -1;
870 int cur_reg = SND_SOC_NOPM;
871 int ret;
872
873 list_for_each_entry_safe(w, n, list, power_list) {
874 ret = 0;
875
876 /* Do we need to apply any queued changes? */
877 if (sort[w->id] != cur_sort || w->reg != cur_reg) {
878 if (!list_empty(&pending))
879 dapm_seq_run_coalesced(codec, &pending);
880
881 INIT_LIST_HEAD(&pending);
882 cur_sort = -1;
883 cur_reg = SND_SOC_NOPM;
884 }
885
886 switch (w->id) {
887 case snd_soc_dapm_pre:
888 if (!w->event)
889 list_for_each_entry_safe_continue(w, n, list,
890 power_list);
891
892 if (event == SND_SOC_DAPM_STREAM_START)
893 ret = w->event(w,
894 NULL, SND_SOC_DAPM_PRE_PMU);
895 else if (event == SND_SOC_DAPM_STREAM_STOP)
896 ret = w->event(w,
897 NULL, SND_SOC_DAPM_PRE_PMD);
898 break;
899
900 case snd_soc_dapm_post:
901 if (!w->event)
902 list_for_each_entry_safe_continue(w, n, list,
903 power_list);
904
905 if (event == SND_SOC_DAPM_STREAM_START)
906 ret = w->event(w,
907 NULL, SND_SOC_DAPM_POST_PMU);
908 else if (event == SND_SOC_DAPM_STREAM_STOP)
909 ret = w->event(w,
910 NULL, SND_SOC_DAPM_POST_PMD);
911 break;
912
913 case snd_soc_dapm_input:
914 case snd_soc_dapm_output:
915 case snd_soc_dapm_hp:
916 case snd_soc_dapm_mic:
917 case snd_soc_dapm_line:
918 case snd_soc_dapm_spk:
919 /* No register support currently */
920 ret = dapm_generic_apply_power(w);
921 break;
922
923 default:
924 /* Queue it up for application */
925 cur_sort = sort[w->id];
926 cur_reg = w->reg;
927 list_move(&w->power_list, &pending);
928 break;
929 }
930
931 if (ret < 0)
932 pr_err("Failed to apply widget power: %d\n",
933 ret);
738 } 934 }
935
936 if (!list_empty(&pending))
937 dapm_seq_run_coalesced(codec, &pending);
739} 938}
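To make the coalescing concrete (register address and shifts invented for illustration): if three widgets in the pending list share a sort order and register 0x1a, sitting at shifts 0, 1 and 4, with the first two powering up and the third powering down, dapm_seq_run_coalesced() folds them into one register update instead of three:

	/* follows the mask |= 1 << w->shift and value |= 1 << w->shift
	 * (only when powering up) accumulation above */
	mask  = (1 << 0) | (1 << 1) | (1 << 4);	/* 0x13 */
	value = (1 << 0) | (1 << 1);		/* 0x03; the bit at shift 4 stays clear */
	snd_soc_update_bits(codec, 0x1a, mask, value);	/* one write, not three */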
740 939
741/* 940/*
@@ -751,23 +950,22 @@ static int dapm_power_widgets(struct snd_soc_codec *codec, int event)
751{ 950{
752 struct snd_soc_device *socdev = codec->socdev; 951 struct snd_soc_device *socdev = codec->socdev;
753 struct snd_soc_dapm_widget *w; 952 struct snd_soc_dapm_widget *w;
953 LIST_HEAD(up_list);
954 LIST_HEAD(down_list);
754 int ret = 0; 955 int ret = 0;
755 int i, power; 956 int power;
756 int sys_power = 0; 957 int sys_power = 0;
757 958
758 INIT_LIST_HEAD(&codec->up_list);
759 INIT_LIST_HEAD(&codec->down_list);
760
761 /* Check which widgets we need to power and store them in 959 /* Check which widgets we need to power and store them in
762 * lists indicating if they should be powered up or down. 960 * lists indicating if they should be powered up or down.
763 */ 961 */
764 list_for_each_entry(w, &codec->dapm_widgets, list) { 962 list_for_each_entry(w, &codec->dapm_widgets, list) {
765 switch (w->id) { 963 switch (w->id) {
766 case snd_soc_dapm_pre: 964 case snd_soc_dapm_pre:
767 list_add_tail(&codec->down_list, &w->power_list); 965 dapm_seq_insert(w, &down_list, dapm_down_seq);
768 break; 966 break;
769 case snd_soc_dapm_post: 967 case snd_soc_dapm_post:
770 list_add_tail(&codec->up_list, &w->power_list); 968 dapm_seq_insert(w, &up_list, dapm_up_seq);
771 break; 969 break;
772 970
773 default: 971 default:
@@ -782,16 +980,31 @@ static int dapm_power_widgets(struct snd_soc_codec *codec, int event)
782 continue; 980 continue;
783 981
784 if (power) 982 if (power)
785 list_add_tail(&w->power_list, &codec->up_list); 983 dapm_seq_insert(w, &up_list, dapm_up_seq);
786 else 984 else
787 list_add_tail(&w->power_list, 985 dapm_seq_insert(w, &down_list, dapm_down_seq);
788 &codec->down_list);
789 986
790 w->power = power; 987 w->power = power;
791 break; 988 break;
792 } 989 }
793 } 990 }
794 991
992 /* If there are no DAPM widgets then try to figure out power from the
993 * event type.
994 */
995 if (list_empty(&codec->dapm_widgets)) {
996 switch (event) {
997 case SND_SOC_DAPM_STREAM_START:
998 case SND_SOC_DAPM_STREAM_RESUME:
999 sys_power = 1;
1000 break;
1001 case SND_SOC_DAPM_STREAM_NOP:
1002 sys_power = codec->bias_level != SND_SOC_BIAS_STANDBY;
1003 default:
1004 break;
1005 }
1006 }
1007
795 /* If we're changing to all on or all off then prepare */ 1008 /* If we're changing to all on or all off then prepare */
796 if ((sys_power && codec->bias_level == SND_SOC_BIAS_STANDBY) || 1009 if ((sys_power && codec->bias_level == SND_SOC_BIAS_STANDBY) ||
797 (!sys_power && codec->bias_level == SND_SOC_BIAS_ON)) { 1010 (!sys_power && codec->bias_level == SND_SOC_BIAS_ON)) {
@@ -802,32 +1015,10 @@ static int dapm_power_widgets(struct snd_soc_codec *codec, int event)
802 } 1015 }
803 1016
804 /* Power down widgets first; try to avoid amplifying pops. */ 1017 /* Power down widgets first; try to avoid amplifying pops. */
805 for (i = 0; i < ARRAY_SIZE(dapm_down_seq); i++) { 1018 dapm_seq_run(codec, &down_list, event, dapm_down_seq);
806 list_for_each_entry(w, &codec->down_list, power_list) {
807 /* is widget in stream order */
808 if (w->id != dapm_down_seq[i])
809 continue;
810
811 ret = dapm_power_widget(codec, event, w);
812 if (ret != 0)
813 pr_err("Failed to power down %s: %d\n",
814 w->name, ret);
815 }
816 }
817 1019
818 /* Now power up. */ 1020 /* Now power up. */
819 for (i = 0; i < ARRAY_SIZE(dapm_up_seq); i++) { 1021 dapm_seq_run(codec, &up_list, event, dapm_up_seq);
820 list_for_each_entry(w, &codec->up_list, power_list) {
821 /* is widget in stream order */
822 if (w->id != dapm_up_seq[i])
823 continue;
824
825 ret = dapm_power_widget(codec, event, w);
826 if (ret != 0)
827 pr_err("Failed to power up %s: %d\n",
828 w->name, ret);
829 }
830 }
831 1022
832 /* If we just powered the last thing off drop to standby bias */ 1023 /* If we just powered the last thing off drop to standby bias */
833 if (codec->bias_level == SND_SOC_BIAS_PREPARE && !sys_power) { 1024 if (codec->bias_level == SND_SOC_BIAS_PREPARE && !sys_power) {
@@ -845,6 +1036,9 @@ static int dapm_power_widgets(struct snd_soc_codec *codec, int event)
845 pr_err("Failed to apply active bias: %d\n", ret); 1036 pr_err("Failed to apply active bias: %d\n", ret);
846 } 1037 }
847 1038
1039 pop_dbg(codec->pop_time, "DAPM sequencing finished, waiting %dms\n",
1040 codec->pop_time);
1041
848 return 0; 1042 return 0;
849} 1043}
850 1044
@@ -881,6 +1075,8 @@ static void dbg_dump_dapm(struct snd_soc_codec* codec, const char *action)
881 case snd_soc_dapm_mixer: 1075 case snd_soc_dapm_mixer:
882 case snd_soc_dapm_mixer_named_ctl: 1076 case snd_soc_dapm_mixer_named_ctl:
883 case snd_soc_dapm_supply: 1077 case snd_soc_dapm_supply:
1078 case snd_soc_dapm_aif_in:
1079 case snd_soc_dapm_aif_out:
884 if (w->name) { 1080 if (w->name) {
885 in = is_connected_input_ep(w); 1081 in = is_connected_input_ep(w);
886 dapm_clear_walk(w->codec); 1082 dapm_clear_walk(w->codec);
@@ -906,6 +1102,92 @@ static void dbg_dump_dapm(struct snd_soc_codec* codec, const char *action)
906} 1102}
907#endif 1103#endif
908 1104
1105#ifdef CONFIG_DEBUG_FS
1106static int dapm_widget_power_open_file(struct inode *inode, struct file *file)
1107{
1108 file->private_data = inode->i_private;
1109 return 0;
1110}
1111
1112static ssize_t dapm_widget_power_read_file(struct file *file,
1113 char __user *user_buf,
1114 size_t count, loff_t *ppos)
1115{
1116 struct snd_soc_dapm_widget *w = file->private_data;
1117 char *buf;
1118 int in, out;
1119 ssize_t ret;
1120 struct snd_soc_dapm_path *p = NULL;
1121
1122 buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
1123 if (!buf)
1124 return -ENOMEM;
1125
1126 in = is_connected_input_ep(w);
1127 dapm_clear_walk(w->codec);
1128 out = is_connected_output_ep(w);
1129 dapm_clear_walk(w->codec);
1130
1131 ret = snprintf(buf, PAGE_SIZE, "%s: %s in %d out %d\n",
1132 w->name, w->power ? "On" : "Off", in, out);
1133
1134 if (w->active && w->sname)
1135 ret += snprintf(buf, PAGE_SIZE - ret, " stream %s active\n",
1136 w->sname);
1137
1138 list_for_each_entry(p, &w->sources, list_sink) {
1139 if (p->connect)
1140 ret += snprintf(buf + ret, PAGE_SIZE - ret,
1141 " in %s %s\n",
1142 p->name ? p->name : "static",
1143 p->source->name);
1144 }
1145 list_for_each_entry(p, &w->sinks, list_source) {
1146 if (p->connect)
1147 ret += snprintf(buf + ret, PAGE_SIZE - ret,
1148 " out %s %s\n",
1149 p->name ? p->name : "static",
1150 p->sink->name);
1151 }
1152
1153 ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
1154
1155 kfree(buf);
1156 return ret;
1157}
1158
1159static const struct file_operations dapm_widget_power_fops = {
1160 .open = dapm_widget_power_open_file,
1161 .read = dapm_widget_power_read_file,
1162};
1163
1164void snd_soc_dapm_debugfs_init(struct snd_soc_codec *codec)
1165{
1166 struct snd_soc_dapm_widget *w;
1167 struct dentry *d;
1168
1169 if (!codec->debugfs_dapm)
1170 return;
1171
1172 list_for_each_entry(w, &codec->dapm_widgets, list) {
1173 if (!w->name)
1174 continue;
1175
1176 d = debugfs_create_file(w->name, 0444,
1177 codec->debugfs_dapm, w,
1178 &dapm_widget_power_fops);
1179 if (!d)
1180 printk(KERN_WARNING
1181 "ASoC: Failed to create %s debugfs file\n",
1182 w->name);
1183 }
1184}
1185#else
1186void snd_soc_dapm_debugfs_init(struct snd_soc_codec *codec)
1187{
1188}
1189#endif
1190
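For reference, the per-widget debugfs file created above prints one summary line followed by the connected sources and sinks; the widget names and counts below are invented, but the layout follows the snprintf() formats in dapm_widget_power_read_file():

	HPOUT PGA: On in 1 out 1
	 in static DAC
	 out static HPOUT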
909/* test and update the power status of a mux widget */ 1191/* test and update the power status of a mux widget */
910static int dapm_mux_update_power(struct snd_soc_dapm_widget *widget, 1192static int dapm_mux_update_power(struct snd_soc_dapm_widget *widget,
911 struct snd_kcontrol *kcontrol, int mask, 1193 struct snd_kcontrol *kcontrol, int mask,
@@ -1138,8 +1420,8 @@ static int snd_soc_dapm_add_route(struct snd_soc_codec *codec,
1138 if (wsink->id == snd_soc_dapm_input) { 1420 if (wsink->id == snd_soc_dapm_input) {
1139 if (wsource->id == snd_soc_dapm_micbias || 1421 if (wsource->id == snd_soc_dapm_micbias ||
1140 wsource->id == snd_soc_dapm_mic || 1422 wsource->id == snd_soc_dapm_mic ||
1141 wsink->id == snd_soc_dapm_line || 1423 wsource->id == snd_soc_dapm_line ||
1142 wsink->id == snd_soc_dapm_output) 1424 wsource->id == snd_soc_dapm_output)
1143 wsink->ext = 1; 1425 wsink->ext = 1;
1144 } 1426 }
1145 if (wsource->id == snd_soc_dapm_output) { 1427 if (wsource->id == snd_soc_dapm_output) {
@@ -1171,6 +1453,8 @@ static int snd_soc_dapm_add_route(struct snd_soc_codec *codec,
1171 case snd_soc_dapm_pre: 1453 case snd_soc_dapm_pre:
1172 case snd_soc_dapm_post: 1454 case snd_soc_dapm_post:
1173 case snd_soc_dapm_supply: 1455 case snd_soc_dapm_supply:
1456 case snd_soc_dapm_aif_in:
1457 case snd_soc_dapm_aif_out:
1174 list_add(&path->list, &codec->dapm_paths); 1458 list_add(&path->list, &codec->dapm_paths);
1175 list_add(&path->list_sink, &wsink->sources); 1459 list_add(&path->list_sink, &wsink->sources);
1176 list_add(&path->list_source, &wsource->sinks); 1460 list_add(&path->list_source, &wsource->sinks);
@@ -1273,9 +1557,11 @@ int snd_soc_dapm_new_widgets(struct snd_soc_codec *codec)
1273 dapm_new_mux(codec, w); 1557 dapm_new_mux(codec, w);
1274 break; 1558 break;
1275 case snd_soc_dapm_adc: 1559 case snd_soc_dapm_adc:
1560 case snd_soc_dapm_aif_out:
1276 w->power_check = dapm_adc_check_power; 1561 w->power_check = dapm_adc_check_power;
1277 break; 1562 break;
1278 case snd_soc_dapm_dac: 1563 case snd_soc_dapm_dac:
1564 case snd_soc_dapm_aif_in:
1279 w->power_check = dapm_dac_check_power; 1565 w->power_check = dapm_dac_check_power;
1280 break; 1566 break;
1281 case snd_soc_dapm_pga: 1567 case snd_soc_dapm_pga:
@@ -1372,7 +1658,7 @@ int snd_soc_dapm_put_volsw(struct snd_kcontrol *kcontrol,
1372 int max = mc->max; 1658 int max = mc->max;
1373 unsigned int mask = (1 << fls(max)) - 1; 1659 unsigned int mask = (1 << fls(max)) - 1;
1374 unsigned int invert = mc->invert; 1660 unsigned int invert = mc->invert;
1375 unsigned short val, val2, val_mask; 1661 unsigned int val, val2, val_mask;
1376 int ret; 1662 int ret;
1377 1663
1378 val = (ucontrol->value.integer.value[0] & mask); 1664 val = (ucontrol->value.integer.value[0] & mask);
@@ -1436,7 +1722,7 @@ int snd_soc_dapm_get_enum_double(struct snd_kcontrol *kcontrol,
1436{ 1722{
1437 struct snd_soc_dapm_widget *widget = snd_kcontrol_chip(kcontrol); 1723 struct snd_soc_dapm_widget *widget = snd_kcontrol_chip(kcontrol);
1438 struct soc_enum *e = (struct soc_enum *)kcontrol->private_value; 1724 struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
1439 unsigned short val, bitmask; 1725 unsigned int val, bitmask;
1440 1726
1441 for (bitmask = 1; bitmask < e->max; bitmask <<= 1) 1727 for (bitmask = 1; bitmask < e->max; bitmask <<= 1)
1442 ; 1728 ;
@@ -1464,8 +1750,8 @@ int snd_soc_dapm_put_enum_double(struct snd_kcontrol *kcontrol,
1464{ 1750{
1465 struct snd_soc_dapm_widget *widget = snd_kcontrol_chip(kcontrol); 1751 struct snd_soc_dapm_widget *widget = snd_kcontrol_chip(kcontrol);
1466 struct soc_enum *e = (struct soc_enum *)kcontrol->private_value; 1752 struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
1467 unsigned short val, mux; 1753 unsigned int val, mux;
1468 unsigned short mask, bitmask; 1754 unsigned int mask, bitmask;
1469 int ret = 0; 1755 int ret = 0;
1470 1756
1471 for (bitmask = 1; bitmask < e->max; bitmask <<= 1) 1757 for (bitmask = 1; bitmask < e->max; bitmask <<= 1)
@@ -1523,7 +1809,7 @@ int snd_soc_dapm_get_value_enum_double(struct snd_kcontrol *kcontrol,
1523{ 1809{
1524 struct snd_soc_dapm_widget *widget = snd_kcontrol_chip(kcontrol); 1810 struct snd_soc_dapm_widget *widget = snd_kcontrol_chip(kcontrol);
1525 struct soc_enum *e = (struct soc_enum *)kcontrol->private_value; 1811 struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
1526 unsigned short reg_val, val, mux; 1812 unsigned int reg_val, val, mux;
1527 1813
1528 reg_val = snd_soc_read(widget->codec, e->reg); 1814 reg_val = snd_soc_read(widget->codec, e->reg);
1529 val = (reg_val >> e->shift_l) & e->mask; 1815 val = (reg_val >> e->shift_l) & e->mask;
@@ -1563,8 +1849,8 @@ int snd_soc_dapm_put_value_enum_double(struct snd_kcontrol *kcontrol,
1563{ 1849{
1564 struct snd_soc_dapm_widget *widget = snd_kcontrol_chip(kcontrol); 1850 struct snd_soc_dapm_widget *widget = snd_kcontrol_chip(kcontrol);
1565 struct soc_enum *e = (struct soc_enum *)kcontrol->private_value; 1851 struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
1566 unsigned short val, mux; 1852 unsigned int val, mux;
1567 unsigned short mask; 1853 unsigned int mask;
1568 int ret = 0; 1854 int ret = 0;
1569 1855
1570 if (ucontrol->value.enumerated.item[0] > e->max - 1) 1856 if (ucontrol->value.enumerated.item[0] > e->max - 1)
@@ -1880,6 +2166,36 @@ void snd_soc_dapm_free(struct snd_soc_device *socdev)
1880} 2166}
1881EXPORT_SYMBOL_GPL(snd_soc_dapm_free); 2167EXPORT_SYMBOL_GPL(snd_soc_dapm_free);
1882 2168
2169/*
2170 * snd_soc_dapm_shutdown - callback for system shutdown
2171 */
2172void snd_soc_dapm_shutdown(struct snd_soc_device *socdev)
2173{
2174 struct snd_soc_codec *codec = socdev->card->codec;
2175 struct snd_soc_dapm_widget *w;
2176 LIST_HEAD(down_list);
2177 int powerdown = 0;
2178
2179 list_for_each_entry(w, &codec->dapm_widgets, list) {
2180 if (w->power) {
2181 dapm_seq_insert(w, &down_list, dapm_down_seq);
2182 w->power = 0;
2183 powerdown = 1;
2184 }
2185 }
2186
2187 /* If there were no widgets to power down we're already in
2188 * standby.
2189 */
2190 if (powerdown) {
2191 snd_soc_dapm_set_bias_level(socdev, SND_SOC_BIAS_PREPARE);
2192 dapm_seq_run(codec, &down_list, 0, dapm_down_seq);
2193 snd_soc_dapm_set_bias_level(socdev, SND_SOC_BIAS_STANDBY);
2194 }
2195
2196 snd_soc_dapm_set_bias_level(socdev, SND_SOC_BIAS_OFF);
2197}
2198
1883/* Module information */ 2199/* Module information */
1884MODULE_AUTHOR("Liam Girdwood, lrg@slimlogic.co.uk"); 2200MODULE_AUTHOR("Liam Girdwood, lrg@slimlogic.co.uk");
1885MODULE_DESCRIPTION("Dynamic Audio Power Management core for ALSA SoC"); 2201MODULE_DESCRIPTION("Dynamic Audio Power Management core for ALSA SoC");
diff --git a/sound/soc/soc-jack.c b/sound/soc/soc-jack.c
index 28346fb2e70c..1d455ab79490 100644
--- a/sound/soc/soc-jack.c
+++ b/sound/soc/soc-jack.c
@@ -73,14 +73,15 @@ void snd_soc_jack_report(struct snd_soc_jack *jack, int status, int mask)
73 oldstatus = jack->status; 73 oldstatus = jack->status;
74 74
75 jack->status &= ~mask; 75 jack->status &= ~mask;
76 jack->status |= status; 76 jack->status |= status & mask;
77 77
78 /* The DAPM sync is expensive enough to be worth skipping */ 78 /* The DAPM sync is expensive enough to be worth skipping.
79 if (jack->status == oldstatus) 79 * However, empty mask means pin synchronization is desired. */
80 if (mask && (jack->status == oldstatus))
80 goto out; 81 goto out;
81 82
82 list_for_each_entry(pin, &jack->pins, list) { 83 list_for_each_entry(pin, &jack->pins, list) {
83 enable = pin->mask & status; 84 enable = pin->mask & jack->status;
84 85
85 if (pin->invert) 86 if (pin->invert)
86 enable = !enable; 87 enable = !enable;
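A usage sketch (the jack pointer and detection context are assumed): with the masked update a detection handler can report one bit without disturbing the others, and an empty mask now forces a DAPM pin resync even when nothing changed:

	/* only the SND_JACK_HEADPHONE bit is updated; any microphone state
	 * reported earlier is left alone */
	snd_soc_jack_report(jack, SND_JACK_HEADPHONE, SND_JACK_HEADPHONE);

	/* status and mask both zero: no state change, just resynchronise
	 * the DAPM pins attached to the jack */
	snd_soc_jack_report(jack, 0, 0);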
@@ -220,6 +221,9 @@ int snd_soc_jack_add_gpios(struct snd_soc_jack *jack, int count,
220 if (ret) 221 if (ret)
221 goto err; 222 goto err;
222 223
224 INIT_WORK(&gpios[i].work, gpio_work);
225 gpios[i].jack = jack;
226
223 ret = request_irq(gpio_to_irq(gpios[i].gpio), 227 ret = request_irq(gpio_to_irq(gpios[i].gpio),
224 gpio_handler, 228 gpio_handler,
225 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, 229 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
@@ -228,8 +232,13 @@ int snd_soc_jack_add_gpios(struct snd_soc_jack *jack, int count,
228 if (ret) 232 if (ret)
229 goto err; 233 goto err;
230 234
231 INIT_WORK(&gpios[i].work, gpio_work); 235#ifdef CONFIG_GPIO_SYSFS
232 gpios[i].jack = jack; 236 /* Expose GPIO value over sysfs for diagnostic purposes */
237 gpio_export(gpios[i].gpio, false);
238#endif
239
240 /* Update initial jack status */
241 snd_soc_jack_gpio_detect(&gpios[i]);
233 } 242 }
234 243
235 return 0; 244 return 0;
@@ -258,6 +267,9 @@ void snd_soc_jack_free_gpios(struct snd_soc_jack *jack, int count,
258 int i; 267 int i;
259 268
260 for (i = 0; i < count; i++) { 269 for (i = 0; i < count; i++) {
270#ifdef CONFIG_GPIO_SYSFS
271 gpio_unexport(gpios[i].gpio);
272#endif
261 free_irq(gpio_to_irq(gpios[i].gpio), &gpios[i]); 273 free_irq(gpio_to_irq(gpios[i].gpio), &gpios[i]);
262 gpio_free(gpios[i].gpio); 274 gpio_free(gpios[i].gpio);
263 gpios[i].jack = NULL; 275 gpios[i].jack = NULL;
diff --git a/sound/soc/txx9/txx9aclc.c b/sound/soc/txx9/txx9aclc.c
index 938a58a5a244..efed64b8b026 100644
--- a/sound/soc/txx9/txx9aclc.c
+++ b/sound/soc/txx9/txx9aclc.c
@@ -297,15 +297,17 @@ static int txx9aclc_pcm_new(struct snd_card *card, struct snd_soc_dai *dai,
297static bool filter(struct dma_chan *chan, void *param) 297static bool filter(struct dma_chan *chan, void *param)
298{ 298{
299 struct txx9aclc_dmadata *dmadata = param; 299 struct txx9aclc_dmadata *dmadata = param;
300 char devname[20 + 2]; /* FIXME: old BUS_ID_SIZE + 2 */ 300 char *devname;
301 bool found = false;
301 302
302 snprintf(devname, sizeof(devname), "%s.%d", dmadata->dma_res->name, 303 devname = kasprintf(GFP_KERNEL, "%s.%d", dmadata->dma_res->name,
303 (int)dmadata->dma_res->start); 304 (int)dmadata->dma_res->start);
304 if (strcmp(dev_name(chan->device->dev), devname) == 0) { 305 if (strcmp(dev_name(chan->device->dev), devname) == 0) {
305 chan->private = &dmadata->dma_slave; 306 chan->private = &dmadata->dma_slave;
306 return true; 307 found = true;
307 } 308 }
308 return false; 309 kfree(devname);
310 return found;
309} 311}
310 312
311static int txx9aclc_dma_init(struct txx9aclc_soc_device *dev, 313static int txx9aclc_dma_init(struct txx9aclc_soc_device *dev,
diff --git a/sound/sound_core.c b/sound/sound_core.c
index a41f8b127f49..bb4b88e606bb 100644
--- a/sound/sound_core.c
+++ b/sound/sound_core.c
@@ -128,6 +128,46 @@ extern int msnd_pinnacle_init(void);
128#endif 128#endif
129 129
130/* 130/*
131 * By default, OSS sound_core claims full legacy minor range (0-255)
132 * of SOUND_MAJOR to trap open attempts to any sound minor and
133 * requests modules using custom sound-slot/service-* module aliases.
134 * The only benefit of doing this is allowing use of custom module
135 * aliases instead of the standard char-major-* ones. This behavior
 136 * prevents alternative OSS implementations and is scheduled to be
137 * removed.
138 *
139 * CONFIG_SOUND_OSS_CORE_PRECLAIM and soundcore.preclaim_oss kernel
140 * parameter are added to allow distros and developers to try and
141 * switch to alternative implementations without needing to rebuild
142 * the kernel in the meantime. If preclaim_oss is non-zero, the
143 * kernel will behave the same as before. All SOUND_MAJOR minors are
144 * preclaimed and the custom module aliases along with standard chrdev
145 * ones are emitted if a missing device is opened. If preclaim_oss is
146 * zero, sound_core only grabs what's actually in use and for missing
147 * devices only the standard chrdev aliases are requested.
148 *
 149 * All this clutter is scheduled to be removed along with
150 * sound-slot/service-* module aliases. Please take a look at
151 * feature-removal-schedule.txt for details.
152 */
153#ifdef CONFIG_SOUND_OSS_CORE_PRECLAIM
154static int preclaim_oss = 1;
155#else
156static int preclaim_oss = 0;
157#endif
158
159module_param(preclaim_oss, int, 0444);
160
161static int soundcore_open(struct inode *, struct file *);
162
163static const struct file_operations soundcore_fops =
164{
165 /* We must have an owner or the module locking fails */
166 .owner = THIS_MODULE,
167 .open = soundcore_open,
168};
169
170/*
131 * Low level list operator. Scan the ordered list, find a hole and 171 * Low level list operator. Scan the ordered list, find a hole and
132 * join into it. Called with the lock asserted 172 * join into it. Called with the lock asserted
133 */ 173 */
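In practice the new behaviour is selected at build time with CONFIG_SOUND_OSS_CORE_PRECLAIM and can be overridden at boot; with soundcore built in, the module_param() above should surface as a soundcore.-prefixed option (exact spelling inferred from the parameter name):

	soundcore.preclaim_oss=0	# only claim minors that are actually in use
	soundcore.preclaim_oss=1	# preclaim the whole SOUND_MAJOR range (old behaviour)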
@@ -219,8 +259,9 @@ static int sound_insert_unit(struct sound_unit **list, const struct file_operati
219 259
220 if (!s) 260 if (!s)
221 return -ENOMEM; 261 return -ENOMEM;
222 262
223 spin_lock(&sound_loader_lock); 263 spin_lock(&sound_loader_lock);
264retry:
224 r = __sound_insert_unit(s, list, fops, index, low, top); 265 r = __sound_insert_unit(s, list, fops, index, low, top);
225 spin_unlock(&sound_loader_lock); 266 spin_unlock(&sound_loader_lock);
226 267
@@ -231,11 +272,31 @@ static int sound_insert_unit(struct sound_unit **list, const struct file_operati
231 else 272 else
232 sprintf(s->name, "sound/%s%d", name, r / SOUND_STEP); 273 sprintf(s->name, "sound/%s%d", name, r / SOUND_STEP);
233 274
275 if (!preclaim_oss) {
276 /*
277 * Something else might have grabbed the minor. If
278 * first free slot is requested, rescan with @low set
279 * to the next unit; otherwise, -EBUSY.
280 */
281 r = __register_chrdev(SOUND_MAJOR, s->unit_minor, 1, s->name,
282 &soundcore_fops);
283 if (r < 0) {
284 spin_lock(&sound_loader_lock);
285 __sound_remove_unit(list, s->unit_minor);
286 if (index < 0) {
287 low = s->unit_minor + SOUND_STEP;
288 goto retry;
289 }
290 spin_unlock(&sound_loader_lock);
291 return -EBUSY;
292 }
293 }
294
234 device_create(sound_class, dev, MKDEV(SOUND_MAJOR, s->unit_minor), 295 device_create(sound_class, dev, MKDEV(SOUND_MAJOR, s->unit_minor),
235 NULL, s->name+6); 296 NULL, s->name+6);
236 return r; 297 return s->unit_minor;
237 298
238 fail: 299fail:
239 kfree(s); 300 kfree(s);
240 return r; 301 return r;
241} 302}
@@ -254,6 +315,9 @@ static void sound_remove_unit(struct sound_unit **list, int unit)
254 p = __sound_remove_unit(list, unit); 315 p = __sound_remove_unit(list, unit);
255 spin_unlock(&sound_loader_lock); 316 spin_unlock(&sound_loader_lock);
256 if (p) { 317 if (p) {
318 if (!preclaim_oss)
319 __unregister_chrdev(SOUND_MAJOR, p->unit_minor, 1,
320 p->name);
257 device_destroy(sound_class, MKDEV(SOUND_MAJOR, p->unit_minor)); 321 device_destroy(sound_class, MKDEV(SOUND_MAJOR, p->unit_minor));
258 kfree(p); 322 kfree(p);
259 } 323 }
@@ -491,19 +555,6 @@ void unregister_sound_dsp(int unit)
491 555
492EXPORT_SYMBOL(unregister_sound_dsp); 556EXPORT_SYMBOL(unregister_sound_dsp);
493 557
494/*
495 * Now our file operations
496 */
497
498static int soundcore_open(struct inode *, struct file *);
499
500static const struct file_operations soundcore_fops=
501{
502 /* We must have an owner or the module locking fails */
503 .owner = THIS_MODULE,
504 .open = soundcore_open,
505};
506
507static struct sound_unit *__look_for_unit(int chain, int unit) 558static struct sound_unit *__look_for_unit(int chain, int unit)
508{ 559{
509 struct sound_unit *s; 560 struct sound_unit *s;
@@ -539,8 +590,9 @@ static int soundcore_open(struct inode *inode, struct file *file)
539 s = __look_for_unit(chain, unit); 590 s = __look_for_unit(chain, unit);
540 if (s) 591 if (s)
541 new_fops = fops_get(s->unit_fops); 592 new_fops = fops_get(s->unit_fops);
542 if (!new_fops) { 593 if (preclaim_oss && !new_fops) {
543 spin_unlock(&sound_loader_lock); 594 spin_unlock(&sound_loader_lock);
595
544 /* 596 /*
545 * Please, don't change this order or code. 597 * Please, don't change this order or code.
546 * For ALSA slot means soundcard and OSS emulation code 598 * For ALSA slot means soundcard and OSS emulation code
@@ -550,6 +602,17 @@ static int soundcore_open(struct inode *inode, struct file *file)
550 */ 602 */
551 request_module("sound-slot-%i", unit>>4); 603 request_module("sound-slot-%i", unit>>4);
552 request_module("sound-service-%i-%i", unit>>4, chain); 604 request_module("sound-service-%i-%i", unit>>4, chain);
605
606 /*
607 * sound-slot/service-* module aliases are scheduled
608 * for removal in favor of the standard char-major-*
609 * module aliases. For the time being, generate both
610 * the legacy and standard module aliases to ease
611 * transition.
612 */
613 if (request_module("char-major-%d-%d", SOUND_MAJOR, unit) > 0)
614 request_module("char-major-%d", SOUND_MAJOR);
615
553 spin_lock(&sound_loader_lock); 616 spin_lock(&sound_loader_lock);
554 s = __look_for_unit(chain, unit); 617 s = __look_for_unit(chain, unit);
555 if (s) 618 if (s)
@@ -593,7 +656,8 @@ static void cleanup_oss_soundcore(void)
593 656
594static int __init init_oss_soundcore(void) 657static int __init init_oss_soundcore(void)
595{ 658{
596 if (register_chrdev(SOUND_MAJOR, "sound", &soundcore_fops)==-1) { 659 if (preclaim_oss &&
660 register_chrdev(SOUND_MAJOR, "sound", &soundcore_fops) == -1) {
597 printk(KERN_ERR "soundcore: sound device already in use.\n"); 661 printk(KERN_ERR "soundcore: sound device already in use.\n");
598 return -EBUSY; 662 return -EBUSY;
599 } 663 }
diff --git a/sound/usb/usbaudio.c b/sound/usb/usbaudio.c
index 44b9cdc8a83b..8db0374e10d5 100644
--- a/sound/usb/usbaudio.c
+++ b/sound/usb/usbaudio.c
@@ -1083,6 +1083,8 @@ static int init_substream_urbs(struct snd_usb_substream *subs, unsigned int peri
1083 } else 1083 } else
1084 urb_packs = 1; 1084 urb_packs = 1;
1085 urb_packs *= packs_per_ms; 1085 urb_packs *= packs_per_ms;
1086 if (subs->syncpipe)
1087 urb_packs = min(urb_packs, 1U << subs->syncinterval);
1086 1088
1087 /* decide how many packets to be used */ 1089 /* decide how many packets to be used */
1088 if (is_playback) { 1090 if (is_playback) {
@@ -2124,8 +2126,8 @@ static void proc_dump_substream_formats(struct snd_usb_substream *subs, struct s
2124 fp = list_entry(p, struct audioformat, list); 2126 fp = list_entry(p, struct audioformat, list);
2125 snd_iprintf(buffer, " Interface %d\n", fp->iface); 2127 snd_iprintf(buffer, " Interface %d\n", fp->iface);
2126 snd_iprintf(buffer, " Altset %d\n", fp->altsetting); 2128 snd_iprintf(buffer, " Altset %d\n", fp->altsetting);
2127 snd_iprintf(buffer, " Format: %#x (%d bits)\n", 2129 snd_iprintf(buffer, " Format: %s\n",
2128 fp->format, snd_pcm_format_width(fp->format)); 2130 snd_pcm_format_name(fp->format));
2129 snd_iprintf(buffer, " Channels: %d\n", fp->channels); 2131 snd_iprintf(buffer, " Channels: %d\n", fp->channels);
2130 snd_iprintf(buffer, " Endpoint: %d %s (%s)\n", 2132 snd_iprintf(buffer, " Endpoint: %d %s (%s)\n",
2131 fp->endpoint & USB_ENDPOINT_NUMBER_MASK, 2133 fp->endpoint & USB_ENDPOINT_NUMBER_MASK,
diff --git a/sound/usb/usbmidi.c b/sound/usb/usbmidi.c
index 2fb35cc22a30..0eff19ceb7e1 100644
--- a/sound/usb/usbmidi.c
+++ b/sound/usb/usbmidi.c
@@ -45,6 +45,7 @@
45#include <linux/slab.h> 45#include <linux/slab.h>
46#include <linux/timer.h> 46#include <linux/timer.h>
47#include <linux/usb.h> 47#include <linux/usb.h>
48#include <linux/wait.h>
48#include <sound/core.h> 49#include <sound/core.h>
49#include <sound/rawmidi.h> 50#include <sound/rawmidi.h>
50#include <sound/asequencer.h> 51#include <sound/asequencer.h>
@@ -62,6 +63,9 @@
62 */ 63 */
63#define ERROR_DELAY_JIFFIES (HZ / 10) 64#define ERROR_DELAY_JIFFIES (HZ / 10)
64 65
66#define OUTPUT_URBS 7
67#define INPUT_URBS 7
68
65 69
66MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>"); 70MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
67MODULE_DESCRIPTION("USB Audio/MIDI helper module"); 71MODULE_DESCRIPTION("USB Audio/MIDI helper module");
@@ -90,7 +94,7 @@ struct snd_usb_midi_endpoint;
90 94
91struct usb_protocol_ops { 95struct usb_protocol_ops {
92 void (*input)(struct snd_usb_midi_in_endpoint*, uint8_t*, int); 96 void (*input)(struct snd_usb_midi_in_endpoint*, uint8_t*, int);
93 void (*output)(struct snd_usb_midi_out_endpoint*); 97 void (*output)(struct snd_usb_midi_out_endpoint *ep, struct urb *urb);
94 void (*output_packet)(struct urb*, uint8_t, uint8_t, uint8_t, uint8_t); 98 void (*output_packet)(struct urb*, uint8_t, uint8_t, uint8_t, uint8_t);
95 void (*init_out_endpoint)(struct snd_usb_midi_out_endpoint*); 99 void (*init_out_endpoint)(struct snd_usb_midi_out_endpoint*);
96 void (*finish_out_endpoint)(struct snd_usb_midi_out_endpoint*); 100 void (*finish_out_endpoint)(struct snd_usb_midi_out_endpoint*);
@@ -116,11 +120,15 @@ struct snd_usb_midi {
116 120
117struct snd_usb_midi_out_endpoint { 121struct snd_usb_midi_out_endpoint {
118 struct snd_usb_midi* umidi; 122 struct snd_usb_midi* umidi;
119 struct urb* urb; 123 struct out_urb_context {
120 int urb_active; 124 struct urb *urb;
125 struct snd_usb_midi_out_endpoint *ep;
126 } urbs[OUTPUT_URBS];
127 unsigned int active_urbs;
128 unsigned int drain_urbs;
121 int max_transfer; /* size of urb buffer */ 129 int max_transfer; /* size of urb buffer */
122 struct tasklet_struct tasklet; 130 struct tasklet_struct tasklet;
123 131 unsigned int next_urb;
124 spinlock_t buffer_lock; 132 spinlock_t buffer_lock;
125 133
126 struct usbmidi_out_port { 134 struct usbmidi_out_port {
@@ -139,11 +147,13 @@ struct snd_usb_midi_out_endpoint {
139 uint8_t data[2]; 147 uint8_t data[2];
140 } ports[0x10]; 148 } ports[0x10];
141 int current_port; 149 int current_port;
150
151 wait_queue_head_t drain_wait;
142}; 152};
143 153
144struct snd_usb_midi_in_endpoint { 154struct snd_usb_midi_in_endpoint {
145 struct snd_usb_midi* umidi; 155 struct snd_usb_midi* umidi;
146 struct urb* urb; 156 struct urb* urbs[INPUT_URBS];
147 struct usbmidi_in_port { 157 struct usbmidi_in_port {
148 struct snd_rawmidi_substream *substream; 158 struct snd_rawmidi_substream *substream;
149 u8 running_status_length; 159 u8 running_status_length;
@@ -251,10 +261,17 @@ static void snd_usbmidi_in_urb_complete(struct urb* urb)
251 261
252static void snd_usbmidi_out_urb_complete(struct urb* urb) 262static void snd_usbmidi_out_urb_complete(struct urb* urb)
253{ 263{
254 struct snd_usb_midi_out_endpoint* ep = urb->context; 264 struct out_urb_context *context = urb->context;
265 struct snd_usb_midi_out_endpoint* ep = context->ep;
266 unsigned int urb_index;
255 267
256 spin_lock(&ep->buffer_lock); 268 spin_lock(&ep->buffer_lock);
257 ep->urb_active = 0; 269 urb_index = context - ep->urbs;
270 ep->active_urbs &= ~(1 << urb_index);
271 if (unlikely(ep->drain_urbs)) {
272 ep->drain_urbs &= ~(1 << urb_index);
273 wake_up(&ep->drain_wait);
274 }
258 spin_unlock(&ep->buffer_lock); 275 spin_unlock(&ep->buffer_lock);
259 if (urb->status < 0) { 276 if (urb->status < 0) {
260 int err = snd_usbmidi_urb_error(urb->status); 277 int err = snd_usbmidi_urb_error(urb->status);
@@ -274,24 +291,38 @@ static void snd_usbmidi_out_urb_complete(struct urb* urb)
274 */ 291 */
275static void snd_usbmidi_do_output(struct snd_usb_midi_out_endpoint* ep) 292static void snd_usbmidi_do_output(struct snd_usb_midi_out_endpoint* ep)
276{ 293{
277 struct urb* urb = ep->urb; 294 unsigned int urb_index;
295 struct urb* urb;
278 unsigned long flags; 296 unsigned long flags;
279 297
280 spin_lock_irqsave(&ep->buffer_lock, flags); 298 spin_lock_irqsave(&ep->buffer_lock, flags);
281 if (ep->urb_active || ep->umidi->chip->shutdown) { 299 if (ep->umidi->chip->shutdown) {
282 spin_unlock_irqrestore(&ep->buffer_lock, flags); 300 spin_unlock_irqrestore(&ep->buffer_lock, flags);
283 return; 301 return;
284 } 302 }
285 303
286 urb->transfer_buffer_length = 0; 304 urb_index = ep->next_urb;
287 ep->umidi->usb_protocol_ops->output(ep); 305 for (;;) {
306 if (!(ep->active_urbs & (1 << urb_index))) {
307 urb = ep->urbs[urb_index].urb;
308 urb->transfer_buffer_length = 0;
309 ep->umidi->usb_protocol_ops->output(ep, urb);
310 if (urb->transfer_buffer_length == 0)
311 break;
288 312
289 if (urb->transfer_buffer_length > 0) { 313 dump_urb("sending", urb->transfer_buffer,
290 dump_urb("sending", urb->transfer_buffer, 314 urb->transfer_buffer_length);
291 urb->transfer_buffer_length); 315 urb->dev = ep->umidi->chip->dev;
292 urb->dev = ep->umidi->chip->dev; 316 if (snd_usbmidi_submit_urb(urb, GFP_ATOMIC) < 0)
293 ep->urb_active = snd_usbmidi_submit_urb(urb, GFP_ATOMIC) >= 0; 317 break;
318 ep->active_urbs |= 1 << urb_index;
319 }
320 if (++urb_index >= OUTPUT_URBS)
321 urb_index = 0;
322 if (urb_index == ep->next_urb)
323 break;
294 } 324 }
325 ep->next_urb = urb_index;
295 spin_unlock_irqrestore(&ep->buffer_lock, flags); 326 spin_unlock_irqrestore(&ep->buffer_lock, flags);
296} 327}
297 328
@@ -306,7 +337,7 @@ static void snd_usbmidi_out_tasklet(unsigned long data)
306static void snd_usbmidi_error_timer(unsigned long data) 337static void snd_usbmidi_error_timer(unsigned long data)
307{ 338{
308 struct snd_usb_midi *umidi = (struct snd_usb_midi *)data; 339 struct snd_usb_midi *umidi = (struct snd_usb_midi *)data;
309 int i; 340 unsigned int i, j;
310 341
311 spin_lock(&umidi->disc_lock); 342 spin_lock(&umidi->disc_lock);
312 if (umidi->disconnected) { 343 if (umidi->disconnected) {
@@ -317,8 +348,10 @@ static void snd_usbmidi_error_timer(unsigned long data)
317 struct snd_usb_midi_in_endpoint *in = umidi->endpoints[i].in; 348 struct snd_usb_midi_in_endpoint *in = umidi->endpoints[i].in;
318 if (in && in->error_resubmit) { 349 if (in && in->error_resubmit) {
319 in->error_resubmit = 0; 350 in->error_resubmit = 0;
320 in->urb->dev = umidi->chip->dev; 351 for (j = 0; j < INPUT_URBS; ++j) {
321 snd_usbmidi_submit_urb(in->urb, GFP_ATOMIC); 352 in->urbs[j]->dev = umidi->chip->dev;
353 snd_usbmidi_submit_urb(in->urbs[j], GFP_ATOMIC);
354 }
322 } 355 }
323 if (umidi->endpoints[i].out) 356 if (umidi->endpoints[i].out)
324 snd_usbmidi_do_output(umidi->endpoints[i].out); 357 snd_usbmidi_do_output(umidi->endpoints[i].out);
@@ -330,13 +363,14 @@ static void snd_usbmidi_error_timer(unsigned long data)
330static int send_bulk_static_data(struct snd_usb_midi_out_endpoint* ep, 363static int send_bulk_static_data(struct snd_usb_midi_out_endpoint* ep,
331 const void *data, int len) 364 const void *data, int len)
332{ 365{
333 int err; 366 int err = 0;
334 void *buf = kmemdup(data, len, GFP_KERNEL); 367 void *buf = kmemdup(data, len, GFP_KERNEL);
335 if (!buf) 368 if (!buf)
336 return -ENOMEM; 369 return -ENOMEM;
337 dump_urb("sending", buf, len); 370 dump_urb("sending", buf, len);
338 err = usb_bulk_msg(ep->umidi->chip->dev, ep->urb->pipe, buf, len, 371 if (ep->urbs[0].urb)
339 NULL, 250); 372 err = usb_bulk_msg(ep->umidi->chip->dev, ep->urbs[0].urb->pipe,
373 buf, len, NULL, 250);
340 kfree(buf); 374 kfree(buf);
341 return err; 375 return err;
342} 376}
@@ -554,9 +588,9 @@ static void snd_usbmidi_transmit_byte(struct usbmidi_out_port* port,
554 } 588 }
555} 589}
556 590
557static void snd_usbmidi_standard_output(struct snd_usb_midi_out_endpoint* ep) 591static void snd_usbmidi_standard_output(struct snd_usb_midi_out_endpoint* ep,
592 struct urb *urb)
558{ 593{
559 struct urb* urb = ep->urb;
560 int p; 594 int p;
561 595
562 /* FIXME: lower-numbered ports can starve higher-numbered ports */ 596 /* FIXME: lower-numbered ports can starve higher-numbered ports */
@@ -613,14 +647,15 @@ static void snd_usbmidi_novation_input(struct snd_usb_midi_in_endpoint* ep,
613 snd_usbmidi_input_data(ep, 0, &buffer[2], buffer[0] - 1); 647 snd_usbmidi_input_data(ep, 0, &buffer[2], buffer[0] - 1);
614} 648}
615 649
616static void snd_usbmidi_novation_output(struct snd_usb_midi_out_endpoint* ep) 650static void snd_usbmidi_novation_output(struct snd_usb_midi_out_endpoint* ep,
651 struct urb *urb)
617{ 652{
618 uint8_t* transfer_buffer; 653 uint8_t* transfer_buffer;
619 int count; 654 int count;
620 655
621 if (!ep->ports[0].active) 656 if (!ep->ports[0].active)
622 return; 657 return;
623 transfer_buffer = ep->urb->transfer_buffer; 658 transfer_buffer = urb->transfer_buffer;
624 count = snd_rawmidi_transmit(ep->ports[0].substream, 659 count = snd_rawmidi_transmit(ep->ports[0].substream,
625 &transfer_buffer[2], 660 &transfer_buffer[2],
626 ep->max_transfer - 2); 661 ep->max_transfer - 2);
@@ -630,7 +665,7 @@ static void snd_usbmidi_novation_output(struct snd_usb_midi_out_endpoint* ep)
630 } 665 }
631 transfer_buffer[0] = 0; 666 transfer_buffer[0] = 0;
632 transfer_buffer[1] = count; 667 transfer_buffer[1] = count;
633 ep->urb->transfer_buffer_length = 2 + count; 668 urb->transfer_buffer_length = 2 + count;
634} 669}
635 670
636static struct usb_protocol_ops snd_usbmidi_novation_ops = { 671static struct usb_protocol_ops snd_usbmidi_novation_ops = {
@@ -648,20 +683,21 @@ static void snd_usbmidi_raw_input(struct snd_usb_midi_in_endpoint* ep,
648 snd_usbmidi_input_data(ep, 0, buffer, buffer_length); 683 snd_usbmidi_input_data(ep, 0, buffer, buffer_length);
649} 684}
650 685
651static void snd_usbmidi_raw_output(struct snd_usb_midi_out_endpoint* ep) 686static void snd_usbmidi_raw_output(struct snd_usb_midi_out_endpoint* ep,
687 struct urb *urb)
652{ 688{
653 int count; 689 int count;
654 690
655 if (!ep->ports[0].active) 691 if (!ep->ports[0].active)
656 return; 692 return;
657 count = snd_rawmidi_transmit(ep->ports[0].substream, 693 count = snd_rawmidi_transmit(ep->ports[0].substream,
658 ep->urb->transfer_buffer, 694 urb->transfer_buffer,
659 ep->max_transfer); 695 ep->max_transfer);
660 if (count < 1) { 696 if (count < 1) {
661 ep->ports[0].active = 0; 697 ep->ports[0].active = 0;
662 return; 698 return;
663 } 699 }
664 ep->urb->transfer_buffer_length = count; 700 urb->transfer_buffer_length = count;
665} 701}
666 702
667static struct usb_protocol_ops snd_usbmidi_raw_ops = { 703static struct usb_protocol_ops snd_usbmidi_raw_ops = {
@@ -681,23 +717,25 @@ static void snd_usbmidi_us122l_input(struct snd_usb_midi_in_endpoint *ep,
681 snd_usbmidi_input_data(ep, 0, buffer, buffer_length); 717 snd_usbmidi_input_data(ep, 0, buffer, buffer_length);
682} 718}
683 719
684static void snd_usbmidi_us122l_output(struct snd_usb_midi_out_endpoint *ep) 720static void snd_usbmidi_us122l_output(struct snd_usb_midi_out_endpoint *ep,
721 struct urb *urb)
685{ 722{
686 int count; 723 int count;
687 724
688 if (!ep->ports[0].active) 725 if (!ep->ports[0].active)
689 return; 726 return;
690 count = ep->urb->dev->speed == USB_SPEED_HIGH ? 1 : 2; 727 count = snd_usb_get_speed(ep->umidi->chip->dev) == USB_SPEED_HIGH
728 ? 1 : 2;
691 count = snd_rawmidi_transmit(ep->ports[0].substream, 729 count = snd_rawmidi_transmit(ep->ports[0].substream,
692 ep->urb->transfer_buffer, 730 urb->transfer_buffer,
693 count); 731 count);
694 if (count < 1) { 732 if (count < 1) {
695 ep->ports[0].active = 0; 733 ep->ports[0].active = 0;
696 return; 734 return;
697 } 735 }
698 736
699 memset(ep->urb->transfer_buffer + count, 0xFD, 9 - count); 737 memset(urb->transfer_buffer + count, 0xFD, 9 - count);
700 ep->urb->transfer_buffer_length = count; 738 urb->transfer_buffer_length = count;
701} 739}
702 740
703static struct usb_protocol_ops snd_usbmidi_122l_ops = { 741static struct usb_protocol_ops snd_usbmidi_122l_ops = {
@@ -786,10 +824,11 @@ static void snd_usbmidi_emagic_input(struct snd_usb_midi_in_endpoint* ep,
786 } 824 }
787} 825}
788 826
789static void snd_usbmidi_emagic_output(struct snd_usb_midi_out_endpoint* ep) 827static void snd_usbmidi_emagic_output(struct snd_usb_midi_out_endpoint* ep,
828 struct urb *urb)
790{ 829{
791 int port0 = ep->current_port; 830 int port0 = ep->current_port;
792 uint8_t* buf = ep->urb->transfer_buffer; 831 uint8_t* buf = urb->transfer_buffer;
793 int buf_free = ep->max_transfer; 832 int buf_free = ep->max_transfer;
794 int length, i; 833 int length, i;
795 834
@@ -829,7 +868,7 @@ static void snd_usbmidi_emagic_output(struct snd_usb_midi_out_endpoint* ep)
829 *buf = 0xff; 868 *buf = 0xff;
830 --buf_free; 869 --buf_free;
831 } 870 }
832 ep->urb->transfer_buffer_length = ep->max_transfer - buf_free; 871 urb->transfer_buffer_length = ep->max_transfer - buf_free;
833} 872}
834 873
835static struct usb_protocol_ops snd_usbmidi_emagic_ops = { 874static struct usb_protocol_ops snd_usbmidi_emagic_ops = {
@@ -884,6 +923,35 @@ static void snd_usbmidi_output_trigger(struct snd_rawmidi_substream *substream,
884 } 923 }
885} 924}
886 925
926static void snd_usbmidi_output_drain(struct snd_rawmidi_substream *substream)
927{
928 struct usbmidi_out_port* port = substream->runtime->private_data;
929 struct snd_usb_midi_out_endpoint *ep = port->ep;
930 unsigned int drain_urbs;
931 DEFINE_WAIT(wait);
932 long timeout = msecs_to_jiffies(50);
933
934 /*
935 * The substream buffer is empty, but some data might still be in the
936 * currently active URBs, so we have to wait for those to complete.
937 */
938 spin_lock_irq(&ep->buffer_lock);
939 drain_urbs = ep->active_urbs;
940 if (drain_urbs) {
941 ep->drain_urbs |= drain_urbs;
942 do {
943 prepare_to_wait(&ep->drain_wait, &wait,
944 TASK_UNINTERRUPTIBLE);
945 spin_unlock_irq(&ep->buffer_lock);
946 timeout = schedule_timeout(timeout);
947 spin_lock_irq(&ep->buffer_lock);
948 drain_urbs &= ep->drain_urbs;
949 } while (drain_urbs && timeout);
950 finish_wait(&ep->drain_wait, &wait);
951 }
952 spin_unlock_irq(&ep->buffer_lock);
953}
954
887static int snd_usbmidi_input_open(struct snd_rawmidi_substream *substream) 955static int snd_usbmidi_input_open(struct snd_rawmidi_substream *substream)
888{ 956{
889 return 0; 957 return 0;
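
The snd_usbmidi_output_drain() callback added above is presumably reached from the rawmidi core (snd_rawmidi_drain_output()) once the substream's software buffer has emptied; it then waits, bounded by 50 ms, for the hardware side to finish. The key detail is that the local drain_urbs snapshot is re-ANDed with ep->drain_urbs on every wakeup, so the caller only waits for transfers that were already in flight when draining began, never for URBs submitted afterwards:

    /*
     * Handshake between snd_usbmidi_output_drain() and
     * snd_usbmidi_out_urb_complete(), assuming OUTPUT_URBS <= 32 so the
     * bitmasks fit in an unsigned int:
     *
     *   drain:       drain_urbs = ep->active_urbs;
     *                ep->drain_urbs |= drain_urbs;
     *                sleep until (drain_urbs & ep->drain_urbs) == 0 or timeout
     *
     *   completion:  ep->active_urbs &= ~(1 << urb_index);
     *                ep->drain_urbs  &= ~(1 << urb_index);
     *                wake_up(&ep->drain_wait);
     */
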
@@ -908,6 +976,7 @@ static struct snd_rawmidi_ops snd_usbmidi_output_ops = {
908 .open = snd_usbmidi_output_open, 976 .open = snd_usbmidi_output_open,
909 .close = snd_usbmidi_output_close, 977 .close = snd_usbmidi_output_close,
910 .trigger = snd_usbmidi_output_trigger, 978 .trigger = snd_usbmidi_output_trigger,
979 .drain = snd_usbmidi_output_drain,
911}; 980};
912 981
913static struct snd_rawmidi_ops snd_usbmidi_input_ops = { 982static struct snd_rawmidi_ops snd_usbmidi_input_ops = {
@@ -916,19 +985,26 @@ static struct snd_rawmidi_ops snd_usbmidi_input_ops = {
916 .trigger = snd_usbmidi_input_trigger 985 .trigger = snd_usbmidi_input_trigger
917}; 986};
918 987
988static void free_urb_and_buffer(struct snd_usb_midi *umidi, struct urb *urb,
989 unsigned int buffer_length)
990{
991 usb_buffer_free(umidi->chip->dev, buffer_length,
992 urb->transfer_buffer, urb->transfer_dma);
993 usb_free_urb(urb);
994}
995
919/* 996/*
920 * Frees an input endpoint. 997 * Frees an input endpoint.
921 * May be called when ep hasn't been initialized completely. 998 * May be called when ep hasn't been initialized completely.
922 */ 999 */
923static void snd_usbmidi_in_endpoint_delete(struct snd_usb_midi_in_endpoint* ep) 1000static void snd_usbmidi_in_endpoint_delete(struct snd_usb_midi_in_endpoint* ep)
924{ 1001{
925 if (ep->urb) { 1002 unsigned int i;
926 usb_buffer_free(ep->umidi->chip->dev, 1003
927 ep->urb->transfer_buffer_length, 1004 for (i = 0; i < INPUT_URBS; ++i)
928 ep->urb->transfer_buffer, 1005 if (ep->urbs[i])
929 ep->urb->transfer_dma); 1006 free_urb_and_buffer(ep->umidi, ep->urbs[i],
930 usb_free_urb(ep->urb); 1007 ep->urbs[i]->transfer_buffer_length);
931 }
932 kfree(ep); 1008 kfree(ep);
933} 1009}
934 1010
@@ -943,6 +1019,7 @@ static int snd_usbmidi_in_endpoint_create(struct snd_usb_midi* umidi,
943 void* buffer; 1019 void* buffer;
944 unsigned int pipe; 1020 unsigned int pipe;
945 int length; 1021 int length;
1022 unsigned int i;
946 1023
947 rep->in = NULL; 1024 rep->in = NULL;
948 ep = kzalloc(sizeof(*ep), GFP_KERNEL); 1025 ep = kzalloc(sizeof(*ep), GFP_KERNEL);
@@ -950,30 +1027,36 @@ static int snd_usbmidi_in_endpoint_create(struct snd_usb_midi* umidi,
950 return -ENOMEM; 1027 return -ENOMEM;
951 ep->umidi = umidi; 1028 ep->umidi = umidi;
952 1029
953 ep->urb = usb_alloc_urb(0, GFP_KERNEL); 1030 for (i = 0; i < INPUT_URBS; ++i) {
954 if (!ep->urb) { 1031 ep->urbs[i] = usb_alloc_urb(0, GFP_KERNEL);
955 snd_usbmidi_in_endpoint_delete(ep); 1032 if (!ep->urbs[i]) {
956 return -ENOMEM; 1033 snd_usbmidi_in_endpoint_delete(ep);
1034 return -ENOMEM;
1035 }
957 } 1036 }
958 if (ep_info->in_interval) 1037 if (ep_info->in_interval)
959 pipe = usb_rcvintpipe(umidi->chip->dev, ep_info->in_ep); 1038 pipe = usb_rcvintpipe(umidi->chip->dev, ep_info->in_ep);
960 else 1039 else
961 pipe = usb_rcvbulkpipe(umidi->chip->dev, ep_info->in_ep); 1040 pipe = usb_rcvbulkpipe(umidi->chip->dev, ep_info->in_ep);
962 length = usb_maxpacket(umidi->chip->dev, pipe, 0); 1041 length = usb_maxpacket(umidi->chip->dev, pipe, 0);
963 buffer = usb_buffer_alloc(umidi->chip->dev, length, GFP_KERNEL, 1042 for (i = 0; i < INPUT_URBS; ++i) {
964 &ep->urb->transfer_dma); 1043 buffer = usb_buffer_alloc(umidi->chip->dev, length, GFP_KERNEL,
965 if (!buffer) { 1044 &ep->urbs[i]->transfer_dma);
966 snd_usbmidi_in_endpoint_delete(ep); 1045 if (!buffer) {
967 return -ENOMEM; 1046 snd_usbmidi_in_endpoint_delete(ep);
1047 return -ENOMEM;
1048 }
1049 if (ep_info->in_interval)
1050 usb_fill_int_urb(ep->urbs[i], umidi->chip->dev,
1051 pipe, buffer, length,
1052 snd_usbmidi_in_urb_complete,
1053 ep, ep_info->in_interval);
1054 else
1055 usb_fill_bulk_urb(ep->urbs[i], umidi->chip->dev,
1056 pipe, buffer, length,
1057 snd_usbmidi_in_urb_complete, ep);
1058 ep->urbs[i]->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
968 } 1059 }
969 if (ep_info->in_interval)
970 usb_fill_int_urb(ep->urb, umidi->chip->dev, pipe, buffer,
971 length, snd_usbmidi_in_urb_complete, ep,
972 ep_info->in_interval);
973 else
974 usb_fill_bulk_urb(ep->urb, umidi->chip->dev, pipe, buffer,
975 length, snd_usbmidi_in_urb_complete, ep);
976 ep->urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
977 1060
978 rep->in = ep; 1061 rep->in = ep;
979 return 0; 1062 return 0;
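
All of the receive URBs are filled identically and, as snd_usbmidi_input_start_ep() further down shows, kept submitted at the same time, so the device can keep delivering packets while one completion handler runs. The INPUT_URBS/OUTPUT_URBS constants and the urbs[] members belong to the header part of this change; a plausible shape, inferred from the loops above (the value 7 is an assumption, not taken from this excerpt):

    #define INPUT_URBS  7                      /* assumed */
    #define OUTPUT_URBS 7                      /* assumed */

    struct snd_usb_midi_in_endpoint {
            struct snd_usb_midi *umidi;
            struct urb *urbs[INPUT_URBS];      /* all kept in flight */
            /* ... remaining members unchanged ... */
    };
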
@@ -994,12 +1077,12 @@ static unsigned int snd_usbmidi_count_bits(unsigned int x)
994 */ 1077 */
995static void snd_usbmidi_out_endpoint_delete(struct snd_usb_midi_out_endpoint* ep) 1078static void snd_usbmidi_out_endpoint_delete(struct snd_usb_midi_out_endpoint* ep)
996{ 1079{
997 if (ep->urb) { 1080 unsigned int i;
998 usb_buffer_free(ep->umidi->chip->dev, ep->max_transfer, 1081
999 ep->urb->transfer_buffer, 1082 for (i = 0; i < OUTPUT_URBS; ++i)
1000 ep->urb->transfer_dma); 1083 if (ep->urbs[i].urb)
1001 usb_free_urb(ep->urb); 1084 free_urb_and_buffer(ep->umidi, ep->urbs[i].urb,
1002 } 1085 ep->max_transfer);
1003 kfree(ep); 1086 kfree(ep);
1004} 1087}
1005 1088
@@ -1011,7 +1094,7 @@ static int snd_usbmidi_out_endpoint_create(struct snd_usb_midi* umidi,
1011 struct snd_usb_midi_endpoint* rep) 1094 struct snd_usb_midi_endpoint* rep)
1012{ 1095{
1013 struct snd_usb_midi_out_endpoint* ep; 1096 struct snd_usb_midi_out_endpoint* ep;
1014 int i; 1097 unsigned int i;
1015 unsigned int pipe; 1098 unsigned int pipe;
1016 void* buffer; 1099 void* buffer;
1017 1100
@@ -1021,38 +1104,46 @@ static int snd_usbmidi_out_endpoint_create(struct snd_usb_midi* umidi,
1021 return -ENOMEM; 1104 return -ENOMEM;
1022 ep->umidi = umidi; 1105 ep->umidi = umidi;
1023 1106
1024 ep->urb = usb_alloc_urb(0, GFP_KERNEL); 1107 for (i = 0; i < OUTPUT_URBS; ++i) {
1025 if (!ep->urb) { 1108 ep->urbs[i].urb = usb_alloc_urb(0, GFP_KERNEL);
1026 snd_usbmidi_out_endpoint_delete(ep); 1109 if (!ep->urbs[i].urb) {
1027 return -ENOMEM; 1110 snd_usbmidi_out_endpoint_delete(ep);
1111 return -ENOMEM;
1112 }
1113 ep->urbs[i].ep = ep;
1028 } 1114 }
1029 if (ep_info->out_interval) 1115 if (ep_info->out_interval)
1030 pipe = usb_sndintpipe(umidi->chip->dev, ep_info->out_ep); 1116 pipe = usb_sndintpipe(umidi->chip->dev, ep_info->out_ep);
1031 else 1117 else
1032 pipe = usb_sndbulkpipe(umidi->chip->dev, ep_info->out_ep); 1118 pipe = usb_sndbulkpipe(umidi->chip->dev, ep_info->out_ep);
1033 if (umidi->chip->usb_id == USB_ID(0x0a92, 0x1020)) /* ESI M4U */ 1119 if (umidi->chip->usb_id == USB_ID(0x0a92, 0x1020)) /* ESI M4U */
1034 /* FIXME: we need more URBs to get reasonable bandwidth here: */
1035 ep->max_transfer = 4; 1120 ep->max_transfer = 4;
1036 else 1121 else
1037 ep->max_transfer = usb_maxpacket(umidi->chip->dev, pipe, 1); 1122 ep->max_transfer = usb_maxpacket(umidi->chip->dev, pipe, 1);
1038 buffer = usb_buffer_alloc(umidi->chip->dev, ep->max_transfer, 1123 for (i = 0; i < OUTPUT_URBS; ++i) {
1039 GFP_KERNEL, &ep->urb->transfer_dma); 1124 buffer = usb_buffer_alloc(umidi->chip->dev,
1040 if (!buffer) { 1125 ep->max_transfer, GFP_KERNEL,
1041 snd_usbmidi_out_endpoint_delete(ep); 1126 &ep->urbs[i].urb->transfer_dma);
1042 return -ENOMEM; 1127 if (!buffer) {
1128 snd_usbmidi_out_endpoint_delete(ep);
1129 return -ENOMEM;
1130 }
1131 if (ep_info->out_interval)
1132 usb_fill_int_urb(ep->urbs[i].urb, umidi->chip->dev,
1133 pipe, buffer, ep->max_transfer,
1134 snd_usbmidi_out_urb_complete,
1135 &ep->urbs[i], ep_info->out_interval);
1136 else
1137 usb_fill_bulk_urb(ep->urbs[i].urb, umidi->chip->dev,
1138 pipe, buffer, ep->max_transfer,
1139 snd_usbmidi_out_urb_complete,
1140 &ep->urbs[i]);
1141 ep->urbs[i].urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
1043 } 1142 }
1044 if (ep_info->out_interval)
1045 usb_fill_int_urb(ep->urb, umidi->chip->dev, pipe, buffer,
1046 ep->max_transfer, snd_usbmidi_out_urb_complete,
1047 ep, ep_info->out_interval);
1048 else
1049 usb_fill_bulk_urb(ep->urb, umidi->chip->dev,
1050 pipe, buffer, ep->max_transfer,
1051 snd_usbmidi_out_urb_complete, ep);
1052 ep->urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
1053 1143
1054 spin_lock_init(&ep->buffer_lock); 1144 spin_lock_init(&ep->buffer_lock);
1055 tasklet_init(&ep->tasklet, snd_usbmidi_out_tasklet, (unsigned long)ep); 1145 tasklet_init(&ep->tasklet, snd_usbmidi_out_tasklet, (unsigned long)ep);
1146 init_waitqueue_head(&ep->drain_wait);
1056 1147
1057 for (i = 0; i < 0x10; ++i) 1148 for (i = 0; i < 0x10; ++i)
1058 if (ep_info->out_cables & (1 << i)) { 1149 if (ep_info->out_cables & (1 << i)) {
@@ -1090,7 +1181,7 @@ static void snd_usbmidi_free(struct snd_usb_midi* umidi)
1090void snd_usbmidi_disconnect(struct list_head* p) 1181void snd_usbmidi_disconnect(struct list_head* p)
1091{ 1182{
1092 struct snd_usb_midi* umidi; 1183 struct snd_usb_midi* umidi;
1093 int i; 1184 unsigned int i, j;
1094 1185
1095 umidi = list_entry(p, struct snd_usb_midi, list); 1186 umidi = list_entry(p, struct snd_usb_midi, list);
1096 /* 1187 /*
@@ -1105,13 +1196,15 @@ void snd_usbmidi_disconnect(struct list_head* p)
1105 struct snd_usb_midi_endpoint* ep = &umidi->endpoints[i]; 1196 struct snd_usb_midi_endpoint* ep = &umidi->endpoints[i];
1106 if (ep->out) 1197 if (ep->out)
1107 tasklet_kill(&ep->out->tasklet); 1198 tasklet_kill(&ep->out->tasklet);
1108 if (ep->out && ep->out->urb) { 1199 if (ep->out) {
1109 usb_kill_urb(ep->out->urb); 1200 for (j = 0; j < OUTPUT_URBS; ++j)
1201 usb_kill_urb(ep->out->urbs[j].urb);
1110 if (umidi->usb_protocol_ops->finish_out_endpoint) 1202 if (umidi->usb_protocol_ops->finish_out_endpoint)
1111 umidi->usb_protocol_ops->finish_out_endpoint(ep->out); 1203 umidi->usb_protocol_ops->finish_out_endpoint(ep->out);
1112 } 1204 }
1113 if (ep->in) 1205 if (ep->in)
1114 usb_kill_urb(ep->in->urb); 1206 for (j = 0; j < INPUT_URBS; ++j)
1207 usb_kill_urb(ep->in->urbs[j]);
1115 /* free endpoints here; later call can result in Oops */ 1208 /* free endpoints here; later call can result in Oops */
1116 if (ep->out) { 1209 if (ep->out) {
1117 snd_usbmidi_out_endpoint_delete(ep->out); 1210 snd_usbmidi_out_endpoint_delete(ep->out);
@@ -1692,20 +1785,25 @@ static int snd_usbmidi_create_rawmidi(struct snd_usb_midi* umidi,
1692void snd_usbmidi_input_stop(struct list_head* p) 1785void snd_usbmidi_input_stop(struct list_head* p)
1693{ 1786{
1694 struct snd_usb_midi* umidi; 1787 struct snd_usb_midi* umidi;
1695 int i; 1788 unsigned int i, j;
1696 1789
1697 umidi = list_entry(p, struct snd_usb_midi, list); 1790 umidi = list_entry(p, struct snd_usb_midi, list);
1698 for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i) { 1791 for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i) {
1699 struct snd_usb_midi_endpoint* ep = &umidi->endpoints[i]; 1792 struct snd_usb_midi_endpoint* ep = &umidi->endpoints[i];
1700 if (ep->in) 1793 if (ep->in)
1701 usb_kill_urb(ep->in->urb); 1794 for (j = 0; j < INPUT_URBS; ++j)
1795 usb_kill_urb(ep->in->urbs[j]);
1702 } 1796 }
1703} 1797}
1704 1798
1705static void snd_usbmidi_input_start_ep(struct snd_usb_midi_in_endpoint* ep) 1799static void snd_usbmidi_input_start_ep(struct snd_usb_midi_in_endpoint* ep)
1706{ 1800{
1707 if (ep) { 1801 unsigned int i;
1708 struct urb* urb = ep->urb; 1802
1803 if (!ep)
1804 return;
1805 for (i = 0; i < INPUT_URBS; ++i) {
1806 struct urb* urb = ep->urbs[i];
1709 urb->dev = ep->umidi->chip->dev; 1807 urb->dev = ep->umidi->chip->dev;
1710 snd_usbmidi_submit_urb(urb, GFP_KERNEL); 1808 snd_usbmidi_submit_urb(urb, GFP_KERNEL);
1711 } 1809 }
diff --git a/sound/usb/usbmixer.c b/sound/usb/usbmixer.c
index ec9cdf986928..ab5a3ac2ac47 100644
--- a/sound/usb/usbmixer.c
+++ b/sound/usb/usbmixer.c
@@ -86,6 +86,7 @@ struct usb_mixer_interface {
86 u8 rc_buffer[6]; 86 u8 rc_buffer[6];
87 87
88 u8 audigy2nx_leds[3]; 88 u8 audigy2nx_leds[3];
89 u8 xonar_u1_status;
89}; 90};
90 91
91 92
@@ -461,7 +462,7 @@ static int mixer_vol_tlv(struct snd_kcontrol *kcontrol, int op_flag,
461 unsigned int size, unsigned int __user *_tlv) 462 unsigned int size, unsigned int __user *_tlv)
462{ 463{
463 struct usb_mixer_elem_info *cval = kcontrol->private_data; 464 struct usb_mixer_elem_info *cval = kcontrol->private_data;
464 DECLARE_TLV_DB_SCALE(scale, 0, 0, 0); 465 DECLARE_TLV_DB_MINMAX(scale, 0, 0);
465 466
466 if (size < sizeof(scale)) 467 if (size < sizeof(scale))
467 return -ENOMEM; 468 return -ENOMEM;
@@ -469,7 +470,16 @@ static int mixer_vol_tlv(struct snd_kcontrol *kcontrol, int op_flag,
469 * while ALSA TLV contains in 1/100 dB unit 470 * while ALSA TLV contains in 1/100 dB unit
470 */ 471 */
471 scale[2] = (convert_signed_value(cval, cval->min) * 100) / 256; 472 scale[2] = (convert_signed_value(cval, cval->min) * 100) / 256;
472 scale[3] = (convert_signed_value(cval, cval->res) * 100) / 256; 473 scale[3] = (convert_signed_value(cval, cval->max) * 100) / 256;
474 if (scale[3] <= scale[2]) {
475 /* something is wrong; assume it's either from/to 0dB */
476 if (scale[2] < 0)
477 scale[3] = 0;
478 else if (scale[2] > 0)
479 scale[2] = 0;
480 else /* totally crap, return an error */
481 return -EINVAL;
482 }
473 if (copy_to_user(_tlv, scale, sizeof(scale))) 483 if (copy_to_user(_tlv, scale, sizeof(scale)))
474 return -EFAULT; 484 return -EFAULT;
475 return 0; 485 return 0;
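
USB audio devices report volume limits in 1/256 dB units while ALSA TLV data uses 1/100 dB, hence the *100/256 scaling; switching from DECLARE_TLV_DB_SCALE to DECLARE_TLV_DB_MINMAX makes scale[2]/scale[3] carry the minimum and maximum rather than minimum and step, and the new fallback pins a nonsensical range to 0 dB at one end instead of exporting it. A worked example with made-up descriptor values:

    /*
     * Illustrative values only: a feature unit whose descriptor reports
     *   MIN = -0x2800  ->  convert_signed_value() = -10240  (-40.00 dB)
     *   MAX =  0x0000  ->                               0   (  0.00 dB)
     * ends up as
     *   scale[2] = -10240 * 100 / 256 = -4000
     *   scale[3] =      0 * 100 / 256 =     0
     * in the TLV block copied to user space.
     */
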
@@ -2033,6 +2043,58 @@ static void snd_audigy2nx_proc_read(struct snd_info_entry *entry,
2033 } 2043 }
2034} 2044}
2035 2045
2046static int snd_xonar_u1_switch_get(struct snd_kcontrol *kcontrol,
2047 struct snd_ctl_elem_value *ucontrol)
2048{
2049 struct usb_mixer_interface *mixer = snd_kcontrol_chip(kcontrol);
2050
2051 ucontrol->value.integer.value[0] = !!(mixer->xonar_u1_status & 0x02);
2052 return 0;
2053}
2054
2055static int snd_xonar_u1_switch_put(struct snd_kcontrol *kcontrol,
2056 struct snd_ctl_elem_value *ucontrol)
2057{
2058 struct usb_mixer_interface *mixer = snd_kcontrol_chip(kcontrol);
2059 u8 old_status, new_status;
2060 int err, changed;
2061
2062 old_status = mixer->xonar_u1_status;
2063 if (ucontrol->value.integer.value[0])
2064 new_status = old_status | 0x02;
2065 else
2066 new_status = old_status & ~0x02;
2067 changed = new_status != old_status;
2068 err = snd_usb_ctl_msg(mixer->chip->dev,
2069 usb_sndctrlpipe(mixer->chip->dev, 0), 0x08,
2070 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
2071 50, 0, &new_status, 1, 100);
2072 if (err < 0)
2073 return err;
2074 mixer->xonar_u1_status = new_status;
2075 return changed;
2076}
2077
2078static struct snd_kcontrol_new snd_xonar_u1_output_switch = {
2079 .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
2080 .name = "Digital Playback Switch",
2081 .info = snd_ctl_boolean_mono_info,
2082 .get = snd_xonar_u1_switch_get,
2083 .put = snd_xonar_u1_switch_put,
2084};
2085
2086static int snd_xonar_u1_controls_create(struct usb_mixer_interface *mixer)
2087{
2088 int err;
2089
2090 err = snd_ctl_add(mixer->chip->card,
2091 snd_ctl_new1(&snd_xonar_u1_output_switch, mixer));
2092 if (err < 0)
2093 return err;
2094 mixer->xonar_u1_status = 0x05;
2095 return 0;
2096}
2097
2036int snd_usb_create_mixer(struct snd_usb_audio *chip, int ctrlif, 2098int snd_usb_create_mixer(struct snd_usb_audio *chip, int ctrlif,
2037 int ignore_error) 2099 int ignore_error)
2038{ 2100{
@@ -2075,6 +2137,13 @@ int snd_usb_create_mixer(struct snd_usb_audio *chip, int ctrlif,
2075 snd_audigy2nx_proc_read); 2137 snd_audigy2nx_proc_read);
2076 } 2138 }
2077 2139
2140 if (mixer->chip->usb_id == USB_ID(0x0b05, 0x1739) ||
2141 mixer->chip->usb_id == USB_ID(0x0b05, 0x1743)) {
2142 err = snd_xonar_u1_controls_create(mixer);
2143 if (err < 0)
2144 goto _error;
2145 }
2146
2078 err = snd_device_new(chip->card, SNDRV_DEV_LOWLEVEL, mixer, &dev_ops); 2147 err = snd_device_new(chip->card, SNDRV_DEV_LOWLEVEL, mixer, &dev_ops);
2079 if (err < 0) 2148 if (err < 0)
2080 goto _error; 2149 goto _error;
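
The new "Digital Playback Switch" drives the Xonar U1's digital output through a one-byte vendor control request: bit 0x02 of the cached status byte acts as the output-enable bit, and the 0x05 written into xonar_u1_status at creation time is presumably the device's power-on default. For reference, the snd_usb_ctl_msg() call in snd_xonar_u1_switch_put() maps onto the USB control-transfer fields like this (same call as above, only with the fields named):

    /* bmRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER
     * bRequest      = 0x08
     * wValue        = 50
     * wIndex        = 0
     * data / wLength = &new_status / 1 byte, 100 ms timeout
     */
    err = snd_usb_ctl_msg(mixer->chip->dev,
                          usb_sndctrlpipe(mixer->chip->dev, 0), 0x08,
                          USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
                          50, 0, &new_status, 1, 100);
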
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index 6be696b0a2bb..0ff23de9e453 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -91,6 +91,10 @@ OPTIONS
91--no-samples:: 91--no-samples::
92 Don't sample. 92 Don't sample.
93 93
94-R::
95--raw-samples::
96	Collect raw sample records from all opened counters (typically for tracepoint counters).
97
94SEE ALSO 98SEE ALSO
95-------- 99--------
96linkperf:perf-stat[1], linkperf:perf-list[1] 100linkperf:perf-stat[1], linkperf:perf-list[1]
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
index e72e93110782..59f0b846cd71 100644
--- a/tools/perf/Documentation/perf-report.txt
+++ b/tools/perf/Documentation/perf-report.txt
@@ -27,6 +27,9 @@ OPTIONS
27-n 27-n
28--show-nr-samples 28--show-nr-samples
29 Show the number of samples for each symbol 29 Show the number of samples for each symbol
30-T
31--threads
32 Show per-thread event counters
30-C:: 33-C::
31--comms=:: 34--comms=::
32 Only consider symbols in these comms. CSV that understands 35 Only consider symbols in these comms. CSV that understands
@@ -48,6 +51,16 @@ OPTIONS
48 all occurances of this separator in symbol names (and other output) 51 all occurances of this separator in symbol names (and other output)
49 with a '.' character, that thus it's the only non valid separator. 52 with a '.' character, that thus it's the only non valid separator.
50 53
54-g [type,min]::
55--call-graph::
56 Display callchains using type and min percent threshold.
57 type can be either:
58 - flat: single column, linear exposure of callchains.
59 - graph: use a graph tree, displaying absolute overhead rates.
60 - fractal: like graph, but displays relative rates. Each branch of
61 the tree is considered as a new profiled object. +
62 Default: fractal,0.5.
63
51SEE ALSO 64SEE ALSO
52-------- 65--------
53linkperf:perf-stat[1] 66linkperf:perf-stat[1]
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index c045b4271e57..9f8d207a91bf 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -166,7 +166,35 @@ endif
166 166
167# CFLAGS and LDFLAGS are for the users to override from the command line. 167# CFLAGS and LDFLAGS are for the users to override from the command line.
168 168
169CFLAGS = $(M64) -ggdb3 -Wall -Wextra -Wstrict-prototypes -Wmissing-declarations -Wmissing-prototypes -std=gnu99 -Wdeclaration-after-statement -Werror -O6 169#
170# Include saner warnings here, which can catch bugs:
171#
172
173EXTRA_WARNINGS := -Wcast-align
174EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wformat
175EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wformat-security
176EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wformat-y2k
177EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wshadow
178EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Winit-self
179EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wpacked
180EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wredundant-decls
181EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wstack-protector
182EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wstrict-aliasing=3
183EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wswitch-default
184EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wswitch-enum
185EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wno-system-headers
186EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wundef
187EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wvolatile-register-var
188EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wwrite-strings
189EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wbad-function-cast
190EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wmissing-declarations
191EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wmissing-prototypes
192EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wnested-externs
193EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wold-style-definition
194EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wstrict-prototypes
195EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wdeclaration-after-statement
196
197CFLAGS = $(M64) -ggdb3 -Wall -Wextra -std=gnu99 -Werror -O6 -fstack-protector-all -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS)
170LDFLAGS = -lpthread -lrt -lelf -lm 198LDFLAGS = -lpthread -lrt -lelf -lm
171ALL_CFLAGS = $(CFLAGS) 199ALL_CFLAGS = $(CFLAGS)
172ALL_LDFLAGS = $(LDFLAGS) 200ALL_LDFLAGS = $(LDFLAGS)
@@ -310,6 +338,7 @@ LIB_H += util/sigchain.h
310LIB_H += util/symbol.h 338LIB_H += util/symbol.h
311LIB_H += util/module.h 339LIB_H += util/module.h
312LIB_H += util/color.h 340LIB_H += util/color.h
341LIB_H += util/values.h
313 342
314LIB_OBJS += util/abspath.o 343LIB_OBJS += util/abspath.o
315LIB_OBJS += util/alias.o 344LIB_OBJS += util/alias.o
@@ -337,6 +366,13 @@ LIB_OBJS += util/color.o
337LIB_OBJS += util/pager.o 366LIB_OBJS += util/pager.o
338LIB_OBJS += util/header.o 367LIB_OBJS += util/header.o
339LIB_OBJS += util/callchain.o 368LIB_OBJS += util/callchain.o
369LIB_OBJS += util/values.o
370LIB_OBJS += util/debug.o
371LIB_OBJS += util/map.o
372LIB_OBJS += util/thread.o
373LIB_OBJS += util/trace-event-parse.o
374LIB_OBJS += util/trace-event-read.o
375LIB_OBJS += util/trace-event-info.o
340 376
341BUILTIN_OBJS += builtin-annotate.o 377BUILTIN_OBJS += builtin-annotate.o
342BUILTIN_OBJS += builtin-help.o 378BUILTIN_OBJS += builtin-help.o
@@ -345,6 +381,7 @@ BUILTIN_OBJS += builtin-record.o
345BUILTIN_OBJS += builtin-report.o 381BUILTIN_OBJS += builtin-report.o
346BUILTIN_OBJS += builtin-stat.o 382BUILTIN_OBJS += builtin-stat.o
347BUILTIN_OBJS += builtin-top.o 383BUILTIN_OBJS += builtin-top.o
384BUILTIN_OBJS += builtin-trace.o
348 385
349PERFLIBS = $(LIB_FILE) 386PERFLIBS = $(LIB_FILE)
350 387
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 5e17de984dc8..043d85b7e254 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -17,16 +17,13 @@
17#include "util/string.h" 17#include "util/string.h"
18 18
19#include "perf.h" 19#include "perf.h"
20#include "util/debug.h"
20 21
21#include "util/parse-options.h" 22#include "util/parse-options.h"
22#include "util/parse-events.h" 23#include "util/parse-events.h"
23 24#include "util/thread.h"
24#define SHOW_KERNEL 1
25#define SHOW_USER 2
26#define SHOW_HV 4
27 25
28static char const *input_name = "perf.data"; 26static char const *input_name = "perf.data";
29static char *vmlinux = "vmlinux";
30 27
31static char default_sort_order[] = "comm,symbol"; 28static char default_sort_order[] = "comm,symbol";
32static char *sort_order = default_sort_order; 29static char *sort_order = default_sort_order;
@@ -35,13 +32,6 @@ static int force;
35static int input; 32static int input;
36static int show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV; 33static int show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV;
37 34
38static int dump_trace = 0;
39#define dprintf(x...) do { if (dump_trace) printf(x); } while (0)
40
41static int verbose;
42
43static int modules;
44
45static int full_paths; 35static int full_paths;
46 36
47static int print_line; 37static int print_line;
@@ -49,39 +39,8 @@ static int print_line;
49static unsigned long page_size; 39static unsigned long page_size;
50static unsigned long mmap_window = 32; 40static unsigned long mmap_window = 32;
51 41
52struct ip_event { 42static struct rb_root threads;
53 struct perf_event_header header; 43static struct thread *last_match;
54 u64 ip;
55 u32 pid, tid;
56};
57
58struct mmap_event {
59 struct perf_event_header header;
60 u32 pid, tid;
61 u64 start;
62 u64 len;
63 u64 pgoff;
64 char filename[PATH_MAX];
65};
66
67struct comm_event {
68 struct perf_event_header header;
69 u32 pid, tid;
70 char comm[16];
71};
72
73struct fork_event {
74 struct perf_event_header header;
75 u32 pid, ppid;
76};
77
78typedef union event_union {
79 struct perf_event_header header;
80 struct ip_event ip;
81 struct mmap_event mmap;
82 struct comm_event comm;
83 struct fork_event fork;
84} event_t;
85 44
86 45
87struct sym_ext { 46struct sym_ext {
@@ -90,323 +49,6 @@ struct sym_ext {
90 char *path; 49 char *path;
91}; 50};
92 51
93static LIST_HEAD(dsos);
94static struct dso *kernel_dso;
95static struct dso *vdso;
96
97
98static void dsos__add(struct dso *dso)
99{
100 list_add_tail(&dso->node, &dsos);
101}
102
103static struct dso *dsos__find(const char *name)
104{
105 struct dso *pos;
106
107 list_for_each_entry(pos, &dsos, node)
108 if (strcmp(pos->name, name) == 0)
109 return pos;
110 return NULL;
111}
112
113static struct dso *dsos__findnew(const char *name)
114{
115 struct dso *dso = dsos__find(name);
116 int nr;
117
118 if (dso)
119 return dso;
120
121 dso = dso__new(name, 0);
122 if (!dso)
123 goto out_delete_dso;
124
125 nr = dso__load(dso, NULL, verbose);
126 if (nr < 0) {
127 if (verbose)
128 fprintf(stderr, "Failed to open: %s\n", name);
129 goto out_delete_dso;
130 }
131 if (!nr && verbose) {
132 fprintf(stderr,
133 "No symbols found in: %s, maybe install a debug package?\n",
134 name);
135 }
136
137 dsos__add(dso);
138
139 return dso;
140
141out_delete_dso:
142 dso__delete(dso);
143 return NULL;
144}
145
146static void dsos__fprintf(FILE *fp)
147{
148 struct dso *pos;
149
150 list_for_each_entry(pos, &dsos, node)
151 dso__fprintf(pos, fp);
152}
153
154static struct symbol *vdso__find_symbol(struct dso *dso, u64 ip)
155{
156 return dso__find_symbol(dso, ip);
157}
158
159static int load_kernel(void)
160{
161 int err;
162
163 kernel_dso = dso__new("[kernel]", 0);
164 if (!kernel_dso)
165 return -1;
166
167 err = dso__load_kernel(kernel_dso, vmlinux, NULL, verbose, modules);
168 if (err <= 0) {
169 dso__delete(kernel_dso);
170 kernel_dso = NULL;
171 } else
172 dsos__add(kernel_dso);
173
174 vdso = dso__new("[vdso]", 0);
175 if (!vdso)
176 return -1;
177
178 vdso->find_symbol = vdso__find_symbol;
179
180 dsos__add(vdso);
181
182 return err;
183}
184
185struct map {
186 struct list_head node;
187 u64 start;
188 u64 end;
189 u64 pgoff;
190 u64 (*map_ip)(struct map *, u64);
191 struct dso *dso;
192};
193
194static u64 map__map_ip(struct map *map, u64 ip)
195{
196 return ip - map->start + map->pgoff;
197}
198
199static u64 vdso__map_ip(struct map *map __used, u64 ip)
200{
201 return ip;
202}
203
204static struct map *map__new(struct mmap_event *event)
205{
206 struct map *self = malloc(sizeof(*self));
207
208 if (self != NULL) {
209 const char *filename = event->filename;
210
211 self->start = event->start;
212 self->end = event->start + event->len;
213 self->pgoff = event->pgoff;
214
215 self->dso = dsos__findnew(filename);
216 if (self->dso == NULL)
217 goto out_delete;
218
219 if (self->dso == vdso)
220 self->map_ip = vdso__map_ip;
221 else
222 self->map_ip = map__map_ip;
223 }
224 return self;
225out_delete:
226 free(self);
227 return NULL;
228}
229
230static struct map *map__clone(struct map *self)
231{
232 struct map *map = malloc(sizeof(*self));
233
234 if (!map)
235 return NULL;
236
237 memcpy(map, self, sizeof(*self));
238
239 return map;
240}
241
242static int map__overlap(struct map *l, struct map *r)
243{
244 if (l->start > r->start) {
245 struct map *t = l;
246 l = r;
247 r = t;
248 }
249
250 if (l->end > r->start)
251 return 1;
252
253 return 0;
254}
255
256static size_t map__fprintf(struct map *self, FILE *fp)
257{
258 return fprintf(fp, " %Lx-%Lx %Lx %s\n",
259 self->start, self->end, self->pgoff, self->dso->name);
260}
261
262
263struct thread {
264 struct rb_node rb_node;
265 struct list_head maps;
266 pid_t pid;
267 char *comm;
268};
269
270static struct thread *thread__new(pid_t pid)
271{
272 struct thread *self = malloc(sizeof(*self));
273
274 if (self != NULL) {
275 self->pid = pid;
276 self->comm = malloc(32);
277 if (self->comm)
278 snprintf(self->comm, 32, ":%d", self->pid);
279 INIT_LIST_HEAD(&self->maps);
280 }
281
282 return self;
283}
284
285static int thread__set_comm(struct thread *self, const char *comm)
286{
287 if (self->comm)
288 free(self->comm);
289 self->comm = strdup(comm);
290 return self->comm ? 0 : -ENOMEM;
291}
292
293static size_t thread__fprintf(struct thread *self, FILE *fp)
294{
295 struct map *pos;
296 size_t ret = fprintf(fp, "Thread %d %s\n", self->pid, self->comm);
297
298 list_for_each_entry(pos, &self->maps, node)
299 ret += map__fprintf(pos, fp);
300
301 return ret;
302}
303
304
305static struct rb_root threads;
306static struct thread *last_match;
307
308static struct thread *threads__findnew(pid_t pid)
309{
310 struct rb_node **p = &threads.rb_node;
311 struct rb_node *parent = NULL;
312 struct thread *th;
313
314 /*
315 * Front-end cache - PID lookups come in blocks,
316 * so most of the time we don't have to look up
317 * the full rbtree:
318 */
319 if (last_match && last_match->pid == pid)
320 return last_match;
321
322 while (*p != NULL) {
323 parent = *p;
324 th = rb_entry(parent, struct thread, rb_node);
325
326 if (th->pid == pid) {
327 last_match = th;
328 return th;
329 }
330
331 if (pid < th->pid)
332 p = &(*p)->rb_left;
333 else
334 p = &(*p)->rb_right;
335 }
336
337 th = thread__new(pid);
338 if (th != NULL) {
339 rb_link_node(&th->rb_node, parent, p);
340 rb_insert_color(&th->rb_node, &threads);
341 last_match = th;
342 }
343
344 return th;
345}
346
347static void thread__insert_map(struct thread *self, struct map *map)
348{
349 struct map *pos, *tmp;
350
351 list_for_each_entry_safe(pos, tmp, &self->maps, node) {
352 if (map__overlap(pos, map)) {
353 list_del_init(&pos->node);
354 /* XXX leaks dsos */
355 free(pos);
356 }
357 }
358
359 list_add_tail(&map->node, &self->maps);
360}
361
362static int thread__fork(struct thread *self, struct thread *parent)
363{
364 struct map *map;
365
366 if (self->comm)
367 free(self->comm);
368 self->comm = strdup(parent->comm);
369 if (!self->comm)
370 return -ENOMEM;
371
372 list_for_each_entry(map, &parent->maps, node) {
373 struct map *new = map__clone(map);
374 if (!new)
375 return -ENOMEM;
376 thread__insert_map(self, new);
377 }
378
379 return 0;
380}
381
382static struct map *thread__find_map(struct thread *self, u64 ip)
383{
384 struct map *pos;
385
386 if (self == NULL)
387 return NULL;
388
389 list_for_each_entry(pos, &self->maps, node)
390 if (ip >= pos->start && ip <= pos->end)
391 return pos;
392
393 return NULL;
394}
395
396static size_t threads__fprintf(FILE *fp)
397{
398 size_t ret = 0;
399 struct rb_node *nd;
400
401 for (nd = rb_first(&threads); nd; nd = rb_next(nd)) {
402 struct thread *pos = rb_entry(nd, struct thread, rb_node);
403
404 ret += thread__fprintf(pos, fp);
405 }
406
407 return ret;
408}
409
410/* 52/*
411 * histogram, sorted on item, collects counts 53 * histogram, sorted on item, collects counts
412 */ 54 */
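
The per-tool copies of the event structures and the dso/map/thread bookkeeping deleted here are replaced by the shared util/ code added to the build above (util/thread.o, util/map.o and friends in tools/perf/Makefile). The shared lookup no longer hides file-local statics inside it; the rb-tree root and the last-match cache are passed in explicitly, as the call sites later in this file show. The prototypes below are inferred from those call sites, not quoted from util/thread.h:

    /* Inferred interface; the real declarations live in util/thread.h. */
    struct thread *threads__findnew(pid_t pid, struct rb_root *threads,
                                    struct thread **last_match);
    struct thread *register_idle_thread(struct rb_root *threads,
                                        struct thread **last_match);
    size_t threads__fprintf(FILE *fp, struct rb_root *threads);
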
@@ -433,7 +75,7 @@ struct hist_entry {
433struct sort_entry { 75struct sort_entry {
434 struct list_head list; 76 struct list_head list;
435 77
436 char *header; 78 const char *header;
437 79
438 int64_t (*cmp)(struct hist_entry *, struct hist_entry *); 80 int64_t (*cmp)(struct hist_entry *, struct hist_entry *);
439 int64_t (*collapse)(struct hist_entry *, struct hist_entry *); 81 int64_t (*collapse)(struct hist_entry *, struct hist_entry *);
@@ -577,7 +219,7 @@ static struct sort_entry sort_sym = {
577static int sort__need_collapse = 0; 219static int sort__need_collapse = 0;
578 220
579struct sort_dimension { 221struct sort_dimension {
580 char *name; 222 const char *name;
581 struct sort_entry *entry; 223 struct sort_entry *entry;
582 int taken; 224 int taken;
583}; 225};
@@ -830,17 +472,6 @@ static void output__resort(void)
830 } 472 }
831} 473}
832 474
833static void register_idle_thread(void)
834{
835 struct thread *thread = threads__findnew(0);
836
837 if (thread == NULL ||
838 thread__set_comm(thread, "[idle]")) {
839 fprintf(stderr, "problem inserting idle task.\n");
840 exit(-1);
841 }
842}
843
844static unsigned long total = 0, 475static unsigned long total = 0,
845 total_mmap = 0, 476 total_mmap = 0,
846 total_comm = 0, 477 total_comm = 0,
@@ -853,18 +484,20 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
853 char level; 484 char level;
854 int show = 0; 485 int show = 0;
855 struct dso *dso = NULL; 486 struct dso *dso = NULL;
856 struct thread *thread = threads__findnew(event->ip.pid); 487 struct thread *thread;
857 u64 ip = event->ip.ip; 488 u64 ip = event->ip.ip;
858 struct map *map = NULL; 489 struct map *map = NULL;
859 490
860 dprintf("%p [%p]: PERF_EVENT (IP, %d): %d: %p\n", 491 thread = threads__findnew(event->ip.pid, &threads, &last_match);
492
493 dump_printf("%p [%p]: PERF_EVENT (IP, %d): %d: %p\n",
861 (void *)(offset + head), 494 (void *)(offset + head),
862 (void *)(long)(event->header.size), 495 (void *)(long)(event->header.size),
863 event->header.misc, 496 event->header.misc,
864 event->ip.pid, 497 event->ip.pid,
865 (void *)(long)ip); 498 (void *)(long)ip);
866 499
867 dprintf(" ... thread: %s:%d\n", thread->comm, thread->pid); 500 dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
868 501
869 if (thread == NULL) { 502 if (thread == NULL) {
870 fprintf(stderr, "problem processing %d event, skipping it.\n", 503 fprintf(stderr, "problem processing %d event, skipping it.\n",
@@ -878,7 +511,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
878 511
879 dso = kernel_dso; 512 dso = kernel_dso;
880 513
881 dprintf(" ...... dso: %s\n", dso->name); 514 dump_printf(" ...... dso: %s\n", dso->name);
882 515
883 } else if (event->header.misc & PERF_EVENT_MISC_USER) { 516 } else if (event->header.misc & PERF_EVENT_MISC_USER) {
884 517
@@ -899,12 +532,12 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
899 if ((long long)ip < 0) 532 if ((long long)ip < 0)
900 dso = kernel_dso; 533 dso = kernel_dso;
901 } 534 }
902 dprintf(" ...... dso: %s\n", dso ? dso->name : "<not found>"); 535 dump_printf(" ...... dso: %s\n", dso ? dso->name : "<not found>");
903 536
904 } else { 537 } else {
905 show = SHOW_HV; 538 show = SHOW_HV;
906 level = 'H'; 539 level = 'H';
907 dprintf(" ...... dso: [hypervisor]\n"); 540 dump_printf(" ...... dso: [hypervisor]\n");
908 } 541 }
909 542
910 if (show & show_mask) { 543 if (show & show_mask) {
@@ -927,10 +560,12 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
927static int 560static int
928process_mmap_event(event_t *event, unsigned long offset, unsigned long head) 561process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
929{ 562{
930 struct thread *thread = threads__findnew(event->mmap.pid); 563 struct thread *thread;
931 struct map *map = map__new(&event->mmap); 564 struct map *map = map__new(&event->mmap, NULL, 0);
565
566 thread = threads__findnew(event->mmap.pid, &threads, &last_match);
932 567
933 dprintf("%p [%p]: PERF_EVENT_MMAP %d: [%p(%p) @ %p]: %s\n", 568 dump_printf("%p [%p]: PERF_EVENT_MMAP %d: [%p(%p) @ %p]: %s\n",
934 (void *)(offset + head), 569 (void *)(offset + head),
935 (void *)(long)(event->header.size), 570 (void *)(long)(event->header.size),
936 event->mmap.pid, 571 event->mmap.pid,
@@ -940,7 +575,7 @@ process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
940 event->mmap.filename); 575 event->mmap.filename);
941 576
942 if (thread == NULL || map == NULL) { 577 if (thread == NULL || map == NULL) {
943 dprintf("problem processing PERF_EVENT_MMAP, skipping event.\n"); 578 dump_printf("problem processing PERF_EVENT_MMAP, skipping event.\n");
944 return 0; 579 return 0;
945 } 580 }
946 581
@@ -953,16 +588,17 @@ process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
953static int 588static int
954process_comm_event(event_t *event, unsigned long offset, unsigned long head) 589process_comm_event(event_t *event, unsigned long offset, unsigned long head)
955{ 590{
956 struct thread *thread = threads__findnew(event->comm.pid); 591 struct thread *thread;
957 592
958 dprintf("%p [%p]: PERF_EVENT_COMM: %s:%d\n", 593 thread = threads__findnew(event->comm.pid, &threads, &last_match);
594 dump_printf("%p [%p]: PERF_EVENT_COMM: %s:%d\n",
959 (void *)(offset + head), 595 (void *)(offset + head),
960 (void *)(long)(event->header.size), 596 (void *)(long)(event->header.size),
961 event->comm.comm, event->comm.pid); 597 event->comm.comm, event->comm.pid);
962 598
963 if (thread == NULL || 599 if (thread == NULL ||
964 thread__set_comm(thread, event->comm.comm)) { 600 thread__set_comm(thread, event->comm.comm)) {
965 dprintf("problem processing PERF_EVENT_COMM, skipping event.\n"); 601 dump_printf("problem processing PERF_EVENT_COMM, skipping event.\n");
966 return -1; 602 return -1;
967 } 603 }
968 total_comm++; 604 total_comm++;
@@ -973,10 +609,12 @@ process_comm_event(event_t *event, unsigned long offset, unsigned long head)
973static int 609static int
974process_fork_event(event_t *event, unsigned long offset, unsigned long head) 610process_fork_event(event_t *event, unsigned long offset, unsigned long head)
975{ 611{
976 struct thread *thread = threads__findnew(event->fork.pid); 612 struct thread *thread;
977 struct thread *parent = threads__findnew(event->fork.ppid); 613 struct thread *parent;
978 614
979 dprintf("%p [%p]: PERF_EVENT_FORK: %d:%d\n", 615 thread = threads__findnew(event->fork.pid, &threads, &last_match);
616 parent = threads__findnew(event->fork.ppid, &threads, &last_match);
617 dump_printf("%p [%p]: PERF_EVENT_FORK: %d:%d\n",
980 (void *)(offset + head), 618 (void *)(offset + head),
981 (void *)(long)(event->header.size), 619 (void *)(long)(event->header.size),
982 event->fork.pid, event->fork.ppid); 620 event->fork.pid, event->fork.ppid);
@@ -989,7 +627,7 @@ process_fork_event(event_t *event, unsigned long offset, unsigned long head)
989 return 0; 627 return 0;
990 628
991 if (!thread || !parent || thread__fork(thread, parent)) { 629 if (!thread || !parent || thread__fork(thread, parent)) {
992 dprintf("problem processing PERF_EVENT_FORK, skipping event.\n"); 630 dump_printf("problem processing PERF_EVENT_FORK, skipping event.\n");
993 return -1; 631 return -1;
994 } 632 }
995 total_fork++; 633 total_fork++;
@@ -1075,7 +713,7 @@ parse_line(FILE *file, struct symbol *sym, u64 start, u64 len)
1075 const char *path = NULL; 713 const char *path = NULL;
1076 unsigned int hits = 0; 714 unsigned int hits = 0;
1077 double percent = 0.0; 715 double percent = 0.0;
1078 char *color; 716 const char *color;
1079 struct sym_ext *sym_ext = sym->priv; 717 struct sym_ext *sym_ext = sym->priv;
1080 718
1081 offset = line_ip - start; 719 offset = line_ip - start;
@@ -1157,7 +795,7 @@ static void free_source_line(struct symbol *sym, int len)
1157 795
1158/* Get the filename:line for the colored entries */ 796/* Get the filename:line for the colored entries */
1159static void 797static void
1160get_source_line(struct symbol *sym, u64 start, int len, char *filename) 798get_source_line(struct symbol *sym, u64 start, int len, const char *filename)
1161{ 799{
1162 int i; 800 int i;
1163 char cmd[PATH_MAX * 2]; 801 char cmd[PATH_MAX * 2];
@@ -1203,7 +841,7 @@ get_source_line(struct symbol *sym, u64 start, int len, char *filename)
1203 } 841 }
1204} 842}
1205 843
1206static void print_summary(char *filename) 844static void print_summary(const char *filename)
1207{ 845{
1208 struct sym_ext *sym_ext; 846 struct sym_ext *sym_ext;
1209 struct rb_node *node; 847 struct rb_node *node;
@@ -1219,7 +857,7 @@ static void print_summary(char *filename)
1219 node = rb_first(&root_sym_ext); 857 node = rb_first(&root_sym_ext);
1220 while (node) { 858 while (node) {
1221 double percent; 859 double percent;
1222 char *color; 860 const char *color;
1223 char *path; 861 char *path;
1224 862
1225 sym_ext = rb_entry(node, struct sym_ext, node); 863 sym_ext = rb_entry(node, struct sym_ext, node);
@@ -1234,7 +872,7 @@ static void print_summary(char *filename)
1234 872
1235static void annotate_sym(struct dso *dso, struct symbol *sym) 873static void annotate_sym(struct dso *dso, struct symbol *sym)
1236{ 874{
1237 char *filename = dso->name, *d_filename; 875 const char *filename = dso->name, *d_filename;
1238 u64 start, end, len; 876 u64 start, end, len;
1239 char command[PATH_MAX*2]; 877 char command[PATH_MAX*2];
1240 FILE *file; 878 FILE *file;
@@ -1244,7 +882,7 @@ static void annotate_sym(struct dso *dso, struct symbol *sym)
1244 if (sym->module) 882 if (sym->module)
1245 filename = sym->module->path; 883 filename = sym->module->path;
1246 else if (dso == kernel_dso) 884 else if (dso == kernel_dso)
1247 filename = vmlinux; 885 filename = vmlinux_name;
1248 886
1249 start = sym->obj_start; 887 start = sym->obj_start;
1250 if (!start) 888 if (!start)
@@ -1316,12 +954,12 @@ static int __cmd_annotate(void)
1316 int ret, rc = EXIT_FAILURE; 954 int ret, rc = EXIT_FAILURE;
1317 unsigned long offset = 0; 955 unsigned long offset = 0;
1318 unsigned long head = 0; 956 unsigned long head = 0;
1319 struct stat stat; 957 struct stat input_stat;
1320 event_t *event; 958 event_t *event;
1321 uint32_t size; 959 uint32_t size;
1322 char *buf; 960 char *buf;
1323 961
1324 register_idle_thread(); 962 register_idle_thread(&threads, &last_match);
1325 963
1326 input = open(input_name, O_RDONLY); 964 input = open(input_name, O_RDONLY);
1327 if (input < 0) { 965 if (input < 0) {
@@ -1329,18 +967,18 @@ static int __cmd_annotate(void)
1329 exit(-1); 967 exit(-1);
1330 } 968 }
1331 969
1332 ret = fstat(input, &stat); 970 ret = fstat(input, &input_stat);
1333 if (ret < 0) { 971 if (ret < 0) {
1334 perror("failed to stat file"); 972 perror("failed to stat file");
1335 exit(-1); 973 exit(-1);
1336 } 974 }
1337 975
1338 if (!force && (stat.st_uid != geteuid())) { 976 if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
1339 fprintf(stderr, "file: %s not owned by current user\n", input_name); 977 fprintf(stderr, "file: %s not owned by current user or root\n", input_name);
1340 exit(-1); 978 exit(-1);
1341 } 979 }
1342 980
1343 if (!stat.st_size) { 981 if (!input_stat.st_size) {
1344 fprintf(stderr, "zero-sized file, nothing to do!\n"); 982 fprintf(stderr, "zero-sized file, nothing to do!\n");
1345 exit(0); 983 exit(0);
1346 } 984 }
@@ -1367,10 +1005,10 @@ more:
1367 1005
1368 if (head + event->header.size >= page_size * mmap_window) { 1006 if (head + event->header.size >= page_size * mmap_window) {
1369 unsigned long shift = page_size * (head / page_size); 1007 unsigned long shift = page_size * (head / page_size);
1370 int ret; 1008 int munmap_ret;
1371 1009
1372 ret = munmap(buf, page_size * mmap_window); 1010 munmap_ret = munmap(buf, page_size * mmap_window);
1373 assert(ret == 0); 1011 assert(munmap_ret == 0);
1374 1012
1375 offset += shift; 1013 offset += shift;
1376 head -= shift; 1014 head -= shift;
@@ -1379,14 +1017,14 @@ more:
1379 1017
1380 size = event->header.size; 1018 size = event->header.size;
1381 1019
1382 dprintf("%p [%p]: event: %d\n", 1020 dump_printf("%p [%p]: event: %d\n",
1383 (void *)(offset + head), 1021 (void *)(offset + head),
1384 (void *)(long)event->header.size, 1022 (void *)(long)event->header.size,
1385 event->header.type); 1023 event->header.type);
1386 1024
1387 if (!size || process_event(event, offset, head) < 0) { 1025 if (!size || process_event(event, offset, head) < 0) {
1388 1026
1389 dprintf("%p [%p]: skipping unknown header type: %d\n", 1027 dump_printf("%p [%p]: skipping unknown header type: %d\n",
1390 (void *)(offset + head), 1028 (void *)(offset + head),
1391 (void *)(long)(event->header.size), 1029 (void *)(long)(event->header.size),
1392 event->header.type); 1030 event->header.type);
@@ -1406,23 +1044,23 @@ more:
1406 1044
1407 head += size; 1045 head += size;
1408 1046
1409 if (offset + head < (unsigned long)stat.st_size) 1047 if (offset + head < (unsigned long)input_stat.st_size)
1410 goto more; 1048 goto more;
1411 1049
1412 rc = EXIT_SUCCESS; 1050 rc = EXIT_SUCCESS;
1413 close(input); 1051 close(input);
1414 1052
1415 dprintf(" IP events: %10ld\n", total); 1053 dump_printf(" IP events: %10ld\n", total);
1416 dprintf(" mmap events: %10ld\n", total_mmap); 1054 dump_printf(" mmap events: %10ld\n", total_mmap);
1417 dprintf(" comm events: %10ld\n", total_comm); 1055 dump_printf(" comm events: %10ld\n", total_comm);
1418 dprintf(" fork events: %10ld\n", total_fork); 1056 dump_printf(" fork events: %10ld\n", total_fork);
1419 dprintf(" unknown events: %10ld\n", total_unknown); 1057 dump_printf(" unknown events: %10ld\n", total_unknown);
1420 1058
1421 if (dump_trace) 1059 if (dump_trace)
1422 return 0; 1060 return 0;
1423 1061
1424 if (verbose >= 3) 1062 if (verbose >= 3)
1425 threads__fprintf(stdout); 1063 threads__fprintf(stdout, &threads);
1426 1064
1427 if (verbose >= 2) 1065 if (verbose >= 2)
1428 dsos__fprintf(stdout); 1066 dsos__fprintf(stdout);
@@ -1450,7 +1088,7 @@ static const struct option options[] = {
1450 "be more verbose (show symbol address, etc)"), 1088 "be more verbose (show symbol address, etc)"),
1451 OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, 1089 OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
1452 "dump raw trace in ASCII"), 1090 "dump raw trace in ASCII"),
1453 OPT_STRING('k', "vmlinux", &vmlinux, "file", "vmlinux pathname"), 1091 OPT_STRING('k', "vmlinux", &vmlinux_name, "file", "vmlinux pathname"),
1454 OPT_BOOLEAN('m', "modules", &modules, 1092 OPT_BOOLEAN('m', "modules", &modules,
1455 "load module symbols - WARNING: use only with -k and LIVE kernel"), 1093 "load module symbols - WARNING: use only with -k and LIVE kernel"),
1456 OPT_BOOLEAN('l', "print-line", &print_line, 1094 OPT_BOOLEAN('l', "print-line", &print_line,
diff --git a/tools/perf/builtin-help.c b/tools/perf/builtin-help.c
index 2599d86a733b..4fb8734a796e 100644
--- a/tools/perf/builtin-help.c
+++ b/tools/perf/builtin-help.c
@@ -456,6 +456,7 @@ int cmd_help(int argc, const char **argv, const char *prefix __used)
456 break; 456 break;
457 case HELP_FORMAT_WEB: 457 case HELP_FORMAT_WEB:
458 show_html_page(argv[0]); 458 show_html_page(argv[0]);
459 default:
459 break; 460 break;
460 } 461 }
461 462
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 89a5ddcd1ded..99a12fe86e9f 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -15,6 +15,9 @@
15#include "util/string.h" 15#include "util/string.h"
16 16
17#include "util/header.h" 17#include "util/header.h"
18#include "util/event.h"
19#include "util/debug.h"
20#include "util/trace-event.h"
18 21
19#include <unistd.h> 22#include <unistd.h>
20#include <sched.h> 23#include <sched.h>
@@ -42,7 +45,6 @@ static int inherit = 1;
42static int force = 0; 45static int force = 0;
43static int append_file = 0; 46static int append_file = 0;
44static int call_graph = 0; 47static int call_graph = 0;
45static int verbose = 0;
46static int inherit_stat = 0; 48static int inherit_stat = 0;
47static int no_samples = 0; 49static int no_samples = 0;
48static int sample_address = 0; 50static int sample_address = 0;
@@ -62,24 +64,6 @@ static int file_new = 1;
62 64
63struct perf_header *header; 65struct perf_header *header;
64 66
65struct mmap_event {
66 struct perf_event_header header;
67 u32 pid;
68 u32 tid;
69 u64 start;
70 u64 len;
71 u64 pgoff;
72 char filename[PATH_MAX];
73};
74
75struct comm_event {
76 struct perf_event_header header;
77 u32 pid;
78 u32 tid;
79 char comm[16];
80};
81
82
83struct mmap_data { 67struct mmap_data {
84 int counter; 68 int counter;
85 void *base; 69 void *base;
@@ -419,8 +403,11 @@ static void create_counter(int counter, int cpu, pid_t pid)
419 if (call_graph) 403 if (call_graph)
420 attr->sample_type |= PERF_SAMPLE_CALLCHAIN; 404 attr->sample_type |= PERF_SAMPLE_CALLCHAIN;
421 405
422 if (raw_samples) 406 if (raw_samples) {
407 attr->sample_type |= PERF_SAMPLE_TIME;
423 attr->sample_type |= PERF_SAMPLE_RAW; 408 attr->sample_type |= PERF_SAMPLE_RAW;
409 attr->sample_type |= PERF_SAMPLE_CPU;
410 }
424 411
425 attr->mmap = track; 412 attr->mmap = track;
426 attr->comm = track; 413 attr->comm = track;
@@ -563,6 +550,17 @@ static int __cmd_record(int argc, const char **argv)
563 else 550 else
564 header = perf_header__new(); 551 header = perf_header__new();
565 552
553
554 if (raw_samples) {
555 read_tracing_data(attrs, nr_counters);
556 } else {
557 for (i = 0; i < nr_counters; i++) {
558 if (attrs[i].sample_type & PERF_SAMPLE_RAW) {
559 read_tracing_data(attrs, nr_counters);
560 break;
561 }
562 }
563 }
566 atexit(atexit_header); 564 atexit(atexit_header);
567 565
568 if (!system_wide) { 566 if (!system_wide) {
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 8b2ec882e6e0..cdf9a8d27bb9 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -17,19 +17,18 @@
17#include "util/string.h" 17#include "util/string.h"
18#include "util/callchain.h" 18#include "util/callchain.h"
19#include "util/strlist.h" 19#include "util/strlist.h"
20#include "util/values.h"
20 21
21#include "perf.h" 22#include "perf.h"
23#include "util/debug.h"
22#include "util/header.h" 24#include "util/header.h"
23 25
24#include "util/parse-options.h" 26#include "util/parse-options.h"
25#include "util/parse-events.h" 27#include "util/parse-events.h"
26 28
27#define SHOW_KERNEL 1 29#include "util/thread.h"
28#define SHOW_USER 2
29#define SHOW_HV 4
30 30
31static char const *input_name = "perf.data"; 31static char const *input_name = "perf.data";
32static char *vmlinux = NULL;
33 32
34static char default_sort_order[] = "comm,dso,symbol"; 33static char default_sort_order[] = "comm,dso,symbol";
35static char *sort_order = default_sort_order; 34static char *sort_order = default_sort_order;
@@ -42,18 +41,15 @@ static int force;
42static int input; 41static int input;
43static int show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV; 42static int show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV;
44 43
45static int dump_trace = 0;
46#define dprintf(x...) do { if (dump_trace) printf(x); } while (0)
47#define cdprintf(x...) do { if (dump_trace) color_fprintf(stdout, color, x); } while (0)
48
49static int verbose;
50#define eprintf(x...) do { if (verbose) fprintf(stderr, x); } while (0)
51
52static int modules;
53
54static int full_paths; 44static int full_paths;
55static int show_nr_samples; 45static int show_nr_samples;
56 46
47static int show_threads;
48static struct perf_read_values show_threads_values;
49
50static char default_pretty_printing_style[] = "normal";
51static char *pretty_printing_style = default_pretty_printing_style;
52
57static unsigned long page_size; 53static unsigned long page_size;
58static unsigned long mmap_window = 32; 54static unsigned long mmap_window = 32;
59 55
@@ -67,6 +63,15 @@ static char callchain_default_opt[] = "fractal,0.5";
67 63
68static int callchain; 64static int callchain;
69 65
66static char __cwd[PATH_MAX];
67static char *cwd = __cwd;
68static int cwdlen;
69
70static struct rb_root threads;
71static struct thread *last_match;
72
73static struct perf_header *header;
74
70static 75static
71struct callchain_param callchain_param = { 76struct callchain_param callchain_param = {
72 .mode = CHAIN_GRAPH_REL, 77 .mode = CHAIN_GRAPH_REL,
@@ -75,59 +80,6 @@ struct callchain_param callchain_param = {
75 80
76static u64 sample_type; 81static u64 sample_type;
77 82
78struct ip_event {
79 struct perf_event_header header;
80 u64 ip;
81 u32 pid, tid;
82 unsigned char __more_data[];
83};
84
85struct mmap_event {
86 struct perf_event_header header;
87 u32 pid, tid;
88 u64 start;
89 u64 len;
90 u64 pgoff;
91 char filename[PATH_MAX];
92};
93
94struct comm_event {
95 struct perf_event_header header;
96 u32 pid, tid;
97 char comm[16];
98};
99
100struct fork_event {
101 struct perf_event_header header;
102 u32 pid, ppid;
103 u32 tid, ptid;
104};
105
106struct lost_event {
107 struct perf_event_header header;
108 u64 id;
109 u64 lost;
110};
111
112struct read_event {
113 struct perf_event_header header;
114 u32 pid,tid;
115 u64 value;
116 u64 time_enabled;
117 u64 time_running;
118 u64 id;
119};
120
121typedef union event_union {
122 struct perf_event_header header;
123 struct ip_event ip;
124 struct mmap_event mmap;
125 struct comm_event comm;
126 struct fork_event fork;
127 struct lost_event lost;
128 struct read_event read;
129} event_t;
130
131static int repsep_fprintf(FILE *fp, const char *fmt, ...) 83static int repsep_fprintf(FILE *fp, const char *fmt, ...)
132{ 84{
133 int n; 85 int n;
@@ -141,6 +93,7 @@ static int repsep_fprintf(FILE *fp, const char *fmt, ...)
141 n = vasprintf(&bf, fmt, ap); 93 n = vasprintf(&bf, fmt, ap);
142 if (n > 0) { 94 if (n > 0) {
143 char *sep = bf; 95 char *sep = bf;
96
144 while (1) { 97 while (1) {
145 sep = strchr(sep, *field_sep); 98 sep = strchr(sep, *field_sep);
146 if (sep == NULL) 99 if (sep == NULL)
@@ -155,396 +108,10 @@ static int repsep_fprintf(FILE *fp, const char *fmt, ...)
155 return n; 108 return n;
156} 109}
157 110
158static LIST_HEAD(dsos);
159static struct dso *kernel_dso;
160static struct dso *vdso;
161static struct dso *hypervisor_dso;
162
163static void dsos__add(struct dso *dso)
164{
165 list_add_tail(&dso->node, &dsos);
166}
167
168static struct dso *dsos__find(const char *name)
169{
170 struct dso *pos;
171
172 list_for_each_entry(pos, &dsos, node)
173 if (strcmp(pos->name, name) == 0)
174 return pos;
175 return NULL;
176}
177
178static struct dso *dsos__findnew(const char *name)
179{
180 struct dso *dso = dsos__find(name);
181 int nr;
182
183 if (dso)
184 return dso;
185
186 dso = dso__new(name, 0);
187 if (!dso)
188 goto out_delete_dso;
189
190 nr = dso__load(dso, NULL, verbose);
191 if (nr < 0) {
192 eprintf("Failed to open: %s\n", name);
193 goto out_delete_dso;
194 }
195 if (!nr)
196 eprintf("No symbols found in: %s, maybe install a debug package?\n", name);
197
198 dsos__add(dso);
199
200 return dso;
201
202out_delete_dso:
203 dso__delete(dso);
204 return NULL;
205}
206
207static void dsos__fprintf(FILE *fp)
208{
209 struct dso *pos;
210
211 list_for_each_entry(pos, &dsos, node)
212 dso__fprintf(pos, fp);
213}
214
215static struct symbol *vdso__find_symbol(struct dso *dso, u64 ip)
216{
217 return dso__find_symbol(dso, ip);
218}
219
220static int load_kernel(void)
221{
222 int err;
223
224 kernel_dso = dso__new("[kernel]", 0);
225 if (!kernel_dso)
226 return -1;
227
228 err = dso__load_kernel(kernel_dso, vmlinux, NULL, verbose, modules);
229 if (err <= 0) {
230 dso__delete(kernel_dso);
231 kernel_dso = NULL;
232 } else
233 dsos__add(kernel_dso);
234
235 vdso = dso__new("[vdso]", 0);
236 if (!vdso)
237 return -1;
238
239 vdso->find_symbol = vdso__find_symbol;
240
241 dsos__add(vdso);
242
243 hypervisor_dso = dso__new("[hypervisor]", 0);
244 if (!hypervisor_dso)
245 return -1;
246 dsos__add(hypervisor_dso);
247
248 return err;
249}
250
251static char __cwd[PATH_MAX];
252static char *cwd = __cwd;
253static int cwdlen;
254
255static int strcommon(const char *pathname)
256{
257 int n = 0;
258
259 while (n < cwdlen && pathname[n] == cwd[n])
260 ++n;
261
262 return n;
263}
264
265struct map {
266 struct list_head node;
267 u64 start;
268 u64 end;
269 u64 pgoff;
270 u64 (*map_ip)(struct map *, u64);
271 struct dso *dso;
272};
273
274static u64 map__map_ip(struct map *map, u64 ip)
275{
276 return ip - map->start + map->pgoff;
277}
278
279static u64 vdso__map_ip(struct map *map __used, u64 ip)
280{
281 return ip;
282}
283
284static inline int is_anon_memory(const char *filename)
285{
286 return strcmp(filename, "//anon") == 0;
287}
288
289static struct map *map__new(struct mmap_event *event)
290{
291 struct map *self = malloc(sizeof(*self));
292
293 if (self != NULL) {
294 const char *filename = event->filename;
295 char newfilename[PATH_MAX];
296 int anon;
297
298 if (cwd) {
299 int n = strcommon(filename);
300
301 if (n == cwdlen) {
302 snprintf(newfilename, sizeof(newfilename),
303 ".%s", filename + n);
304 filename = newfilename;
305 }
306 }
307
308 anon = is_anon_memory(filename);
309
310 if (anon) {
311 snprintf(newfilename, sizeof(newfilename), "/tmp/perf-%d.map", event->pid);
312 filename = newfilename;
313 }
314
315 self->start = event->start;
316 self->end = event->start + event->len;
317 self->pgoff = event->pgoff;
318
319 self->dso = dsos__findnew(filename);
320 if (self->dso == NULL)
321 goto out_delete;
322
323 if (self->dso == vdso || anon)
324 self->map_ip = vdso__map_ip;
325 else
326 self->map_ip = map__map_ip;
327 }
328 return self;
329out_delete:
330 free(self);
331 return NULL;
332}
333
334static struct map *map__clone(struct map *self)
335{
336 struct map *map = malloc(sizeof(*self));
337
338 if (!map)
339 return NULL;
340
341 memcpy(map, self, sizeof(*self));
342
343 return map;
344}
345
346static int map__overlap(struct map *l, struct map *r)
347{
348 if (l->start > r->start) {
349 struct map *t = l;
350 l = r;
351 r = t;
352 }
353
354 if (l->end > r->start)
355 return 1;
356
357 return 0;
358}
359
360static size_t map__fprintf(struct map *self, FILE *fp)
361{
362 return fprintf(fp, " %Lx-%Lx %Lx %s\n",
363 self->start, self->end, self->pgoff, self->dso->name);
364}
365
366
367struct thread {
368 struct rb_node rb_node;
369 struct list_head maps;
370 pid_t pid;
371 char *comm;
372};
373
374static struct thread *thread__new(pid_t pid)
375{
376 struct thread *self = malloc(sizeof(*self));
377
378 if (self != NULL) {
379 self->pid = pid;
380 self->comm = malloc(32);
381 if (self->comm)
382 snprintf(self->comm, 32, ":%d", self->pid);
383 INIT_LIST_HEAD(&self->maps);
384 }
385
386 return self;
387}
388
389static unsigned int dsos__col_width, 111static unsigned int dsos__col_width,
390 comms__col_width, 112 comms__col_width,
391 threads__col_width; 113 threads__col_width;
392 114
393static int thread__set_comm(struct thread *self, const char *comm)
394{
395 if (self->comm)
396 free(self->comm);
397 self->comm = strdup(comm);
398 if (!self->comm)
399 return -ENOMEM;
400
401 if (!col_width_list_str && !field_sep &&
402 (!comm_list || strlist__has_entry(comm_list, comm))) {
403 unsigned int slen = strlen(comm);
404 if (slen > comms__col_width) {
405 comms__col_width = slen;
406 threads__col_width = slen + 6;
407 }
408 }
409
410 return 0;
411}
412
413static size_t thread__fprintf(struct thread *self, FILE *fp)
414{
415 struct map *pos;
416 size_t ret = fprintf(fp, "Thread %d %s\n", self->pid, self->comm);
417
418 list_for_each_entry(pos, &self->maps, node)
419 ret += map__fprintf(pos, fp);
420
421 return ret;
422}
423
424
425static struct rb_root threads;
426static struct thread *last_match;
427
428static struct thread *threads__findnew(pid_t pid)
429{
430 struct rb_node **p = &threads.rb_node;
431 struct rb_node *parent = NULL;
432 struct thread *th;
433
434 /*
435 * Front-end cache - PID lookups come in blocks,
436 * so most of the time we don't have to look up
437 * the full rbtree:
438 */
439 if (last_match && last_match->pid == pid)
440 return last_match;
441
442 while (*p != NULL) {
443 parent = *p;
444 th = rb_entry(parent, struct thread, rb_node);
445
446 if (th->pid == pid) {
447 last_match = th;
448 return th;
449 }
450
451 if (pid < th->pid)
452 p = &(*p)->rb_left;
453 else
454 p = &(*p)->rb_right;
455 }
456
457 th = thread__new(pid);
458 if (th != NULL) {
459 rb_link_node(&th->rb_node, parent, p);
460 rb_insert_color(&th->rb_node, &threads);
461 last_match = th;
462 }
463
464 return th;
465}
466
467static void thread__insert_map(struct thread *self, struct map *map)
468{
469 struct map *pos, *tmp;
470
471 list_for_each_entry_safe(pos, tmp, &self->maps, node) {
472 if (map__overlap(pos, map)) {
473 if (verbose >= 2) {
474 printf("overlapping maps:\n");
475 map__fprintf(map, stdout);
476 map__fprintf(pos, stdout);
477 }
478
479 if (map->start <= pos->start && map->end > pos->start)
480 pos->start = map->end;
481
482 if (map->end >= pos->end && map->start < pos->end)
483 pos->end = map->start;
484
485 if (verbose >= 2) {
486 printf("after collision:\n");
487 map__fprintf(pos, stdout);
488 }
489
490 if (pos->start >= pos->end) {
491 list_del_init(&pos->node);
492 free(pos);
493 }
494 }
495 }
496
497 list_add_tail(&map->node, &self->maps);
498}
499
500static int thread__fork(struct thread *self, struct thread *parent)
501{
502 struct map *map;
503
504 if (self->comm)
505 free(self->comm);
506 self->comm = strdup(parent->comm);
507 if (!self->comm)
508 return -ENOMEM;
509
510 list_for_each_entry(map, &parent->maps, node) {
511 struct map *new = map__clone(map);
512 if (!new)
513 return -ENOMEM;
514 thread__insert_map(self, new);
515 }
516
517 return 0;
518}
519
520static struct map *thread__find_map(struct thread *self, u64 ip)
521{
522 struct map *pos;
523
524 if (self == NULL)
525 return NULL;
526
527 list_for_each_entry(pos, &self->maps, node)
528 if (ip >= pos->start && ip <= pos->end)
529 return pos;
530
531 return NULL;
532}
533
534static size_t threads__fprintf(FILE *fp)
535{
536 size_t ret = 0;
537 struct rb_node *nd;
538
539 for (nd = rb_first(&threads); nd; nd = rb_next(nd)) {
540 struct thread *pos = rb_entry(nd, struct thread, rb_node);
541
542 ret += thread__fprintf(pos, fp);
543 }
544
545 return ret;
546}
547
548/* 115/*
549 * histogram, sorted on item, collects counts 116 * histogram, sorted on item, collects counts
550 */ 117 */
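
The threads__findnew() helper removed in the hunk above (and apparently moved behind the new util/thread.h interface, given the added include and the three-argument calls later in this patch) keys threads by pid in an rbtree and keeps a one-entry last_match cache in front of the tree, since consecutive events usually come from the same pid. A minimal, self-contained sketch of that "front-end cache plus search" pattern, using a plain list instead of the kernel rbtree purely to keep the example short:

/* Sketch of the lookup pattern used by threads__findnew(). The real code
 * uses the kernel rbtree; a linear list is used here only for brevity. */
#include <stdio.h>
#include <stdlib.h>

struct thread_ent {
	struct thread_ent *next;
	int pid;
};

static struct thread_ent *threads_head;
static struct thread_ent *last_match;	/* one-entry front-end cache */

static struct thread_ent *threads_findnew(int pid)
{
	struct thread_ent *t;

	/* PID lookups come in blocks, so the cache usually hits. */
	if (last_match && last_match->pid == pid)
		return last_match;

	for (t = threads_head; t; t = t->next) {
		if (t->pid == pid) {
			last_match = t;
			return t;
		}
	}

	t = malloc(sizeof(*t));
	if (t) {
		t->pid = pid;
		t->next = threads_head;
		threads_head = t;
		last_match = t;
	}
	return t;
}

int main(void)
{
	int pids[] = { 42, 42, 42, 7, 7, 42 };
	unsigned int i;

	for (i = 0; i < sizeof(pids) / sizeof(pids[0]); i++)
		printf("pid %d -> %p\n", pids[i], (void *)threads_findnew(pids[i]));
	return 0;
}
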
@@ -574,7 +141,7 @@ struct hist_entry {
574struct sort_entry { 141struct sort_entry {
575 struct list_head list; 142 struct list_head list;
576 143
577 char *header; 144 const char *header;
578 145
579 int64_t (*cmp)(struct hist_entry *, struct hist_entry *); 146 int64_t (*cmp)(struct hist_entry *, struct hist_entry *);
580 int64_t (*collapse)(struct hist_entry *, struct hist_entry *); 147 int64_t (*collapse)(struct hist_entry *, struct hist_entry *);
@@ -758,7 +325,7 @@ static int sort__need_collapse = 0;
758static int sort__has_parent = 0; 325static int sort__has_parent = 0;
759 326
760struct sort_dimension { 327struct sort_dimension {
761 char *name; 328 const char *name;
762 struct sort_entry *entry; 329 struct sort_entry *entry;
763 int taken; 330 int taken;
764}; 331};
@@ -773,7 +340,7 @@ static struct sort_dimension sort_dimensions[] = {
773 340
774static LIST_HEAD(hist_entry__sort_list); 341static LIST_HEAD(hist_entry__sort_list);
775 342
776static int sort_dimension__add(char *tok) 343static int sort_dimension__add(const char *tok)
777{ 344{
778 unsigned int i; 345 unsigned int i;
779 346
@@ -1032,6 +599,7 @@ hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self,
1032 case CHAIN_GRAPH_REL: 599 case CHAIN_GRAPH_REL:
1033 ret += callchain__fprintf_graph(fp, chain, 600 ret += callchain__fprintf_graph(fp, chain,
1034 total_samples, 1, 1); 601 total_samples, 1, 1);
602 case CHAIN_NONE:
1035 default: 603 default:
1036 break; 604 break;
1037 } 605 }
@@ -1098,6 +666,34 @@ static void dso__calc_col_width(struct dso *self)
1098 self->slen_calculated = 1; 666 self->slen_calculated = 1;
1099} 667}
1100 668
669static void thread__comm_adjust(struct thread *self)
670{
671 char *comm = self->comm;
672
673 if (!col_width_list_str && !field_sep &&
674 (!comm_list || strlist__has_entry(comm_list, comm))) {
675 unsigned int slen = strlen(comm);
676
677 if (slen > comms__col_width) {
678 comms__col_width = slen;
679 threads__col_width = slen + 6;
680 }
681 }
682}
683
684static int thread__set_comm_adjust(struct thread *self, const char *comm)
685{
686 int ret = thread__set_comm(self, comm);
687
688 if (ret)
689 return ret;
690
691 thread__comm_adjust(self);
692
693 return 0;
694}
695
696
1101static struct symbol * 697static struct symbol *
1102resolve_symbol(struct thread *thread, struct map **mapp, 698resolve_symbol(struct thread *thread, struct map **mapp,
1103 struct dso **dsop, u64 *ipp) 699 struct dso **dsop, u64 *ipp)
@@ -1141,8 +737,8 @@ got_map:
1141 if ((long long)ip < 0) 737 if ((long long)ip < 0)
1142 dso = kernel_dso; 738 dso = kernel_dso;
1143 } 739 }
1144 dprintf(" ...... dso: %s\n", dso ? dso->name : "<not found>"); 740 dump_printf(" ...... dso: %s\n", dso ? dso->name : "<not found>");
1145 dprintf(" ...... map: %Lx -> %Lx\n", *ipp, ip); 741 dump_printf(" ...... map: %Lx -> %Lx\n", *ipp, ip);
1146 *ipp = ip; 742 *ipp = ip;
1147 743
1148 if (dsop) 744 if (dsop)
@@ -1398,6 +994,9 @@ static size_t output__fprintf(FILE *fp, u64 total_samples)
1398 size_t ret = 0; 994 size_t ret = 0;
1399 unsigned int width; 995 unsigned int width;
1400 char *col_width = col_width_list_str; 996 char *col_width = col_width_list_str;
997 int raw_printing_style;
998
999 raw_printing_style = !strcmp(pretty_printing_style, "raw");
1401 1000
1402 init_rem_hits(); 1001 init_rem_hits();
1403 1002
@@ -1474,18 +1073,11 @@ print_entries:
1474 1073
1475 free(rem_sq_bracket); 1074 free(rem_sq_bracket);
1476 1075
1477 return ret; 1076 if (show_threads)
1478} 1077 perf_read_values_display(fp, &show_threads_values,
1078 raw_printing_style);
1479 1079
1480static void register_idle_thread(void) 1080 return ret;
1481{
1482 struct thread *thread = threads__findnew(0);
1483
1484 if (thread == NULL ||
1485 thread__set_comm(thread, "[idle]")) {
1486 fprintf(stderr, "problem inserting idle task.\n");
1487 exit(-1);
1488 }
1489} 1081}
1490 1082
1491static unsigned long total = 0, 1083static unsigned long total = 0,
@@ -1514,7 +1106,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
1514 char level; 1106 char level;
1515 int show = 0; 1107 int show = 0;
1516 struct dso *dso = NULL; 1108 struct dso *dso = NULL;
1517 struct thread *thread = threads__findnew(event->ip.pid); 1109 struct thread *thread;
1518 u64 ip = event->ip.ip; 1110 u64 ip = event->ip.ip;
1519 u64 period = 1; 1111 u64 period = 1;
1520 struct map *map = NULL; 1112 struct map *map = NULL;
@@ -1522,12 +1114,14 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
1522 struct ip_callchain *chain = NULL; 1114 struct ip_callchain *chain = NULL;
1523 int cpumode; 1115 int cpumode;
1524 1116
1117 thread = threads__findnew(event->ip.pid, &threads, &last_match);
1118
1525 if (sample_type & PERF_SAMPLE_PERIOD) { 1119 if (sample_type & PERF_SAMPLE_PERIOD) {
1526 period = *(u64 *)more_data; 1120 period = *(u64 *)more_data;
1527 more_data += sizeof(u64); 1121 more_data += sizeof(u64);
1528 } 1122 }
1529 1123
1530 dprintf("%p [%p]: PERF_EVENT_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n", 1124 dump_printf("%p [%p]: PERF_EVENT_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n",
1531 (void *)(offset + head), 1125 (void *)(offset + head),
1532 (void *)(long)(event->header.size), 1126 (void *)(long)(event->header.size),
1533 event->header.misc, 1127 event->header.misc,
@@ -1540,7 +1134,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
1540 1134
1541 chain = (void *)more_data; 1135 chain = (void *)more_data;
1542 1136
1543 dprintf("... chain: nr:%Lu\n", chain->nr); 1137 dump_printf("... chain: nr:%Lu\n", chain->nr);
1544 1138
1545 if (validate_chain(chain, event) < 0) { 1139 if (validate_chain(chain, event) < 0) {
1546 eprintf("call-chain problem with event, skipping it.\n"); 1140 eprintf("call-chain problem with event, skipping it.\n");
@@ -1549,11 +1143,11 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
1549 1143
1550 if (dump_trace) { 1144 if (dump_trace) {
1551 for (i = 0; i < chain->nr; i++) 1145 for (i = 0; i < chain->nr; i++)
1552 dprintf("..... %2d: %016Lx\n", i, chain->ips[i]); 1146 dump_printf("..... %2d: %016Lx\n", i, chain->ips[i]);
1553 } 1147 }
1554 } 1148 }
1555 1149
1556 dprintf(" ... thread: %s:%d\n", thread->comm, thread->pid); 1150 dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
1557 1151
1558 if (thread == NULL) { 1152 if (thread == NULL) {
1559 eprintf("problem processing %d event, skipping it.\n", 1153 eprintf("problem processing %d event, skipping it.\n",
@@ -1572,7 +1166,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
1572 1166
1573 dso = kernel_dso; 1167 dso = kernel_dso;
1574 1168
1575 dprintf(" ...... dso: %s\n", dso->name); 1169 dump_printf(" ...... dso: %s\n", dso->name);
1576 1170
1577 } else if (cpumode == PERF_EVENT_MISC_USER) { 1171 } else if (cpumode == PERF_EVENT_MISC_USER) {
1578 1172
@@ -1585,7 +1179,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
1585 1179
1586 dso = hypervisor_dso; 1180 dso = hypervisor_dso;
1587 1181
1588 dprintf(" ...... dso: [hypervisor]\n"); 1182 dump_printf(" ...... dso: [hypervisor]\n");
1589 } 1183 }
1590 1184
1591 if (show & show_mask) { 1185 if (show & show_mask) {
@@ -1611,10 +1205,12 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
1611static int 1205static int
1612process_mmap_event(event_t *event, unsigned long offset, unsigned long head) 1206process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
1613{ 1207{
1614 struct thread *thread = threads__findnew(event->mmap.pid); 1208 struct thread *thread;
1615 struct map *map = map__new(&event->mmap); 1209 struct map *map = map__new(&event->mmap, cwd, cwdlen);
1616 1210
1617 dprintf("%p [%p]: PERF_EVENT_MMAP %d/%d: [%p(%p) @ %p]: %s\n", 1211 thread = threads__findnew(event->mmap.pid, &threads, &last_match);
1212
1213 dump_printf("%p [%p]: PERF_EVENT_MMAP %d/%d: [%p(%p) @ %p]: %s\n",
1618 (void *)(offset + head), 1214 (void *)(offset + head),
1619 (void *)(long)(event->header.size), 1215 (void *)(long)(event->header.size),
1620 event->mmap.pid, 1216 event->mmap.pid,
@@ -1625,7 +1221,7 @@ process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
1625 event->mmap.filename); 1221 event->mmap.filename);
1626 1222
1627 if (thread == NULL || map == NULL) { 1223 if (thread == NULL || map == NULL) {
1628 dprintf("problem processing PERF_EVENT_MMAP, skipping event.\n"); 1224 dump_printf("problem processing PERF_EVENT_MMAP, skipping event.\n");
1629 return 0; 1225 return 0;
1630 } 1226 }
1631 1227
@@ -1638,16 +1234,18 @@ process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
1638static int 1234static int
1639process_comm_event(event_t *event, unsigned long offset, unsigned long head) 1235process_comm_event(event_t *event, unsigned long offset, unsigned long head)
1640{ 1236{
1641 struct thread *thread = threads__findnew(event->comm.pid); 1237 struct thread *thread;
1238
1239 thread = threads__findnew(event->comm.pid, &threads, &last_match);
1642 1240
1643 dprintf("%p [%p]: PERF_EVENT_COMM: %s:%d\n", 1241 dump_printf("%p [%p]: PERF_EVENT_COMM: %s:%d\n",
1644 (void *)(offset + head), 1242 (void *)(offset + head),
1645 (void *)(long)(event->header.size), 1243 (void *)(long)(event->header.size),
1646 event->comm.comm, event->comm.pid); 1244 event->comm.comm, event->comm.pid);
1647 1245
1648 if (thread == NULL || 1246 if (thread == NULL ||
1649 thread__set_comm(thread, event->comm.comm)) { 1247 thread__set_comm_adjust(thread, event->comm.comm)) {
1650 dprintf("problem processing PERF_EVENT_COMM, skipping event.\n"); 1248 dump_printf("problem processing PERF_EVENT_COMM, skipping event.\n");
1651 return -1; 1249 return -1;
1652 } 1250 }
1653 total_comm++; 1251 total_comm++;
@@ -1658,10 +1256,13 @@ process_comm_event(event_t *event, unsigned long offset, unsigned long head)
1658static int 1256static int
1659process_task_event(event_t *event, unsigned long offset, unsigned long head) 1257process_task_event(event_t *event, unsigned long offset, unsigned long head)
1660{ 1258{
1661 struct thread *thread = threads__findnew(event->fork.pid); 1259 struct thread *thread;
1662 struct thread *parent = threads__findnew(event->fork.ppid); 1260 struct thread *parent;
1663 1261
1664 dprintf("%p [%p]: PERF_EVENT_%s: (%d:%d):(%d:%d)\n", 1262 thread = threads__findnew(event->fork.pid, &threads, &last_match);
1263 parent = threads__findnew(event->fork.ppid, &threads, &last_match);
1264
1265 dump_printf("%p [%p]: PERF_EVENT_%s: (%d:%d):(%d:%d)\n",
1665 (void *)(offset + head), 1266 (void *)(offset + head),
1666 (void *)(long)(event->header.size), 1267 (void *)(long)(event->header.size),
1667 event->header.type == PERF_EVENT_FORK ? "FORK" : "EXIT", 1268 event->header.type == PERF_EVENT_FORK ? "FORK" : "EXIT",
@@ -1679,7 +1280,7 @@ process_task_event(event_t *event, unsigned long offset, unsigned long head)
1679 return 0; 1280 return 0;
1680 1281
1681 if (!thread || !parent || thread__fork(thread, parent)) { 1282 if (!thread || !parent || thread__fork(thread, parent)) {
1682 dprintf("problem processing PERF_EVENT_FORK, skipping event.\n"); 1283 dump_printf("problem processing PERF_EVENT_FORK, skipping event.\n");
1683 return -1; 1284 return -1;
1684 } 1285 }
1685 total_fork++; 1286 total_fork++;
@@ -1690,7 +1291,7 @@ process_task_event(event_t *event, unsigned long offset, unsigned long head)
1690static int 1291static int
1691process_lost_event(event_t *event, unsigned long offset, unsigned long head) 1292process_lost_event(event_t *event, unsigned long offset, unsigned long head)
1692{ 1293{
1693 dprintf("%p [%p]: PERF_EVENT_LOST: id:%Ld: lost:%Ld\n", 1294 dump_printf("%p [%p]: PERF_EVENT_LOST: id:%Ld: lost:%Ld\n",
1694 (void *)(offset + head), 1295 (void *)(offset + head),
1695 (void *)(long)(event->header.size), 1296 (void *)(long)(event->header.size),
1696 event->lost.id, 1297 event->lost.id,
@@ -1701,67 +1302,24 @@ process_lost_event(event_t *event, unsigned long offset, unsigned long head)
1701 return 0; 1302 return 0;
1702} 1303}
1703 1304
1704static void trace_event(event_t *event) 1305static int
1705{ 1306process_read_event(event_t *event, unsigned long offset, unsigned long head)
1706 unsigned char *raw_event = (void *)event;
1707 char *color = PERF_COLOR_BLUE;
1708 int i, j;
1709
1710 if (!dump_trace)
1711 return;
1712
1713 dprintf(".");
1714 cdprintf("\n. ... raw event: size %d bytes\n", event->header.size);
1715
1716 for (i = 0; i < event->header.size; i++) {
1717 if ((i & 15) == 0) {
1718 dprintf(".");
1719 cdprintf(" %04x: ", i);
1720 }
1721
1722 cdprintf(" %02x", raw_event[i]);
1723
1724 if (((i & 15) == 15) || i == event->header.size-1) {
1725 cdprintf(" ");
1726 for (j = 0; j < 15-(i & 15); j++)
1727 cdprintf(" ");
1728 for (j = 0; j < (i & 15); j++) {
1729 if (isprint(raw_event[i-15+j]))
1730 cdprintf("%c", raw_event[i-15+j]);
1731 else
1732 cdprintf(".");
1733 }
1734 cdprintf("\n");
1735 }
1736 }
1737 dprintf(".\n");
1738}
1739
1740static struct perf_header *header;
1741
1742static struct perf_counter_attr *perf_header__find_attr(u64 id)
1743{ 1307{
1744 int i; 1308 struct perf_counter_attr *attr;
1745 1309
1746 for (i = 0; i < header->attrs; i++) { 1310 attr = perf_header__find_attr(event->read.id, header);
1747 struct perf_header_attr *attr = header->attr[i];
1748 int j;
1749 1311
1750 for (j = 0; j < attr->ids; j++) { 1312 if (show_threads) {
1751 if (attr->id[j] == id) 1313 const char *name = attr ? __event_name(attr->type, attr->config)
1752 return &attr->attr; 1314 : "unknown";
1753 } 1315 perf_read_values_add_value(&show_threads_values,
1316 event->read.pid, event->read.tid,
1317 event->read.id,
1318 name,
1319 event->read.value);
1754 } 1320 }
1755 1321
1756 return NULL; 1322 dump_printf("%p [%p]: PERF_EVENT_READ: %d %d %s %Lu\n",
1757}
1758
1759static int
1760process_read_event(event_t *event, unsigned long offset, unsigned long head)
1761{
1762 struct perf_counter_attr *attr = perf_header__find_attr(event->read.id);
1763
1764 dprintf("%p [%p]: PERF_EVENT_READ: %d %d %s %Lu\n",
1765 (void *)(offset + head), 1323 (void *)(offset + head),
1766 (void *)(long)(event->header.size), 1324 (void *)(long)(event->header.size),
1767 event->read.pid, 1325 event->read.pid,
@@ -1813,34 +1371,22 @@ process_event(event_t *event, unsigned long offset, unsigned long head)
1813 return 0; 1371 return 0;
1814} 1372}
1815 1373
1816static u64 perf_header__sample_type(void)
1817{
1818 u64 sample_type = 0;
1819 int i;
1820
1821 for (i = 0; i < header->attrs; i++) {
1822 struct perf_header_attr *attr = header->attr[i];
1823
1824 if (!sample_type)
1825 sample_type = attr->attr.sample_type;
1826 else if (sample_type != attr->attr.sample_type)
1827 die("non matching sample_type");
1828 }
1829
1830 return sample_type;
1831}
1832
1833static int __cmd_report(void) 1374static int __cmd_report(void)
1834{ 1375{
1835 int ret, rc = EXIT_FAILURE; 1376 int ret, rc = EXIT_FAILURE;
1836 unsigned long offset = 0; 1377 unsigned long offset = 0;
1837 unsigned long head, shift; 1378 unsigned long head, shift;
1838 struct stat stat; 1379 struct stat input_stat;
1380 struct thread *idle;
1839 event_t *event; 1381 event_t *event;
1840 uint32_t size; 1382 uint32_t size;
1841 char *buf; 1383 char *buf;
1842 1384
1843 register_idle_thread(); 1385 idle = register_idle_thread(&threads, &last_match);
1386 thread__comm_adjust(idle);
1387
1388 if (show_threads)
1389 perf_read_values_init(&show_threads_values);
1844 1390
1845 input = open(input_name, O_RDONLY); 1391 input = open(input_name, O_RDONLY);
1846 if (input < 0) { 1392 if (input < 0) {
@@ -1851,18 +1397,18 @@ static int __cmd_report(void)
1851 exit(-1); 1397 exit(-1);
1852 } 1398 }
1853 1399
1854 ret = fstat(input, &stat); 1400 ret = fstat(input, &input_stat);
1855 if (ret < 0) { 1401 if (ret < 0) {
1856 perror("failed to stat file"); 1402 perror("failed to stat file");
1857 exit(-1); 1403 exit(-1);
1858 } 1404 }
1859 1405
1860 if (!force && (stat.st_uid != geteuid())) { 1406 if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
1861 fprintf(stderr, "file: %s not owned by current user\n", input_name); 1407 fprintf(stderr, "file: %s not owned by current user or root\n", input_name);
1862 exit(-1); 1408 exit(-1);
1863 } 1409 }
1864 1410
1865 if (!stat.st_size) { 1411 if (!input_stat.st_size) {
1866 fprintf(stderr, "zero-sized file, nothing to do!\n"); 1412 fprintf(stderr, "zero-sized file, nothing to do!\n");
1867 exit(0); 1413 exit(0);
1868 } 1414 }
@@ -1870,7 +1416,7 @@ static int __cmd_report(void)
1870 header = perf_header__read(input); 1416 header = perf_header__read(input);
1871 head = header->data_offset; 1417 head = header->data_offset;
1872 1418
1873 sample_type = perf_header__sample_type(); 1419 sample_type = perf_header__sample_type(header);
1874 1420
1875 if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) { 1421 if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) {
1876 if (sort__has_parent) { 1422 if (sort__has_parent) {
@@ -1880,7 +1426,7 @@ static int __cmd_report(void)
1880 exit(-1); 1426 exit(-1);
1881 } 1427 }
1882 if (callchain) { 1428 if (callchain) {
1883 fprintf(stderr, "selected -c but no callchain data." 1429 fprintf(stderr, "selected -g but no callchain data."
1884 " Did you call perf record without" 1430 " Did you call perf record without"
1885 " -g?\n"); 1431 " -g?\n");
1886 exit(-1); 1432 exit(-1);
@@ -1930,12 +1476,12 @@ more:
1930 size = 8; 1476 size = 8;
1931 1477
1932 if (head + event->header.size >= page_size * mmap_window) { 1478 if (head + event->header.size >= page_size * mmap_window) {
1933 int ret; 1479 int munmap_ret;
1934 1480
1935 shift = page_size * (head / page_size); 1481 shift = page_size * (head / page_size);
1936 1482
1937 ret = munmap(buf, page_size * mmap_window); 1483 munmap_ret = munmap(buf, page_size * mmap_window);
1938 assert(ret == 0); 1484 assert(munmap_ret == 0);
1939 1485
1940 offset += shift; 1486 offset += shift;
1941 head -= shift; 1487 head -= shift;
@@ -1944,14 +1490,14 @@ more:
1944 1490
1945 size = event->header.size; 1491 size = event->header.size;
1946 1492
1947 dprintf("\n%p [%p]: event: %d\n", 1493 dump_printf("\n%p [%p]: event: %d\n",
1948 (void *)(offset + head), 1494 (void *)(offset + head),
1949 (void *)(long)event->header.size, 1495 (void *)(long)event->header.size,
1950 event->header.type); 1496 event->header.type);
1951 1497
1952 if (!size || process_event(event, offset, head) < 0) { 1498 if (!size || process_event(event, offset, head) < 0) {
1953 1499
1954 dprintf("%p [%p]: skipping unknown header type: %d\n", 1500 dump_printf("%p [%p]: skipping unknown header type: %d\n",
1955 (void *)(offset + head), 1501 (void *)(offset + head),
1956 (void *)(long)(event->header.size), 1502 (void *)(long)(event->header.size),
1957 event->header.type); 1503 event->header.type);
@@ -1974,25 +1520,25 @@ more:
1974 if (offset + head >= header->data_offset + header->data_size) 1520 if (offset + head >= header->data_offset + header->data_size)
1975 goto done; 1521 goto done;
1976 1522
1977 if (offset + head < (unsigned long)stat.st_size) 1523 if (offset + head < (unsigned long)input_stat.st_size)
1978 goto more; 1524 goto more;
1979 1525
1980done: 1526done:
1981 rc = EXIT_SUCCESS; 1527 rc = EXIT_SUCCESS;
1982 close(input); 1528 close(input);
1983 1529
1984 dprintf(" IP events: %10ld\n", total); 1530 dump_printf(" IP events: %10ld\n", total);
1985 dprintf(" mmap events: %10ld\n", total_mmap); 1531 dump_printf(" mmap events: %10ld\n", total_mmap);
1986 dprintf(" comm events: %10ld\n", total_comm); 1532 dump_printf(" comm events: %10ld\n", total_comm);
1987 dprintf(" fork events: %10ld\n", total_fork); 1533 dump_printf(" fork events: %10ld\n", total_fork);
1988 dprintf(" lost events: %10ld\n", total_lost); 1534 dump_printf(" lost events: %10ld\n", total_lost);
1989 dprintf(" unknown events: %10ld\n", total_unknown); 1535 dump_printf(" unknown events: %10ld\n", total_unknown);
1990 1536
1991 if (dump_trace) 1537 if (dump_trace)
1992 return 0; 1538 return 0;
1993 1539
1994 if (verbose >= 3) 1540 if (verbose >= 3)
1995 threads__fprintf(stdout); 1541 threads__fprintf(stdout, &threads);
1996 1542
1997 if (verbose >= 2) 1543 if (verbose >= 2)
1998 dsos__fprintf(stdout); 1544 dsos__fprintf(stdout);
@@ -2001,6 +1547,9 @@ done:
2001 output__resort(total); 1547 output__resort(total);
2002 output__fprintf(stdout, total); 1548 output__fprintf(stdout, total);
2003 1549
1550 if (show_threads)
1551 perf_read_values_destroy(&show_threads_values);
1552
2004 return rc; 1553 return rc;
2005} 1554}
2006 1555
@@ -2069,12 +1618,16 @@ static const struct option options[] = {
2069 "be more verbose (show symbol address, etc)"), 1618 "be more verbose (show symbol address, etc)"),
2070 OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, 1619 OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
2071 "dump raw trace in ASCII"), 1620 "dump raw trace in ASCII"),
2072 OPT_STRING('k', "vmlinux", &vmlinux, "file", "vmlinux pathname"), 1621 OPT_STRING('k', "vmlinux", &vmlinux_name, "file", "vmlinux pathname"),
2073 OPT_BOOLEAN('f', "force", &force, "don't complain, do it"), 1622 OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
2074 OPT_BOOLEAN('m', "modules", &modules, 1623 OPT_BOOLEAN('m', "modules", &modules,
2075 "load module symbols - WARNING: use only with -k and LIVE kernel"), 1624 "load module symbols - WARNING: use only with -k and LIVE kernel"),
2076 OPT_BOOLEAN('n', "show-nr-samples", &show_nr_samples, 1625 OPT_BOOLEAN('n', "show-nr-samples", &show_nr_samples,
2077 "Show a column with the number of samples"), 1626 "Show a column with the number of samples"),
1627 OPT_BOOLEAN('T', "threads", &show_threads,
1628 "Show per-thread event counters"),
1629 OPT_STRING(0, "pretty", &pretty_printing_style, "key",
1630 "pretty printing style key: normal raw"),
2078 OPT_STRING('s', "sort", &sort_order, "key[,key2...]", 1631 OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
2079 "sort by key(s): pid, comm, dso, symbol, parent"), 1632 "sort by key(s): pid, comm, dso, symbol, parent"),
2080 OPT_BOOLEAN('P', "full-paths", &full_paths, 1633 OPT_BOOLEAN('P', "full-paths", &full_paths,
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index b4b06c7903e1..61b828236c11 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -42,6 +42,8 @@
42#include "util/util.h" 42#include "util/util.h"
43#include "util/parse-options.h" 43#include "util/parse-options.h"
44#include "util/parse-events.h" 44#include "util/parse-events.h"
45#include "util/event.h"
46#include "util/debug.h"
45 47
46#include <sys/prctl.h> 48#include <sys/prctl.h>
47#include <math.h> 49#include <math.h>
@@ -60,10 +62,7 @@ static struct perf_counter_attr default_attrs[] = {
60 62
61}; 63};
62 64
63#define MAX_RUN 100
64
65static int system_wide = 0; 65static int system_wide = 0;
66static int verbose = 0;
67static unsigned int nr_cpus = 0; 66static unsigned int nr_cpus = 0;
68static int run_idx = 0; 67static int run_idx = 0;
69 68
@@ -75,26 +74,56 @@ static int null_run = 0;
75 74
76static int fd[MAX_NR_CPUS][MAX_COUNTERS]; 75static int fd[MAX_NR_CPUS][MAX_COUNTERS];
77 76
78static u64 runtime_nsecs[MAX_RUN]; 77static int event_scaled[MAX_COUNTERS];
79static u64 walltime_nsecs[MAX_RUN];
80static u64 runtime_cycles[MAX_RUN];
81 78
82static u64 event_res[MAX_RUN][MAX_COUNTERS][3]; 79struct stats
83static u64 event_scaled[MAX_RUN][MAX_COUNTERS]; 80{
81 double n, mean, M2;
82};
84 83
85static u64 event_res_avg[MAX_COUNTERS][3]; 84static void update_stats(struct stats *stats, u64 val)
86static u64 event_res_noise[MAX_COUNTERS][3]; 85{
86 double delta;
87 87
88static u64 event_scaled_avg[MAX_COUNTERS]; 88 stats->n++;
89 delta = val - stats->mean;
90 stats->mean += delta / stats->n;
91 stats->M2 += delta*(val - stats->mean);
92}
89 93
90static u64 runtime_nsecs_avg; 94static double avg_stats(struct stats *stats)
91static u64 runtime_nsecs_noise; 95{
96 return stats->mean;
97}
92 98
93static u64 walltime_nsecs_avg; 99/*
94static u64 walltime_nsecs_noise; 100 * http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
101 *
102 * (\Sum n_i^2) - ((\Sum n_i)^2)/n
103 * s^2 = -------------------------------
104 * n - 1
105 *
106 * http://en.wikipedia.org/wiki/Stddev
107 *
108 * The std dev of the mean is related to the std dev by:
109 *
110 * s
111 * s_mean = -------
112 * sqrt(n)
113 *
114 */
115static double stddev_stats(struct stats *stats)
116{
117 double variance = stats->M2 / (stats->n - 1);
118 double variance_mean = variance / stats->n;
119
120 return sqrt(variance_mean);
121}
95 122
96static u64 runtime_cycles_avg; 123struct stats event_res_stats[MAX_COUNTERS][3];
97static u64 runtime_cycles_noise; 124struct stats runtime_nsecs_stats;
125struct stats walltime_nsecs_stats;
126struct stats runtime_cycles_stats;
98 127
99#define MATCH_EVENT(t, c, counter) \ 128#define MATCH_EVENT(t, c, counter) \
100 (attrs[counter].type == PERF_TYPE_##t && \ 129 (attrs[counter].type == PERF_TYPE_##t && \
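
The new struct stats above implements Welford's online algorithm: update_stats() folds each sample into a running mean and M2 (the sum of squared deviations), avg_stats() returns the mean, and stddev_stats() turns M2 into the standard deviation of the mean, matching the formulas quoted in the comment. A minimal stand-alone sketch, mirroring the arithmetic above outside the perf build (compile with cc stats.c -lm):

/* Stand-alone sketch of the online mean / stddev-of-mean used by perf stat. */
#include <math.h>
#include <stdio.h>

struct stats {
	double n, mean, M2;
};

static void update_stats(struct stats *stats, double val)
{
	double delta;

	stats->n++;
	delta = val - stats->mean;
	stats->mean += delta / stats->n;
	stats->M2 += delta * (val - stats->mean);
}

static double avg_stats(struct stats *stats)
{
	return stats->mean;
}

/* stddev of the mean: sqrt(M2 / (n - 1)) / sqrt(n) */
static double stddev_stats(struct stats *stats)
{
	double variance = stats->M2 / (stats->n - 1);

	return sqrt(variance / stats->n);
}

int main(void)
{
	struct stats s = { 0, 0, 0 };
	double runs[] = { 1000000, 1010000, 990000, 1005000 };	/* e.g. ns per run */
	unsigned int i;

	for (i = 0; i < sizeof(runs) / sizeof(runs[0]); i++)
		update_stats(&s, runs[i]);

	printf("%.0f ( +- %.3f%% )\n", avg_stats(&s),
	       100 * stddev_stats(&s) / avg_stats(&s));
	return 0;
}
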
@@ -149,12 +178,11 @@ static inline int nsec_counter(int counter)
149 */ 178 */
150static void read_counter(int counter) 179static void read_counter(int counter)
151{ 180{
152 u64 *count, single_count[3]; 181 u64 count[3], single_count[3];
153 unsigned int cpu; 182 unsigned int cpu;
154 size_t res, nv; 183 size_t res, nv;
155 int scaled; 184 int scaled;
156 185 int i;
157 count = event_res[run_idx][counter];
158 186
159 count[0] = count[1] = count[2] = 0; 187 count[0] = count[1] = count[2] = 0;
160 188
@@ -179,24 +207,33 @@ static void read_counter(int counter)
179 scaled = 0; 207 scaled = 0;
180 if (scale) { 208 if (scale) {
181 if (count[2] == 0) { 209 if (count[2] == 0) {
182 event_scaled[run_idx][counter] = -1; 210 event_scaled[counter] = -1;
183 count[0] = 0; 211 count[0] = 0;
184 return; 212 return;
185 } 213 }
186 214
187 if (count[2] < count[1]) { 215 if (count[2] < count[1]) {
188 event_scaled[run_idx][counter] = 1; 216 event_scaled[counter] = 1;
189 count[0] = (unsigned long long) 217 count[0] = (unsigned long long)
190 ((double)count[0] * count[1] / count[2] + 0.5); 218 ((double)count[0] * count[1] / count[2] + 0.5);
191 } 219 }
192 } 220 }
221
222 for (i = 0; i < 3; i++)
223 update_stats(&event_res_stats[counter][i], count[i]);
224
225 if (verbose) {
226 fprintf(stderr, "%s: %Ld %Ld %Ld\n", event_name(counter),
227 count[0], count[1], count[2]);
228 }
229
193 /* 230 /*
194 * Save the full runtime - to allow normalization during printout: 231 * Save the full runtime - to allow normalization during printout:
195 */ 232 */
196 if (MATCH_EVENT(SOFTWARE, SW_TASK_CLOCK, counter)) 233 if (MATCH_EVENT(SOFTWARE, SW_TASK_CLOCK, counter))
197 runtime_nsecs[run_idx] = count[0]; 234 update_stats(&runtime_nsecs_stats, count[0]);
198 if (MATCH_EVENT(HARDWARE, HW_CPU_CYCLES, counter)) 235 if (MATCH_EVENT(HARDWARE, HW_CPU_CYCLES, counter))
199 runtime_cycles[run_idx] = count[0]; 236 update_stats(&runtime_cycles_stats, count[0]);
200} 237}
201 238
202static int run_perf_stat(int argc __used, const char **argv) 239static int run_perf_stat(int argc __used, const char **argv)
@@ -270,7 +307,7 @@ static int run_perf_stat(int argc __used, const char **argv)
270 307
271 t1 = rdclock(); 308 t1 = rdclock();
272 309
273 walltime_nsecs[run_idx] = t1 - t0; 310 update_stats(&walltime_nsecs_stats, t1 - t0);
274 311
275 for (counter = 0; counter < nr_counters; counter++) 312 for (counter = 0; counter < nr_counters; counter++)
276 read_counter(counter); 313 read_counter(counter);
@@ -278,42 +315,38 @@ static int run_perf_stat(int argc __used, const char **argv)
278 return WEXITSTATUS(status); 315 return WEXITSTATUS(status);
279} 316}
280 317
281static void print_noise(u64 *count, u64 *noise) 318static void print_noise(int counter, double avg)
282{ 319{
283 if (run_count > 1) 320 if (run_count == 1)
284 fprintf(stderr, " ( +- %7.3f%% )", 321 return;
285 (double)noise[0]/(count[0]+1)*100.0); 322
323 fprintf(stderr, " ( +- %7.3f%% )",
324 100 * stddev_stats(&event_res_stats[counter][0]) / avg);
286} 325}
287 326
288static void nsec_printout(int counter, u64 *count, u64 *noise) 327static void nsec_printout(int counter, double avg)
289{ 328{
290 double msecs = (double)count[0] / 1000000; 329 double msecs = avg / 1e6;
291 330
292 fprintf(stderr, " %14.6f %-24s", msecs, event_name(counter)); 331 fprintf(stderr, " %14.6f %-24s", msecs, event_name(counter));
293 332
294 if (MATCH_EVENT(SOFTWARE, SW_TASK_CLOCK, counter)) { 333 if (MATCH_EVENT(SOFTWARE, SW_TASK_CLOCK, counter)) {
295 if (walltime_nsecs_avg) 334 fprintf(stderr, " # %10.3f CPUs ",
296 fprintf(stderr, " # %10.3f CPUs ", 335 avg / avg_stats(&walltime_nsecs_stats));
297 (double)count[0] / (double)walltime_nsecs_avg);
298 } 336 }
299 print_noise(count, noise);
300} 337}
301 338
302static void abs_printout(int counter, u64 *count, u64 *noise) 339static void abs_printout(int counter, double avg)
303{ 340{
304 fprintf(stderr, " %14Ld %-24s", count[0], event_name(counter)); 341 fprintf(stderr, " %14.0f %-24s", avg, event_name(counter));
305 342
306 if (runtime_cycles_avg && 343 if (MATCH_EVENT(HARDWARE, HW_INSTRUCTIONS, counter)) {
307 MATCH_EVENT(HARDWARE, HW_INSTRUCTIONS, counter)) {
308 fprintf(stderr, " # %10.3f IPC ", 344 fprintf(stderr, " # %10.3f IPC ",
309 (double)count[0] / (double)runtime_cycles_avg); 345 avg / avg_stats(&runtime_cycles_stats));
310 } else { 346 } else {
311 if (runtime_nsecs_avg) { 347 fprintf(stderr, " # %10.3f M/sec",
312 fprintf(stderr, " # %10.3f M/sec", 348 1000.0 * avg / avg_stats(&runtime_nsecs_stats));
313 (double)count[0]/runtime_nsecs_avg*1000.0);
314 }
315 } 349 }
316 print_noise(count, noise);
317} 350}
318 351
319/* 352/*
@@ -321,12 +354,8 @@ static void abs_printout(int counter, u64 *count, u64 *noise)
321 */ 354 */
322static void print_counter(int counter) 355static void print_counter(int counter)
323{ 356{
324 u64 *count, *noise; 357 double avg = avg_stats(&event_res_stats[counter][0]);
325 int scaled; 358 int scaled = event_scaled[counter];
326
327 count = event_res_avg[counter];
328 noise = event_res_noise[counter];
329 scaled = event_scaled_avg[counter];
330 359
331 if (scaled == -1) { 360 if (scaled == -1) {
332 fprintf(stderr, " %14s %-24s\n", 361 fprintf(stderr, " %14s %-24s\n",
@@ -335,110 +364,29 @@ static void print_counter(int counter)
335 } 364 }
336 365
337 if (nsec_counter(counter)) 366 if (nsec_counter(counter))
338 nsec_printout(counter, count, noise); 367 nsec_printout(counter, avg);
339 else 368 else
340 abs_printout(counter, count, noise); 369 abs_printout(counter, avg);
341
342 if (scaled)
343 fprintf(stderr, " (scaled from %.2f%%)",
344 (double) count[2] / count[1] * 100);
345
346 fprintf(stderr, "\n");
347}
348 370
349/* 371 print_noise(counter, avg);
350 * normalize_noise noise values down to stddev:
351 */
352static void normalize_noise(u64 *val)
353{
354 double res;
355 372
356 res = (double)*val / (run_count * sqrt((double)run_count)); 373 if (scaled) {
374 double avg_enabled, avg_running;
357 375
358 *val = (u64)res; 376 avg_enabled = avg_stats(&event_res_stats[counter][1]);
359} 377 avg_running = avg_stats(&event_res_stats[counter][2]);
360 378
361static void update_avg(const char *name, int idx, u64 *avg, u64 *val) 379 fprintf(stderr, " (scaled from %.2f%%)",
362{ 380 100 * avg_running / avg_enabled);
363 *avg += *val;
364
365 if (verbose > 1)
366 fprintf(stderr, "debug: %20s[%d]: %Ld\n", name, idx, *val);
367}
368/*
369 * Calculate the averages and noises:
370 */
371static void calc_avg(void)
372{
373 int i, j;
374
375 if (verbose > 1)
376 fprintf(stderr, "\n");
377
378 for (i = 0; i < run_count; i++) {
379 update_avg("runtime", 0, &runtime_nsecs_avg, runtime_nsecs + i);
380 update_avg("walltime", 0, &walltime_nsecs_avg, walltime_nsecs + i);
381 update_avg("runtime_cycles", 0, &runtime_cycles_avg, runtime_cycles + i);
382
383 for (j = 0; j < nr_counters; j++) {
384 update_avg("counter/0", j,
385 event_res_avg[j]+0, event_res[i][j]+0);
386 update_avg("counter/1", j,
387 event_res_avg[j]+1, event_res[i][j]+1);
388 update_avg("counter/2", j,
389 event_res_avg[j]+2, event_res[i][j]+2);
390 if (event_scaled[i][j] != (u64)-1)
391 update_avg("scaled", j,
392 event_scaled_avg + j, event_scaled[i]+j);
393 else
394 event_scaled_avg[j] = -1;
395 }
396 }
397 runtime_nsecs_avg /= run_count;
398 walltime_nsecs_avg /= run_count;
399 runtime_cycles_avg /= run_count;
400
401 for (j = 0; j < nr_counters; j++) {
402 event_res_avg[j][0] /= run_count;
403 event_res_avg[j][1] /= run_count;
404 event_res_avg[j][2] /= run_count;
405 }
406
407 for (i = 0; i < run_count; i++) {
408 runtime_nsecs_noise +=
409 abs((s64)(runtime_nsecs[i] - runtime_nsecs_avg));
410 walltime_nsecs_noise +=
411 abs((s64)(walltime_nsecs[i] - walltime_nsecs_avg));
412 runtime_cycles_noise +=
413 abs((s64)(runtime_cycles[i] - runtime_cycles_avg));
414
415 for (j = 0; j < nr_counters; j++) {
416 event_res_noise[j][0] +=
417 abs((s64)(event_res[i][j][0] - event_res_avg[j][0]));
418 event_res_noise[j][1] +=
419 abs((s64)(event_res[i][j][1] - event_res_avg[j][1]));
420 event_res_noise[j][2] +=
421 abs((s64)(event_res[i][j][2] - event_res_avg[j][2]));
422 }
423 } 381 }
424 382
425 normalize_noise(&runtime_nsecs_noise); 383 fprintf(stderr, "\n");
426 normalize_noise(&walltime_nsecs_noise);
427 normalize_noise(&runtime_cycles_noise);
428
429 for (j = 0; j < nr_counters; j++) {
430 normalize_noise(&event_res_noise[j][0]);
431 normalize_noise(&event_res_noise[j][1]);
432 normalize_noise(&event_res_noise[j][2]);
433 }
434} 384}
435 385
436static void print_stat(int argc, const char **argv) 386static void print_stat(int argc, const char **argv)
437{ 387{
438 int i, counter; 388 int i, counter;
439 389
440 calc_avg();
441
442 fflush(stdout); 390 fflush(stdout);
443 391
444 fprintf(stderr, "\n"); 392 fprintf(stderr, "\n");
@@ -457,10 +405,11 @@ static void print_stat(int argc, const char **argv)
457 405
458 fprintf(stderr, "\n"); 406 fprintf(stderr, "\n");
459 fprintf(stderr, " %14.9f seconds time elapsed", 407 fprintf(stderr, " %14.9f seconds time elapsed",
460 (double)walltime_nsecs_avg/1e9); 408 avg_stats(&walltime_nsecs_stats)/1e9);
461 if (run_count > 1) { 409 if (run_count > 1) {
462 fprintf(stderr, " ( +- %7.3f%% )", 410 fprintf(stderr, " ( +- %7.3f%% )",
463 100.0*(double)walltime_nsecs_noise/(double)walltime_nsecs_avg); 411 100*stddev_stats(&walltime_nsecs_stats) /
412 avg_stats(&walltime_nsecs_stats));
464 } 413 }
465 fprintf(stderr, "\n\n"); 414 fprintf(stderr, "\n\n");
466} 415}
@@ -515,7 +464,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
515 PARSE_OPT_STOP_AT_NON_OPTION); 464 PARSE_OPT_STOP_AT_NON_OPTION);
516 if (!argc) 465 if (!argc)
517 usage_with_options(stat_usage, options); 466 usage_with_options(stat_usage, options);
518 if (run_count <= 0 || run_count > MAX_RUN) 467 if (run_count <= 0)
519 usage_with_options(stat_usage, options); 468 usage_with_options(stat_usage, options);
520 469
521 /* Set attrs and nr_counters if no event is selected and !null_run */ 470 /* Set attrs and nr_counters if no event is selected and !null_run */
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 7de28ce9ca26..4002ccb36750 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -27,6 +27,8 @@
27#include "util/parse-options.h" 27#include "util/parse-options.h"
28#include "util/parse-events.h" 28#include "util/parse-events.h"
29 29
30#include "util/debug.h"
31
30#include <assert.h> 32#include <assert.h>
31#include <fcntl.h> 33#include <fcntl.h>
32 34
@@ -68,8 +70,6 @@ static int group = 0;
68static unsigned int page_size; 70static unsigned int page_size;
69static unsigned int mmap_pages = 16; 71static unsigned int mmap_pages = 16;
70static int freq = 0; 72static int freq = 0;
71static int verbose = 0;
72static char *vmlinux = NULL;
73 73
74static int delay_secs = 2; 74static int delay_secs = 2;
75static int zero; 75static int zero;
@@ -122,7 +122,8 @@ static void parse_source(struct sym_entry *syme)
122 struct module *module; 122 struct module *module;
123 struct section *section = NULL; 123 struct section *section = NULL;
124 FILE *file; 124 FILE *file;
125 char command[PATH_MAX*2], *path = vmlinux; 125 char command[PATH_MAX*2];
126 const char *path = vmlinux_name;
126 u64 start, end, len; 127 u64 start, end, len;
127 128
128 if (!syme) 129 if (!syme)
@@ -338,8 +339,6 @@ static void show_details(struct sym_entry *syme)
338 printf("%d lines not displayed, maybe increase display entries [e]\n", more); 339 printf("%d lines not displayed, maybe increase display entries [e]\n", more);
339} 340}
340 341
341struct dso *kernel_dso;
342
343/* 342/*
344 * Symbols will be added here in record_ip and will get out 343 * Symbols will be added here in record_ip and will get out
345 * after decayed. 344 * after decayed.
@@ -484,17 +483,24 @@ static void print_sym_table(void)
484 if (nr_counters == 1) 483 if (nr_counters == 1)
485 printf(" samples pcnt"); 484 printf(" samples pcnt");
486 else 485 else
487 printf(" weight samples pcnt"); 486 printf(" weight samples pcnt");
488 487
489 printf(" RIP kernel function\n" 488 if (verbose)
490 " ______ _______ _____ ________________ _______________\n\n" 489 printf(" RIP ");
491 ); 490 printf(" kernel function\n");
491 printf(" %s _______ _____",
492 nr_counters == 1 ? " " : "______");
493 if (verbose)
494 printf(" ________________");
495 printf(" _______________\n\n");
492 496
493 for (nd = rb_first(&tmp); nd; nd = rb_next(nd)) { 497 for (nd = rb_first(&tmp); nd; nd = rb_next(nd)) {
494 struct sym_entry *syme = rb_entry(nd, struct sym_entry, rb_node); 498 struct symbol *sym;
495 struct symbol *sym = (struct symbol *)(syme + 1);
496 double pcnt; 499 double pcnt;
497 500
501 syme = rb_entry(nd, struct sym_entry, rb_node);
502 sym = (struct symbol *)(syme + 1);
503
498 if (++printed > print_entries || (int)syme->snap_count < count_filter) 504 if (++printed > print_entries || (int)syme->snap_count < count_filter)
499 continue; 505 continue;
500 506
@@ -507,7 +513,9 @@ static void print_sym_table(void)
507 printf("%9.1f %10ld - ", syme->weight, syme->snap_count); 513 printf("%9.1f %10ld - ", syme->weight, syme->snap_count);
508 514
509 percent_color_fprintf(stdout, "%4.1f%%", pcnt); 515 percent_color_fprintf(stdout, "%4.1f%%", pcnt);
510 printf(" - %016llx : %s", sym->start, sym->name); 516 if (verbose)
517 printf(" - %016llx", sym->start);
518 printf(" : %s", sym->name);
511 if (sym->module) 519 if (sym->module)
512 printf("\t[%s]", sym->module->name); 520 printf("\t[%s]", sym->module->name);
513 printf("\n"); 521 printf("\n");
@@ -613,7 +621,7 @@ static void print_mapped_keys(void)
613 621
614 fprintf(stdout, "\t[f] profile display filter (count). \t(%d)\n", count_filter); 622 fprintf(stdout, "\t[f] profile display filter (count). \t(%d)\n", count_filter);
615 623
616 if (vmlinux) { 624 if (vmlinux_name) {
617 fprintf(stdout, "\t[F] annotate display filter (percent). \t(%d%%)\n", sym_pcnt_filter); 625 fprintf(stdout, "\t[F] annotate display filter (percent). \t(%d%%)\n", sym_pcnt_filter);
618 fprintf(stdout, "\t[s] annotate symbol. \t(%s)\n", name?: "NULL"); 626 fprintf(stdout, "\t[s] annotate symbol. \t(%s)\n", name?: "NULL");
619 fprintf(stdout, "\t[S] stop annotation.\n"); 627 fprintf(stdout, "\t[S] stop annotation.\n");
@@ -642,7 +650,9 @@ static int key_mapped(int c)
642 case 'F': 650 case 'F':
643 case 's': 651 case 's':
644 case 'S': 652 case 'S':
645 return vmlinux ? 1 : 0; 653 return vmlinux_name ? 1 : 0;
654 default:
655 break;
646 } 656 }
647 657
648 return 0; 658 return 0;
@@ -728,6 +738,8 @@ static void handle_keypress(int c)
728 case 'z': 738 case 'z':
729 zero = ~zero; 739 zero = ~zero;
730 break; 740 break;
741 default:
742 break;
731 } 743 }
732} 744}
733 745
@@ -816,13 +828,13 @@ static int parse_symbols(void)
816{ 828{
817 struct rb_node *node; 829 struct rb_node *node;
818 struct symbol *sym; 830 struct symbol *sym;
819 int modules = vmlinux ? 1 : 0; 831 int use_modules = vmlinux_name ? 1 : 0;
820 832
821 kernel_dso = dso__new("[kernel]", sizeof(struct sym_entry)); 833 kernel_dso = dso__new("[kernel]", sizeof(struct sym_entry));
822 if (kernel_dso == NULL) 834 if (kernel_dso == NULL)
823 return -1; 835 return -1;
824 836
825 if (dso__load_kernel(kernel_dso, vmlinux, symbol_filter, verbose, modules) <= 0) 837 if (dso__load_kernel(kernel_dso, vmlinux_name, symbol_filter, verbose, use_modules) <= 0)
826 goto out_delete_dso; 838 goto out_delete_dso;
827 839
828 node = rb_first(&kernel_dso->syms); 840 node = rb_first(&kernel_dso->syms);
@@ -937,26 +949,6 @@ static void mmap_read_counter(struct mmap_data *md)
937 last_read = this_read; 949 last_read = this_read;
938 950
939 for (; old != head;) { 951 for (; old != head;) {
940 struct ip_event {
941 struct perf_event_header header;
942 u64 ip;
943 u32 pid, target_pid;
944 };
945 struct mmap_event {
946 struct perf_event_header header;
947 u32 pid, target_pid;
948 u64 start;
949 u64 len;
950 u64 pgoff;
951 char filename[PATH_MAX];
952 };
953
954 typedef union event_union {
955 struct perf_event_header header;
956 struct ip_event ip;
957 struct mmap_event mmap;
958 } event_t;
959
960 event_t *event = (event_t *)&data[old & md->mask]; 952 event_t *event = (event_t *)&data[old & md->mask];
961 953
962 event_t event_copy; 954 event_t event_copy;
@@ -1138,7 +1130,7 @@ static const struct option options[] = {
1138 "system-wide collection from all CPUs"), 1130 "system-wide collection from all CPUs"),
1139 OPT_INTEGER('C', "CPU", &profile_cpu, 1131 OPT_INTEGER('C', "CPU", &profile_cpu,
1140 "CPU to profile on"), 1132 "CPU to profile on"),
1141 OPT_STRING('k', "vmlinux", &vmlinux, "file", "vmlinux pathname"), 1133 OPT_STRING('k', "vmlinux", &vmlinux_name, "file", "vmlinux pathname"),
1142 OPT_INTEGER('m', "mmap-pages", &mmap_pages, 1134 OPT_INTEGER('m', "mmap-pages", &mmap_pages,
1143 "number of mmap data pages"), 1135 "number of mmap data pages"),
1144 OPT_INTEGER('r', "realtime", &realtime_prio, 1136 OPT_INTEGER('r', "realtime", &realtime_prio,
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
new file mode 100644
index 000000000000..914ab366e369
--- /dev/null
+++ b/tools/perf/builtin-trace.c
@@ -0,0 +1,297 @@
1#include "builtin.h"
2
3#include "util/util.h"
4#include "util/cache.h"
5#include "util/symbol.h"
6#include "util/thread.h"
7#include "util/header.h"
8
9#include "util/parse-options.h"
10
11#include "perf.h"
12#include "util/debug.h"
13
14#include "util/trace-event.h"
15
16static char const *input_name = "perf.data";
17static int input;
18static unsigned long page_size;
19static unsigned long mmap_window = 32;
20
21static unsigned long total = 0;
22static unsigned long total_comm = 0;
23
24static struct rb_root threads;
25static struct thread *last_match;
26
27static struct perf_header *header;
28static u64 sample_type;
29
30
31static int
32process_comm_event(event_t *event, unsigned long offset, unsigned long head)
33{
34 struct thread *thread;
35
36 thread = threads__findnew(event->comm.pid, &threads, &last_match);
37
38 dump_printf("%p [%p]: PERF_EVENT_COMM: %s:%d\n",
39 (void *)(offset + head),
40 (void *)(long)(event->header.size),
41 event->comm.comm, event->comm.pid);
42
43 if (thread == NULL ||
44 thread__set_comm(thread, event->comm.comm)) {
45 dump_printf("problem processing PERF_EVENT_COMM, skipping event.\n");
46 return -1;
47 }
48 total_comm++;
49
50 return 0;
51}
52
53static int
54process_sample_event(event_t *event, unsigned long offset, unsigned long head)
55{
56 char level;
57 int show = 0;
58 struct dso *dso = NULL;
59 struct thread *thread;
60 u64 ip = event->ip.ip;
61 u64 timestamp = -1;
62 u32 cpu = -1;
63 u64 period = 1;
64 void *more_data = event->ip.__more_data;
65 int cpumode;
66
67 thread = threads__findnew(event->ip.pid, &threads, &last_match);
68
69 if (sample_type & PERF_SAMPLE_TIME) {
70 timestamp = *(u64 *)more_data;
71 more_data += sizeof(u64);
72 }
73
74 if (sample_type & PERF_SAMPLE_CPU) {
75 cpu = *(u32 *)more_data;
76 more_data += sizeof(u32);
77 more_data += sizeof(u32); /* reserved */
78 }
79
80 if (sample_type & PERF_SAMPLE_PERIOD) {
81 period = *(u64 *)more_data;
82 more_data += sizeof(u64);
83 }
84
85 dump_printf("%p [%p]: PERF_EVENT_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n",
86 (void *)(offset + head),
87 (void *)(long)(event->header.size),
88 event->header.misc,
89 event->ip.pid, event->ip.tid,
90 (void *)(long)ip,
91 (long long)period);
92
93 dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
94
95 if (thread == NULL) {
96 eprintf("problem processing %d event, skipping it.\n",
97 event->header.type);
98 return -1;
99 }
100
101 cpumode = event->header.misc & PERF_EVENT_MISC_CPUMODE_MASK;
102
103 if (cpumode == PERF_EVENT_MISC_KERNEL) {
104 show = SHOW_KERNEL;
105 level = 'k';
106
107 dso = kernel_dso;
108
109 dump_printf(" ...... dso: %s\n", dso->name);
110
111 } else if (cpumode == PERF_EVENT_MISC_USER) {
112
113 show = SHOW_USER;
114 level = '.';
115
116 } else {
117 show = SHOW_HV;
118 level = 'H';
119
120 dso = hypervisor_dso;
121
122 dump_printf(" ...... dso: [hypervisor]\n");
123 }
124
125 if (sample_type & PERF_SAMPLE_RAW) {
126 struct {
127 u32 size;
128 char data[0];
129 } *raw = more_data;
130
131 /*
132 * FIXME: better resolve from pid from the struct trace_entry
133 * field, although it should be the same than this perf
134 * event pid
135 */
136 print_event(cpu, raw->data, raw->size, timestamp, thread->comm);
137 }
138 total += period;
139
140 return 0;
141}
142
143static int
144process_event(event_t *event, unsigned long offset, unsigned long head)
145{
146 trace_event(event);
147
148 switch (event->header.type) {
149 case PERF_EVENT_MMAP ... PERF_EVENT_LOST:
150 return 0;
151
152 case PERF_EVENT_COMM:
153 return process_comm_event(event, offset, head);
154
155 case PERF_EVENT_EXIT ... PERF_EVENT_READ:
156 return 0;
157
158 case PERF_EVENT_SAMPLE:
159 return process_sample_event(event, offset, head);
160
161 case PERF_EVENT_MAX:
162 default:
163 return -1;
164 }
165
166 return 0;
167}
168
169static int __cmd_trace(void)
170{
171 int ret, rc = EXIT_FAILURE;
172 unsigned long offset = 0;
173 unsigned long head = 0;
174 struct stat perf_stat;
175 event_t *event;
176 uint32_t size;
177 char *buf;
178
179 trace_report();
180 register_idle_thread(&threads, &last_match);
181
182 input = open(input_name, O_RDONLY);
183 if (input < 0) {
184 perror("failed to open file");
185 exit(-1);
186 }
187
188 ret = fstat(input, &perf_stat);
189 if (ret < 0) {
190 perror("failed to stat file");
191 exit(-1);
192 }
193
194 if (!perf_stat.st_size) {
195 fprintf(stderr, "zero-sized file, nothing to do!\n");
196 exit(0);
197 }
198 header = perf_header__read(input);
199 head = header->data_offset;
200 sample_type = perf_header__sample_type(header);
201
202 if (!(sample_type & PERF_SAMPLE_RAW))
203 die("No trace sample to read. Did you call perf record "
204 "without -R?");
205
206 if (load_kernel() < 0) {
207 perror("failed to load kernel symbols");
208 return EXIT_FAILURE;
209 }
210
211remap:
212 buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ,
213 MAP_SHARED, input, offset);
214 if (buf == MAP_FAILED) {
215 perror("failed to mmap file");
216 exit(-1);
217 }
218
219more:
220 event = (event_t *)(buf + head);
221
222 size = event->header.size;
223 if (!size)
224 size = 8;
225
226 if (head + event->header.size >= page_size * mmap_window) {
227 unsigned long shift = page_size * (head / page_size);
228 int res;
229
230 res = munmap(buf, page_size * mmap_window);
231 assert(res == 0);
232
233 offset += shift;
234 head -= shift;
235 goto remap;
236 }
237
238 size = event->header.size;
239
240
241 if (!size || process_event(event, offset, head) < 0) {
242
243 /*
244 * assume we lost track of the stream, check alignment, and
245 * increment a single u64 in the hope to catch on again 'soon'.
246 */
247
248 if (unlikely(head & 7))
249 head &= ~7ULL;
250
251 size = 8;
252 }
253
254 head += size;
255
256 if (offset + head < (unsigned long)perf_stat.st_size)
257 goto more;
258
259 rc = EXIT_SUCCESS;
260 close(input);
261
262 return rc;
263}
264
265static const char * const annotate_usage[] = {
266 "perf trace [<options>] <command>",
267 NULL
268};
269
270static const struct option options[] = {
271 OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
272 "dump raw trace in ASCII"),
273 OPT_BOOLEAN('v', "verbose", &verbose,
274 "be more verbose (show symbol address, etc)"),
275 OPT_END()
276};
277
278int cmd_trace(int argc, const char **argv, const char *prefix __used)
279{
280 symbol__init();
281 page_size = getpagesize();
282
283 argc = parse_options(argc, argv, options, annotate_usage, 0);
284 if (argc) {
285 /*
286 * Special case: if there's an argument left then assume that
287 * it's a symbol filter:
288 */
289 if (argc > 1)
290 usage_with_options(annotate_usage, options);
291 }
292
293
294 setup_pager();
295
296 return __cmd_trace();
297}
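
Note: the __cmd_trace() loop above maps perf.data through a sliding window of mmap_window pages and, whenever the next event would cross the window end, unmaps and re-maps at a page-aligned offset before continuing. The following is a minimal stand-alone sketch of that same remap/more pattern; the record layout and file name are illustrative only (the real perf_event_header/event_t types live in the tree), and it inherits the loop's assumption that the file contains whole, size-prefixed records.

/* Sliding-window reader sketch; mirrors the remap:/more: loop above. */
#include <sys/mman.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>

struct rec_header {		/* illustrative size-prefixed record header */
	uint32_t type;
	uint16_t misc;
	uint16_t size;
};

int main(int argc, char **argv)
{
	unsigned long page_size = getpagesize();
	unsigned long window = 32 * page_size;	/* like mmap_window pages */
	unsigned long offset = 0, head = 0;
	struct stat st;
	char *buf;
	int fd;

	fd = open(argc > 1 ? argv[1] : "perf.data", O_RDONLY);
	if (fd < 0 || fstat(fd, &st) < 0)
		return 1;

	buf = mmap(NULL, window, PROT_READ, MAP_SHARED, fd, offset);
	if (buf == MAP_FAILED)
		return 1;

	while (offset + head < (unsigned long)st.st_size) {
		struct rec_header *h = (struct rec_header *)(buf + head);
		uint16_t size = h->size ? h->size : 8;

		if (head + size >= window) {
			/* mmap offsets must stay page aligned, so shift by whole pages */
			unsigned long shift = page_size * (head / page_size);

			munmap(buf, window);
			offset += shift;
			head -= shift;
			buf = mmap(NULL, window, PROT_READ, MAP_SHARED, fd, offset);
			if (buf == MAP_FAILED)
				return 1;
			continue;
		}

		printf("record type %u, %u bytes at offset %lu\n",
		       (unsigned)h->type, (unsigned)size, offset + head);
		head += size;
	}

	close(fd);
	return 0;
}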
diff --git a/tools/perf/builtin.h b/tools/perf/builtin.h
index 51d168230ee7..3a63e41fb44e 100644
--- a/tools/perf/builtin.h
+++ b/tools/perf/builtin.h
@@ -22,5 +22,6 @@ extern int cmd_stat(int argc, const char **argv, const char *prefix);
22extern int cmd_top(int argc, const char **argv, const char *prefix); 22extern int cmd_top(int argc, const char **argv, const char *prefix);
23extern int cmd_version(int argc, const char **argv, const char *prefix); 23extern int cmd_version(int argc, const char **argv, const char *prefix);
24extern int cmd_list(int argc, const char **argv, const char *prefix); 24extern int cmd_list(int argc, const char **argv, const char *prefix);
25extern int cmd_trace(int argc, const char **argv, const char *prefix);
25 26
26#endif 27#endif
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index 31982ad064b4..fe4589dde950 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -292,6 +292,7 @@ static void handle_internal_command(int argc, const char **argv)
292 { "top", cmd_top, 0 }, 292 { "top", cmd_top, 0 },
293 { "annotate", cmd_annotate, 0 }, 293 { "annotate", cmd_annotate, 0 },
294 { "version", cmd_version, 0 }, 294 { "version", cmd_version, 0 },
295 { "trace", cmd_trace, 0 },
295 }; 296 };
296 unsigned int i; 297 unsigned int i;
297 static const char ext[] = STRIP_EXTENSION; 298 static const char ext[] = STRIP_EXTENSION;
diff --git a/tools/perf/util/abspath.c b/tools/perf/util/abspath.c
index 61d33b81fc97..a791dd467261 100644
--- a/tools/perf/util/abspath.c
+++ b/tools/perf/util/abspath.c
@@ -50,7 +50,8 @@ const char *make_absolute_path(const char *path)
50 die ("Could not get current working directory"); 50 die ("Could not get current working directory");
51 51
52 if (last_elem) { 52 if (last_elem) {
53 int len = strlen(buf); 53 len = strlen(buf);
54
54 if (len + strlen(last_elem) + 2 > PATH_MAX) 55 if (len + strlen(last_elem) + 2 > PATH_MAX)
55 die ("Too long path name: '%s/%s'", 56 die ("Too long path name: '%s/%s'",
56 buf, last_elem); 57 buf, last_elem);
diff --git a/tools/perf/util/cache.h b/tools/perf/util/cache.h
index 4b50c412b9c5..6f8ea9d210b6 100644
--- a/tools/perf/util/cache.h
+++ b/tools/perf/util/cache.h
@@ -52,7 +52,6 @@ extern const char *perf_mailmap_file;
52extern void maybe_flush_or_die(FILE *, const char *); 52extern void maybe_flush_or_die(FILE *, const char *);
53extern int copy_fd(int ifd, int ofd); 53extern int copy_fd(int ifd, int ofd);
54extern int copy_file(const char *dst, const char *src, int mode); 54extern int copy_file(const char *dst, const char *src, int mode);
55extern ssize_t read_in_full(int fd, void *buf, size_t count);
56extern ssize_t write_in_full(int fd, const void *buf, size_t count); 55extern ssize_t write_in_full(int fd, const void *buf, size_t count);
57extern void write_or_die(int fd, const void *buf, size_t count); 56extern void write_or_die(int fd, const void *buf, size_t count);
58extern int write_or_whine(int fd, const void *buf, size_t count, const char *msg); 57extern int write_or_whine(int fd, const void *buf, size_t count, const char *msg);
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index 011473411642..3b8380f1b478 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -50,6 +50,7 @@ rb_insert_callchain(struct rb_root *root, struct callchain_node *chain,
50 else 50 else
51 p = &(*p)->rb_right; 51 p = &(*p)->rb_right;
52 break; 52 break;
53 case CHAIN_NONE:
53 default: 54 default:
54 break; 55 break;
55 } 56 }
@@ -143,6 +144,7 @@ int register_callchain_param(struct callchain_param *param)
143 case CHAIN_FLAT: 144 case CHAIN_FLAT:
144 param->sort = sort_chain_flat; 145 param->sort = sort_chain_flat;
145 break; 146 break;
147 case CHAIN_NONE:
146 default: 148 default:
147 return -1; 149 return -1;
148 } 150 }
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index a926ae4f5a16..43cf3ea9e088 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -4,6 +4,7 @@
4#include "../perf.h" 4#include "../perf.h"
5#include <linux/list.h> 5#include <linux/list.h>
6#include <linux/rbtree.h> 6#include <linux/rbtree.h>
7#include "util.h"
7#include "symbol.h" 8#include "symbol.h"
8 9
9enum chain_mode { 10enum chain_mode {
diff --git a/tools/perf/util/color.c b/tools/perf/util/color.c
index 90a044d1fe7d..e88bca55a599 100644
--- a/tools/perf/util/color.c
+++ b/tools/perf/util/color.c
@@ -166,7 +166,7 @@ int perf_color_default_config(const char *var, const char *value, void *cb)
166 return perf_default_config(var, value, cb); 166 return perf_default_config(var, value, cb);
167} 167}
168 168
169static int color_vfprintf(FILE *fp, const char *color, const char *fmt, 169static int __color_vfprintf(FILE *fp, const char *color, const char *fmt,
170 va_list args, const char *trail) 170 va_list args, const char *trail)
171{ 171{
172 int r = 0; 172 int r = 0;
@@ -191,6 +191,10 @@ static int color_vfprintf(FILE *fp, const char *color, const char *fmt,
191 return r; 191 return r;
192} 192}
193 193
194int color_vfprintf(FILE *fp, const char *color, const char *fmt, va_list args)
195{
196 return __color_vfprintf(fp, color, fmt, args, NULL);
197}
194 198
195 199
196int color_fprintf(FILE *fp, const char *color, const char *fmt, ...) 200int color_fprintf(FILE *fp, const char *color, const char *fmt, ...)
@@ -199,7 +203,7 @@ int color_fprintf(FILE *fp, const char *color, const char *fmt, ...)
199 int r; 203 int r;
200 204
201 va_start(args, fmt); 205 va_start(args, fmt);
202 r = color_vfprintf(fp, color, fmt, args, NULL); 206 r = color_vfprintf(fp, color, fmt, args);
203 va_end(args); 207 va_end(args);
204 return r; 208 return r;
205} 209}
@@ -209,7 +213,7 @@ int color_fprintf_ln(FILE *fp, const char *color, const char *fmt, ...)
209 va_list args; 213 va_list args;
210 int r; 214 int r;
211 va_start(args, fmt); 215 va_start(args, fmt);
212 r = color_vfprintf(fp, color, fmt, args, "\n"); 216 r = __color_vfprintf(fp, color, fmt, args, "\n");
213 va_end(args); 217 va_end(args);
214 return r; 218 return r;
215} 219}
@@ -242,9 +246,9 @@ int color_fwrite_lines(FILE *fp, const char *color,
242 return 0; 246 return 0;
243} 247}
244 248
245char *get_percent_color(double percent) 249const char *get_percent_color(double percent)
246{ 250{
247 char *color = PERF_COLOR_NORMAL; 251 const char *color = PERF_COLOR_NORMAL;
248 252
249 /* 253 /*
250 * We color high-overhead entries in red, mid-overhead 254 * We color high-overhead entries in red, mid-overhead
@@ -263,7 +267,7 @@ char *get_percent_color(double percent)
263int percent_color_fprintf(FILE *fp, const char *fmt, double percent) 267int percent_color_fprintf(FILE *fp, const char *fmt, double percent)
264{ 268{
265 int r; 269 int r;
266 char *color; 270 const char *color;
267 271
268 color = get_percent_color(percent); 272 color = get_percent_color(percent);
269 r = color_fprintf(fp, color, fmt, percent); 273 r = color_fprintf(fp, color, fmt, percent);
diff --git a/tools/perf/util/color.h b/tools/perf/util/color.h
index 706cec50bd25..58d597564b99 100644
--- a/tools/perf/util/color.h
+++ b/tools/perf/util/color.h
@@ -32,10 +32,11 @@ int perf_color_default_config(const char *var, const char *value, void *cb);
32int perf_config_colorbool(const char *var, const char *value, int stdout_is_tty); 32int perf_config_colorbool(const char *var, const char *value, int stdout_is_tty);
33void color_parse(const char *value, const char *var, char *dst); 33void color_parse(const char *value, const char *var, char *dst);
34void color_parse_mem(const char *value, int len, const char *var, char *dst); 34void color_parse_mem(const char *value, int len, const char *var, char *dst);
35int color_vfprintf(FILE *fp, const char *color, const char *fmt, va_list args);
35int color_fprintf(FILE *fp, const char *color, const char *fmt, ...); 36int color_fprintf(FILE *fp, const char *color, const char *fmt, ...);
36int color_fprintf_ln(FILE *fp, const char *color, const char *fmt, ...); 37int color_fprintf_ln(FILE *fp, const char *color, const char *fmt, ...);
37int color_fwrite_lines(FILE *fp, const char *color, size_t count, const char *buf); 38int color_fwrite_lines(FILE *fp, const char *color, size_t count, const char *buf);
38int percent_color_fprintf(FILE *fp, const char *fmt, double percent); 39int percent_color_fprintf(FILE *fp, const char *fmt, double percent);
39char *get_percent_color(double percent); 40const char *get_percent_color(double percent);
40 41
41#endif /* COLOR_H */ 42#endif /* COLOR_H */
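
Note: the color.c change above splits the old static helper into __color_vfprintf() (which keeps the optional trailing string) and a new public color_vfprintf() without it, so code that already holds a va_list, such as the dump_printf_color() helper added in util/debug.c below, can print through it. A small sketch of such a wrapper, assuming only the prototypes and PERF_COLOR_* macros declared in color.h:

#include <stdarg.h>
#include <stdio.h>
#include "color.h"	/* color_vfprintf(), PERF_COLOR_BLUE */

/* Illustrative wrapper: forward a caller's format string through the new
 * public va_list entry point, as dump_printf_color() in util/debug.c does. */
static int print_blue(FILE *fp, const char *fmt, ...)
{
	va_list args;
	int ret;

	va_start(args, fmt);
	ret = color_vfprintf(fp, PERF_COLOR_BLUE, fmt, args);
	va_end(args);

	return ret;
}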
diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
index 780df541006d..8784649109ce 100644
--- a/tools/perf/util/config.c
+++ b/tools/perf/util/config.c
@@ -160,17 +160,18 @@ static int get_extended_base_var(char *name, int baselen, int c)
160 name[baselen++] = '.'; 160 name[baselen++] = '.';
161 161
162 for (;;) { 162 for (;;) {
163 int c = get_next_char(); 163 int ch = get_next_char();
164 if (c == '\n') 164
165 if (ch == '\n')
165 return -1; 166 return -1;
166 if (c == '"') 167 if (ch == '"')
167 break; 168 break;
168 if (c == '\\') { 169 if (ch == '\\') {
169 c = get_next_char(); 170 ch = get_next_char();
170 if (c == '\n') 171 if (ch == '\n')
171 return -1; 172 return -1;
172 } 173 }
173 name[baselen++] = c; 174 name[baselen++] = ch;
174 if (baselen > MAXNAME / 2) 175 if (baselen > MAXNAME / 2)
175 return -1; 176 return -1;
176 } 177 }
@@ -530,6 +531,8 @@ static int store_aux(const char* key, const char* value, void *cb __used)
530 store.offset[store.seen] = ftell(config_file); 531 store.offset[store.seen] = ftell(config_file);
531 } 532 }
532 } 533 }
534 default:
535 break;
533 } 536 }
534 return 0; 537 return 0;
535} 538}
@@ -619,6 +622,7 @@ contline:
619 switch (contents[offset]) { 622 switch (contents[offset]) {
620 case '=': equal_offset = offset; break; 623 case '=': equal_offset = offset; break;
621 case ']': bracket_offset = offset; break; 624 case ']': bracket_offset = offset; break;
625 default: break;
622 } 626 }
623 if (offset > 0 && contents[offset-1] == '\\') { 627 if (offset > 0 && contents[offset-1] == '\\') {
624 offset_ = offset; 628 offset_ = offset;
@@ -742,9 +746,9 @@ int perf_config_set_multivar(const char* key, const char* value,
742 goto write_err_out; 746 goto write_err_out;
743 } else { 747 } else {
744 struct stat st; 748 struct stat st;
745 char* contents; 749 char *contents;
746 ssize_t contents_sz, copy_begin, copy_end; 750 ssize_t contents_sz, copy_begin, copy_end;
747 int i, new_line = 0; 751 int new_line = 0;
748 752
749 if (value_regex == NULL) 753 if (value_regex == NULL)
750 store.value_regex = NULL; 754 store.value_regex = NULL;
diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c
new file mode 100644
index 000000000000..e8ca98fe0bd4
--- /dev/null
+++ b/tools/perf/util/debug.c
@@ -0,0 +1,95 @@
1/* For general debugging purposes */
2
3#include "../perf.h"
4
5#include <string.h>
6#include <stdarg.h>
7#include <stdio.h>
8
9#include "color.h"
10#include "event.h"
11#include "debug.h"
12
13int verbose = 0;
14int dump_trace = 0;
15
16int eprintf(const char *fmt, ...)
17{
18 va_list args;
19 int ret = 0;
20
21 if (verbose) {
22 va_start(args, fmt);
23 ret = vfprintf(stderr, fmt, args);
24 va_end(args);
25 }
26
27 return ret;
28}
29
30int dump_printf(const char *fmt, ...)
31{
32 va_list args;
33 int ret = 0;
34
35 if (dump_trace) {
36 va_start(args, fmt);
37 ret = vprintf(fmt, args);
38 va_end(args);
39 }
40
41 return ret;
42}
43
44static int dump_printf_color(const char *fmt, const char *color, ...)
45{
46 va_list args;
47 int ret = 0;
48
49 if (dump_trace) {
50 va_start(args, color);
51 ret = color_vfprintf(stdout, color, fmt, args);
52 va_end(args);
53 }
54
55 return ret;
56}
57
58
59void trace_event(event_t *event)
60{
61 unsigned char *raw_event = (void *)event;
62 const char *color = PERF_COLOR_BLUE;
63 int i, j;
64
65 if (!dump_trace)
66 return;
67
68 dump_printf(".");
69 dump_printf_color("\n. ... raw event: size %d bytes\n", color,
70 event->header.size);
71
72 for (i = 0; i < event->header.size; i++) {
73 if ((i & 15) == 0) {
74 dump_printf(".");
75 dump_printf_color(" %04x: ", color, i);
76 }
77
78 dump_printf_color(" %02x", color, raw_event[i]);
79
80 if (((i & 15) == 15) || i == event->header.size-1) {
81 dump_printf_color(" ", color);
82 for (j = 0; j < 15-(i & 15); j++)
83 dump_printf_color(" ", color);
84 for (j = 0; j < (i & 15); j++) {
85 if (isprint(raw_event[i-15+j]))
86 dump_printf_color("%c", color,
87 raw_event[i-15+j]);
88 else
89 dump_printf_color(".", color);
90 }
91 dump_printf_color("\n", color);
92 }
93 }
94 dump_printf(".\n");
95}
diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h
new file mode 100644
index 000000000000..437eea58ce40
--- /dev/null
+++ b/tools/perf/util/debug.h
@@ -0,0 +1,8 @@
1/* For debugging general purposes */
2
3extern int verbose;
4extern int dump_trace;
5
6int eprintf(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
7int dump_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
8void trace_event(event_t *event);
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
new file mode 100644
index 000000000000..fa2d4e91d329
--- /dev/null
+++ b/tools/perf/util/event.h
@@ -0,0 +1,96 @@
1#ifndef __PERF_EVENT_H
2#define __PERF_EVENT_H
3#include "../perf.h"
4#include "util.h"
5#include <linux/list.h>
6
7enum {
8 SHOW_KERNEL = 1,
9 SHOW_USER = 2,
10 SHOW_HV = 4,
11};
12
13/*
14 * PERF_SAMPLE_IP | PERF_SAMPLE_TID | *
15 */
16struct ip_event {
17 struct perf_event_header header;
18 u64 ip;
19 u32 pid, tid;
20 unsigned char __more_data[];
21};
22
23struct mmap_event {
24 struct perf_event_header header;
25 u32 pid, tid;
26 u64 start;
27 u64 len;
28 u64 pgoff;
29 char filename[PATH_MAX];
30};
31
32struct comm_event {
33 struct perf_event_header header;
34 u32 pid, tid;
35 char comm[16];
36};
37
38struct fork_event {
39 struct perf_event_header header;
40 u32 pid, ppid;
41 u32 tid, ptid;
42};
43
44struct lost_event {
45 struct perf_event_header header;
46 u64 id;
47 u64 lost;
48};
49
50/*
51 * PERF_FORMAT_ENABLED | PERF_FORMAT_RUNNING | PERF_FORMAT_ID
52 */
53struct read_event {
54 struct perf_event_header header;
55 u32 pid,tid;
56 u64 value;
57 u64 time_enabled;
58 u64 time_running;
59 u64 id;
60};
61
62typedef union event_union {
63 struct perf_event_header header;
64 struct ip_event ip;
65 struct mmap_event mmap;
66 struct comm_event comm;
67 struct fork_event fork;
68 struct lost_event lost;
69 struct read_event read;
70} event_t;
71
72struct map {
73 struct list_head node;
74 u64 start;
75 u64 end;
76 u64 pgoff;
77 u64 (*map_ip)(struct map *, u64);
78 struct dso *dso;
79};
80
81static inline u64 map__map_ip(struct map *map, u64 ip)
82{
83 return ip - map->start + map->pgoff;
84}
85
86static inline u64 vdso__map_ip(struct map *map __used, u64 ip)
87{
88 return ip;
89}
90
91struct map *map__new(struct mmap_event *event, char *cwd, int cwdlen);
92struct map *map__clone(struct map *self);
93int map__overlap(struct map *l, struct map *r);
94size_t map__fprintf(struct map *self, FILE *fp);
95
96#endif
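
Note: map__map_ip() above rebases a sampled instruction pointer from the process address space into the backing file's space (ip - start + pgoff), while vdso__map_ip() leaves vdso and anonymous addresses untouched. A small worked check with made-up numbers, using a trimmed copy of the helper so it compiles on its own:

#include <assert.h>
#include <stdint.h>
typedef uint64_t u64;

/* Trimmed restatement of map__map_ip() from event.h, for illustration. */
struct map { u64 start, end, pgoff; };

static u64 map__map_ip(struct map *map, u64 ip)
{
	return ip - map->start + map->pgoff;
}

int main(void)
{
	/* hypothetical mapping: file offset 0x1000 mapped at 0x7f0000001000 */
	struct map m = { .start = 0x7f0000001000ULL,
			 .end   = 0x7f0000042000ULL,
			 .pgoff = 0x1000ULL };

	/* a sample at 0x7f0000003500 lands at file offset 0x3500 */
	assert(map__map_ip(&m, 0x7f0000003500ULL) == 0x3500ULL);
	return 0;
}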
diff --git a/tools/perf/util/exec_cmd.c b/tools/perf/util/exec_cmd.c
index 34a352867382..2745605dba11 100644
--- a/tools/perf/util/exec_cmd.c
+++ b/tools/perf/util/exec_cmd.c
@@ -6,7 +6,6 @@
6 6
7#define MAX_ARGS 32 7#define MAX_ARGS 32
8 8
9extern char **environ;
10static const char *argv_exec_path; 9static const char *argv_exec_path;
11static const char *argv0_path; 10static const char *argv0_path;
12 11
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index b92a457ca32e..ec4d4c2f9522 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -237,9 +237,44 @@ struct perf_header *perf_header__read(int fd)
237 self->data_offset = f_header.data.offset; 237 self->data_offset = f_header.data.offset;
238 self->data_size = f_header.data.size; 238 self->data_size = f_header.data.size;
239 239
240 lseek(fd, self->data_offset + self->data_size, SEEK_SET); 240 lseek(fd, self->data_offset, SEEK_SET);
241 241
242 self->frozen = 1; 242 self->frozen = 1;
243 243
244 return self; 244 return self;
245} 245}
246
247u64 perf_header__sample_type(struct perf_header *header)
248{
249 u64 type = 0;
250 int i;
251
252 for (i = 0; i < header->attrs; i++) {
253 struct perf_header_attr *attr = header->attr[i];
254
255 if (!type)
256 type = attr->attr.sample_type;
257 else if (type != attr->attr.sample_type)
258 die("non matching sample_type");
259 }
260
261 return type;
262}
263
264struct perf_counter_attr *
265perf_header__find_attr(u64 id, struct perf_header *header)
266{
267 int i;
268
269 for (i = 0; i < header->attrs; i++) {
270 struct perf_header_attr *attr = header->attr[i];
271 int j;
272
273 for (j = 0; j < attr->ids; j++) {
274 if (attr->id[j] == id)
275 return &attr->attr;
276 }
277 }
278
279 return NULL;
280}
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index bf280449fcfd..5d0a72ecc919 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -31,6 +31,10 @@ struct perf_header_attr *
31perf_header_attr__new(struct perf_counter_attr *attr); 31perf_header_attr__new(struct perf_counter_attr *attr);
32void perf_header_attr__add_id(struct perf_header_attr *self, u64 id); 32void perf_header_attr__add_id(struct perf_header_attr *self, u64 id);
33 33
34u64 perf_header__sample_type(struct perf_header *header);
35struct perf_counter_attr *
36perf_header__find_attr(u64 id, struct perf_header *header);
37
34 38
35struct perf_header *perf_header__new(void); 39struct perf_header *perf_header__new(void);
36 40
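
Note: perf_header__find_attr(), exported above, resolves a sample's ID back to the perf_counter_attr that produced it by scanning each attribute's id[] table. A hedged caller sketch, assuming perf's internal headers for the u64 and perf_counter_attr types:

#include "header.h"

/* Illustrative: report the sample_type of the counter that produced a
 * given sample id, or 0 if the id is not known to this header. */
static u64 sample_type_for_id(struct perf_header *header, u64 id)
{
	struct perf_counter_attr *attr = perf_header__find_attr(id, header);

	return attr ? attr->sample_type : 0;
}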
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
new file mode 100644
index 000000000000..804e02382739
--- /dev/null
+++ b/tools/perf/util/map.c
@@ -0,0 +1,97 @@
1#include "event.h"
2#include "symbol.h"
3#include <stdlib.h>
4#include <string.h>
5#include <stdio.h>
6
7static inline int is_anon_memory(const char *filename)
8{
9 return strcmp(filename, "//anon") == 0;
10}
11
12static int strcommon(const char *pathname, char *cwd, int cwdlen)
13{
14 int n = 0;
15
16 while (n < cwdlen && pathname[n] == cwd[n])
17 ++n;
18
19 return n;
20}
21
22 struct map *map__new(struct mmap_event *event, char *cwd, int cwdlen)
23{
24 struct map *self = malloc(sizeof(*self));
25
26 if (self != NULL) {
27 const char *filename = event->filename;
28 char newfilename[PATH_MAX];
29 int anon;
30
31 if (cwd) {
32 int n = strcommon(filename, cwd, cwdlen);
33
34 if (n == cwdlen) {
35 snprintf(newfilename, sizeof(newfilename),
36 ".%s", filename + n);
37 filename = newfilename;
38 }
39 }
40
41 anon = is_anon_memory(filename);
42
43 if (anon) {
44 snprintf(newfilename, sizeof(newfilename), "/tmp/perf-%d.map", event->pid);
45 filename = newfilename;
46 }
47
48 self->start = event->start;
49 self->end = event->start + event->len;
50 self->pgoff = event->pgoff;
51
52 self->dso = dsos__findnew(filename);
53 if (self->dso == NULL)
54 goto out_delete;
55
56 if (self->dso == vdso || anon)
57 self->map_ip = vdso__map_ip;
58 else
59 self->map_ip = map__map_ip;
60 }
61 return self;
62out_delete:
63 free(self);
64 return NULL;
65}
66
67struct map *map__clone(struct map *self)
68{
69 struct map *map = malloc(sizeof(*self));
70
71 if (!map)
72 return NULL;
73
74 memcpy(map, self, sizeof(*self));
75
76 return map;
77}
78
79int map__overlap(struct map *l, struct map *r)
80{
81 if (l->start > r->start) {
82 struct map *t = l;
83 l = r;
84 r = t;
85 }
86
87 if (l->end > r->start)
88 return 1;
89
90 return 0;
91}
92
93size_t map__fprintf(struct map *self, FILE *fp)
94{
95 return fprintf(fp, " %Lx-%Lx %Lx %s\n",
96 self->start, self->end, self->pgoff, self->dso->name);
97}
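
Note: map__overlap() above first swaps the two maps so that l starts first, then reports an overlap whenever l runs past r's start; map->end is exclusive (start + len), so adjacent maps do not overlap. A stand-alone restatement of that test on plain ranges, with made-up values:

#include <assert.h>
#include <stdint.h>
typedef uint64_t u64;

struct range { u64 start, end; };	/* [start, end), like struct map */

static int range_overlap(struct range l, struct range r)
{
	if (l.start > r.start) {	/* normalize so l starts first */
		struct range t = l;
		l = r;
		r = t;
	}
	return l.end > r.start;
}

int main(void)
{
	struct range a = { 0x1000, 0x2000 };
	struct range b = { 0x1800, 0x3000 };
	struct range c = { 0x2000, 0x3000 };

	assert(range_overlap(a, b));	/* overlapping ranges */
	assert(!range_overlap(a, c));	/* adjacent ranges do not overlap */
	return 0;
}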
diff --git a/tools/perf/util/module.c b/tools/perf/util/module.c
index ddabe925d65d..3d567fe59c79 100644
--- a/tools/perf/util/module.c
+++ b/tools/perf/util/module.c
@@ -436,9 +436,9 @@ static int mod_dso__load_module_paths(struct mod_dso *self)
436 goto out_failure; 436 goto out_failure;
437 437
438 while (!feof(file)) { 438 while (!feof(file)) {
439 char *path, *name, *tmp; 439 char *name, *tmp;
440 struct module *module; 440 struct module *module;
441 int line_len, len; 441 int line_len;
442 442
443 line_len = getline(&line, &n, file); 443 line_len = getline(&line, &n, file);
444 if (line_len < 0) 444 if (line_len < 0)
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 044178408783..a587d41ae3c9 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -1,23 +1,21 @@
1 1
2#include "../perf.h"
3#include "util.h" 2#include "util.h"
3#include "../perf.h"
4#include "parse-options.h" 4#include "parse-options.h"
5#include "parse-events.h" 5#include "parse-events.h"
6#include "exec_cmd.h" 6#include "exec_cmd.h"
7#include "string.h" 7#include "string.h"
8#include "cache.h" 8#include "cache.h"
9 9
10extern char *strcasestr(const char *haystack, const char *needle);
11
12int nr_counters; 10int nr_counters;
13 11
14struct perf_counter_attr attrs[MAX_COUNTERS]; 12struct perf_counter_attr attrs[MAX_COUNTERS];
15 13
16struct event_symbol { 14struct event_symbol {
17 u8 type; 15 u8 type;
18 u64 config; 16 u64 config;
19 char *symbol; 17 const char *symbol;
20 char *alias; 18 const char *alias;
21}; 19};
22 20
23char debugfs_path[MAXPATHLEN]; 21char debugfs_path[MAXPATHLEN];
@@ -51,7 +49,7 @@ static struct event_symbol event_symbols[] = {
51#define PERF_COUNTER_TYPE(config) __PERF_COUNTER_FIELD(config, TYPE) 49#define PERF_COUNTER_TYPE(config) __PERF_COUNTER_FIELD(config, TYPE)
52#define PERF_COUNTER_ID(config) __PERF_COUNTER_FIELD(config, EVENT) 50#define PERF_COUNTER_ID(config) __PERF_COUNTER_FIELD(config, EVENT)
53 51
54static char *hw_event_names[] = { 52static const char *hw_event_names[] = {
55 "cycles", 53 "cycles",
56 "instructions", 54 "instructions",
57 "cache-references", 55 "cache-references",
@@ -61,7 +59,7 @@ static char *hw_event_names[] = {
61 "bus-cycles", 59 "bus-cycles",
62}; 60};
63 61
64static char *sw_event_names[] = { 62static const char *sw_event_names[] = {
65 "cpu-clock-msecs", 63 "cpu-clock-msecs",
66 "task-clock-msecs", 64 "task-clock-msecs",
67 "page-faults", 65 "page-faults",
@@ -73,7 +71,7 @@ static char *sw_event_names[] = {
73 71
74#define MAX_ALIASES 8 72#define MAX_ALIASES 8
75 73
76static char *hw_cache[][MAX_ALIASES] = { 74static const char *hw_cache[][MAX_ALIASES] = {
77 { "L1-dcache", "l1-d", "l1d", "L1-data", }, 75 { "L1-dcache", "l1-d", "l1d", "L1-data", },
78 { "L1-icache", "l1-i", "l1i", "L1-instruction", }, 76 { "L1-icache", "l1-i", "l1i", "L1-instruction", },
79 { "LLC", "L2" }, 77 { "LLC", "L2" },
@@ -82,13 +80,13 @@ static char *hw_cache[][MAX_ALIASES] = {
82 { "branch", "branches", "bpu", "btb", "bpc", }, 80 { "branch", "branches", "bpu", "btb", "bpc", },
83}; 81};
84 82
85static char *hw_cache_op[][MAX_ALIASES] = { 83static const char *hw_cache_op[][MAX_ALIASES] = {
86 { "load", "loads", "read", }, 84 { "load", "loads", "read", },
87 { "store", "stores", "write", }, 85 { "store", "stores", "write", },
88 { "prefetch", "prefetches", "speculative-read", "speculative-load", }, 86 { "prefetch", "prefetches", "speculative-read", "speculative-load", },
89}; 87};
90 88
91static char *hw_cache_result[][MAX_ALIASES] = { 89static const char *hw_cache_result[][MAX_ALIASES] = {
92 { "refs", "Reference", "ops", "access", }, 90 { "refs", "Reference", "ops", "access", },
93 { "misses", "miss", }, 91 { "misses", "miss", },
94}; 92};
@@ -113,11 +111,9 @@ static unsigned long hw_cache_stat[C(MAX)] = {
113 [C(BPU)] = (CACHE_READ), 111 [C(BPU)] = (CACHE_READ),
114}; 112};
115 113
116#define for_each_subsystem(sys_dir, sys_dirent, sys_next, file, st) \ 114#define for_each_subsystem(sys_dir, sys_dirent, sys_next) \
117 while (!readdir_r(sys_dir, &sys_dirent, &sys_next) && sys_next) \ 115 while (!readdir_r(sys_dir, &sys_dirent, &sys_next) && sys_next) \
118 if (snprintf(file, MAXPATHLEN, "%s/%s", debugfs_path, \ 116 if (sys_dirent.d_type == DT_DIR && \
119 sys_dirent.d_name) && \
120 (!stat(file, &st)) && (S_ISDIR(st.st_mode)) && \
121 (strcmp(sys_dirent.d_name, ".")) && \ 117 (strcmp(sys_dirent.d_name, ".")) && \
122 (strcmp(sys_dirent.d_name, ".."))) 118 (strcmp(sys_dirent.d_name, "..")))
123 119
@@ -136,11 +132,9 @@ static int tp_event_has_id(struct dirent *sys_dir, struct dirent *evt_dir)
136 return 0; 132 return 0;
137} 133}
138 134
139#define for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next, file, st) \ 135#define for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) \
140 while (!readdir_r(evt_dir, &evt_dirent, &evt_next) && evt_next) \ 136 while (!readdir_r(evt_dir, &evt_dirent, &evt_next) && evt_next) \
141 if (snprintf(file, MAXPATHLEN, "%s/%s/%s", debugfs_path, \ 137 if (evt_dirent.d_type == DT_DIR && \
142 sys_dirent.d_name, evt_dirent.d_name) && \
143 (!stat(file, &st)) && (S_ISDIR(st.st_mode)) && \
144 (strcmp(evt_dirent.d_name, ".")) && \ 138 (strcmp(evt_dirent.d_name, ".")) && \
145 (strcmp(evt_dirent.d_name, "..")) && \ 139 (strcmp(evt_dirent.d_name, "..")) && \
146 (!tp_event_has_id(&sys_dirent, &evt_dirent))) 140 (!tp_event_has_id(&sys_dirent, &evt_dirent)))
@@ -158,34 +152,39 @@ int valid_debugfs_mount(const char *debugfs)
158 return 0; 152 return 0;
159} 153}
160 154
161static char *tracepoint_id_to_name(u64 config) 155struct tracepoint_path *tracepoint_id_to_path(u64 config)
162{ 156{
163 static char tracepoint_name[2 * MAX_EVENT_LENGTH]; 157 struct tracepoint_path *path = NULL;
164 DIR *sys_dir, *evt_dir; 158 DIR *sys_dir, *evt_dir;
165 struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent; 159 struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
166 struct stat st;
167 char id_buf[4]; 160 char id_buf[4];
168 int fd; 161 int sys_dir_fd, fd;
169 u64 id; 162 u64 id;
170 char evt_path[MAXPATHLEN]; 163 char evt_path[MAXPATHLEN];
171 164
172 if (valid_debugfs_mount(debugfs_path)) 165 if (valid_debugfs_mount(debugfs_path))
173 return "unkown"; 166 return NULL;
174 167
175 sys_dir = opendir(debugfs_path); 168 sys_dir = opendir(debugfs_path);
176 if (!sys_dir) 169 if (!sys_dir)
177 goto cleanup; 170 goto cleanup;
178 171 sys_dir_fd = dirfd(sys_dir);
179 for_each_subsystem(sys_dir, sys_dirent, sys_next, evt_path, st) { 172
180 evt_dir = opendir(evt_path); 173 for_each_subsystem(sys_dir, sys_dirent, sys_next) {
181 if (!evt_dir) 174 int dfd = openat(sys_dir_fd, sys_dirent.d_name,
182 goto cleanup; 175 O_RDONLY|O_DIRECTORY), evt_dir_fd;
183 for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next, 176 if (dfd == -1)
184 evt_path, st) { 177 continue;
185 snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", 178 evt_dir = fdopendir(dfd);
186 debugfs_path, sys_dirent.d_name, 179 if (!evt_dir) {
180 close(dfd);
181 continue;
182 }
183 evt_dir_fd = dirfd(evt_dir);
184 for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
185 snprintf(evt_path, MAXPATHLEN, "%s/id",
187 evt_dirent.d_name); 186 evt_dirent.d_name);
188 fd = open(evt_path, O_RDONLY); 187 fd = openat(evt_dir_fd, evt_path, O_RDONLY);
189 if (fd < 0) 188 if (fd < 0)
190 continue; 189 continue;
191 if (read(fd, id_buf, sizeof(id_buf)) < 0) { 190 if (read(fd, id_buf, sizeof(id_buf)) < 0) {
@@ -197,10 +196,23 @@ static char *tracepoint_id_to_name(u64 config)
197 if (id == config) { 196 if (id == config) {
198 closedir(evt_dir); 197 closedir(evt_dir);
199 closedir(sys_dir); 198 closedir(sys_dir);
200 snprintf(tracepoint_name, 2 * MAX_EVENT_LENGTH, 199 path = calloc(1, sizeof(path));
201 "%s:%s", sys_dirent.d_name, 200 path->system = malloc(MAX_EVENT_LENGTH);
202 evt_dirent.d_name); 201 if (!path->system) {
203 return tracepoint_name; 202 free(path);
203 return NULL;
204 }
205 path->name = malloc(MAX_EVENT_LENGTH);
206 if (!path->name) {
207 free(path->system);
208 free(path);
209 return NULL;
210 }
211 strncpy(path->system, sys_dirent.d_name,
212 MAX_EVENT_LENGTH);
213 strncpy(path->name, evt_dirent.d_name,
214 MAX_EVENT_LENGTH);
215 return path;
204 } 216 }
205 } 217 }
206 closedir(evt_dir); 218 closedir(evt_dir);
@@ -208,7 +220,25 @@ static char *tracepoint_id_to_name(u64 config)
208 220
209cleanup: 221cleanup:
210 closedir(sys_dir); 222 closedir(sys_dir);
211 return "unkown"; 223 return NULL;
224}
225
226#define TP_PATH_LEN (MAX_EVENT_LENGTH * 2 + 1)
227static const char *tracepoint_id_to_name(u64 config)
228{
229 static char buf[TP_PATH_LEN];
230 struct tracepoint_path *path;
231
232 path = tracepoint_id_to_path(config);
233 if (path) {
234 snprintf(buf, TP_PATH_LEN, "%s:%s", path->system, path->name);
235 free(path->name);
236 free(path->system);
237 free(path);
238 } else
239 snprintf(buf, TP_PATH_LEN, "%s:%s", "unknown", "unknown");
240
241 return buf;
212} 242}
213 243
214static int is_cache_op_valid(u8 cache_type, u8 cache_op) 244static int is_cache_op_valid(u8 cache_type, u8 cache_op)
@@ -235,7 +265,7 @@ static char *event_cache_name(u8 cache_type, u8 cache_op, u8 cache_result)
235 return name; 265 return name;
236} 266}
237 267
238char *event_name(int counter) 268const char *event_name(int counter)
239{ 269{
240 u64 config = attrs[counter].config; 270 u64 config = attrs[counter].config;
241 int type = attrs[counter].type; 271 int type = attrs[counter].type;
@@ -243,7 +273,7 @@ char *event_name(int counter)
243 return __event_name(type, config); 273 return __event_name(type, config);
244} 274}
245 275
246char *__event_name(int type, u64 config) 276const char *__event_name(int type, u64 config)
247{ 277{
248 static char buf[32]; 278 static char buf[32];
249 279
@@ -294,7 +324,7 @@ char *__event_name(int type, u64 config)
294 return "unknown"; 324 return "unknown";
295} 325}
296 326
297static int parse_aliases(const char **str, char *names[][MAX_ALIASES], int size) 327static int parse_aliases(const char **str, const char *names[][MAX_ALIASES], int size)
298{ 328{
299 int i, j; 329 int i, j;
300 int n, longest = -1; 330 int n, longest = -1;
@@ -598,7 +628,7 @@ static void print_tracepoint_events(void)
598{ 628{
599 DIR *sys_dir, *evt_dir; 629 DIR *sys_dir, *evt_dir;
600 struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent; 630 struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
601 struct stat st; 631 int sys_dir_fd;
602 char evt_path[MAXPATHLEN]; 632 char evt_path[MAXPATHLEN];
603 633
604 if (valid_debugfs_mount(debugfs_path)) 634 if (valid_debugfs_mount(debugfs_path))
@@ -607,16 +637,23 @@ static void print_tracepoint_events(void)
607 sys_dir = opendir(debugfs_path); 637 sys_dir = opendir(debugfs_path);
608 if (!sys_dir) 638 if (!sys_dir)
609 goto cleanup; 639 goto cleanup;
610 640 sys_dir_fd = dirfd(sys_dir);
611 for_each_subsystem(sys_dir, sys_dirent, sys_next, evt_path, st) { 641
612 evt_dir = opendir(evt_path); 642 for_each_subsystem(sys_dir, sys_dirent, sys_next) {
613 if (!evt_dir) 643 int dfd = openat(sys_dir_fd, sys_dirent.d_name,
614 goto cleanup; 644 O_RDONLY|O_DIRECTORY), evt_dir_fd;
615 for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next, 645 if (dfd == -1)
616 evt_path, st) { 646 continue;
647 evt_dir = fdopendir(dfd);
648 if (!evt_dir) {
649 close(dfd);
650 continue;
651 }
652 evt_dir_fd = dirfd(evt_dir);
653 for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
617 snprintf(evt_path, MAXPATHLEN, "%s:%s", 654 snprintf(evt_path, MAXPATHLEN, "%s:%s",
618 sys_dirent.d_name, evt_dirent.d_name); 655 sys_dirent.d_name, evt_dirent.d_name);
619 fprintf(stderr, " %-40s [%s]\n", evt_path, 656 fprintf(stderr, " %-42s [%s]\n", evt_path,
620 event_type_descriptors[PERF_TYPE_TRACEPOINT+1]); 657 event_type_descriptors[PERF_TYPE_TRACEPOINT+1]);
621 } 658 }
622 closedir(evt_dir); 659 closedir(evt_dir);
@@ -650,7 +687,7 @@ void print_events(void)
650 sprintf(name, "%s OR %s", syms->symbol, syms->alias); 687 sprintf(name, "%s OR %s", syms->symbol, syms->alias);
651 else 688 else
652 strcpy(name, syms->symbol); 689 strcpy(name, syms->symbol);
653 fprintf(stderr, " %-40s [%s]\n", name, 690 fprintf(stderr, " %-42s [%s]\n", name,
654 event_type_descriptors[type]); 691 event_type_descriptors[type]);
655 692
656 prev_type = type; 693 prev_type = type;
@@ -664,7 +701,7 @@ void print_events(void)
664 continue; 701 continue;
665 702
666 for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) { 703 for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
667 fprintf(stderr, " %-40s [%s]\n", 704 fprintf(stderr, " %-42s [%s]\n",
668 event_cache_name(type, op, i), 705 event_cache_name(type, op, i),
669 event_type_descriptors[4]); 706 event_type_descriptors[4]);
670 } 707 }
@@ -672,7 +709,7 @@ void print_events(void)
672 } 709 }
673 710
674 fprintf(stderr, "\n"); 711 fprintf(stderr, "\n");
675 fprintf(stderr, " %-40s [raw hardware event descriptor]\n", 712 fprintf(stderr, " %-42s [raw hardware event descriptor]\n",
676 "rNNN"); 713 "rNNN");
677 fprintf(stderr, "\n"); 714 fprintf(stderr, "\n");
678 715
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index 192a962e3a0f..60704c15961f 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -1,16 +1,25 @@
1 1#ifndef _PARSE_EVENTS_H
2#define _PARSE_EVENTS_H
2/* 3/*
3 * Parse symbolic events/counts passed in as options: 4 * Parse symbolic events/counts passed in as options:
4 */ 5 */
5 6
6struct option; 7struct option;
7 8
9struct tracepoint_path {
10 char *system;
11 char *name;
12 struct tracepoint_path *next;
13};
14
15extern struct tracepoint_path *tracepoint_id_to_path(u64 config);
16
8extern int nr_counters; 17extern int nr_counters;
9 18
10extern struct perf_counter_attr attrs[MAX_COUNTERS]; 19extern struct perf_counter_attr attrs[MAX_COUNTERS];
11 20
12extern char *event_name(int ctr); 21extern const char *event_name(int ctr);
13extern char *__event_name(int type, u64 config); 22extern const char *__event_name(int type, u64 config);
14 23
15extern int parse_events(const struct option *opt, const char *str, int unset); 24extern int parse_events(const struct option *opt, const char *str, int unset);
16 25
@@ -21,3 +30,5 @@ extern void print_events(void);
21extern char debugfs_path[]; 30extern char debugfs_path[];
22extern int valid_debugfs_mount(const char *debugfs); 31extern int valid_debugfs_mount(const char *debugfs);
23 32
33
34#endif /* _PARSE_EVENTS_H */
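
Note: tracepoint_id_to_path(), declared above, is the exported counterpart of the old static tracepoint_id_to_name(): it hands back a heap-allocated tracepoint_path whose system and name strings the caller owns, exactly as the small wrapper in parse-events.c frees them. A hedged caller sketch, assuming only the declarations added to parse-events.h and perf's u64 typedef:

#include <stdio.h>
#include <stdlib.h>
#include "parse-events.h"

/* Illustrative caller: print "system:name" for a tracepoint config value
 * and release the strings, mirroring tracepoint_id_to_name() above. */
static void print_tracepoint(u64 config)
{
	struct tracepoint_path *path = tracepoint_id_to_path(config);

	if (!path) {
		printf("unknown tracepoint id %llu\n", (unsigned long long)config);
		return;
	}

	printf("%s:%s\n", path->system, path->name);
	free(path->name);
	free(path->system);
	free(path);
}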
diff --git a/tools/perf/util/parse-options.c b/tools/perf/util/parse-options.c
index 1bf67190c820..6d8af48c925e 100644
--- a/tools/perf/util/parse-options.c
+++ b/tools/perf/util/parse-options.c
@@ -53,6 +53,12 @@ static int get_value(struct parse_opt_ctx_t *p,
53 case OPTION_SET_INT: 53 case OPTION_SET_INT:
54 case OPTION_SET_PTR: 54 case OPTION_SET_PTR:
55 return opterror(opt, "takes no value", flags); 55 return opterror(opt, "takes no value", flags);
56 case OPTION_END:
57 case OPTION_ARGUMENT:
58 case OPTION_GROUP:
59 case OPTION_STRING:
60 case OPTION_INTEGER:
61 case OPTION_LONG:
56 default: 62 default:
57 break; 63 break;
58 } 64 }
@@ -130,6 +136,9 @@ static int get_value(struct parse_opt_ctx_t *p,
130 return opterror(opt, "expects a numerical value", flags); 136 return opterror(opt, "expects a numerical value", flags);
131 return 0; 137 return 0;
132 138
139 case OPTION_END:
140 case OPTION_ARGUMENT:
141 case OPTION_GROUP:
133 default: 142 default:
134 die("should not happen, someone must be hit on the forehead"); 143 die("should not happen, someone must be hit on the forehead");
135 } 144 }
@@ -296,6 +305,8 @@ int parse_options_step(struct parse_opt_ctx_t *ctx,
296 return parse_options_usage(usagestr, options); 305 return parse_options_usage(usagestr, options);
297 case -2: 306 case -2:
298 goto unknown; 307 goto unknown;
308 default:
309 break;
299 } 310 }
300 if (ctx->opt) 311 if (ctx->opt)
301 check_typos(arg + 1, options); 312 check_typos(arg + 1, options);
@@ -314,6 +325,8 @@ int parse_options_step(struct parse_opt_ctx_t *ctx,
314 ctx->argv[0] = strdup(ctx->opt - 1); 325 ctx->argv[0] = strdup(ctx->opt - 1);
315 *(char *)ctx->argv[0] = '-'; 326 *(char *)ctx->argv[0] = '-';
316 goto unknown; 327 goto unknown;
328 default:
329 break;
317 } 330 }
318 } 331 }
319 continue; 332 continue;
@@ -336,6 +349,8 @@ int parse_options_step(struct parse_opt_ctx_t *ctx,
336 return parse_options_usage(usagestr, options); 349 return parse_options_usage(usagestr, options);
337 case -2: 350 case -2:
338 goto unknown; 351 goto unknown;
352 default:
353 break;
339 } 354 }
340 continue; 355 continue;
341unknown: 356unknown:
@@ -456,6 +471,13 @@ int usage_with_options_internal(const char * const *usagestr,
456 } 471 }
457 break; 472 break;
458 default: /* OPTION_{BIT,BOOLEAN,SET_INT,SET_PTR} */ 473 default: /* OPTION_{BIT,BOOLEAN,SET_INT,SET_PTR} */
474 case OPTION_END:
475 case OPTION_GROUP:
476 case OPTION_BIT:
477 case OPTION_BOOLEAN:
478 case OPTION_SET_INT:
479 case OPTION_SET_PTR:
480 case OPTION_LONG:
459 break; 481 break;
460 } 482 }
461 483
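
Note: the parse-options.c and config.c hunks above only add explicit case labels and "default: break;" arms; behaviour is unchanged, and the additions are presumably there to keep every enumerator handled when the tools are built with stricter switch warnings (for example -Wswitch-enum). A tiny illustration of the warning this pattern avoids; the enum and function are made up for the example:

/* compile with: gcc -c -Wswitch-enum example.c */
enum opt_type { OPT_A, OPT_B, OPT_C };

int handled(enum opt_type t)
{
	switch (t) {	/* if OPT_C were omitted, -Wswitch-enum would warn here
			 * even though the default arm covers it */
	case OPT_A:
		return 1;
	case OPT_B:
	case OPT_C:
	default:
		return 0;
	}
}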
diff --git a/tools/perf/util/path.c b/tools/perf/util/path.c
index a501a40dd2cb..fd1f2faaade4 100644
--- a/tools/perf/util/path.c
+++ b/tools/perf/util/path.c
@@ -17,7 +17,7 @@ static char bad_path[] = "/bad-path/";
17 * Two hacks: 17 * Two hacks:
18 */ 18 */
19 19
20static char *get_perf_dir(void) 20static const char *get_perf_dir(void)
21{ 21{
22 return "."; 22 return ".";
23} 23}
@@ -38,8 +38,9 @@ size_t strlcpy(char *dest, const char *src, size_t size)
38static char *get_pathname(void) 38static char *get_pathname(void)
39{ 39{
40 static char pathname_array[4][PATH_MAX]; 40 static char pathname_array[4][PATH_MAX];
41 static int index; 41 static int idx;
42 return pathname_array[3 & ++index]; 42
43 return pathname_array[3 & ++idx];
43} 44}
44 45
45static char *cleanup_path(char *path) 46static char *cleanup_path(char *path)
@@ -161,20 +162,24 @@ int perf_mkstemp(char *path, size_t len, const char *template)
161} 162}
162 163
163 164
164const char *make_relative_path(const char *abs, const char *base) 165const char *make_relative_path(const char *abs_path, const char *base)
165{ 166{
166 static char buf[PATH_MAX + 1]; 167 static char buf[PATH_MAX + 1];
167 int baselen; 168 int baselen;
169
168 if (!base) 170 if (!base)
169 return abs; 171 return abs_path;
172
170 baselen = strlen(base); 173 baselen = strlen(base);
171 if (prefixcmp(abs, base)) 174 if (prefixcmp(abs_path, base))
172 return abs; 175 return abs_path;
173 if (abs[baselen] == '/') 176 if (abs_path[baselen] == '/')
174 baselen++; 177 baselen++;
175 else if (base[baselen - 1] != '/') 178 else if (base[baselen - 1] != '/')
176 return abs; 179 return abs_path;
177 strcpy(buf, abs + baselen); 180
181 strcpy(buf, abs_path + baselen);
182
178 return buf; 183 return buf;
179} 184}
180 185
diff --git a/tools/perf/util/run-command.c b/tools/perf/util/run-command.c
index a3935343091a..2b615acf94d7 100644
--- a/tools/perf/util/run-command.c
+++ b/tools/perf/util/run-command.c
@@ -262,7 +262,7 @@ int run_hook(const char *index_file, const char *name, ...)
262{ 262{
263 struct child_process hook; 263 struct child_process hook;
264 const char **argv = NULL, *env[2]; 264 const char **argv = NULL, *env[2];
265 char index[PATH_MAX]; 265 char idx[PATH_MAX];
266 va_list args; 266 va_list args;
267 int ret; 267 int ret;
268 size_t i = 0, alloc = 0; 268 size_t i = 0, alloc = 0;
@@ -284,8 +284,8 @@ int run_hook(const char *index_file, const char *name, ...)
284 hook.no_stdin = 1; 284 hook.no_stdin = 1;
285 hook.stdout_to_stderr = 1; 285 hook.stdout_to_stderr = 1;
286 if (index_file) { 286 if (index_file) {
287 snprintf(index, sizeof(index), "PERF_INDEX_FILE=%s", index_file); 287 snprintf(idx, sizeof(idx), "PERF_INDEX_FILE=%s", index_file);
288 env[0] = index; 288 env[0] = idx;
289 env[1] = NULL; 289 env[1] = NULL;
290 hook.env = env; 290 hook.env = env;
291 } 291 }
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 5c0f42e6b33b..fd3d9c8e90fc 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -3,6 +3,8 @@
3#include "string.h" 3#include "string.h"
4#include "symbol.h" 4#include "symbol.h"
5 5
6#include "debug.h"
7
6#include <libelf.h> 8#include <libelf.h>
7#include <gelf.h> 9#include <gelf.h>
8#include <elf.h> 10#include <elf.h>
@@ -21,7 +23,7 @@ enum dso_origin {
21 23
22static struct symbol *symbol__new(u64 start, u64 len, 24static struct symbol *symbol__new(u64 start, u64 len,
23 const char *name, unsigned int priv_size, 25 const char *name, unsigned int priv_size,
24 u64 obj_start, int verbose) 26 u64 obj_start, int v)
25{ 27{
26 size_t namelen = strlen(name) + 1; 28 size_t namelen = strlen(name) + 1;
27 struct symbol *self = calloc(1, priv_size + sizeof(*self) + namelen); 29 struct symbol *self = calloc(1, priv_size + sizeof(*self) + namelen);
@@ -29,7 +31,7 @@ static struct symbol *symbol__new(u64 start, u64 len,
29 if (!self) 31 if (!self)
30 return NULL; 32 return NULL;
31 33
32 if (verbose >= 2) 34 if (v >= 2)
33 printf("new symbol: %016Lx [%08lx]: %s, hist: %p, obj_start: %p\n", 35 printf("new symbol: %016Lx [%08lx]: %s, hist: %p, obj_start: %p\n",
34 (u64)start, (unsigned long)len, name, self->hist, (void *)(unsigned long)obj_start); 36 (u64)start, (unsigned long)len, name, self->hist, (void *)(unsigned long)obj_start);
35 37
@@ -156,7 +158,7 @@ size_t dso__fprintf(struct dso *self, FILE *fp)
156 return ret; 158 return ret;
157} 159}
158 160
159static int dso__load_kallsyms(struct dso *self, symbol_filter_t filter, int verbose) 161static int dso__load_kallsyms(struct dso *self, symbol_filter_t filter, int v)
160{ 162{
161 struct rb_node *nd, *prevnd; 163 struct rb_node *nd, *prevnd;
162 char *line = NULL; 164 char *line = NULL;
@@ -198,7 +200,7 @@ static int dso__load_kallsyms(struct dso *self, symbol_filter_t filter, int verb
198 * Well fix up the end later, when we have all sorted. 200 * Well fix up the end later, when we have all sorted.
199 */ 201 */
200 sym = symbol__new(start, 0xdead, line + len + 2, 202 sym = symbol__new(start, 0xdead, line + len + 2,
201 self->sym_priv_size, 0, verbose); 203 self->sym_priv_size, 0, v);
202 204
203 if (sym == NULL) 205 if (sym == NULL)
204 goto out_delete_line; 206 goto out_delete_line;
@@ -239,7 +241,7 @@ out_failure:
239 return -1; 241 return -1;
240} 242}
241 243
242static int dso__load_perf_map(struct dso *self, symbol_filter_t filter, int verbose) 244static int dso__load_perf_map(struct dso *self, symbol_filter_t filter, int v)
243{ 245{
244 char *line = NULL; 246 char *line = NULL;
245 size_t n; 247 size_t n;
@@ -277,7 +279,7 @@ static int dso__load_perf_map(struct dso *self, symbol_filter_t filter, int verb
277 continue; 279 continue;
278 280
279 sym = symbol__new(start, size, line + len, 281 sym = symbol__new(start, size, line + len,
280 self->sym_priv_size, start, verbose); 282 self->sym_priv_size, start, v);
281 283
282 if (sym == NULL) 284 if (sym == NULL)
283 goto out_delete_line; 285 goto out_delete_line;
@@ -305,13 +307,13 @@ out_failure:
305 * elf_symtab__for_each_symbol - iterate thru all the symbols 307 * elf_symtab__for_each_symbol - iterate thru all the symbols
306 * 308 *
307 * @self: struct elf_symtab instance to iterate 309 * @self: struct elf_symtab instance to iterate
308 * @index: uint32_t index 310 * @idx: uint32_t idx
309 * @sym: GElf_Sym iterator 311 * @sym: GElf_Sym iterator
310 */ 312 */
311#define elf_symtab__for_each_symbol(syms, nr_syms, index, sym) \ 313#define elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) \
312 for (index = 0, gelf_getsym(syms, index, &sym);\ 314 for (idx = 0, gelf_getsym(syms, idx, &sym);\
313 index < nr_syms; \ 315 idx < nr_syms; \
314 index++, gelf_getsym(syms, index, &sym)) 316 idx++, gelf_getsym(syms, idx, &sym))
315 317
316static inline uint8_t elf_sym__type(const GElf_Sym *sym) 318static inline uint8_t elf_sym__type(const GElf_Sym *sym)
317{ 319{
@@ -354,7 +356,7 @@ static inline const char *elf_sym__name(const GElf_Sym *sym,
354 356
355static Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep, 357static Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
356 GElf_Shdr *shp, const char *name, 358 GElf_Shdr *shp, const char *name,
357 size_t *index) 359 size_t *idx)
358{ 360{
359 Elf_Scn *sec = NULL; 361 Elf_Scn *sec = NULL;
360 size_t cnt = 1; 362 size_t cnt = 1;
@@ -365,8 +367,8 @@ static Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
365 gelf_getshdr(sec, shp); 367 gelf_getshdr(sec, shp);
366 str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name); 368 str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name);
367 if (!strcmp(name, str)) { 369 if (!strcmp(name, str)) {
368 if (index) 370 if (idx)
369 *index = cnt; 371 *idx = cnt;
370 break; 372 break;
371 } 373 }
372 ++cnt; 374 ++cnt;
@@ -392,7 +394,7 @@ static Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
392 * And always look at the original dso, not at debuginfo packages, that 394 * And always look at the original dso, not at debuginfo packages, that
393 * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS). 395 * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS).
394 */ 396 */
395static int dso__synthesize_plt_symbols(struct dso *self, int verbose) 397static int dso__synthesize_plt_symbols(struct dso *self, int v)
396{ 398{
397 uint32_t nr_rel_entries, idx; 399 uint32_t nr_rel_entries, idx;
398 GElf_Sym sym; 400 GElf_Sym sym;
@@ -442,7 +444,7 @@ static int dso__synthesize_plt_symbols(struct dso *self, int verbose)
442 goto out_elf_end; 444 goto out_elf_end;
443 445
444 /* 446 /*
445 * Fetch the relocation section to find the indexes to the GOT 447 * Fetch the relocation section to find the idxes to the GOT
446 * and the symbols in the .dynsym they refer to. 448 * and the symbols in the .dynsym they refer to.
447 */ 449 */
448 reldata = elf_getdata(scn_plt_rel, NULL); 450 reldata = elf_getdata(scn_plt_rel, NULL);
@@ -476,7 +478,7 @@ static int dso__synthesize_plt_symbols(struct dso *self, int verbose)
476 "%s@plt", elf_sym__name(&sym, symstrs)); 478 "%s@plt", elf_sym__name(&sym, symstrs));
477 479
478 f = symbol__new(plt_offset, shdr_plt.sh_entsize, 480 f = symbol__new(plt_offset, shdr_plt.sh_entsize,
479 sympltname, self->sym_priv_size, 0, verbose); 481 sympltname, self->sym_priv_size, 0, v);
480 if (!f) 482 if (!f)
481 goto out_elf_end; 483 goto out_elf_end;
482 484
@@ -494,7 +496,7 @@ static int dso__synthesize_plt_symbols(struct dso *self, int verbose)
494 "%s@plt", elf_sym__name(&sym, symstrs)); 496 "%s@plt", elf_sym__name(&sym, symstrs));
495 497
496 f = symbol__new(plt_offset, shdr_plt.sh_entsize, 498 f = symbol__new(plt_offset, shdr_plt.sh_entsize,
497 sympltname, self->sym_priv_size, 0, verbose); 499 sympltname, self->sym_priv_size, 0, v);
498 if (!f) 500 if (!f)
499 goto out_elf_end; 501 goto out_elf_end;
500 502
@@ -518,12 +520,12 @@ out:
518} 520}
519 521
520static int dso__load_sym(struct dso *self, int fd, const char *name, 522static int dso__load_sym(struct dso *self, int fd, const char *name,
521 symbol_filter_t filter, int verbose, struct module *mod) 523 symbol_filter_t filter, int v, struct module *mod)
522{ 524{
523 Elf_Data *symstrs, *secstrs; 525 Elf_Data *symstrs, *secstrs;
524 uint32_t nr_syms; 526 uint32_t nr_syms;
525 int err = -1; 527 int err = -1;
526 uint32_t index; 528 uint32_t idx;
527 GElf_Ehdr ehdr; 529 GElf_Ehdr ehdr;
528 GElf_Shdr shdr; 530 GElf_Shdr shdr;
529 Elf_Data *syms; 531 Elf_Data *syms;
@@ -534,14 +536,14 @@ static int dso__load_sym(struct dso *self, int fd, const char *name,
534 536
535 elf = elf_begin(fd, ELF_C_READ_MMAP, NULL); 537 elf = elf_begin(fd, ELF_C_READ_MMAP, NULL);
536 if (elf == NULL) { 538 if (elf == NULL) {
537 if (verbose) 539 if (v)
538 fprintf(stderr, "%s: cannot read %s ELF file.\n", 540 fprintf(stderr, "%s: cannot read %s ELF file.\n",
539 __func__, name); 541 __func__, name);
540 goto out_close; 542 goto out_close;
541 } 543 }
542 544
543 if (gelf_getehdr(elf, &ehdr) == NULL) { 545 if (gelf_getehdr(elf, &ehdr) == NULL) {
544 if (verbose) 546 if (v)
545 fprintf(stderr, "%s: cannot get elf header.\n", __func__); 547 fprintf(stderr, "%s: cannot get elf header.\n", __func__);
546 goto out_elf_end; 548 goto out_elf_end;
547 } 549 }
@@ -583,9 +585,9 @@ static int dso__load_sym(struct dso *self, int fd, const char *name,
583 NULL) != NULL); 585 NULL) != NULL);
584 } else self->adjust_symbols = 0; 586 } else self->adjust_symbols = 0;
585 587
586 elf_symtab__for_each_symbol(syms, nr_syms, index, sym) { 588 elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
587 struct symbol *f; 589 struct symbol *f;
588 const char *name; 590 const char *elf_name;
589 char *demangled; 591 char *demangled;
590 u64 obj_start; 592 u64 obj_start;
591 struct section *section = NULL; 593 struct section *section = NULL;
@@ -608,7 +610,7 @@ static int dso__load_sym(struct dso *self, int fd, const char *name,
608 obj_start = sym.st_value; 610 obj_start = sym.st_value;
609 611
610 if (self->adjust_symbols) { 612 if (self->adjust_symbols) {
611 if (verbose >= 2) 613 if (v >= 2)
612 printf("adjusting symbol: st_value: %Lx sh_addr: %Lx sh_offset: %Lx\n", 614 printf("adjusting symbol: st_value: %Lx sh_addr: %Lx sh_offset: %Lx\n",
613 (u64)sym.st_value, (u64)shdr.sh_addr, (u64)shdr.sh_offset); 615 (u64)sym.st_value, (u64)shdr.sh_addr, (u64)shdr.sh_offset);
614 616
@@ -630,13 +632,13 @@ static int dso__load_sym(struct dso *self, int fd, const char *name,
630 * DWARF DW_compile_unit has this, but we don't always have access 632 * DWARF DW_compile_unit has this, but we don't always have access
631 * to it... 633 * to it...
632 */ 634 */
633 name = elf_sym__name(&sym, symstrs); 635 elf_name = elf_sym__name(&sym, symstrs);
634 demangled = bfd_demangle(NULL, name, DMGL_PARAMS | DMGL_ANSI); 636 demangled = bfd_demangle(NULL, elf_name, DMGL_PARAMS | DMGL_ANSI);
635 if (demangled != NULL) 637 if (demangled != NULL)
636 name = demangled; 638 elf_name = demangled;
637 639
638 f = symbol__new(sym.st_value, sym.st_size, name, 640 f = symbol__new(sym.st_value, sym.st_size, elf_name,
639 self->sym_priv_size, obj_start, verbose); 641 self->sym_priv_size, obj_start, v);
640 free(demangled); 642 free(demangled);
641 if (!f) 643 if (!f)
642 goto out_elf_end; 644 goto out_elf_end;
@@ -659,7 +661,7 @@ out_close:
659 661
660#define BUILD_ID_SIZE 128 662#define BUILD_ID_SIZE 128
661 663
662static char *dso__read_build_id(struct dso *self, int verbose) 664static char *dso__read_build_id(struct dso *self, int v)
663{ 665{
664 int i; 666 int i;
665 GElf_Ehdr ehdr; 667 GElf_Ehdr ehdr;
@@ -676,14 +678,14 @@ static char *dso__read_build_id(struct dso *self, int verbose)
676 678
677 elf = elf_begin(fd, ELF_C_READ_MMAP, NULL); 679 elf = elf_begin(fd, ELF_C_READ_MMAP, NULL);
678 if (elf == NULL) { 680 if (elf == NULL) {
679 if (verbose) 681 if (v)
680 fprintf(stderr, "%s: cannot read %s ELF file.\n", 682 fprintf(stderr, "%s: cannot read %s ELF file.\n",
681 __func__, self->name); 683 __func__, self->name);
682 goto out_close; 684 goto out_close;
683 } 685 }
684 686
685 if (gelf_getehdr(elf, &ehdr) == NULL) { 687 if (gelf_getehdr(elf, &ehdr) == NULL) {
686 if (verbose) 688 if (v)
687 fprintf(stderr, "%s: cannot get elf header.\n", __func__); 689 fprintf(stderr, "%s: cannot get elf header.\n", __func__);
688 goto out_elf_end; 690 goto out_elf_end;
689 } 691 }
@@ -706,7 +708,7 @@ static char *dso__read_build_id(struct dso *self, int verbose)
706 ++raw; 708 ++raw;
707 bid += 2; 709 bid += 2;
708 } 710 }
709 if (verbose >= 2) 711 if (v >= 2)
710 printf("%s(%s): %s\n", __func__, self->name, build_id); 712 printf("%s(%s): %s\n", __func__, self->name, build_id);
711out_elf_end: 713out_elf_end:
712 elf_end(elf); 714 elf_end(elf);
@@ -732,7 +734,7 @@ char dso__symtab_origin(const struct dso *self)
732 return origin[self->origin]; 734 return origin[self->origin];
733} 735}
734 736
735int dso__load(struct dso *self, symbol_filter_t filter, int verbose) 737int dso__load(struct dso *self, symbol_filter_t filter, int v)
736{ 738{
737 int size = PATH_MAX; 739 int size = PATH_MAX;
738 char *name = malloc(size), *build_id = NULL; 740 char *name = malloc(size), *build_id = NULL;
@@ -745,7 +747,7 @@ int dso__load(struct dso *self, symbol_filter_t filter, int verbose)
745 self->adjust_symbols = 0; 747 self->adjust_symbols = 0;
746 748
747 if (strncmp(self->name, "/tmp/perf-", 10) == 0) { 749 if (strncmp(self->name, "/tmp/perf-", 10) == 0) {
748 ret = dso__load_perf_map(self, filter, verbose); 750 ret = dso__load_perf_map(self, filter, v);
749 self->origin = ret > 0 ? DSO__ORIG_JAVA_JIT : 751 self->origin = ret > 0 ? DSO__ORIG_JAVA_JIT :
750 DSO__ORIG_NOT_FOUND; 752 DSO__ORIG_NOT_FOUND;
751 return ret; 753 return ret;
@@ -764,7 +766,7 @@ more:
764 snprintf(name, size, "/usr/lib/debug%s", self->name); 766 snprintf(name, size, "/usr/lib/debug%s", self->name);
765 break; 767 break;
766 case DSO__ORIG_BUILDID: 768 case DSO__ORIG_BUILDID:
767 build_id = dso__read_build_id(self, verbose); 769 build_id = dso__read_build_id(self, v);
768 if (build_id != NULL) { 770 if (build_id != NULL) {
769 snprintf(name, size, 771 snprintf(name, size,
770 "/usr/lib/debug/.build-id/%.2s/%s.debug", 772 "/usr/lib/debug/.build-id/%.2s/%s.debug",
@@ -785,7 +787,7 @@ more:
785 fd = open(name, O_RDONLY); 787 fd = open(name, O_RDONLY);
786 } while (fd < 0); 788 } while (fd < 0);
787 789
788 ret = dso__load_sym(self, fd, name, filter, verbose, NULL); 790 ret = dso__load_sym(self, fd, name, filter, v, NULL);
789 close(fd); 791 close(fd);
790 792
791 /* 793 /*
@@ -795,7 +797,7 @@ more:
795 goto more; 797 goto more;
796 798
797 if (ret > 0) { 799 if (ret > 0) {
798 int nr_plt = dso__synthesize_plt_symbols(self, verbose); 800 int nr_plt = dso__synthesize_plt_symbols(self, v);
799 if (nr_plt > 0) 801 if (nr_plt > 0)
800 ret += nr_plt; 802 ret += nr_plt;
801 } 803 }
@@ -807,7 +809,7 @@ out:
807} 809}
808 810
809static int dso__load_module(struct dso *self, struct mod_dso *mods, const char *name, 811static int dso__load_module(struct dso *self, struct mod_dso *mods, const char *name,
810 symbol_filter_t filter, int verbose) 812 symbol_filter_t filter, int v)
811{ 813{
812 struct module *mod = mod_dso__find_module(mods, name); 814 struct module *mod = mod_dso__find_module(mods, name);
813 int err = 0, fd; 815 int err = 0, fd;
@@ -820,13 +822,13 @@ static int dso__load_module(struct dso *self, struct mod_dso *mods, const char *
820 if (fd < 0) 822 if (fd < 0)
821 return err; 823 return err;
822 824
823 err = dso__load_sym(self, fd, name, filter, verbose, mod); 825 err = dso__load_sym(self, fd, name, filter, v, mod);
824 close(fd); 826 close(fd);
825 827
826 return err; 828 return err;
827} 829}
828 830
829int dso__load_modules(struct dso *self, symbol_filter_t filter, int verbose) 831int dso__load_modules(struct dso *self, symbol_filter_t filter, int v)
830{ 832{
831 struct mod_dso *mods = mod_dso__new_dso("modules"); 833 struct mod_dso *mods = mod_dso__new_dso("modules");
832 struct module *pos; 834 struct module *pos;
@@ -844,7 +846,7 @@ int dso__load_modules(struct dso *self, symbol_filter_t filter, int verbose)
844 next = rb_first(&mods->mods); 846 next = rb_first(&mods->mods);
845 while (next) { 847 while (next) {
846 pos = rb_entry(next, struct module, rb_node); 848 pos = rb_entry(next, struct module, rb_node);
847 err = dso__load_module(self, mods, pos->name, filter, verbose); 849 err = dso__load_module(self, mods, pos->name, filter, v);
848 850
849 if (err < 0) 851 if (err < 0)
850 break; 852 break;
@@ -887,14 +889,14 @@ static inline void dso__fill_symbol_holes(struct dso *self)
887} 889}
888 890
889static int dso__load_vmlinux(struct dso *self, const char *vmlinux, 891static int dso__load_vmlinux(struct dso *self, const char *vmlinux,
890 symbol_filter_t filter, int verbose) 892 symbol_filter_t filter, int v)
891{ 893{
892 int err, fd = open(vmlinux, O_RDONLY); 894 int err, fd = open(vmlinux, O_RDONLY);
893 895
894 if (fd < 0) 896 if (fd < 0)
895 return -1; 897 return -1;
896 898
897 err = dso__load_sym(self, fd, vmlinux, filter, verbose, NULL); 899 err = dso__load_sym(self, fd, vmlinux, filter, v, NULL);
898 900
899 if (err > 0) 901 if (err > 0)
900 dso__fill_symbol_holes(self); 902 dso__fill_symbol_holes(self);
@@ -905,18 +907,18 @@ static int dso__load_vmlinux(struct dso *self, const char *vmlinux,
905} 907}
906 908
907int dso__load_kernel(struct dso *self, const char *vmlinux, 909int dso__load_kernel(struct dso *self, const char *vmlinux,
908 symbol_filter_t filter, int verbose, int modules) 910 symbol_filter_t filter, int v, int use_modules)
909{ 911{
910 int err = -1; 912 int err = -1;
911 913
912 if (vmlinux) { 914 if (vmlinux) {
913 err = dso__load_vmlinux(self, vmlinux, filter, verbose); 915 err = dso__load_vmlinux(self, vmlinux, filter, v);
914 if (err > 0 && modules) 916 if (err > 0 && use_modules)
915 err = dso__load_modules(self, filter, verbose); 917 err = dso__load_modules(self, filter, v);
916 } 918 }
917 919
918 if (err <= 0) 920 if (err <= 0)
919 err = dso__load_kallsyms(self, filter, verbose); 921 err = dso__load_kallsyms(self, filter, v);
920 922
921 if (err > 0) 923 if (err > 0)
922 self->origin = DSO__ORIG_KERNEL; 924 self->origin = DSO__ORIG_KERNEL;
@@ -924,6 +926,103 @@ int dso__load_kernel(struct dso *self, const char *vmlinux,
924 return err; 926 return err;
925} 927}
926 928
929LIST_HEAD(dsos);
930struct dso *kernel_dso;
931struct dso *vdso;
932struct dso *hypervisor_dso;
933
934const char *vmlinux_name = "vmlinux";
935int modules;
936
937static void dsos__add(struct dso *dso)
938{
939 list_add_tail(&dso->node, &dsos);
940}
941
942static struct dso *dsos__find(const char *name)
943{
944 struct dso *pos;
945
946 list_for_each_entry(pos, &dsos, node)
947 if (strcmp(pos->name, name) == 0)
948 return pos;
949 return NULL;
950}
951
952struct dso *dsos__findnew(const char *name)
953{
954 struct dso *dso = dsos__find(name);
955 int nr;
956
957 if (dso)
958 return dso;
959
960 dso = dso__new(name, 0);
961 if (!dso)
962 goto out_delete_dso;
963
964 nr = dso__load(dso, NULL, verbose);
965 if (nr < 0) {
966 eprintf("Failed to open: %s\n", name);
967 goto out_delete_dso;
968 }
969 if (!nr)
970 eprintf("No symbols found in: %s, maybe install a debug package?\n", name);
971
972 dsos__add(dso);
973
974 return dso;
975
976out_delete_dso:
977 dso__delete(dso);
978 return NULL;
979}
980
981void dsos__fprintf(FILE *fp)
982{
983 struct dso *pos;
984
985 list_for_each_entry(pos, &dsos, node)
986 dso__fprintf(pos, fp);
987}
988
989static struct symbol *vdso__find_symbol(struct dso *dso, u64 ip)
990{
991 return dso__find_symbol(dso, ip);
992}
993
994int load_kernel(void)
995{
996 int err;
997
998 kernel_dso = dso__new("[kernel]", 0);
999 if (!kernel_dso)
1000 return -1;
1001
1002 err = dso__load_kernel(kernel_dso, vmlinux_name, NULL, verbose, modules);
1003 if (err <= 0) {
1004 dso__delete(kernel_dso);
1005 kernel_dso = NULL;
1006 } else
1007 dsos__add(kernel_dso);
1008
1009 vdso = dso__new("[vdso]", 0);
1010 if (!vdso)
1011 return -1;
1012
1013 vdso->find_symbol = vdso__find_symbol;
1014
1015 dsos__add(vdso);
1016
1017 hypervisor_dso = dso__new("[hypervisor]", 0);
1018 if (!hypervisor_dso)
1019 return -1;
1020 dsos__add(hypervisor_dso);
1021
1022 return err;
1023}
1024
1025
927void symbol__init(void) 1026void symbol__init(void)
928{ 1027{
929 elf_version(EV_CURRENT); 1028 elf_version(EV_CURRENT);
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index b53bf0125c1b..6e8490716408 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -6,6 +6,7 @@
6#include <linux/list.h> 6#include <linux/list.h>
7#include <linux/rbtree.h> 7#include <linux/rbtree.h>
8#include "module.h" 8#include "module.h"
9#include "event.h"
9 10
10#ifdef HAVE_CPLUS_DEMANGLE 11#ifdef HAVE_CPLUS_DEMANGLE
11extern char *cplus_demangle(const char *, int); 12extern char *cplus_demangle(const char *, int);
@@ -54,7 +55,7 @@ struct dso {
54 char name[0]; 55 char name[0];
55}; 56};
56 57
57const char *sym_hist_filter; 58extern const char *sym_hist_filter;
58 59
59typedef int (*symbol_filter_t)(struct dso *self, struct symbol *sym); 60typedef int (*symbol_filter_t)(struct dso *self, struct symbol *sym);
60 61
@@ -72,9 +73,20 @@ int dso__load_kernel(struct dso *self, const char *vmlinux,
72 symbol_filter_t filter, int verbose, int modules); 73 symbol_filter_t filter, int verbose, int modules);
73int dso__load_modules(struct dso *self, symbol_filter_t filter, int verbose); 74int dso__load_modules(struct dso *self, symbol_filter_t filter, int verbose);
74int dso__load(struct dso *self, symbol_filter_t filter, int verbose); 75int dso__load(struct dso *self, symbol_filter_t filter, int verbose);
76struct dso *dsos__findnew(const char *name);
77void dsos__fprintf(FILE *fp);
75 78
76size_t dso__fprintf(struct dso *self, FILE *fp); 79size_t dso__fprintf(struct dso *self, FILE *fp);
77char dso__symtab_origin(const struct dso *self); 80char dso__symtab_origin(const struct dso *self);
78 81
82int load_kernel(void);
83
79void symbol__init(void); 84void symbol__init(void);
85
86extern struct list_head dsos;
87extern struct dso *kernel_dso;
88extern struct dso *vdso;
89extern struct dso *hypervisor_dso;
90extern const char *vmlinux_name;
91extern int modules;
80#endif /* _PERF_SYMBOL_ */ 92#endif /* _PERF_SYMBOL_ */
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
new file mode 100644
index 000000000000..7635928ca278
--- /dev/null
+++ b/tools/perf/util/thread.c
@@ -0,0 +1,175 @@
1#include "../perf.h"
2#include <stdlib.h>
3#include <stdio.h>
4#include <string.h>
5#include "thread.h"
6#include "util.h"
7#include "debug.h"
8
9static struct thread *thread__new(pid_t pid)
10{
11 struct thread *self = malloc(sizeof(*self));
12
13 if (self != NULL) {
14 self->pid = pid;
15 self->comm = malloc(32);
16 if (self->comm)
17 snprintf(self->comm, 32, ":%d", self->pid);
18 INIT_LIST_HEAD(&self->maps);
19 }
20
21 return self;
22}
23
24int thread__set_comm(struct thread *self, const char *comm)
25{
26 if (self->comm)
27 free(self->comm);
28 self->comm = strdup(comm);
29 return self->comm ? 0 : -ENOMEM;
30}
31
32static size_t thread__fprintf(struct thread *self, FILE *fp)
33{
34 struct map *pos;
35 size_t ret = fprintf(fp, "Thread %d %s\n", self->pid, self->comm);
36
37 list_for_each_entry(pos, &self->maps, node)
38 ret += map__fprintf(pos, fp);
39
40 return ret;
41}
42
43struct thread *
44threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match)
45{
46 struct rb_node **p = &threads->rb_node;
47 struct rb_node *parent = NULL;
48 struct thread *th;
49
50 /*
51 * Front-end cache - PID lookups come in blocks,
52 * so most of the time we don't have to look up
53 * the full rbtree:
54 */
55 if (*last_match && (*last_match)->pid == pid)
56 return *last_match;
57
58 while (*p != NULL) {
59 parent = *p;
60 th = rb_entry(parent, struct thread, rb_node);
61
62 if (th->pid == pid) {
63 *last_match = th;
64 return th;
65 }
66
67 if (pid < th->pid)
68 p = &(*p)->rb_left;
69 else
70 p = &(*p)->rb_right;
71 }
72
73 th = thread__new(pid);
74 if (th != NULL) {
75 rb_link_node(&th->rb_node, parent, p);
76 rb_insert_color(&th->rb_node, threads);
77 *last_match = th;
78 }
79
80 return th;
81}
82
83struct thread *
84register_idle_thread(struct rb_root *threads, struct thread **last_match)
85{
86 struct thread *thread = threads__findnew(0, threads, last_match);
87
88 if (!thread || thread__set_comm(thread, "[init]")) {
89 fprintf(stderr, "problem inserting idle task.\n");
90 exit(-1);
91 }
92
93 return thread;
94}
95
96void thread__insert_map(struct thread *self, struct map *map)
97{
98 struct map *pos, *tmp;
99
100 list_for_each_entry_safe(pos, tmp, &self->maps, node) {
101 if (map__overlap(pos, map)) {
102 if (verbose >= 2) {
103 printf("overlapping maps:\n");
104 map__fprintf(map, stdout);
105 map__fprintf(pos, stdout);
106 }
107
108 if (map->start <= pos->start && map->end > pos->start)
109 pos->start = map->end;
110
111 if (map->end >= pos->end && map->start < pos->end)
112 pos->end = map->start;
113
114 if (verbose >= 2) {
115 printf("after collision:\n");
116 map__fprintf(pos, stdout);
117 }
118
119 if (pos->start >= pos->end) {
120 list_del_init(&pos->node);
121 free(pos);
122 }
123 }
124 }
125
126 list_add_tail(&map->node, &self->maps);
127}
128
129int thread__fork(struct thread *self, struct thread *parent)
130{
131 struct map *map;
132
133 if (self->comm)
134 free(self->comm);
135 self->comm = strdup(parent->comm);
136 if (!self->comm)
137 return -ENOMEM;
138
139 list_for_each_entry(map, &parent->maps, node) {
140 struct map *new = map__clone(map);
141 if (!new)
142 return -ENOMEM;
143 thread__insert_map(self, new);
144 }
145
146 return 0;
147}
148
149struct map *thread__find_map(struct thread *self, u64 ip)
150{
151 struct map *pos;
152
153 if (self == NULL)
154 return NULL;
155
156 list_for_each_entry(pos, &self->maps, node)
157 if (ip >= pos->start && ip <= pos->end)
158 return pos;
159
160 return NULL;
161}
162
163size_t threads__fprintf(FILE *fp, struct rb_root *threads)
164{
165 size_t ret = 0;
166 struct rb_node *nd;
167
168 for (nd = rb_first(threads); nd; nd = rb_next(nd)) {
169 struct thread *pos = rb_entry(nd, struct thread, rb_node);
170
171 ret += thread__fprintf(pos, fp);
172 }
173
174 return ret;
175}
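
The overlap handling in thread__insert_map() above clips an existing map when a new one covers part of its range and drops it entirely once that range collapses. A minimal standalone sketch of that clipping rule (illustrative only, with made-up addresses; not the perf code itself):

#include <stdio.h>

struct range { unsigned long long start, end; int dead; };

/* Apply the same trimming rules thread__insert_map() uses when an
 * incoming map overlaps an existing one: push the old start forward,
 * pull the old end back, and mark the old map dead if it collapses. */
static void trim_overlap(struct range *old, const struct range *map)
{
	if (map->start <= old->start && map->end > old->start)
		old->start = map->end;

	if (map->end >= old->end && map->start < old->end)
		old->end = map->start;

	if (old->start >= old->end)
		old->dead = 1;
}

int main(void)
{
	struct range existing = { 0x1000, 0x3000, 0 };
	struct range incoming = { 0x2000, 0x4000, 0 };	/* overlaps the tail */

	trim_overlap(&existing, &incoming);
	printf("existing: %#llx-%#llx%s\n",
	       existing.start, existing.end, existing.dead ? " (dropped)" : "");
	return 0;
}
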
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
new file mode 100644
index 000000000000..634f2809a342
--- /dev/null
+++ b/tools/perf/util/thread.h
@@ -0,0 +1,21 @@
1#include <linux/rbtree.h>
2#include <linux/list.h>
3#include <unistd.h>
4#include "symbol.h"
5
6struct thread {
7 struct rb_node rb_node;
8 struct list_head maps;
9 pid_t pid;
10 char *comm;
11};
12
13int thread__set_comm(struct thread *self, const char *comm);
14struct thread *
15threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match);
16struct thread *
17register_idle_thread(struct rb_root *threads, struct thread **last_match);
18void thread__insert_map(struct thread *self, struct map *map);
19int thread__fork(struct thread *self, struct thread *parent);
20struct map *thread__find_map(struct thread *self, u64 ip);
21size_t threads__fprintf(FILE *fp, struct rb_root *threads);
diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c
new file mode 100644
index 000000000000..6c9302a7274c
--- /dev/null
+++ b/tools/perf/util/trace-event-info.c
@@ -0,0 +1,539 @@
1/*
2 * Copyright (C) 2008,2009, Steven Rostedt <srostedt@redhat.com>
3 *
4 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License (not later!)
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 *
19 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
20 */
21#define _GNU_SOURCE
22#include <dirent.h>
23#include <stdio.h>
24#include <stdlib.h>
25#include <string.h>
26#include <stdarg.h>
27#include <sys/types.h>
28#include <sys/stat.h>
29#include <sys/wait.h>
30#include <pthread.h>
31#include <fcntl.h>
32#include <unistd.h>
33#include <ctype.h>
34#include <errno.h>
35#include <stdbool.h>
36
37#include "../perf.h"
38#include "trace-event.h"
39
40
41#define VERSION "0.5"
42
43#define _STR(x) #x
44#define STR(x) _STR(x)
45#define MAX_PATH 256
46
47#define TRACE_CTRL "tracing_on"
48#define TRACE "trace"
49#define AVAILABLE "available_tracers"
50#define CURRENT "current_tracer"
51#define ITER_CTRL "trace_options"
52#define MAX_LATENCY "tracing_max_latency"
53
54unsigned int page_size;
55
56static const char *output_file = "trace.info";
57static int output_fd;
58
59struct event_list {
60 struct event_list *next;
61 const char *event;
62};
63
64struct events {
65 struct events *sibling;
66 struct events *children;
67 struct events *next;
68 char *name;
69};
70
71
72
73static void die(const char *fmt, ...)
74{
75 va_list ap;
76 int ret = errno;
77
78 if (errno)
79 perror("trace-cmd");
80 else
81 ret = -1;
82
83 va_start(ap, fmt);
84 fprintf(stderr, " ");
85 vfprintf(stderr, fmt, ap);
86 va_end(ap);
87
88 fprintf(stderr, "\n");
89 exit(ret);
90}
91
92void *malloc_or_die(unsigned int size)
93{
94 void *data;
95
96 data = malloc(size);
97 if (!data)
98 die("malloc");
99 return data;
100}
101
102static const char *find_debugfs(void)
103{
104 static char debugfs[MAX_PATH+1];
105 static int debugfs_found;
106 char type[100];
107 FILE *fp;
108
109 if (debugfs_found)
110 return debugfs;
111
112 if ((fp = fopen("/proc/mounts","r")) == NULL)
113 die("Can't open /proc/mounts for read");
114
115 while (fscanf(fp, "%*s %"
116 STR(MAX_PATH)
117 "s %99s %*s %*d %*d\n",
118 debugfs, type) == 2) {
119 if (strcmp(type, "debugfs") == 0)
120 break;
121 }
122 fclose(fp);
123
124 if (strcmp(type, "debugfs") != 0)
125 die("debugfs not mounted, please mount");
126
127 debugfs_found = 1;
128
129 return debugfs;
130}
131
132/*
133 * Finds the path to the debugfs/tracing
134 * Allocates the string and stores it.
135 */
136static const char *find_tracing_dir(void)
137{
138 static char *tracing;
139 static int tracing_found;
140 const char *debugfs;
141
142 if (tracing_found)
143 return tracing;
144
145 debugfs = find_debugfs();
146
147 tracing = malloc_or_die(strlen(debugfs) + 9);
148
149 sprintf(tracing, "%s/tracing", debugfs);
150
151 tracing_found = 1;
152 return tracing;
153}
154
155static char *get_tracing_file(const char *name)
156{
157 const char *tracing;
158 char *file;
159
160 tracing = find_tracing_dir();
161 if (!tracing)
162 return NULL;
163
164 file = malloc_or_die(strlen(tracing) + strlen(name) + 2);
165
166 sprintf(file, "%s/%s", tracing, name);
167 return file;
168}
169
170static void put_tracing_file(char *file)
171{
172 free(file);
173}
174
175static ssize_t write_or_die(const void *buf, size_t len)
176{
177 int ret;
178
179 ret = write(output_fd, buf, len);
180 if (ret < 0)
181 die("writing to '%s'", output_file);
182
183 return ret;
184}
185
186int bigendian(void)
187{
188 unsigned char str[] = { 0x1, 0x2, 0x3, 0x4, 0x0, 0x0, 0x0, 0x0};
189 unsigned int *ptr;
190
191 ptr = (unsigned int *)(void *)str;
192 return *ptr == 0x01020304;
193}
194
195static unsigned long long copy_file_fd(int fd)
196{
197 unsigned long long size = 0;
198 char buf[BUFSIZ];
199 int r;
200
201 do {
202 r = read(fd, buf, BUFSIZ);
203 if (r > 0) {
204 size += r;
205 write_or_die(buf, r);
206 }
207 } while (r > 0);
208
209 return size;
210}
211
212static unsigned long long copy_file(const char *file)
213{
214 unsigned long long size = 0;
215 int fd;
216
217 fd = open(file, O_RDONLY);
218 if (fd < 0)
219 die("Can't read '%s'", file);
220 size = copy_file_fd(fd);
221 close(fd);
222
223 return size;
224}
225
226static unsigned long get_size_fd(int fd)
227{
228 unsigned long long size = 0;
229 char buf[BUFSIZ];
230 int r;
231
232 do {
233 r = read(fd, buf, BUFSIZ);
234 if (r > 0)
235 size += r;
236 } while (r > 0);
237
238 lseek(fd, 0, SEEK_SET);
239
240 return size;
241}
242
243static unsigned long get_size(const char *file)
244{
245 unsigned long long size = 0;
246 int fd;
247
248 fd = open(file, O_RDONLY);
249 if (fd < 0)
250 die("Can't read '%s'", file);
251 size = get_size_fd(fd);
252 close(fd);
253
254 return size;
255}
256
257static void read_header_files(void)
258{
259 unsigned long long size, check_size;
260 char *path;
261 int fd;
262
263 path = get_tracing_file("events/header_page");
264 fd = open(path, O_RDONLY);
265 if (fd < 0)
266 die("can't read '%s'", path);
267
268 /* unfortunately, you can not stat debugfs files for size */
269 size = get_size_fd(fd);
270
271 write_or_die("header_page", 12);
272 write_or_die(&size, 8);
273 check_size = copy_file_fd(fd);
274 if (size != check_size)
275 die("wrong size for '%s' size=%lld read=%lld",
276 path, size, check_size);
277 put_tracing_file(path);
278
279 path = get_tracing_file("events/header_event");
280 fd = open(path, O_RDONLY);
281 if (fd < 0)
282 die("can't read '%s'", path);
283
284 size = get_size_fd(fd);
285
286 write_or_die("header_event", 13);
287 write_or_die(&size, 8);
288 check_size = copy_file_fd(fd);
289 if (size != check_size)
290 die("wrong size for '%s'", path);
291 put_tracing_file(path);
292}
293
294static bool name_in_tp_list(char *sys, struct tracepoint_path *tps)
295{
296 while (tps) {
297 if (!strcmp(sys, tps->name))
298 return true;
299 tps = tps->next;
300 }
301
302 return false;
303}
304
305static void copy_event_system(const char *sys, struct tracepoint_path *tps)
306{
307 unsigned long long size, check_size;
308 struct dirent *dent;
309 struct stat st;
310 char *format;
311 DIR *dir;
312 int count = 0;
313 int ret;
314
315 dir = opendir(sys);
316 if (!dir)
317 die("can't read directory '%s'", sys);
318
319 while ((dent = readdir(dir))) {
320 if (strcmp(dent->d_name, ".") == 0 ||
321 strcmp(dent->d_name, "..") == 0 ||
322 !name_in_tp_list(dent->d_name, tps))
323 continue;
324 format = malloc_or_die(strlen(sys) + strlen(dent->d_name) + 10);
325 sprintf(format, "%s/%s/format", sys, dent->d_name);
326 ret = stat(format, &st);
327 free(format);
328 if (ret < 0)
329 continue;
330 count++;
331 }
332
333 write_or_die(&count, 4);
334
335 rewinddir(dir);
336 while ((dent = readdir(dir))) {
337 if (strcmp(dent->d_name, ".") == 0 ||
338 strcmp(dent->d_name, "..") == 0 ||
339 !name_in_tp_list(dent->d_name, tps))
340 continue;
341 format = malloc_or_die(strlen(sys) + strlen(dent->d_name) + 10);
342 sprintf(format, "%s/%s/format", sys, dent->d_name);
343 ret = stat(format, &st);
344
345 if (ret >= 0) {
346 /* unfortunately, you can not stat debugfs files for size */
347 size = get_size(format);
348 write_or_die(&size, 8);
349 check_size = copy_file(format);
350 if (size != check_size)
351 die("error in size of file '%s'", format);
352 }
353
354 free(format);
355 }
356}
357
358static void read_ftrace_files(struct tracepoint_path *tps)
359{
360 char *path;
361
362 path = get_tracing_file("events/ftrace");
363
364 copy_event_system(path, tps);
365
366 put_tracing_file(path);
367}
368
369static bool system_in_tp_list(char *sys, struct tracepoint_path *tps)
370{
371 while (tps) {
372 if (!strcmp(sys, tps->system))
373 return true;
374 tps = tps->next;
375 }
376
377 return false;
378}
379
380static void read_event_files(struct tracepoint_path *tps)
381{
382 struct dirent *dent;
383 struct stat st;
384 char *path;
385 char *sys;
386 DIR *dir;
387 int count = 0;
388 int ret;
389
390 path = get_tracing_file("events");
391
392 dir = opendir(path);
393 if (!dir)
394 die("can't read directory '%s'", path);
395
396 while ((dent = readdir(dir))) {
397 if (strcmp(dent->d_name, ".") == 0 ||
398 strcmp(dent->d_name, "..") == 0 ||
399 strcmp(dent->d_name, "ftrace") == 0 ||
400 !system_in_tp_list(dent->d_name, tps))
401 continue;
402 sys = malloc_or_die(strlen(path) + strlen(dent->d_name) + 2);
403 sprintf(sys, "%s/%s", path, dent->d_name);
404 ret = stat(sys, &st);
405 free(sys);
406 if (ret < 0)
407 continue;
408 if (S_ISDIR(st.st_mode))
409 count++;
410 }
411
412 write_or_die(&count, 4);
413
414 rewinddir(dir);
415 while ((dent = readdir(dir))) {
416 if (strcmp(dent->d_name, ".") == 0 ||
417 strcmp(dent->d_name, "..") == 0 ||
418 strcmp(dent->d_name, "ftrace") == 0 ||
419 !system_in_tp_list(dent->d_name, tps))
420 continue;
421 sys = malloc_or_die(strlen(path) + strlen(dent->d_name) + 2);
422 sprintf(sys, "%s/%s", path, dent->d_name);
423 ret = stat(sys, &st);
424 if (ret >= 0) {
425 if (S_ISDIR(st.st_mode)) {
426 write_or_die(dent->d_name, strlen(dent->d_name) + 1);
427 copy_event_system(sys, tps);
428 }
429 }
430 free(sys);
431 }
432
433 put_tracing_file(path);
434}
435
436static void read_proc_kallsyms(void)
437{
438 unsigned int size, check_size;
439 const char *path = "/proc/kallsyms";
440 struct stat st;
441 int ret;
442
443 ret = stat(path, &st);
444 if (ret < 0) {
445 /* not found */
446 size = 0;
447 write_or_die(&size, 4);
448 return;
449 }
450 size = get_size(path);
451 write_or_die(&size, 4);
452 check_size = copy_file(path);
453 if (size != check_size)
454 die("error in size of file '%s'", path);
455
456}
457
458static void read_ftrace_printk(void)
459{
460 unsigned int size, check_size;
461 const char *path;
462 struct stat st;
463 int ret;
464
465 path = get_tracing_file("printk_formats");
466 ret = stat(path, &st);
467 if (ret < 0) {
468 /* not found */
469 size = 0;
470 write_or_die(&size, 4);
471 return;
472 }
473 size = get_size(path);
474 write_or_die(&size, 4);
475 check_size = copy_file(path);
476 if (size != check_size)
477 die("error in size of file '%s'", path);
478
479}
480
481static struct tracepoint_path *
482get_tracepoints_path(struct perf_counter_attr *pattrs, int nb_counters)
483{
484 struct tracepoint_path path, *ppath = &path;
485 int i;
486
487 for (i = 0; i < nb_counters; i++) {
488 if (pattrs[i].type != PERF_TYPE_TRACEPOINT)
489 continue;
490 ppath->next = tracepoint_id_to_path(pattrs[i].config);
491 if (!ppath->next)
492 die("%s\n", "No memory to alloc tracepoints list");
493 ppath = ppath->next;
494 }
495
496 return path.next;
497}
498void read_tracing_data(struct perf_counter_attr *pattrs, int nb_counters)
499{
500 char buf[BUFSIZ];
501 struct tracepoint_path *tps;
502
503 output_fd = open(output_file, O_WRONLY | O_CREAT | O_TRUNC | O_LARGEFILE, 0644);
504 if (output_fd < 0)
505 die("creating file '%s'", output_file);
506
507 buf[0] = 23;
508 buf[1] = 8;
509 buf[2] = 68;
510 memcpy(buf + 3, "tracing", 7);
511
512 write_or_die(buf, 10);
513
514 write_or_die(VERSION, strlen(VERSION) + 1);
515
516 /* save endian */
517 if (bigendian())
518 buf[0] = 1;
519 else
520 buf[0] = 0;
521
522 write_or_die(buf, 1);
523
524 /* save size of long */
525 buf[0] = sizeof(long);
526 write_or_die(buf, 1);
527
528 /* save page_size */
529 page_size = getpagesize();
530 write_or_die(&page_size, 4);
531
532 tps = get_tracepoints_path(pattrs, nb_counters);
533
534 read_header_files();
535 read_ftrace_files(tps);
536 read_event_files(tps);
537 read_proc_kallsyms();
538 read_ftrace_printk();
539}
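
read_tracing_data() above lays down a fixed trace.info preamble: the magic bytes 23, 8, 68 plus the literal "tracing", a NUL-terminated version string, one byte of endianness, one byte holding sizeof(long), and a four-byte page size. A hedged sketch of a reader for just that preamble (it assumes the reader runs on a machine with the same endianness and a 4-byte unsigned int, and is not part of perf):

#include <stdio.h>
#include <string.h>

int main(int argc, char **argv)
{
	unsigned char magic[10];
	char version[64];
	unsigned char flags[2];
	unsigned int page_size;	/* assumed 4 bytes, same endianness as writer */
	size_t i = 0;
	int c;
	FILE *fp;

	fp = fopen(argc > 1 ? argv[1] : "trace.info", "rb");
	if (!fp) {
		perror("open");
		return 1;
	}

	/* 3 magic bytes followed by the 7 characters of "tracing" */
	if (fread(magic, 1, 10, fp) != 10 ||
	    magic[0] != 23 || magic[1] != 8 || magic[2] != 68 ||
	    memcmp(magic + 3, "tracing", 7) != 0) {
		fprintf(stderr, "not a trace.info file\n");
		return 1;
	}

	/* NUL-terminated version string */
	while ((c = fgetc(fp)) > 0 && i < sizeof(version) - 1)
		version[i++] = c;
	version[i] = '\0';

	/* endian flag, sizeof(long), then the writer's page size */
	if (fread(flags, 1, 2, fp) != 2 || fread(&page_size, 4, 1, fp) != 1)
		return 1;

	printf("version %s, %s endian, sizeof(long)=%d, page size %u\n",
	       version, flags[0] ? "big" : "little", (int)flags[1], page_size);
	fclose(fp);
	return 0;
}
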
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
new file mode 100644
index 000000000000..629e602d9405
--- /dev/null
+++ b/tools/perf/util/trace-event-parse.c
@@ -0,0 +1,2942 @@
1/*
2 * Copyright (C) 2009, Steven Rostedt <srostedt@redhat.com>
3 *
4 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License (not later!)
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 *
19 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
20 *
21 * The parts for function graph printing was taken and modified from the
22 * Linux Kernel that were written by Frederic Weisbecker.
23 */
24#define _GNU_SOURCE
25#include <stdio.h>
26#include <stdlib.h>
27#include <string.h>
28#include <ctype.h>
29#include <errno.h>
30
31#undef _GNU_SOURCE
32#include "../perf.h"
33#include "util.h"
34#include "trace-event.h"
35
36int header_page_ts_offset;
37int header_page_ts_size;
38int header_page_size_offset;
39int header_page_size_size;
40int header_page_data_offset;
41int header_page_data_size;
42
43static char *input_buf;
44static unsigned long long input_buf_ptr;
45static unsigned long long input_buf_siz;
46
47static int cpus;
48static int long_size;
49
50static void init_input_buf(char *buf, unsigned long long size)
51{
52 input_buf = buf;
53 input_buf_siz = size;
54 input_buf_ptr = 0;
55}
56
57struct cmdline {
58 char *comm;
59 int pid;
60};
61
62static struct cmdline *cmdlines;
63static int cmdline_count;
64
65static int cmdline_cmp(const void *a, const void *b)
66{
67 const struct cmdline *ca = a;
68 const struct cmdline *cb = b;
69
70 if (ca->pid < cb->pid)
71 return -1;
72 if (ca->pid > cb->pid)
73 return 1;
74
75 return 0;
76}
77
78void parse_cmdlines(char *file, int size __unused)
79{
80 struct cmdline_list {
81 struct cmdline_list *next;
82 char *comm;
83 int pid;
84 } *list = NULL, *item;
85 char *line;
86 char *next = NULL;
87 int i;
88
89 line = strtok_r(file, "\n", &next);
90 while (line) {
91 item = malloc_or_die(sizeof(*item));
92 sscanf(line, "%d %as", &item->pid,
93 (float *)(void *)&item->comm); /* workaround gcc warning */
94 item->next = list;
95 list = item;
96 line = strtok_r(NULL, "\n", &next);
97 cmdline_count++;
98 }
99
100 cmdlines = malloc_or_die(sizeof(*cmdlines) * cmdline_count);
101
102 i = 0;
103 while (list) {
104 cmdlines[i].pid = list->pid;
105 cmdlines[i].comm = list->comm;
106 i++;
107 item = list;
108 list = list->next;
109 free(item);
110 }
111
112 qsort(cmdlines, cmdline_count, sizeof(*cmdlines), cmdline_cmp);
113}
114
115static struct func_map {
116 unsigned long long addr;
117 char *func;
118 char *mod;
119} *func_list;
120static unsigned int func_count;
121
122static int func_cmp(const void *a, const void *b)
123{
124 const struct func_map *fa = a;
125 const struct func_map *fb = b;
126
127 if (fa->addr < fb->addr)
128 return -1;
129 if (fa->addr > fb->addr)
130 return 1;
131
132 return 0;
133}
134
135void parse_proc_kallsyms(char *file, unsigned int size __unused)
136{
137 struct func_list {
138 struct func_list *next;
139 unsigned long long addr;
140 char *func;
141 char *mod;
142 } *list = NULL, *item;
143 char *line;
144 char *next = NULL;
145 char *addr_str;
146 char ch;
147 int ret;
148 int i;
149
150 line = strtok_r(file, "\n", &next);
151 while (line) {
152 item = malloc_or_die(sizeof(*item));
153 item->mod = NULL;
154 ret = sscanf(line, "%as %c %as\t[%as",
155 (float *)(void *)&addr_str, /* workaround gcc warning */
156 &ch,
157 (float *)(void *)&item->func,
158 (float *)(void *)&item->mod);
159 item->addr = strtoull(addr_str, NULL, 16);
160 free(addr_str);
161
162 /* truncate the extra ']' */
163 if (item->mod)
164 item->mod[strlen(item->mod) - 1] = 0;
165
166
167 item->next = list;
168 list = item;
169 line = strtok_r(NULL, "\n", &next);
170 func_count++;
171 }
172
173 func_list = malloc_or_die(sizeof(*func_list) * func_count + 1);
174
175 i = 0;
176 while (list) {
177 func_list[i].func = list->func;
178 func_list[i].addr = list->addr;
179 func_list[i].mod = list->mod;
180 i++;
181 item = list;
182 list = list->next;
183 free(item);
184 }
185
186 qsort(func_list, func_count, sizeof(*func_list), func_cmp);
187
188 /*
189 * Add a special record at the end.
190 */
191 func_list[func_count].func = NULL;
192 func_list[func_count].addr = 0;
193 func_list[func_count].mod = NULL;
194}
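
The (float *)(void *) casts above exist only to quiet a gcc warning about the old "%as" allocation modifier. On glibc, the POSIX.1-2008 "%ms" modifier does the same allocation without the cast trick; a small standalone sketch parsing one kallsyms-style line that way (illustrative, not the perf code):

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *line = "ffffffff81000000 T _text";
	char *addr_str = NULL, *func = NULL;
	char type;

	/* "%ms" asks sscanf to allocate the string itself (POSIX.1-2008),
	 * avoiding the (float *) cast used to silence gcc with "%as". */
	if (sscanf(line, "%ms %c %ms", &addr_str, &type, &func) == 3) {
		unsigned long long addr = strtoull(addr_str, NULL, 16);

		printf("%016llx %c %s\n", addr, type, func);
	}

	free(addr_str);
	free(func);
	return 0;
}
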
195
196/*
197 * We are searching for a record in between, not an exact
198 * match.
199 */
200static int func_bcmp(const void *a, const void *b)
201{
202 const struct func_map *fa = a;
203 const struct func_map *fb = b;
204
205 if ((fa->addr == fb->addr) ||
206
207 (fa->addr > fb->addr &&
208 fa->addr < (fb+1)->addr))
209 return 0;
210
211 if (fa->addr < fb->addr)
212 return -1;
213
214 return 1;
215}
216
217static struct func_map *find_func(unsigned long long addr)
218{
219 struct func_map *func;
220 struct func_map key;
221
222 key.addr = addr;
223
224 func = bsearch(&key, func_list, func_count, sizeof(*func_list),
225 func_bcmp);
226
227 return func;
228}
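
func_bcmp() above deliberately reports a match when the key falls anywhere between one record's address and the next, so the bsearch() in find_func() returns the symbol that contains an address rather than demanding an exact hit. A standalone sketch of that "in between" comparator over a tiny sorted table (the symbol names and addresses are made up):

#include <stdio.h>
#include <stdlib.h>

struct sym { unsigned long long addr; const char *name; };

/* Sorted by address, with a sentinel record at the end, like func_list. */
static struct sym syms[] = {
	{ 0x1000, "start_kernel" },
	{ 0x2000, "do_fork"      },
	{ 0x3000, "schedule"     },
	{ 0x4000, NULL           },	/* sentinel: upper bound only */
};

/* Match when the key lands between this record and the next one. */
static int sym_bcmp(const void *a, const void *b)
{
	const struct sym *key = a, *s = b;

	if (key->addr == s->addr ||
	    (key->addr > s->addr && key->addr < (s + 1)->addr))
		return 0;
	return key->addr < s->addr ? -1 : 1;
}

int main(void)
{
	struct sym key = { 0x2abc, NULL };
	struct sym *s = bsearch(&key, syms, 3, sizeof(*syms), sym_bcmp);

	printf("%#llx is in %s\n", key.addr, s ? s->name : "?");
	return 0;
}
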
229
230void print_funcs(void)
231{
232 int i;
233
234 for (i = 0; i < (int)func_count; i++) {
235 printf("%016llx %s",
236 func_list[i].addr,
237 func_list[i].func);
238 if (func_list[i].mod)
239 printf(" [%s]\n", func_list[i].mod);
240 else
241 printf("\n");
242 }
243}
244
245static struct printk_map {
246 unsigned long long addr;
247 char *printk;
248} *printk_list;
249static unsigned int printk_count;
250
251static int printk_cmp(const void *a, const void *b)
252{
253 const struct func_map *fa = a;
254 const struct func_map *fb = b;
255
256 if (fa->addr < fb->addr)
257 return -1;
258 if (fa->addr > fb->addr)
259 return 1;
260
261 return 0;
262}
263
264static struct printk_map *find_printk(unsigned long long addr)
265{
266 struct printk_map *printk;
267 struct printk_map key;
268
269 key.addr = addr;
270
271 printk = bsearch(&key, printk_list, printk_count, sizeof(*printk_list),
272 printk_cmp);
273
274 return printk;
275}
276
277void parse_ftrace_printk(char *file, unsigned int size __unused)
278{
279 struct printk_list {
280 struct printk_list *next;
281 unsigned long long addr;
282 char *printk;
283 } *list = NULL, *item;
284 char *line;
285 char *next = NULL;
286 char *addr_str;
287 int ret;
288 int i;
289
290 line = strtok_r(file, "\n", &next);
291 while (line) {
292 item = malloc_or_die(sizeof(*item));
293 ret = sscanf(line, "%as : %as",
294 (float *)(void *)&addr_str, /* workaround gcc warning */
295 (float *)(void *)&item->printk);
296 item->addr = strtoull(addr_str, NULL, 16);
297 free(addr_str);
298
299 item->next = list;
300 list = item;
301 line = strtok_r(NULL, "\n", &next);
302 printk_count++;
303 }
304
305 printk_list = malloc_or_die(sizeof(*printk_list) * printk_count + 1);
306
307 i = 0;
308 while (list) {
309 printk_list[i].printk = list->printk;
310 printk_list[i].addr = list->addr;
311 i++;
312 item = list;
313 list = list->next;
314 free(item);
315 }
316
317 qsort(printk_list, printk_count, sizeof(*printk_list), printk_cmp);
318}
319
320void print_printk(void)
321{
322 int i;
323
324 for (i = 0; i < (int)printk_count; i++) {
325 printf("%016llx %s\n",
326 printk_list[i].addr,
327 printk_list[i].printk);
328 }
329}
330
331static struct event *alloc_event(void)
332{
333 struct event *event;
334
335 event = malloc_or_die(sizeof(*event));
336 memset(event, 0, sizeof(*event));
337
338 return event;
339}
340
341enum event_type {
342 EVENT_ERROR,
343 EVENT_NONE,
344 EVENT_SPACE,
345 EVENT_NEWLINE,
346 EVENT_OP,
347 EVENT_DELIM,
348 EVENT_ITEM,
349 EVENT_DQUOTE,
350 EVENT_SQUOTE,
351};
352
353static struct event *event_list;
354
355static void add_event(struct event *event)
356{
357 event->next = event_list;
358 event_list = event;
359}
360
361static int event_item_type(enum event_type type)
362{
363 switch (type) {
364 case EVENT_ITEM ... EVENT_SQUOTE:
365 return 1;
366 case EVENT_ERROR ... EVENT_DELIM:
367 default:
368 return 0;
369 }
370}
371
372static void free_arg(struct print_arg *arg)
373{
374 if (!arg)
375 return;
376
377 switch (arg->type) {
378 case PRINT_ATOM:
379 if (arg->atom.atom)
380 free(arg->atom.atom);
381 break;
382 case PRINT_NULL:
383 case PRINT_FIELD ... PRINT_OP:
384 default:
385 /* todo */
386 break;
387 }
388
389 free(arg);
390}
391
392static enum event_type get_type(int ch)
393{
394 if (ch == '\n')
395 return EVENT_NEWLINE;
396 if (isspace(ch))
397 return EVENT_SPACE;
398 if (isalnum(ch) || ch == '_')
399 return EVENT_ITEM;
400 if (ch == '\'')
401 return EVENT_SQUOTE;
402 if (ch == '"')
403 return EVENT_DQUOTE;
404 if (!isprint(ch))
405 return EVENT_NONE;
406 if (ch == '(' || ch == ')' || ch == ',')
407 return EVENT_DELIM;
408
409 return EVENT_OP;
410}
411
412static int __read_char(void)
413{
414 if (input_buf_ptr >= input_buf_siz)
415 return -1;
416
417 return input_buf[input_buf_ptr++];
418}
419
420static int __peek_char(void)
421{
422 if (input_buf_ptr >= input_buf_siz)
423 return -1;
424
425 return input_buf[input_buf_ptr];
426}
427
428static enum event_type __read_token(char **tok)
429{
430 char buf[BUFSIZ];
431 int ch, last_ch, quote_ch, next_ch;
432 int i = 0;
433 int tok_size = 0;
434 enum event_type type;
435
436 *tok = NULL;
437
438
439 ch = __read_char();
440 if (ch < 0)
441 return EVENT_NONE;
442
443 type = get_type(ch);
444 if (type == EVENT_NONE)
445 return type;
446
447 buf[i++] = ch;
448
449 switch (type) {
450 case EVENT_NEWLINE:
451 case EVENT_DELIM:
452 *tok = malloc_or_die(2);
453 (*tok)[0] = ch;
454 (*tok)[1] = 0;
455 return type;
456
457 case EVENT_OP:
458 switch (ch) {
459 case '-':
460 next_ch = __peek_char();
461 if (next_ch == '>') {
462 buf[i++] = __read_char();
463 break;
464 }
465 /* fall through */
466 case '+':
467 case '|':
468 case '&':
469 case '>':
470 case '<':
471 last_ch = ch;
472 ch = __peek_char();
473 if (ch != last_ch)
474 goto test_equal;
475 buf[i++] = __read_char();
476 switch (last_ch) {
477 case '>':
478 case '<':
479 goto test_equal;
480 default:
481 break;
482 }
483 break;
484 case '!':
485 case '=':
486 goto test_equal;
487 default: /* what should we do instead? */
488 break;
489 }
490 buf[i] = 0;
491 *tok = strdup(buf);
492 return type;
493
494 test_equal:
495 ch = __peek_char();
496 if (ch == '=')
497 buf[i++] = __read_char();
498 break;
499
500 case EVENT_DQUOTE:
501 case EVENT_SQUOTE:
502 /* don't keep quotes */
503 i--;
504 quote_ch = ch;
505 last_ch = 0;
506 do {
507 if (i == (BUFSIZ - 1)) {
508 buf[i] = 0;
509 if (*tok) {
510 *tok = realloc(*tok, tok_size + BUFSIZ);
511 if (!*tok)
512 return EVENT_NONE;
513 strcat(*tok, buf);
514 } else
515 *tok = strdup(buf);
516
517 if (!*tok)
518 return EVENT_NONE;
519 tok_size += BUFSIZ;
520 i = 0;
521 }
522 last_ch = ch;
523 ch = __read_char();
524 buf[i++] = ch;
525 } while (ch != quote_ch && last_ch != '\\');
526 /* remove the last quote */
527 i--;
528 goto out;
529
530 case EVENT_ERROR ... EVENT_SPACE:
531 case EVENT_ITEM:
532 default:
533 break;
534 }
535
536 while (get_type(__peek_char()) == type) {
537 if (i == (BUFSIZ - 1)) {
538 buf[i] = 0;
539 if (*tok) {
540 *tok = realloc(*tok, tok_size + BUFSIZ);
541 if (!*tok)
542 return EVENT_NONE;
543 strcat(*tok, buf);
544 } else
545 *tok = strdup(buf);
546
547 if (!*tok)
548 return EVENT_NONE;
549 tok_size += BUFSIZ;
550 i = 0;
551 }
552 ch = __read_char();
553 buf[i++] = ch;
554 }
555
556 out:
557 buf[i] = 0;
558 if (*tok) {
559 *tok = realloc(*tok, tok_size + i);
560 if (!*tok)
561 return EVENT_NONE;
562 strcat(*tok, buf);
563 } else
564 *tok = strdup(buf);
565 if (!*tok)
566 return EVENT_NONE;
567
568 return type;
569}
570
571static void free_token(char *tok)
572{
573 if (tok)
574 free(tok);
575}
576
577static enum event_type read_token(char **tok)
578{
579 enum event_type type;
580
581 for (;;) {
582 type = __read_token(tok);
583 if (type != EVENT_SPACE)
584 return type;
585
586 free_token(*tok);
587 }
588
589 /* not reached */
590 return EVENT_NONE;
591}
592
593/* no newline */
594static enum event_type read_token_item(char **tok)
595{
596 enum event_type type;
597
598 for (;;) {
599 type = __read_token(tok);
600 if (type != EVENT_SPACE && type != EVENT_NEWLINE)
601 return type;
602
603 free_token(*tok);
604 }
605
606 /* not reached */
607 return EVENT_NONE;
608}
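
__read_token() above grows a token for as long as the next character stays in the same class that get_type() assigned to the first one. The sketch below applies only that grouping rule, with a much-simplified classifier and none of the quote or multi-character operator handling, to show how a TP_printk-style fragment splits into tokens:

#include <stdio.h>
#include <ctype.h>
#include <string.h>

enum type { T_SPACE, T_ITEM, T_DELIM, T_OP };

/* Much-simplified get_type(): no quotes, newlines or error cases,
 * just the four coarse classes used for grouping. */
static enum type classify(int ch)
{
	if (isspace(ch))
		return T_SPACE;
	if (isalnum(ch) || ch == '_')
		return T_ITEM;
	if (ch == '(' || ch == ')' || ch == ',')
		return T_DELIM;
	return T_OP;
}

int main(void)
{
	const char *fmt = "REC->common_pid != 0";
	static const char *names[] = { "space", "item", "delim", "op" };
	size_t i = 0, len = strlen(fmt);

	/* Group consecutive characters of the same class into one token,
	 * the way the tail loop of __read_token() extends a token. */
	while (i < len) {
		enum type t = classify((unsigned char)fmt[i]);
		size_t start = i;

		while (i < len && classify((unsigned char)fmt[i]) == t)
			i++;
		if (t != T_SPACE)	/* read_token() discards space tokens */
			printf("%-5s '%.*s'\n",
			       names[t], (int)(i - start), fmt + start);
	}
	return 0;
}
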
609
610static int test_type(enum event_type type, enum event_type expect)
611{
612 if (type != expect) {
613 die("Error: expected type %d but read %d",
614 expect, type);
615 return -1;
616 }
617 return 0;
618}
619
620static int test_type_token(enum event_type type, char *token,
621 enum event_type expect, char *expect_tok)
622{
623 if (type != expect) {
624 die("Error: expected type %d but read %d",
625 expect, type);
626 return -1;
627 }
628
629 if (strcmp(token, expect_tok) != 0) {
630 die("Error: expected '%s' but read '%s'",
631 expect_tok, token);
632 return -1;
633 }
634 return 0;
635}
636
637static int __read_expect_type(enum event_type expect, char **tok, int newline_ok)
638{
639 enum event_type type;
640
641 if (newline_ok)
642 type = read_token(tok);
643 else
644 type = read_token_item(tok);
645 return test_type(type, expect);
646}
647
648static int read_expect_type(enum event_type expect, char **tok)
649{
650 return __read_expect_type(expect, tok, 1);
651}
652
653static int __read_expected(enum event_type expect, char *str, int newline_ok)
654{
655 enum event_type type;
656 char *token;
657 int ret;
658
659 if (newline_ok)
660 type = read_token(&token);
661 else
662 type = read_token_item(&token);
663
664 ret = test_type_token(type, token, expect, str);
665
666 free_token(token);
667
668 return ret;
669}
670
671static int read_expected(enum event_type expect, char *str)
672{
673 return __read_expected(expect, str, 1);
674}
675
676static int read_expected_item(enum event_type expect, char *str)
677{
678 return __read_expected(expect, str, 0);
679}
680
681static char *event_read_name(void)
682{
683 char *token;
684
685 if (read_expected(EVENT_ITEM, (char *)"name") < 0)
686 return NULL;
687
688 if (read_expected(EVENT_OP, (char *)":") < 0)
689 return NULL;
690
691 if (read_expect_type(EVENT_ITEM, &token) < 0)
692 goto fail;
693
694 return token;
695
696 fail:
697 free_token(token);
698 return NULL;
699}
700
701static int event_read_id(void)
702{
703 char *token;
704 int id;
705
706 if (read_expected_item(EVENT_ITEM, (char *)"ID") < 0)
707 return -1;
708
709 if (read_expected(EVENT_OP, (char *)":") < 0)
710 return -1;
711
712 if (read_expect_type(EVENT_ITEM, &token) < 0)
713 goto fail;
714
715 id = strtoul(token, NULL, 0);
716 free_token(token);
717 return id;
718
719 fail:
720 free_token(token);
721 return -1;
722}
723
724static int event_read_fields(struct event *event, struct format_field **fields)
725{
726 struct format_field *field = NULL;
727 enum event_type type;
728 char *token;
729 char *last_token;
730 int count = 0;
731
732 do {
733 type = read_token(&token);
734 if (type == EVENT_NEWLINE) {
735 free_token(token);
736 return count;
737 }
738
739 count++;
740
741 if (test_type_token(type, token, EVENT_ITEM, (char *)"field"))
742 goto fail;
743 free_token(token);
744
745 type = read_token(&token);
746 /*
747 * The ftrace fields may still use the "special" name.
748 * Just ignore it.
749 */
750 if (event->flags & EVENT_FL_ISFTRACE &&
751 type == EVENT_ITEM && strcmp(token, "special") == 0) {
752 free_token(token);
753 type = read_token(&token);
754 }
755
756 if (test_type_token(type, token, EVENT_OP, (char *)":") < 0)
757 return -1;
758
759 if (read_expect_type(EVENT_ITEM, &token) < 0)
760 goto fail;
761
762 last_token = token;
763
764 field = malloc_or_die(sizeof(*field));
765 memset(field, 0, sizeof(*field));
766
767 /* read the rest of the type */
768 for (;;) {
769 type = read_token(&token);
770 if (type == EVENT_ITEM ||
771 (type == EVENT_OP && strcmp(token, "*") == 0) ||
772 /*
773 * Some of the ftrace fields are broken and have
774 * an illegal "." in them.
775 */
776 (event->flags & EVENT_FL_ISFTRACE &&
777 type == EVENT_OP && strcmp(token, ".") == 0)) {
778
779 if (strcmp(token, "*") == 0)
780 field->flags |= FIELD_IS_POINTER;
781
782 if (field->type) {
783 field->type = realloc(field->type,
784 strlen(field->type) +
785 strlen(last_token) + 2);
786 strcat(field->type, " ");
787 strcat(field->type, last_token);
788 } else
789 field->type = last_token;
790 last_token = token;
791 continue;
792 }
793
794 break;
795 }
796
797 if (!field->type) {
798 die("no type found");
799 goto fail;
800 }
801 field->name = last_token;
802
803 if (test_type(type, EVENT_OP))
804 goto fail;
805
806 if (strcmp(token, "[") == 0) {
807 enum event_type last_type = type;
808 char *brackets = token;
809 int len;
810
811 field->flags |= FIELD_IS_ARRAY;
812
813 type = read_token(&token);
814 while (strcmp(token, "]") != 0) {
815 if (last_type == EVENT_ITEM &&
816 type == EVENT_ITEM)
817 len = 2;
818 else
819 len = 1;
820 last_type = type;
821
822 brackets = realloc(brackets,
823 strlen(brackets) +
824 strlen(token) + len);
825 if (len == 2)
826 strcat(brackets, " ");
827 strcat(brackets, token);
828 free_token(token);
829 type = read_token(&token);
830 if (type == EVENT_NONE) {
831 die("failed to find token");
832 goto fail;
833 }
834 }
835
836 free_token(token);
837
838 brackets = realloc(brackets, strlen(brackets) + 2);
839 strcat(brackets, "]");
840
841 /* add brackets to type */
842
843 type = read_token(&token);
844 /*
845 * If the next token is not an OP, then it is of
846 * the format: type [] item;
847 */
848 if (type == EVENT_ITEM) {
849 field->type = realloc(field->type,
850 strlen(field->type) +
851 strlen(field->name) +
852 strlen(brackets) + 2);
853 strcat(field->type, " ");
854 strcat(field->type, field->name);
855 free_token(field->name);
856 strcat(field->type, brackets);
857 field->name = token;
858 type = read_token(&token);
859 } else {
860 field->type = realloc(field->type,
861 strlen(field->type) +
862 strlen(brackets) + 1);
863 strcat(field->type, brackets);
864 }
865 free(brackets);
866 }
867
868 if (test_type_token(type, token, EVENT_OP, (char *)";"))
869 goto fail;
870 free_token(token);
871
872 if (read_expected(EVENT_ITEM, (char *)"offset") < 0)
873 goto fail_expect;
874
875 if (read_expected(EVENT_OP, (char *)":") < 0)
876 goto fail_expect;
877
878 if (read_expect_type(EVENT_ITEM, &token))
879 goto fail;
880 field->offset = strtoul(token, NULL, 0);
881 free_token(token);
882
883 if (read_expected(EVENT_OP, (char *)";") < 0)
884 goto fail_expect;
885
886 if (read_expected(EVENT_ITEM, (char *)"size") < 0)
887 goto fail_expect;
888
889 if (read_expected(EVENT_OP, (char *)":") < 0)
890 goto fail_expect;
891
892 if (read_expect_type(EVENT_ITEM, &token))
893 goto fail;
894 field->size = strtoul(token, NULL, 0);
895 free_token(token);
896
897 if (read_expected(EVENT_OP, (char *)";") < 0)
898 goto fail_expect;
899
900 if (read_expect_type(EVENT_NEWLINE, &token) < 0)
901 goto fail;
902 free_token(token);
903
904 *fields = field;
905 fields = &field->next;
906
907 } while (1);
908
909 return 0;
910
911fail:
912 free_token(token);
913fail_expect:
914 if (field)
915 free(field);
916 return -1;
917}
918
919static int event_read_format(struct event *event)
920{
921 char *token;
922 int ret;
923
924 if (read_expected_item(EVENT_ITEM, (char *)"format") < 0)
925 return -1;
926
927 if (read_expected(EVENT_OP, (char *)":") < 0)
928 return -1;
929
930 if (read_expect_type(EVENT_NEWLINE, &token))
931 goto fail;
932 free_token(token);
933
934 ret = event_read_fields(event, &event->format.common_fields);
935 if (ret < 0)
936 return ret;
937 event->format.nr_common = ret;
938
939 ret = event_read_fields(event, &event->format.fields);
940 if (ret < 0)
941 return ret;
942 event->format.nr_fields = ret;
943
944 return 0;
945
946 fail:
947 free_token(token);
948 return -1;
949}
950
951enum event_type
952process_arg_token(struct event *event, struct print_arg *arg,
953 char **tok, enum event_type type);
954
955static enum event_type
956process_arg(struct event *event, struct print_arg *arg, char **tok)
957{
958 enum event_type type;
959 char *token;
960
961 type = read_token(&token);
962 *tok = token;
963
964 return process_arg_token(event, arg, tok, type);
965}
966
967static enum event_type
968process_cond(struct event *event, struct print_arg *top, char **tok)
969{
970 struct print_arg *arg, *left, *right;
971 enum event_type type;
972 char *token = NULL;
973
974 arg = malloc_or_die(sizeof(*arg));
975 memset(arg, 0, sizeof(*arg));
976
977 left = malloc_or_die(sizeof(*left));
978
979 right = malloc_or_die(sizeof(*right));
980
981 arg->type = PRINT_OP;
982 arg->op.left = left;
983 arg->op.right = right;
984
985 *tok = NULL;
986 type = process_arg(event, left, &token);
987 if (test_type_token(type, token, EVENT_OP, (char *)":"))
988 goto out_free;
989
990 arg->op.op = token;
991
992 type = process_arg(event, right, &token);
993
994 top->op.right = arg;
995
996 *tok = token;
997 return type;
998
999out_free:
1000 free_token(*tok);
1001 free(right);
1002 free(left);
1003 free_arg(arg);
1004 return EVENT_ERROR;
1005}
1006
1007static int get_op_prio(char *op)
1008{
1009 if (!op[1]) {
1010 switch (op[0]) {
1011 case '*':
1012 case '/':
1013 case '%':
1014 return 6;
1015 case '+':
1016 case '-':
1017 return 7;
1018 /* '>>' and '<<' are 8 */
1019 case '<':
1020 case '>':
1021 return 9;
1022 /* '==' and '!=' are 10 */
1023 case '&':
1024 return 11;
1025 case '^':
1026 return 12;
1027 case '|':
1028 return 13;
1029 case '?':
1030 return 16;
1031 default:
1032 die("unknown op '%c'", op[0]);
1033 return -1;
1034 }
1035 } else {
1036 if (strcmp(op, "++") == 0 ||
1037 strcmp(op, "--") == 0) {
1038 return 3;
1039 } else if (strcmp(op, ">>") == 0 ||
1040 strcmp(op, "<<") == 0) {
1041 return 8;
1042 } else if (strcmp(op, ">=") == 0 ||
1043 strcmp(op, "<=") == 0) {
1044 return 9;
1045 } else if (strcmp(op, "==") == 0 ||
1046 strcmp(op, "!=") == 0) {
1047 return 10;
1048 } else if (strcmp(op, "&&") == 0) {
1049 return 14;
1050 } else if (strcmp(op, "||") == 0) {
1051 return 15;
1052 } else {
1053 die("unknown op '%s'", op);
1054 return -1;
1055 }
1056 }
1057}
1058
1059static void set_op_prio(struct print_arg *arg)
1060{
1061
1062 /* single ops are the greatest */
1063 if (!arg->op.left || arg->op.left->type == PRINT_NULL) {
1064 arg->op.prio = 0;
1065 return;
1066 }
1067
1068 arg->op.prio = get_op_prio(arg->op.op);
1069}
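
In process_op() below, a larger number from get_op_prio() means the operator sits closer to the root of the print_arg tree, i.e. it binds more loosely. For example, in "a << b + c" the pending '<<' has priority 8 and the incoming '+' has priority 7, so the "prio > arg->op.prio" test fails and the '+' is processed against the right child, grouping the expression as a << (b + c), the same grouping C gives. A tiny standalone check of that ordering, using a trimmed copy of the priority table (purely illustrative):

#include <stdio.h>
#include <string.h>

/* Trimmed copy of the get_op_prio() table: lower number = tighter binding. */
static int prio(const char *op)
{
	if (!strcmp(op, "*") || !strcmp(op, "/"))
		return 6;
	if (!strcmp(op, "+") || !strcmp(op, "-"))
		return 7;
	if (!strcmp(op, "<<") || !strcmp(op, ">>"))
		return 8;
	if (!strcmp(op, "&"))
		return 11;
	if (!strcmp(op, "|"))
		return 13;
	return -1;
}

int main(void)
{
	/* Same decision process_op() makes when a second operator arrives:
	 * if the new op has the higher priority it becomes the new root,
	 * otherwise it is attached inside the right-hand subtree. */
	const char *pending = "<<", *incoming = "+";

	if (prio(incoming) > prio(pending))
		printf("a %s b %s c  groups as  (a %s b) %s c\n",
		       pending, incoming, pending, incoming);
	else
		printf("a %s b %s c  groups as  a %s (b %s c)\n",
		       pending, incoming, pending, incoming);
	return 0;
}
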
1070
1071static enum event_type
1072process_op(struct event *event, struct print_arg *arg, char **tok)
1073{
1074 struct print_arg *left, *right = NULL;
1075 enum event_type type;
1076 char *token;
1077
1078 /* the op is passed in via tok */
1079 token = *tok;
1080
1081 if (arg->type == PRINT_OP && !arg->op.left) {
1082 /* handle single op */
1083 if (token[1]) {
1084 die("bad op token %s", token);
1085 return EVENT_ERROR;
1086 }
1087 switch (token[0]) {
1088 case '!':
1089 case '+':
1090 case '-':
1091 break;
1092 default:
1093 die("bad op token %s", token);
1094 return EVENT_ERROR;
1095 }
1096
1097 /* make an empty left */
1098 left = malloc_or_die(sizeof(*left));
1099 left->type = PRINT_NULL;
1100 arg->op.left = left;
1101
1102 right = malloc_or_die(sizeof(*right));
1103 arg->op.right = right;
1104
1105 type = process_arg(event, right, tok);
1106
1107 } else if (strcmp(token, "?") == 0) {
1108
1109 left = malloc_or_die(sizeof(*left));
1110 /* copy the top arg to the left */
1111 *left = *arg;
1112
1113 arg->type = PRINT_OP;
1114 arg->op.op = token;
1115 arg->op.left = left;
1116 arg->op.prio = 0;
1117
1118 type = process_cond(event, arg, tok);
1119
1120 } else if (strcmp(token, ">>") == 0 ||
1121 strcmp(token, "<<") == 0 ||
1122 strcmp(token, "&") == 0 ||
1123 strcmp(token, "|") == 0 ||
1124 strcmp(token, "&&") == 0 ||
1125 strcmp(token, "||") == 0 ||
1126 strcmp(token, "-") == 0 ||
1127 strcmp(token, "+") == 0 ||
1128 strcmp(token, "*") == 0 ||
1129 strcmp(token, "^") == 0 ||
1130 strcmp(token, "/") == 0 ||
1131 strcmp(token, "==") == 0 ||
1132 strcmp(token, "!=") == 0) {
1133
1134 left = malloc_or_die(sizeof(*left));
1135
1136 /* copy the top arg to the left */
1137 *left = *arg;
1138
1139 arg->type = PRINT_OP;
1140 arg->op.op = token;
1141 arg->op.left = left;
1142
1143 set_op_prio(arg);
1144
1145 right = malloc_or_die(sizeof(*right));
1146
1147 type = process_arg(event, right, tok);
1148
1149 arg->op.right = right;
1150
1151 } else {
1152 die("unknown op '%s'", token);
1153 /* the arg is now the left side */
1154 return EVENT_NONE;
1155 }
1156
1157
1158 if (type == EVENT_OP) {
1159 int prio;
1160
1161 /* higher prios need to be closer to the root */
1162 prio = get_op_prio(*tok);
1163
1164 if (prio > arg->op.prio)
1165 return process_op(event, arg, tok);
1166
1167 return process_op(event, right, tok);
1168 }
1169
1170 return type;
1171}
1172
1173static enum event_type
1174process_entry(struct event *event __unused, struct print_arg *arg,
1175 char **tok)
1176{
1177 enum event_type type;
1178 char *field;
1179 char *token;
1180
1181 if (read_expected(EVENT_OP, (char *)"->") < 0)
1182 return EVENT_ERROR;
1183
1184 if (read_expect_type(EVENT_ITEM, &token) < 0)
1185 goto fail;
1186 field = token;
1187
1188 arg->type = PRINT_FIELD;
1189 arg->field.name = field;
1190
1191 type = read_token(&token);
1192 *tok = token;
1193
1194 return type;
1195
1196fail:
1197 free_token(token);
1198 return EVENT_ERROR;
1199}
1200
1201static char *arg_eval (struct print_arg *arg);
1202
1203static long long arg_num_eval(struct print_arg *arg)
1204{
1205 long long left, right;
1206 long long val = 0;
1207
1208 switch (arg->type) {
1209 case PRINT_ATOM:
1210 val = strtoll(arg->atom.atom, NULL, 0);
1211 break;
1212 case PRINT_TYPE:
1213 val = arg_num_eval(arg->typecast.item);
1214 break;
1215 case PRINT_OP:
1216 switch (arg->op.op[0]) {
1217 case '|':
1218 left = arg_num_eval(arg->op.left);
1219 right = arg_num_eval(arg->op.right);
1220 if (arg->op.op[1])
1221 val = left || right;
1222 else
1223 val = left | right;
1224 break;
1225 case '&':
1226 left = arg_num_eval(arg->op.left);
1227 right = arg_num_eval(arg->op.right);
1228 if (arg->op.op[1])
1229 val = left && right;
1230 else
1231 val = left & right;
1232 break;
1233 case '<':
1234 left = arg_num_eval(arg->op.left);
1235 right = arg_num_eval(arg->op.right);
1236 switch (arg->op.op[1]) {
1237 case 0:
1238 val = left < right;
1239 break;
1240 case '<':
1241 val = left << right;
1242 break;
1243 case '=':
1244 val = left <= right;
1245 break;
1246 default:
1247 die("unknown op '%s'", arg->op.op);
1248 }
1249 break;
1250 case '>':
1251 left = arg_num_eval(arg->op.left);
1252 right = arg_num_eval(arg->op.right);
1253 switch (arg->op.op[1]) {
1254 case 0:
1255 val = left > right;
1256 break;
1257 case '>':
1258 val = left >> right;
1259 break;
1260 case '=':
1261 val = left >= right;
1262 break;
1263 default:
1264 die("unknown op '%s'", arg->op.op);
1265 }
1266 break;
1267 case '=':
1268 left = arg_num_eval(arg->op.left);
1269 right = arg_num_eval(arg->op.right);
1270
1271 if (arg->op.op[1] != '=')
1272 die("unknown op '%s'", arg->op.op);
1273
1274 val = left == right;
1275 break;
1276 case '!':
1277 left = arg_num_eval(arg->op.left);
1278 right = arg_num_eval(arg->op.right);
1279
1280 switch (arg->op.op[1]) {
1281 case '=':
1282 val = left != right;
1283 break;
1284 default:
1285 die("unknown op '%s'", arg->op.op);
1286 }
1287 break;
1288 default:
1289 die("unknown op '%s'", arg->op.op);
1290 }
1291 break;
1292
1293 case PRINT_NULL:
1294 case PRINT_FIELD ... PRINT_SYMBOL:
1295 case PRINT_STRING:
1296 default:
1297 die("invalid eval type %d", arg->type);
1298
1299 }
1300 return val;
1301}
1302
1303static char *arg_eval (struct print_arg *arg)
1304{
1305 long long val;
1306 static char buf[20];
1307
1308 switch (arg->type) {
1309 case PRINT_ATOM:
1310 return arg->atom.atom;
1311 case PRINT_TYPE:
1312 return arg_eval(arg->typecast.item);
1313 case PRINT_OP:
1314 val = arg_num_eval(arg);
1315 sprintf(buf, "%lld", val);
1316 return buf;
1317
1318 case PRINT_NULL:
1319 case PRINT_FIELD ... PRINT_SYMBOL:
1320 case PRINT_STRING:
1321 default:
1322 die("invalid eval type %d", arg->type);
1323 break;
1324 }
1325
1326 return NULL;
1327}
1328
1329static enum event_type
1330process_fields(struct event *event, struct print_flag_sym **list, char **tok)
1331{
1332 enum event_type type;
1333 struct print_arg *arg = NULL;
1334 struct print_flag_sym *field;
1335 char *token = NULL;
1336 char *value;
1337
1338 do {
1339 free_token(token);
1340 type = read_token_item(&token);
1341 if (test_type_token(type, token, EVENT_OP, (char *)"{"))
1342 break;
1343
1344 arg = malloc_or_die(sizeof(*arg));
1345
1346 free_token(token);
1347 type = process_arg(event, arg, &token);
1348 if (test_type_token(type, token, EVENT_DELIM, (char *)","))
1349 goto out_free;
1350
1351 field = malloc_or_die(sizeof(*field));
1352 memset(field, 0, sizeof(*field));
1353
1354 value = arg_eval(arg);
1355 field->value = strdup(value);
1356
1357 free_token(token);
1358 type = process_arg(event, arg, &token);
1359 if (test_type_token(type, token, EVENT_OP, (char *)"}"))
1360 goto out_free;
1361
1362 value = arg_eval(arg);
1363 field->str = strdup(value);
1364 free_arg(arg);
1365 arg = NULL;
1366
1367 *list = field;
1368 list = &field->next;
1369
1370 free_token(token);
1371 type = read_token_item(&token);
1372 } while (type == EVENT_DELIM && strcmp(token, ",") == 0);
1373
1374 *tok = token;
1375 return type;
1376
1377out_free:
1378 free_arg(arg);
1379 free_token(token);
1380
1381 return EVENT_ERROR;
1382}
1383
1384static enum event_type
1385process_flags(struct event *event, struct print_arg *arg, char **tok)
1386{
1387 struct print_arg *field;
1388 enum event_type type;
1389 char *token;
1390
1391 memset(arg, 0, sizeof(*arg));
1392 arg->type = PRINT_FLAGS;
1393
1394 if (read_expected_item(EVENT_DELIM, (char *)"(") < 0)
1395 return EVENT_ERROR;
1396
1397 field = malloc_or_die(sizeof(*field));
1398
1399 type = process_arg(event, field, &token);
1400 if (test_type_token(type, token, EVENT_DELIM, (char *)","))
1401 goto out_free;
1402
1403 arg->flags.field = field;
1404
1405 type = read_token_item(&token);
1406 if (event_item_type(type)) {
1407 arg->flags.delim = token;
1408 type = read_token_item(&token);
1409 }
1410
1411 if (test_type_token(type, token, EVENT_DELIM, (char *)","))
1412 goto out_free;
1413
1414 type = process_fields(event, &arg->flags.flags, &token);
1415 if (test_type_token(type, token, EVENT_DELIM, (char *)")"))
1416 goto out_free;
1417
1418 free_token(token);
1419 type = read_token_item(tok);
1420 return type;
1421
1422out_free:
1423 free_token(token);
1424 return EVENT_ERROR;
1425}
1426
1427static enum event_type
1428process_symbols(struct event *event, struct print_arg *arg, char **tok)
1429{
1430 struct print_arg *field;
1431 enum event_type type;
1432 char *token;
1433
1434 memset(arg, 0, sizeof(*arg));
1435 arg->type = PRINT_SYMBOL;
1436
1437 if (read_expected_item(EVENT_DELIM, (char *)"(") < 0)
1438 return EVENT_ERROR;
1439
1440 field = malloc_or_die(sizeof(*field));
1441
1442 type = process_arg(event, field, &token);
1443 if (test_type_token(type, token, EVENT_DELIM, (char *)","))
1444 goto out_free;
1445
1446 arg->symbol.field = field;
1447
1448 type = process_fields(event, &arg->symbol.symbols, &token);
1449 if (test_type_token(type, token, EVENT_DELIM, (char *)")"))
1450 goto out_free;
1451
1452 free_token(token);
1453 type = read_token_item(tok);
1454 return type;
1455
1456out_free:
1457 free_token(token);
1458 return EVENT_ERROR;
1459}
1460
1461static enum event_type
1462process_paren(struct event *event, struct print_arg *arg, char **tok)
1463{
1464 struct print_arg *item_arg;
1465 enum event_type type;
1466 int ptr_cast = 0;
1467 char *token;
1468
1469 type = process_arg(event, arg, &token);
1470
1471 if (type == EVENT_ERROR)
1472 return EVENT_ERROR;
1473
1474 if (type == EVENT_OP) {
1475 /* handle the ptr casts */
1476 if (!strcmp(token, "*")) {
1477 /*
1478			 * FIXME: should we zap whitespace before ')'?
1479 * (may require a peek_token_item())
1480 */
1481 if (__peek_char() == ')') {
1482 ptr_cast = 1;
1483 free_token(token);
1484 type = read_token_item(&token);
1485 }
1486 }
1487 if (!ptr_cast) {
1488 type = process_op(event, arg, &token);
1489
1490 if (type == EVENT_ERROR)
1491 return EVENT_ERROR;
1492 }
1493 }
1494
1495 if (test_type_token(type, token, EVENT_DELIM, (char *)")")) {
1496 free_token(token);
1497 return EVENT_ERROR;
1498 }
1499
1500 free_token(token);
1501 type = read_token_item(&token);
1502
1503 /*
1504 * If the next token is an item or another open paren, then
1505 * this was a typecast.
1506 */
1507 if (event_item_type(type) ||
1508 (type == EVENT_DELIM && strcmp(token, "(") == 0)) {
1509
1510		/* make this a typecast and continue */
1511
1512		/* previous must be an atom */
1513 if (arg->type != PRINT_ATOM)
1514 die("previous needed to be PRINT_ATOM");
1515
1516 item_arg = malloc_or_die(sizeof(*item_arg));
1517
1518 arg->type = PRINT_TYPE;
1519 if (ptr_cast) {
1520 char *old = arg->atom.atom;
1521
1522			arg->atom.atom = malloc_or_die(strlen(old) + 3);
1523 sprintf(arg->atom.atom, "%s *", old);
1524 free(old);
1525 }
1526 arg->typecast.type = arg->atom.atom;
1527 arg->typecast.item = item_arg;
1528 type = process_arg_token(event, item_arg, &token, type);
1529
1530 }
1531
1532 *tok = token;
1533 return type;
1534}
1535
1536
1537static enum event_type
1538process_str(struct event *event __unused, struct print_arg *arg, char **tok)
1539{
1540 enum event_type type;
1541 char *token;
1542
1543 if (read_expected(EVENT_DELIM, (char *)"(") < 0)
1544 return EVENT_ERROR;
1545
1546 if (read_expect_type(EVENT_ITEM, &token) < 0)
1547 goto fail;
1548
1549 arg->type = PRINT_STRING;
1550 arg->string.string = token;
1551 arg->string.offset = -1;
1552
1553 if (read_expected(EVENT_DELIM, (char *)")") < 0)
1554 return EVENT_ERROR;
1555
1556 type = read_token(&token);
1557 *tok = token;
1558
1559 return type;
1560fail:
1561 free_token(token);
1562 return EVENT_ERROR;
1563}
1564
1565enum event_type
1566process_arg_token(struct event *event, struct print_arg *arg,
1567 char **tok, enum event_type type)
1568{
1569 char *token;
1570 char *atom;
1571
1572 token = *tok;
1573
1574 switch (type) {
1575 case EVENT_ITEM:
1576 if (strcmp(token, "REC") == 0) {
1577 free_token(token);
1578 type = process_entry(event, arg, &token);
1579 } else if (strcmp(token, "__print_flags") == 0) {
1580 free_token(token);
1581 type = process_flags(event, arg, &token);
1582 } else if (strcmp(token, "__print_symbolic") == 0) {
1583 free_token(token);
1584 type = process_symbols(event, arg, &token);
1585 } else if (strcmp(token, "__get_str") == 0) {
1586 free_token(token);
1587 type = process_str(event, arg, &token);
1588 } else {
1589 atom = token;
1590 /* test the next token */
1591 type = read_token_item(&token);
1592
1593 /* atoms can be more than one token long */
1594 while (type == EVENT_ITEM) {
1595 atom = realloc(atom, strlen(atom) + strlen(token) + 2);
1596 strcat(atom, " ");
1597 strcat(atom, token);
1598 free_token(token);
1599 type = read_token_item(&token);
1600 }
1601
1602 /* todo, test for function */
1603
1604 arg->type = PRINT_ATOM;
1605 arg->atom.atom = atom;
1606 }
1607 break;
1608 case EVENT_DQUOTE:
1609 case EVENT_SQUOTE:
1610 arg->type = PRINT_ATOM;
1611 arg->atom.atom = token;
1612 type = read_token_item(&token);
1613 break;
1614 case EVENT_DELIM:
1615 if (strcmp(token, "(") == 0) {
1616 free_token(token);
1617 type = process_paren(event, arg, &token);
1618 break;
1619 }
1620 case EVENT_OP:
1621 /* handle single ops */
1622 arg->type = PRINT_OP;
1623 arg->op.op = token;
1624 arg->op.left = NULL;
1625 type = process_op(event, arg, &token);
1626
1627 break;
1628
1629 case EVENT_ERROR ... EVENT_NEWLINE:
1630 default:
1631 die("unexpected type %d", type);
1632 }
1633 *tok = token;
1634
1635 return type;
1636}
1637
1638static int event_read_print_args(struct event *event, struct print_arg **list)
1639{
1640 enum event_type type;
1641 struct print_arg *arg;
1642 char *token;
1643 int args = 0;
1644
1645 do {
1646 arg = malloc_or_die(sizeof(*arg));
1647 memset(arg, 0, sizeof(*arg));
1648
1649 type = process_arg(event, arg, &token);
1650
1651 if (type == EVENT_ERROR) {
1652 free_arg(arg);
1653 return -1;
1654 }
1655
1656 *list = arg;
1657 args++;
1658
1659 if (type == EVENT_OP) {
1660 type = process_op(event, arg, &token);
1661 list = &arg->next;
1662 continue;
1663 }
1664
1665 if (type == EVENT_DELIM && strcmp(token, ",") == 0) {
1666 free_token(token);
1667 *list = arg;
1668 list = &arg->next;
1669 continue;
1670 }
1671 break;
1672 } while (type != EVENT_NONE);
1673
1674 if (type != EVENT_NONE)
1675 free_token(token);
1676
1677 return args;
1678}
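
Editor's note: the `*list = arg; list = &arg->next;` pattern above appends to a singly linked list without keeping a separate tail pointer. A minimal standalone sketch of the same idiom (hypothetical struct item, not part of the patch):

#include <stdio.h>
#include <stdlib.h>

struct item {
	struct item *next;
	int val;
};

int main(void)
{
	struct item *head = NULL, **list = &head, *it;
	int i;

	/* always hold the address of the next pointer to fill */
	for (i = 0; i < 3; i++) {
		it = malloc(sizeof(*it));
		if (!it)
			return 1;
		it->val = i;
		it->next = NULL;
		*list = it;
		list = &it->next;
	}

	for (it = head; it; it = it->next)
		printf("%d ", it->val);	/* prints "0 1 2 " */
	printf("\n");
	return 0;
}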
1679
1680static int event_read_print(struct event *event)
1681{
1682 enum event_type type;
1683 char *token;
1684 int ret;
1685
1686 if (read_expected_item(EVENT_ITEM, (char *)"print") < 0)
1687 return -1;
1688
1689 if (read_expected(EVENT_ITEM, (char *)"fmt") < 0)
1690 return -1;
1691
1692 if (read_expected(EVENT_OP, (char *)":") < 0)
1693 return -1;
1694
1695 if (read_expect_type(EVENT_DQUOTE, &token) < 0)
1696 goto fail;
1697
1698 event->print_fmt.format = token;
1699 event->print_fmt.args = NULL;
1700
1701 /* ok to have no arg */
1702 type = read_token_item(&token);
1703
1704 if (type == EVENT_NONE)
1705 return 0;
1706
1707 if (test_type_token(type, token, EVENT_DELIM, (char *)","))
1708 goto fail;
1709
1710 free_token(token);
1711
1712 ret = event_read_print_args(event, &event->print_fmt.args);
1713 if (ret < 0)
1714 return -1;
1715
1716 return 0;
1717
1718 fail:
1719 free_token(token);
1720 return -1;
1721}
1722
1723static struct format_field *
1724find_common_field(struct event *event, const char *name)
1725{
1726 struct format_field *format;
1727
1728 for (format = event->format.common_fields;
1729 format; format = format->next) {
1730 if (strcmp(format->name, name) == 0)
1731 break;
1732 }
1733
1734 return format;
1735}
1736
1737static struct format_field *
1738find_field(struct event *event, const char *name)
1739{
1740 struct format_field *format;
1741
1742 for (format = event->format.fields;
1743 format; format = format->next) {
1744 if (strcmp(format->name, name) == 0)
1745 break;
1746 }
1747
1748 return format;
1749}
1750
1751static struct format_field *
1752find_any_field(struct event *event, const char *name)
1753{
1754 struct format_field *format;
1755
1756 format = find_common_field(event, name);
1757 if (format)
1758 return format;
1759 return find_field(event, name);
1760}
1761
1762static unsigned long long read_size(void *ptr, int size)
1763{
1764 switch (size) {
1765 case 1:
1766 return *(unsigned char *)ptr;
1767 case 2:
1768 return data2host2(ptr);
1769 case 4:
1770 return data2host4(ptr);
1771 case 8:
1772 return data2host8(ptr);
1773 default:
1774 /* BUG! */
1775 return 0;
1776 }
1777}
1778
1779static int get_common_info(const char *type, int *offset, int *size)
1780{
1781 struct event *event;
1782 struct format_field *field;
1783
1784 /*
1785 * All events should have the same common elements.
1786	 * Pick any event to find where the type is.
1787 */
1788 if (!event_list)
1789 die("no event_list!");
1790
1791 event = event_list;
1792 field = find_common_field(event, type);
1793 if (!field)
1794 die("field '%s' not found", type);
1795
1796 *offset = field->offset;
1797 *size = field->size;
1798
1799 return 0;
1800}
1801
1802static int parse_common_type(void *data)
1803{
1804 static int type_offset;
1805 static int type_size;
1806 int ret;
1807
1808 if (!type_size) {
1809 ret = get_common_info("common_type",
1810 &type_offset,
1811 &type_size);
1812 if (ret < 0)
1813 return ret;
1814 }
1815 return read_size(data + type_offset, type_size);
1816}
1817
1818static int parse_common_pid(void *data)
1819{
1820 static int pid_offset;
1821 static int pid_size;
1822 int ret;
1823
1824 if (!pid_size) {
1825 ret = get_common_info("common_pid",
1826 &pid_offset,
1827 &pid_size);
1828 if (ret < 0)
1829 return ret;
1830 }
1831
1832 return read_size(data + pid_offset, pid_size);
1833}
1834
1835static struct event *find_event(int id)
1836{
1837 struct event *event;
1838
1839 for (event = event_list; event; event = event->next) {
1840 if (event->id == id)
1841 break;
1842 }
1843 return event;
1844}
1845
1846static unsigned long long eval_num_arg(void *data, int size,
1847 struct event *event, struct print_arg *arg)
1848{
1849 unsigned long long val = 0;
1850 unsigned long long left, right;
1851
1852 switch (arg->type) {
1853 case PRINT_NULL:
1854 /* ?? */
1855 return 0;
1856 case PRINT_ATOM:
1857 return strtoull(arg->atom.atom, NULL, 0);
1858 case PRINT_FIELD:
1859 if (!arg->field.field) {
1860 arg->field.field = find_any_field(event, arg->field.name);
1861 if (!arg->field.field)
1862 die("field %s not found", arg->field.name);
1863 }
1864 /* must be a number */
1865 val = read_size(data + arg->field.field->offset,
1866 arg->field.field->size);
1867 break;
1868 case PRINT_FLAGS:
1869 case PRINT_SYMBOL:
1870 break;
1871 case PRINT_TYPE:
1872 return eval_num_arg(data, size, event, arg->typecast.item);
1873 case PRINT_STRING:
1874 return 0;
1875 break;
1876 case PRINT_OP:
1877 left = eval_num_arg(data, size, event, arg->op.left);
1878 right = eval_num_arg(data, size, event, arg->op.right);
1879 switch (arg->op.op[0]) {
1880 case '|':
1881 if (arg->op.op[1])
1882 val = left || right;
1883 else
1884 val = left | right;
1885 break;
1886 case '&':
1887 if (arg->op.op[1])
1888 val = left && right;
1889 else
1890 val = left & right;
1891 break;
1892 case '<':
1893 switch (arg->op.op[1]) {
1894 case 0:
1895 val = left < right;
1896 break;
1897 case '<':
1898 val = left << right;
1899 break;
1900 case '=':
1901 val = left <= right;
1902 break;
1903 default:
1904 die("unknown op '%s'", arg->op.op);
1905 }
1906 break;
1907 case '>':
1908 switch (arg->op.op[1]) {
1909 case 0:
1910 val = left > right;
1911 break;
1912 case '>':
1913 val = left >> right;
1914 break;
1915 case '=':
1916 val = left >= right;
1917 break;
1918 default:
1919 die("unknown op '%s'", arg->op.op);
1920 }
1921 break;
1922 case '=':
1923 if (arg->op.op[1] != '=')
1924 die("unknown op '%s'", arg->op.op);
1925 val = left == right;
1926 break;
1927 default:
1928 die("unknown op '%s'", arg->op.op);
1929 }
1930 break;
1931 default: /* not sure what to do there */
1932 return 0;
1933 }
1934 return val;
1935}
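
Editor's note: eval_num_arg() walks the parsed print_arg tree recursively, combining leaf values with the stored operator string. A minimal sketch of that recursive evaluation over a hypothetical two-level tree (simplified node type, only two operators shown):

#include <stdio.h>

struct node {
	const char *op;		/* NULL for a leaf carrying a value */
	unsigned long long val;
	struct node *left, *right;
};

static unsigned long long eval(struct node *n)
{
	if (!n->op)
		return n->val;
	if (n->op[0] == '&' && !n->op[1])
		return eval(n->left) & eval(n->right);
	if (n->op[0] == '<' && n->op[1] == '<')
		return eval(n->left) << eval(n->right);
	return 0;	/* the real code also handles |, ||, &&, >>, ==, ... */
}

int main(void)
{
	struct node one   = { NULL, 1, NULL, NULL };
	struct node three = { NULL, 3, NULL, NULL };
	struct node shl   = { "<<", 0, &one, &three };	/* 1 << 3 */

	printf("%llu\n", eval(&shl));	/* prints 8 */
	return 0;
}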
1936
1937struct flag {
1938 const char *name;
1939 unsigned long long value;
1940};
1941
1942static const struct flag flags[] = {
1943 { "HI_SOFTIRQ", 0 },
1944 { "TIMER_SOFTIRQ", 1 },
1945 { "NET_TX_SOFTIRQ", 2 },
1946 { "NET_RX_SOFTIRQ", 3 },
1947 { "BLOCK_SOFTIRQ", 4 },
1948 { "TASKLET_SOFTIRQ", 5 },
1949 { "SCHED_SOFTIRQ", 6 },
1950 { "HRTIMER_SOFTIRQ", 7 },
1951 { "RCU_SOFTIRQ", 8 },
1952
1953 { "HRTIMER_NORESTART", 0 },
1954 { "HRTIMER_RESTART", 1 },
1955};
1956
1957static unsigned long long eval_flag(const char *flag)
1958{
1959 int i;
1960
1961 /*
1962 * Some flags in the format files do not get converted.
1963 * If the flag is not numeric, see if it is something that
1964 * we already know about.
1965 */
1966 if (isdigit(flag[0]))
1967 return strtoull(flag, NULL, 0);
1968
1969 for (i = 0; i < (int)(sizeof(flags)/sizeof(flags[0])); i++)
1970 if (strcmp(flags[i].name, flag) == 0)
1971 return flags[i].value;
1972
1973 return 0;
1974}
1975
1976static void print_str_arg(void *data, int size,
1977 struct event *event, struct print_arg *arg)
1978{
1979 struct print_flag_sym *flag;
1980 unsigned long long val, fval;
1981 char *str;
1982 int print;
1983
1984 switch (arg->type) {
1985 case PRINT_NULL:
1986 /* ?? */
1987 return;
1988 case PRINT_ATOM:
1989 printf("%s", arg->atom.atom);
1990 return;
1991 case PRINT_FIELD:
1992 if (!arg->field.field) {
1993 arg->field.field = find_any_field(event, arg->field.name);
1994 if (!arg->field.field)
1995 die("field %s not found", arg->field.name);
1996 }
1997 str = malloc_or_die(arg->field.field->size + 1);
1998 memcpy(str, data + arg->field.field->offset,
1999 arg->field.field->size);
2000 str[arg->field.field->size] = 0;
2001 printf("%s", str);
2002 free(str);
2003 break;
2004 case PRINT_FLAGS:
2005 val = eval_num_arg(data, size, event, arg->flags.field);
2006 print = 0;
2007 for (flag = arg->flags.flags; flag; flag = flag->next) {
2008 fval = eval_flag(flag->value);
2009 if (!val && !fval) {
2010 printf("%s", flag->str);
2011 break;
2012 }
2013 if (fval && (val & fval) == fval) {
2014 if (print && arg->flags.delim)
2015 printf("%s", arg->flags.delim);
2016 printf("%s", flag->str);
2017 print = 1;
2018 val &= ~fval;
2019 }
2020 }
2021 break;
2022 case PRINT_SYMBOL:
2023 val = eval_num_arg(data, size, event, arg->symbol.field);
2024 for (flag = arg->symbol.symbols; flag; flag = flag->next) {
2025 fval = eval_flag(flag->value);
2026 if (val == fval) {
2027 printf("%s", flag->str);
2028 break;
2029 }
2030 }
2031 break;
2032
2033 case PRINT_TYPE:
2034 break;
2035 case PRINT_STRING: {
2036 int str_offset;
2037
2038 if (arg->string.offset == -1) {
2039 struct format_field *f;
2040
2041 f = find_any_field(event, arg->string.string);
2042 arg->string.offset = f->offset;
2043 }
2044 str_offset = *(int *)(data + arg->string.offset);
2045 str_offset &= 0xffff;
2046 printf("%s", ((char *)data) + str_offset);
2047 break;
2048 }
2049 case PRINT_OP:
2050 /*
2051 * The only op for string should be ? :
2052 */
2053 if (arg->op.op[0] != '?')
2054 return;
2055 val = eval_num_arg(data, size, event, arg->op.left);
2056 if (val)
2057 print_str_arg(data, size, event, arg->op.right->op.left);
2058 else
2059 print_str_arg(data, size, event, arg->op.right->op.right);
2060 break;
2061 default:
2062 /* well... */
2063 break;
2064 }
2065}
2066
2067static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struct event *event)
2068{
2069 static struct format_field *field, *ip_field;
2070 struct print_arg *args, *arg, **next;
2071 unsigned long long ip, val;
2072 char *ptr;
2073 void *bptr;
2074
2075 if (!field) {
2076 field = find_field(event, "buf");
2077 if (!field)
2078 die("can't find buffer field for binary printk");
2079 ip_field = find_field(event, "ip");
2080 if (!ip_field)
2081 die("can't find ip field for binary printk");
2082 }
2083
2084 ip = read_size(data + ip_field->offset, ip_field->size);
2085
2086 /*
2087 * The first arg is the IP pointer.
2088 */
2089 args = malloc_or_die(sizeof(*args));
2090 arg = args;
2091 arg->next = NULL;
2092 next = &arg->next;
2093
2094 arg->type = PRINT_ATOM;
2095 arg->atom.atom = malloc_or_die(32);
2096 sprintf(arg->atom.atom, "%lld", ip);
2097
2098 /* skip the first "%pf : " */
2099 for (ptr = fmt + 6, bptr = data + field->offset;
2100 bptr < data + size && *ptr; ptr++) {
2101 int ls = 0;
2102
2103 if (*ptr == '%') {
2104 process_again:
2105 ptr++;
2106 switch (*ptr) {
2107 case '%':
2108 break;
2109 case 'l':
2110 ls++;
2111 goto process_again;
2112 case 'L':
2113 ls = 2;
2114 goto process_again;
2115 case '0' ... '9':
2116 goto process_again;
2117 case 'p':
2118 ls = 1;
2119 /* fall through */
2120 case 'd':
2121 case 'u':
2122 case 'x':
2123 case 'i':
2124 bptr = (void *)(((unsigned long)bptr + (long_size - 1)) &
2125 ~(long_size - 1));
2126 switch (ls) {
2127 case 0:
2128 case 1:
2129 ls = long_size;
2130 break;
2131 case 2:
2132 ls = 8;
2133 default:
2134 break;
2135 }
2136 val = read_size(bptr, ls);
2137 bptr += ls;
2138 arg = malloc_or_die(sizeof(*arg));
2139 arg->next = NULL;
2140 arg->type = PRINT_ATOM;
2141 arg->atom.atom = malloc_or_die(32);
2142 sprintf(arg->atom.atom, "%lld", val);
2143 *next = arg;
2144 next = &arg->next;
2145 break;
2146 case 's':
2147 arg = malloc_or_die(sizeof(*arg));
2148 arg->next = NULL;
2149 arg->type = PRINT_STRING;
2150 arg->string.string = strdup(bptr);
2151 bptr += strlen(bptr) + 1;
2152 *next = arg;
2153 next = &arg->next;
2154 default:
2155 break;
2156 }
2157 }
2158 }
2159
2160 return args;
2161}
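
Editor's note: the bptr rounding in the loop above keeps each numeric argument in the binary printk buffer aligned to the kernel's word size. A small sketch of that round-up expression, assuming a hypothetical 8-byte long_size:

#include <stdio.h>

int main(void)
{
	unsigned long offset = 0x1003;	/* hypothetical unaligned position */
	unsigned long long_size = 8;

	/* round up to the next multiple of long_size, as make_bprint_args() does */
	unsigned long aligned = (offset + (long_size - 1)) & ~(long_size - 1);

	printf("0x%lx -> 0x%lx\n", offset, aligned);	/* 0x1003 -> 0x1008 */
	return 0;
}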
2162
2163static void free_args(struct print_arg *args)
2164{
2165 struct print_arg *next;
2166
2167 while (args) {
2168 next = args->next;
2169
2170 if (args->type == PRINT_ATOM)
2171 free(args->atom.atom);
2172 else
2173 free(args->string.string);
2174 free(args);
2175 args = next;
2176 }
2177}
2178
2179static char *get_bprint_format(void *data, int size __unused, struct event *event)
2180{
2181 unsigned long long addr;
2182 static struct format_field *field;
2183 struct printk_map *printk;
2184 char *format;
2185 char *p;
2186
2187 if (!field) {
2188 field = find_field(event, "fmt");
2189 if (!field)
2190 die("can't find format field for binary printk");
2191 printf("field->offset = %d size=%d\n", field->offset, field->size);
2192 }
2193
2194 addr = read_size(data + field->offset, field->size);
2195
2196 printk = find_printk(addr);
2197 if (!printk) {
2198 format = malloc_or_die(45);
2199 sprintf(format, "%%pf : (NO FORMAT FOUND at %llx)\n",
2200 addr);
2201 return format;
2202 }
2203
2204 p = printk->printk;
2205 /* Remove any quotes. */
2206 if (*p == '"')
2207 p++;
2208 format = malloc_or_die(strlen(p) + 10);
2209 sprintf(format, "%s : %s", "%pf", p);
2210 /* remove ending quotes and new line since we will add one too */
2211 p = format + strlen(format) - 1;
2212 if (*p == '"')
2213 *p = 0;
2214
2215 p -= 2;
2216 if (strcmp(p, "\\n") == 0)
2217 *p = 0;
2218
2219 return format;
2220}
2221
2222static void pretty_print(void *data, int size, struct event *event)
2223{
2224 struct print_fmt *print_fmt = &event->print_fmt;
2225 struct print_arg *arg = print_fmt->args;
2226 struct print_arg *args = NULL;
2227 const char *ptr = print_fmt->format;
2228 unsigned long long val;
2229 struct func_map *func;
2230 const char *saveptr;
2231 char *bprint_fmt = NULL;
2232 char format[32];
2233 int show_func;
2234 int len;
2235 int ls;
2236
2237 if (event->flags & EVENT_FL_ISFUNC)
2238 ptr = " %pF <-- %pF";
2239
2240 if (event->flags & EVENT_FL_ISBPRINT) {
2241 bprint_fmt = get_bprint_format(data, size, event);
2242 args = make_bprint_args(bprint_fmt, data, size, event);
2243 arg = args;
2244 ptr = bprint_fmt;
2245 }
2246
2247 for (; *ptr; ptr++) {
2248 ls = 0;
2249 if (*ptr == '%') {
2250 saveptr = ptr;
2251 show_func = 0;
2252 cont_process:
2253 ptr++;
2254 switch (*ptr) {
2255 case '%':
2256 printf("%%");
2257 break;
2258 case 'l':
2259 ls++;
2260 goto cont_process;
2261 case 'L':
2262 ls = 2;
2263 goto cont_process;
2264 case 'z':
2265 case 'Z':
2266 case '0' ... '9':
2267 goto cont_process;
2268 case 'p':
2269 if (long_size == 4)
2270 ls = 1;
2271 else
2272 ls = 2;
2273
2274 if (*(ptr+1) == 'F' ||
2275 *(ptr+1) == 'f') {
2276 ptr++;
2277 show_func = *ptr;
2278 }
2279
2280 /* fall through */
2281 case 'd':
2282 case 'i':
2283 case 'x':
2284 case 'X':
2285 case 'u':
2286 if (!arg)
2287 die("no argument match");
2288
2289 len = ((unsigned long)ptr + 1) -
2290 (unsigned long)saveptr;
2291
2292 /* should never happen */
2293			if (len > 31)
2294 die("bad format!");
2295
2296 memcpy(format, saveptr, len);
2297 format[len] = 0;
2298
2299 val = eval_num_arg(data, size, event, arg);
2300 arg = arg->next;
2301
2302 if (show_func) {
2303 func = find_func(val);
2304 if (func) {
2305 printf("%s", func->func);
2306 if (show_func == 'F')
2307 printf("+0x%llx",
2308 val - func->addr);
2309 break;
2310 }
2311 }
2312 switch (ls) {
2313 case 0:
2314 printf(format, (int)val);
2315 break;
2316 case 1:
2317 printf(format, (long)val);
2318 break;
2319 case 2:
2320 printf(format, (long long)val);
2321 break;
2322 default:
2323 die("bad count (%d)", ls);
2324 }
2325 break;
2326 case 's':
2327 if (!arg)
2328 die("no matching argument");
2329
2330 print_str_arg(data, size, event, arg);
2331 arg = arg->next;
2332 break;
2333 default:
2334 printf(">%c<", *ptr);
2335
2336 }
2337 } else
2338 printf("%c", *ptr);
2339 }
2340
2341 if (args) {
2342 free_args(args);
2343 free(bprint_fmt);
2344 }
2345}
2346
2347static inline int log10_cpu(int nb)
2348{
2349 if (nb / 100)
2350 return 3;
2351 if (nb / 10)
2352 return 2;
2353 return 1;
2354}
2355
2356/* taken from Linux, written by Frederic Weisbecker */
2357static void print_graph_cpu(int cpu)
2358{
2359 int i;
2360 int log10_this = log10_cpu(cpu);
2361 int log10_all = log10_cpu(cpus);
2362
2363
2364 /*
2365 * Start with a space character - to make it stand out
2366 * to the right a bit when trace output is pasted into
2367 * email:
2368 */
2369 printf(" ");
2370
2371 /*
2372 * Tricky - we space the CPU field according to the max
2373 * number of online CPUs. On a 2-cpu system it would take
2374 * a maximum of 1 digit - on a 128 cpu system it would
2375 * take up to 3 digits:
2376 */
2377 for (i = 0; i < log10_all - log10_this; i++)
2378 printf(" ");
2379
2380 printf("%d) ", cpu);
2381}
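
Editor's note: as the comment above says, the CPU column is padded to the width of the largest online CPU number. A short sketch of the resulting alignment, assuming a hypothetical 128-CPU system:

#include <stdio.h>

static int log10_cpu(int nb)
{
	if (nb / 100)
		return 3;
	if (nb / 10)
		return 2;
	return 1;
}

int main(void)
{
	int cpus = 128;				/* hypothetical online CPU count */
	int sample[] = { 0, 63, 126 };
	int i;

	for (i = 0; i < 3; i++) {
		int pad = log10_cpu(cpus) - log10_cpu(sample[i]);
		printf(" %*s%d) \n", pad, "", sample[i]);	/* "   0) ", "  63) ", " 126) " */
	}
	return 0;
}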
2382
2383#define TRACE_GRAPH_PROCINFO_LENGTH 14
2384#define TRACE_GRAPH_INDENT 2
2385
2386static void print_graph_proc(int pid, const char *comm)
2387{
2388 /* sign + log10(MAX_INT) + '\0' */
2389 char pid_str[11];
2390 int spaces = 0;
2391 int len;
2392 int i;
2393
2394 sprintf(pid_str, "%d", pid);
2395
2396 /* 1 stands for the "-" character */
2397 len = strlen(comm) + strlen(pid_str) + 1;
2398
2399 if (len < TRACE_GRAPH_PROCINFO_LENGTH)
2400 spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;
2401
2402 /* First spaces to align center */
2403 for (i = 0; i < spaces / 2; i++)
2404 printf(" ");
2405
2406 printf("%s-%s", comm, pid_str);
2407
2408 /* Last spaces to align center */
2409 for (i = 0; i < spaces - (spaces / 2); i++)
2410 printf(" ");
2411}
2412
2413static struct record *
2414get_return_for_leaf(int cpu, int cur_pid, unsigned long long cur_func,
2415 struct record *next)
2416{
2417 struct format_field *field;
2418 struct event *event;
2419 unsigned long val;
2420 int type;
2421 int pid;
2422
2423 type = parse_common_type(next->data);
2424 event = find_event(type);
2425 if (!event)
2426 return NULL;
2427
2428 if (!(event->flags & EVENT_FL_ISFUNCRET))
2429 return NULL;
2430
2431 pid = parse_common_pid(next->data);
2432 field = find_field(event, "func");
2433 if (!field)
2434 die("function return does not have field func");
2435
2436 val = read_size(next->data + field->offset, field->size);
2437
2438 if (cur_pid != pid || cur_func != val)
2439 return NULL;
2440
2441 /* this is a leaf, now advance the iterator */
2442 return trace_read_data(cpu);
2443}
2444
2445/* Signal an execution-time overhead in the output */
2446static void print_graph_overhead(unsigned long long duration)
2447{
2448 /* Non nested entry or return */
2449 if (duration == ~0ULL)
2450 return (void)printf(" ");
2451
2452 /* Duration exceeded 100 msecs */
2453 if (duration > 100000ULL)
2454 return (void)printf("! ");
2455
2456 /* Duration exceeded 10 msecs */
2457 if (duration > 10000ULL)
2458 return (void)printf("+ ");
2459
2460 printf(" ");
2461}
2462
2463static void print_graph_duration(unsigned long long duration)
2464{
2465 unsigned long usecs = duration / 1000;
2466 unsigned long nsecs_rem = duration % 1000;
2467 /* log10(ULONG_MAX) + '\0' */
2468 char msecs_str[21];
2469 char nsecs_str[5];
2470 int len;
2471 int i;
2472
2473 sprintf(msecs_str, "%lu", usecs);
2474
2475 /* Print msecs */
2476 len = printf("%lu", usecs);
2477
2478 /* Print nsecs (we don't want to exceed 7 numbers) */
2479 if (len < 7) {
2480 snprintf(nsecs_str, 8 - len, "%03lu", nsecs_rem);
2481 len += printf(".%s", nsecs_str);
2482 }
2483
2484 printf(" us ");
2485
2486 /* Print remaining spaces to fit the row's width */
2487 for (i = len; i < 7; i++)
2488 printf(" ");
2489
2490 printf("| ");
2491}
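
Editor's note: print_graph_duration() splits the nanosecond duration into microseconds plus a three-digit nanosecond remainder. A worked sketch of that split for a hypothetical 1,234,567 ns duration:

#include <stdio.h>

int main(void)
{
	unsigned long long duration = 1234567;		/* ns, hypothetical value */
	unsigned long usecs = duration / 1000;
	unsigned long nsecs_rem = duration % 1000;

	/* yields the "1234.567 us" style column printed above */
	printf("%lu.%03lu us\n", usecs, nsecs_rem);
	return 0;
}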
2492
2493static void
2494print_graph_entry_leaf(struct event *event, void *data, struct record *ret_rec)
2495{
2496 unsigned long long rettime, calltime;
2497 unsigned long long duration, depth;
2498 unsigned long long val;
2499 struct format_field *field;
2500 struct func_map *func;
2501 struct event *ret_event;
2502 int type;
2503 int i;
2504
2505 type = parse_common_type(ret_rec->data);
2506 ret_event = find_event(type);
2507
2508 field = find_field(ret_event, "rettime");
2509 if (!field)
2510 die("can't find rettime in return graph");
2511 rettime = read_size(ret_rec->data + field->offset, field->size);
2512
2513 field = find_field(ret_event, "calltime");
2514 if (!field)
2515		die("can't find calltime in return graph");
2516 calltime = read_size(ret_rec->data + field->offset, field->size);
2517
2518 duration = rettime - calltime;
2519
2520 /* Overhead */
2521 print_graph_overhead(duration);
2522
2523 /* Duration */
2524 print_graph_duration(duration);
2525
2526 field = find_field(event, "depth");
2527 if (!field)
2528 die("can't find depth in entry graph");
2529 depth = read_size(data + field->offset, field->size);
2530
2531 /* Function */
2532 for (i = 0; i < (int)(depth * TRACE_GRAPH_INDENT); i++)
2533 printf(" ");
2534
2535 field = find_field(event, "func");
2536 if (!field)
2537 die("can't find func in entry graph");
2538 val = read_size(data + field->offset, field->size);
2539 func = find_func(val);
2540
2541 if (func)
2542 printf("%s();", func->func);
2543 else
2544 printf("%llx();", val);
2545}
2546
2547static void print_graph_nested(struct event *event, void *data)
2548{
2549 struct format_field *field;
2550 unsigned long long depth;
2551 unsigned long long val;
2552 struct func_map *func;
2553 int i;
2554
2555 /* No overhead */
2556 print_graph_overhead(-1);
2557
2558 /* No time */
2559 printf(" | ");
2560
2561 field = find_field(event, "depth");
2562 if (!field)
2563 die("can't find depth in entry graph");
2564 depth = read_size(data + field->offset, field->size);
2565
2566 /* Function */
2567 for (i = 0; i < (int)(depth * TRACE_GRAPH_INDENT); i++)
2568 printf(" ");
2569
2570 field = find_field(event, "func");
2571 if (!field)
2572 die("can't find func in entry graph");
2573 val = read_size(data + field->offset, field->size);
2574 func = find_func(val);
2575
2576 if (func)
2577 printf("%s() {", func->func);
2578 else
2579 printf("%llx() {", val);
2580}
2581
2582static void
2583pretty_print_func_ent(void *data, int size, struct event *event,
2584 int cpu, int pid, const char *comm,
2585 unsigned long secs, unsigned long usecs)
2586{
2587 struct format_field *field;
2588 struct record *rec;
2589 void *copy_data;
2590 unsigned long val;
2591
2592 printf("%5lu.%06lu | ", secs, usecs);
2593
2594 print_graph_cpu(cpu);
2595 print_graph_proc(pid, comm);
2596
2597 printf(" | ");
2598
2599 field = find_field(event, "func");
2600 if (!field)
2601 die("function entry does not have func field");
2602
2603 val = read_size(data + field->offset, field->size);
2604
2605 /*
2606 * peek_data may unmap the data pointer. Copy it first.
2607 */
2608 copy_data = malloc_or_die(size);
2609 memcpy(copy_data, data, size);
2610 data = copy_data;
2611
2612 rec = trace_peek_data(cpu);
2613 if (rec) {
2614 rec = get_return_for_leaf(cpu, pid, val, rec);
2615 if (rec) {
2616 print_graph_entry_leaf(event, data, rec);
2617 goto out_free;
2618 }
2619 }
2620 print_graph_nested(event, data);
2621out_free:
2622 free(data);
2623}
2624
2625static void
2626pretty_print_func_ret(void *data, int size __unused, struct event *event,
2627 int cpu, int pid, const char *comm,
2628 unsigned long secs, unsigned long usecs)
2629{
2630 unsigned long long rettime, calltime;
2631 unsigned long long duration, depth;
2632 struct format_field *field;
2633 int i;
2634
2635 printf("%5lu.%06lu | ", secs, usecs);
2636
2637 print_graph_cpu(cpu);
2638 print_graph_proc(pid, comm);
2639
2640 printf(" | ");
2641
2642 field = find_field(event, "rettime");
2643 if (!field)
2644 die("can't find rettime in return graph");
2645 rettime = read_size(data + field->offset, field->size);
2646
2647 field = find_field(event, "calltime");
2648 if (!field)
2649 die("can't find calltime in return graph");
2650 calltime = read_size(data + field->offset, field->size);
2651
2652 duration = rettime - calltime;
2653
2654 /* Overhead */
2655 print_graph_overhead(duration);
2656
2657 /* Duration */
2658 print_graph_duration(duration);
2659
2660 field = find_field(event, "depth");
2661 if (!field)
2662		die("can't find depth in return graph");
2663 depth = read_size(data + field->offset, field->size);
2664
2665 /* Function */
2666 for (i = 0; i < (int)(depth * TRACE_GRAPH_INDENT); i++)
2667 printf(" ");
2668
2669 printf("}");
2670}
2671
2672static void
2673pretty_print_func_graph(void *data, int size, struct event *event,
2674 int cpu, int pid, const char *comm,
2675 unsigned long secs, unsigned long usecs)
2676{
2677 if (event->flags & EVENT_FL_ISFUNCENT)
2678 pretty_print_func_ent(data, size, event,
2679 cpu, pid, comm, secs, usecs);
2680 else if (event->flags & EVENT_FL_ISFUNCRET)
2681 pretty_print_func_ret(data, size, event,
2682 cpu, pid, comm, secs, usecs);
2683 printf("\n");
2684}
2685
2686void print_event(int cpu, void *data, int size, unsigned long long nsecs,
2687 char *comm)
2688{
2689 struct event *event;
2690 unsigned long secs;
2691 unsigned long usecs;
2692 int type;
2693 int pid;
2694
2695 secs = nsecs / NSECS_PER_SEC;
2696 nsecs -= secs * NSECS_PER_SEC;
2697 usecs = nsecs / NSECS_PER_USEC;
2698
2699 type = parse_common_type(data);
2700
2701 event = find_event(type);
2702 if (!event)
2703 die("ug! no event found for type %d", type);
2704
2705 pid = parse_common_pid(data);
2706
2707 if (event->flags & (EVENT_FL_ISFUNCENT | EVENT_FL_ISFUNCRET))
2708 return pretty_print_func_graph(data, size, event, cpu,
2709 pid, comm, secs, usecs);
2710
2711 printf("%16s-%-5d [%03d] %5lu.%09Lu: %s: ",
2712 comm, pid, cpu,
2713 secs, nsecs, event->name);
2714
2715 pretty_print(data, size, event);
2716 printf("\n");
2717}
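
Editor's note: print_event() converts the raw nanosecond timestamp into the seconds.microseconds column. A minimal sketch of that conversion with a hypothetical timestamp:

#include <stdio.h>

#define NSECS_PER_SEC	1000000000ULL
#define NSECS_PER_USEC	1000ULL

int main(void)
{
	unsigned long long nsecs = 5123456789ULL;	/* hypothetical timestamp */
	unsigned long secs = nsecs / NSECS_PER_SEC;
	unsigned long long usecs;

	nsecs -= secs * NSECS_PER_SEC;
	usecs = nsecs / NSECS_PER_USEC;
	printf("%5lu.%06llu\n", secs, usecs);		/* "    5.123456" */
	return 0;
}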
2718
2719static void print_fields(struct print_flag_sym *field)
2720{
2721 printf("{ %s, %s }", field->value, field->str);
2722 if (field->next) {
2723 printf(", ");
2724 print_fields(field->next);
2725 }
2726}
2727
2728static void print_args(struct print_arg *args)
2729{
2730 int print_paren = 1;
2731
2732 switch (args->type) {
2733 case PRINT_NULL:
2734 printf("null");
2735 break;
2736 case PRINT_ATOM:
2737 printf("%s", args->atom.atom);
2738 break;
2739 case PRINT_FIELD:
2740 printf("REC->%s", args->field.name);
2741 break;
2742 case PRINT_FLAGS:
2743 printf("__print_flags(");
2744 print_args(args->flags.field);
2745 printf(", %s, ", args->flags.delim);
2746 print_fields(args->flags.flags);
2747 printf(")");
2748 break;
2749 case PRINT_SYMBOL:
2750 printf("__print_symbolic(");
2751 print_args(args->symbol.field);
2752 printf(", ");
2753 print_fields(args->symbol.symbols);
2754 printf(")");
2755 break;
2756 case PRINT_STRING:
2757 printf("__get_str(%s)", args->string.string);
2758 break;
2759 case PRINT_TYPE:
2760 printf("(%s)", args->typecast.type);
2761 print_args(args->typecast.item);
2762 break;
2763 case PRINT_OP:
2764 if (strcmp(args->op.op, ":") == 0)
2765 print_paren = 0;
2766 if (print_paren)
2767 printf("(");
2768 print_args(args->op.left);
2769 printf(" %s ", args->op.op);
2770 print_args(args->op.right);
2771 if (print_paren)
2772 printf(")");
2773 break;
2774 default:
2775 /* we should warn... */
2776 return;
2777 }
2778 if (args->next) {
2779 printf("\n");
2780 print_args(args->next);
2781 }
2782}
2783
2784static void parse_header_field(char *type,
2785 int *offset, int *size)
2786{
2787 char *token;
2788
2789 if (read_expected(EVENT_ITEM, (char *)"field") < 0)
2790 return;
2791 if (read_expected(EVENT_OP, (char *)":") < 0)
2792 return;
2793 /* type */
2794 if (read_expect_type(EVENT_ITEM, &token) < 0)
2795 return;
2796 free_token(token);
2797
2798 if (read_expected(EVENT_ITEM, type) < 0)
2799 return;
2800 if (read_expected(EVENT_OP, (char *)";") < 0)
2801 return;
2802 if (read_expected(EVENT_ITEM, (char *)"offset") < 0)
2803 return;
2804 if (read_expected(EVENT_OP, (char *)":") < 0)
2805 return;
2806 if (read_expect_type(EVENT_ITEM, &token) < 0)
2807 return;
2808 *offset = atoi(token);
2809 free_token(token);
2810 if (read_expected(EVENT_OP, (char *)";") < 0)
2811 return;
2812 if (read_expected(EVENT_ITEM, (char *)"size") < 0)
2813 return;
2814 if (read_expected(EVENT_OP, (char *)":") < 0)
2815 return;
2816 if (read_expect_type(EVENT_ITEM, &token) < 0)
2817 return;
2818 *size = atoi(token);
2819 free_token(token);
2820 if (read_expected(EVENT_OP, (char *)";") < 0)
2821 return;
2822 if (read_expect_type(EVENT_NEWLINE, &token) < 0)
2823 return;
2824 free_token(token);
2825}
2826
2827int parse_header_page(char *buf, unsigned long size)
2828{
2829 init_input_buf(buf, size);
2830
2831 parse_header_field((char *)"timestamp", &header_page_ts_offset,
2832 &header_page_ts_size);
2833 parse_header_field((char *)"commit", &header_page_size_offset,
2834 &header_page_size_size);
2835 parse_header_field((char *)"data", &header_page_data_offset,
2836 &header_page_data_size);
2837
2838 return 0;
2839}
2840
2841int parse_ftrace_file(char *buf, unsigned long size)
2842{
2843 struct format_field *field;
2844 struct print_arg *arg, **list;
2845 struct event *event;
2846 int ret;
2847
2848 init_input_buf(buf, size);
2849
2850 event = alloc_event();
2851 if (!event)
2852 return -ENOMEM;
2853
2854 event->flags |= EVENT_FL_ISFTRACE;
2855
2856 event->name = event_read_name();
2857 if (!event->name)
2858 die("failed to read ftrace event name");
2859
2860 if (strcmp(event->name, "function") == 0)
2861 event->flags |= EVENT_FL_ISFUNC;
2862
2863 else if (strcmp(event->name, "funcgraph_entry") == 0)
2864 event->flags |= EVENT_FL_ISFUNCENT;
2865
2866 else if (strcmp(event->name, "funcgraph_exit") == 0)
2867 event->flags |= EVENT_FL_ISFUNCRET;
2868
2869 else if (strcmp(event->name, "bprint") == 0)
2870 event->flags |= EVENT_FL_ISBPRINT;
2871
2872 event->id = event_read_id();
2873 if (event->id < 0)
2874 die("failed to read ftrace event id");
2875
2876 add_event(event);
2877
2878 ret = event_read_format(event);
2879 if (ret < 0)
2880 die("failed to read ftrace event format");
2881
2882 ret = event_read_print(event);
2883 if (ret < 0)
2884 die("failed to read ftrace event print fmt");
2885
2886 /*
2887 * The arguments for ftrace files are parsed by the fields.
2888 * Set up the fields as their arguments.
2889 */
2890 list = &event->print_fmt.args;
2891 for (field = event->format.fields; field; field = field->next) {
2892 arg = malloc_or_die(sizeof(*arg));
2893 memset(arg, 0, sizeof(*arg));
2894 *list = arg;
2895 list = &arg->next;
2896 arg->type = PRINT_FIELD;
2897 arg->field.name = field->name;
2898 arg->field.field = field;
2899 }
2900 return 0;
2901}
2902
2903int parse_event_file(char *buf, unsigned long size, char *system__unused __unused)
2904{
2905 struct event *event;
2906 int ret;
2907
2908 init_input_buf(buf, size);
2909
2910 event = alloc_event();
2911 if (!event)
2912 return -ENOMEM;
2913
2914 event->name = event_read_name();
2915 if (!event->name)
2916 die("failed to read event name");
2917
2918 event->id = event_read_id();
2919 if (event->id < 0)
2920 die("failed to read event id");
2921
2922 ret = event_read_format(event);
2923 if (ret < 0)
2924 die("failed to read event format");
2925
2926 ret = event_read_print(event);
2927 if (ret < 0)
2928 die("failed to read event print fmt");
2929
2930#define PRINT_ARGS 0
2931 if (PRINT_ARGS && event->print_fmt.args)
2932 print_args(event->print_fmt.args);
2933
2934 add_event(event);
2935 return 0;
2936}
2937
2938void parse_set_info(int nr_cpus, int long_sz)
2939{
2940 cpus = nr_cpus;
2941 long_size = long_sz;
2942}
diff --git a/tools/perf/util/trace-event-read.c b/tools/perf/util/trace-event-read.c
new file mode 100644
index 000000000000..a1217a10632f
--- /dev/null
+++ b/tools/perf/util/trace-event-read.c
@@ -0,0 +1,512 @@
1/*
2 * Copyright (C) 2009, Steven Rostedt <srostedt@redhat.com>
3 *
4 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License (not later!)
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 *
19 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
20 */
21#define _LARGEFILE64_SOURCE
22
23#include <dirent.h>
24#include <stdio.h>
25#include <stdlib.h>
26#include <string.h>
27#include <getopt.h>
28#include <stdarg.h>
29#include <sys/types.h>
30#include <sys/stat.h>
31#include <sys/wait.h>
32#include <sys/mman.h>
33#include <pthread.h>
34#include <fcntl.h>
35#include <unistd.h>
36#include <ctype.h>
37#include <errno.h>
38
39#include "../perf.h"
40#include "util.h"
41#include "trace-event.h"
42
43static int input_fd;
44
45static int read_page;
46
47int file_bigendian;
48int host_bigendian;
49static int long_size;
50
51static unsigned long page_size;
52
53static int read_or_die(void *data, int size)
54{
55 int r;
56
57 r = read(input_fd, data, size);
58 if (r != size)
59 die("reading input file (size expected=%d received=%d)",
60 size, r);
61 return r;
62}
63
64static unsigned int read4(void)
65{
66 unsigned int data;
67
68 read_or_die(&data, 4);
69 return __data2host4(data);
70}
71
72static unsigned long long read8(void)
73{
74 unsigned long long data;
75
76 read_or_die(&data, 8);
77 return __data2host8(data);
78}
79
80static char *read_string(void)
81{
82 char buf[BUFSIZ];
83 char *str = NULL;
84 int size = 0;
85 int i;
86 int r;
87
88 for (;;) {
89 r = read(input_fd, buf, BUFSIZ);
90 if (r < 0)
91 die("reading input file");
92
93 if (!r)
94 die("no data");
95
96 for (i = 0; i < r; i++) {
97 if (!buf[i])
98 break;
99 }
100 if (i < r)
101 break;
102
103 if (str) {
104 size += BUFSIZ;
105 str = realloc(str, size);
106 if (!str)
107 die("malloc of size %d", size);
108 memcpy(str + (size - BUFSIZ), buf, BUFSIZ);
109 } else {
110 size = BUFSIZ;
111 str = malloc_or_die(size);
112 memcpy(str, buf, size);
113 }
114 }
115
116 /* trailing \0: */
117 i++;
118
119 /* move the file descriptor to the end of the string */
120 r = lseek(input_fd, -(r - i), SEEK_CUR);
121 if (r < 0)
122 die("lseek");
123
124 if (str) {
125 size += i;
126 str = realloc(str, size);
127 if (!str)
128 die("malloc of size %d", size);
129 memcpy(str + (size - i), buf, i);
130 } else {
131 size = i;
132 str = malloc_or_die(i);
133 memcpy(str, buf, i);
134 }
135
136 return str;
137}
138
139static void read_proc_kallsyms(void)
140{
141 unsigned int size;
142 char *buf;
143
144 size = read4();
145 if (!size)
146 return;
147
148 buf = malloc_or_die(size);
149 read_or_die(buf, size);
150
151 parse_proc_kallsyms(buf, size);
152
153 free(buf);
154}
155
156static void read_ftrace_printk(void)
157{
158 unsigned int size;
159 char *buf;
160
161 size = read4();
162 if (!size)
163 return;
164
165 buf = malloc_or_die(size);
166 read_or_die(buf, size);
167
168 parse_ftrace_printk(buf, size);
169
170 free(buf);
171}
172
173static void read_header_files(void)
174{
175 unsigned long long size;
176 char *header_page;
177 char *header_event;
178 char buf[BUFSIZ];
179
180 read_or_die(buf, 12);
181
182 if (memcmp(buf, "header_page", 12) != 0)
183 die("did not read header page");
184
185 size = read8();
186 header_page = malloc_or_die(size);
187 read_or_die(header_page, size);
188 parse_header_page(header_page, size);
189 free(header_page);
190
191 /*
192	 * The size field in the header page has the kernel's long type,
193	 * so use its size as the kernel's word size.
194 */
195 long_size = header_page_size_size;
196
197 read_or_die(buf, 13);
198 if (memcmp(buf, "header_event", 13) != 0)
199 die("did not read header event");
200
201 size = read8();
202 header_event = malloc_or_die(size);
203 read_or_die(header_event, size);
204 free(header_event);
205}
206
207static void read_ftrace_file(unsigned long long size)
208{
209 char *buf;
210
211 buf = malloc_or_die(size);
212 read_or_die(buf, size);
213 parse_ftrace_file(buf, size);
214 free(buf);
215}
216
217static void read_event_file(char *sys, unsigned long long size)
218{
219 char *buf;
220
221 buf = malloc_or_die(size);
222 read_or_die(buf, size);
223 parse_event_file(buf, size, sys);
224 free(buf);
225}
226
227static void read_ftrace_files(void)
228{
229 unsigned long long size;
230 int count;
231 int i;
232
233 count = read4();
234
235 for (i = 0; i < count; i++) {
236 size = read8();
237 read_ftrace_file(size);
238 }
239}
240
241static void read_event_files(void)
242{
243 unsigned long long size;
244 char *sys;
245 int systems;
246 int count;
247 int i,x;
248
249 systems = read4();
250
251 for (i = 0; i < systems; i++) {
252 sys = read_string();
253
254 count = read4();
255 for (x=0; x < count; x++) {
256 size = read8();
257 read_event_file(sys, size);
258 }
259 }
260}
261
262struct cpu_data {
263 unsigned long long offset;
264 unsigned long long size;
265 unsigned long long timestamp;
266 struct record *next;
267 char *page;
268 int cpu;
269 int index;
270 int page_size;
271};
272
273static struct cpu_data *cpu_data;
274
275static void update_cpu_data_index(int cpu)
276{
277 cpu_data[cpu].offset += page_size;
278 cpu_data[cpu].size -= page_size;
279 cpu_data[cpu].index = 0;
280}
281
282static void get_next_page(int cpu)
283{
284 off64_t save_seek;
285 off64_t ret;
286
287 if (!cpu_data[cpu].page)
288 return;
289
290 if (read_page) {
291 if (cpu_data[cpu].size <= page_size) {
292 free(cpu_data[cpu].page);
293 cpu_data[cpu].page = NULL;
294 return;
295 }
296
297 update_cpu_data_index(cpu);
298
299 /* other parts of the code may expect the pointer to not move */
300 save_seek = lseek64(input_fd, 0, SEEK_CUR);
301
302 ret = lseek64(input_fd, cpu_data[cpu].offset, SEEK_SET);
303 if (ret < 0)
304 die("failed to lseek");
305 ret = read(input_fd, cpu_data[cpu].page, page_size);
306 if (ret < 0)
307 die("failed to read page");
308
309 /* reset the file pointer back */
310 lseek64(input_fd, save_seek, SEEK_SET);
311
312 return;
313 }
314
315 munmap(cpu_data[cpu].page, page_size);
316 cpu_data[cpu].page = NULL;
317
318 if (cpu_data[cpu].size <= page_size)
319 return;
320
321 update_cpu_data_index(cpu);
322
323 cpu_data[cpu].page = mmap(NULL, page_size, PROT_READ, MAP_PRIVATE,
324 input_fd, cpu_data[cpu].offset);
325 if (cpu_data[cpu].page == MAP_FAILED)
326 die("failed to mmap cpu %d at offset 0x%llx",
327 cpu, cpu_data[cpu].offset);
328}
329
330static unsigned int type_len4host(unsigned int type_len_ts)
331{
332 if (file_bigendian)
333 return (type_len_ts >> 27) & ((1 << 5) - 1);
334 else
335 return type_len_ts & ((1 << 5) - 1);
336}
337
338static unsigned int ts4host(unsigned int type_len_ts)
339{
340 if (file_bigendian)
341 return type_len_ts & ((1 << 27) - 1);
342 else
343 return type_len_ts >> 5;
344}
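
Editor's note: each ring buffer event starts with a 32-bit word packing a 5-bit type_len and a 27-bit timestamp delta; the two helpers above pick them apart according to the file's endianness. A small sketch of the little-endian layout with hypothetical values:

#include <stdio.h>

int main(void)
{
	/* hypothetical header word: delta = 100, type_len = 4 */
	unsigned int type_len_ts = (100u << 5) | 4u;

	/* little-endian file layout, as in type_len4host()/ts4host() above */
	unsigned int type_len = type_len_ts & ((1 << 5) - 1);
	unsigned int delta    = type_len_ts >> 5;

	printf("type_len=%u delta=%u\n", type_len, delta);	/* type_len=4 delta=100 */
	return 0;
}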
345
346static int calc_index(void *ptr, int cpu)
347{
348 return (unsigned long)ptr - (unsigned long)cpu_data[cpu].page;
349}
350
351struct record *trace_peek_data(int cpu)
352{
353 struct record *data;
354 void *page = cpu_data[cpu].page;
355 int idx = cpu_data[cpu].index;
356 void *ptr = page + idx;
357 unsigned long long extend;
358 unsigned int type_len_ts;
359 unsigned int type_len;
360 unsigned int delta;
361 unsigned int length = 0;
362
363 if (cpu_data[cpu].next)
364 return cpu_data[cpu].next;
365
366 if (!page)
367 return NULL;
368
369 if (!idx) {
370 /* FIXME: handle header page */
371 if (header_page_ts_size != 8)
372 die("expected a long long type for timestamp");
373 cpu_data[cpu].timestamp = data2host8(ptr);
374 ptr += 8;
375 switch (header_page_size_size) {
376 case 4:
377 cpu_data[cpu].page_size = data2host4(ptr);
378 ptr += 4;
379 break;
380 case 8:
381 cpu_data[cpu].page_size = data2host8(ptr);
382 ptr += 8;
383 break;
384 default:
385 die("bad long size");
386 }
387 ptr = cpu_data[cpu].page + header_page_data_offset;
388 }
389
390read_again:
391 idx = calc_index(ptr, cpu);
392
393 if (idx >= cpu_data[cpu].page_size) {
394 get_next_page(cpu);
395 return trace_peek_data(cpu);
396 }
397
398 type_len_ts = data2host4(ptr);
399 ptr += 4;
400
401 type_len = type_len4host(type_len_ts);
402 delta = ts4host(type_len_ts);
403
404 switch (type_len) {
405 case RINGBUF_TYPE_PADDING:
406 if (!delta)
407 die("error, hit unexpected end of page");
408 length = data2host4(ptr);
409 ptr += 4;
410 length *= 4;
411 ptr += length;
412 goto read_again;
413
414 case RINGBUF_TYPE_TIME_EXTEND:
415 extend = data2host4(ptr);
416 ptr += 4;
417 extend <<= TS_SHIFT;
418 extend += delta;
419 cpu_data[cpu].timestamp += extend;
420 goto read_again;
421
422 case RINGBUF_TYPE_TIME_STAMP:
423 ptr += 12;
424 break;
425 case 0:
426 length = data2host4(ptr);
427 ptr += 4;
428 die("here! length=%d", length);
429 break;
430 default:
431 length = type_len * 4;
432 break;
433 }
434
435 cpu_data[cpu].timestamp += delta;
436
437 data = malloc_or_die(sizeof(*data));
438 memset(data, 0, sizeof(*data));
439
440 data->ts = cpu_data[cpu].timestamp;
441 data->size = length;
442 data->data = ptr;
443 ptr += length;
444
445 cpu_data[cpu].index = calc_index(ptr, cpu);
446 cpu_data[cpu].next = data;
447
448 return data;
449}
450
451struct record *trace_read_data(int cpu)
452{
453 struct record *data;
454
455 data = trace_peek_data(cpu);
456 cpu_data[cpu].next = NULL;
457
458 return data;
459}
460
461void trace_report (void)
462{
463 const char *input_file = "trace.info";
464 char buf[BUFSIZ];
465 char test[] = { 23, 8, 68 };
466 char *version;
467 int show_funcs = 0;
468 int show_printk = 0;
469
470 input_fd = open(input_file, O_RDONLY);
471 if (input_fd < 0)
472 die("opening '%s'\n", input_file);
473
474 read_or_die(buf, 3);
475 if (memcmp(buf, test, 3) != 0)
476		die("not a trace data file");
477
478 read_or_die(buf, 7);
479 if (memcmp(buf, "tracing", 7) != 0)
480 die("not a trace file (missing tracing)");
481
482 version = read_string();
483 printf("version = %s\n", version);
484 free(version);
485
486 read_or_die(buf, 1);
487 file_bigendian = buf[0];
488 host_bigendian = bigendian();
489
490 read_or_die(buf, 1);
491 long_size = buf[0];
492
493 page_size = read4();
494
495 read_header_files();
496
497 read_ftrace_files();
498 read_event_files();
499 read_proc_kallsyms();
500 read_ftrace_printk();
501
502 if (show_funcs) {
503 print_funcs();
504 return;
505 }
506 if (show_printk) {
507 print_printk();
508 return;
509 }
510
511 return;
512}
diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h
new file mode 100644
index 000000000000..420294a5773e
--- /dev/null
+++ b/tools/perf/util/trace-event.h
@@ -0,0 +1,240 @@
1#ifndef _TRACE_EVENTS_H
2#define _TRACE_EVENTS_H
3
4#include "parse-events.h"
5
6#define __unused __attribute__((unused))
7
8
9#ifndef PAGE_MASK
10#define PAGE_MASK (page_size - 1)
11#endif
12
13enum {
14 RINGBUF_TYPE_PADDING = 29,
15 RINGBUF_TYPE_TIME_EXTEND = 30,
16 RINGBUF_TYPE_TIME_STAMP = 31,
17};
18
19#ifndef TS_SHIFT
20#define TS_SHIFT 27
21#endif
22
23#define NSECS_PER_SEC 1000000000ULL
24#define NSECS_PER_USEC 1000ULL
25
26enum format_flags {
27 FIELD_IS_ARRAY = 1,
28 FIELD_IS_POINTER = 2,
29};
30
31struct format_field {
32 struct format_field *next;
33 char *type;
34 char *name;
35 int offset;
36 int size;
37 unsigned long flags;
38};
39
40struct format {
41 int nr_common;
42 int nr_fields;
43 struct format_field *common_fields;
44 struct format_field *fields;
45};
46
47struct print_arg_atom {
48 char *atom;
49};
50
51struct print_arg_string {
52 char *string;
53 int offset;
54};
55
56struct print_arg_field {
57 char *name;
58 struct format_field *field;
59};
60
61struct print_flag_sym {
62 struct print_flag_sym *next;
63 char *value;
64 char *str;
65};
66
67struct print_arg_typecast {
68 char *type;
69 struct print_arg *item;
70};
71
72struct print_arg_flags {
73 struct print_arg *field;
74 char *delim;
75 struct print_flag_sym *flags;
76};
77
78struct print_arg_symbol {
79 struct print_arg *field;
80 struct print_flag_sym *symbols;
81};
82
83struct print_arg;
84
85struct print_arg_op {
86 char *op;
87 int prio;
88 struct print_arg *left;
89 struct print_arg *right;
90};
91
92struct print_arg_func {
93 char *name;
94 struct print_arg *args;
95};
96
97enum print_arg_type {
98 PRINT_NULL,
99 PRINT_ATOM,
100 PRINT_FIELD,
101 PRINT_FLAGS,
102 PRINT_SYMBOL,
103 PRINT_TYPE,
104 PRINT_STRING,
105 PRINT_OP,
106};
107
108struct print_arg {
109 struct print_arg *next;
110 enum print_arg_type type;
111 union {
112 struct print_arg_atom atom;
113 struct print_arg_field field;
114 struct print_arg_typecast typecast;
115 struct print_arg_flags flags;
116 struct print_arg_symbol symbol;
117 struct print_arg_func func;
118 struct print_arg_string string;
119 struct print_arg_op op;
120 };
121};
122
123struct print_fmt {
124 char *format;
125 struct print_arg *args;
126};
127
128struct event {
129 struct event *next;
130 char *name;
131 int id;
132 int flags;
133 struct format format;
134 struct print_fmt print_fmt;
135};
136
137enum {
138 EVENT_FL_ISFTRACE = 1,
139 EVENT_FL_ISPRINT = 2,
140 EVENT_FL_ISBPRINT = 4,
141 EVENT_FL_ISFUNC = 8,
142 EVENT_FL_ISFUNCENT = 16,
143 EVENT_FL_ISFUNCRET = 32,
144};
145
146struct record {
147 unsigned long long ts;
148 int size;
149 void *data;
150};
151
152struct record *trace_peek_data(int cpu);
153struct record *trace_read_data(int cpu);
154
155void parse_set_info(int nr_cpus, int long_sz);
156
157void trace_report(void);
158
159void *malloc_or_die(unsigned int size);
160
161void parse_cmdlines(char *file, int size);
162void parse_proc_kallsyms(char *file, unsigned int size);
163void parse_ftrace_printk(char *file, unsigned int size);
164
165void print_funcs(void);
166void print_printk(void);
167
168int parse_ftrace_file(char *buf, unsigned long size);
169int parse_event_file(char *buf, unsigned long size, char *system);
170void print_event(int cpu, void *data, int size, unsigned long long nsecs,
171 char *comm);
172
173extern int file_bigendian;
174extern int host_bigendian;
175
176int bigendian(void);
177
178static inline unsigned short __data2host2(unsigned short data)
179{
180 unsigned short swap;
181
182 if (host_bigendian == file_bigendian)
183 return data;
184
185 swap = ((data & 0xffULL) << 8) |
186 ((data & (0xffULL << 8)) >> 8);
187
188 return swap;
189}
190
191static inline unsigned int __data2host4(unsigned int data)
192{
193 unsigned int swap;
194
195 if (host_bigendian == file_bigendian)
196 return data;
197
198 swap = ((data & 0xffULL) << 24) |
199 ((data & (0xffULL << 8)) << 8) |
200 ((data & (0xffULL << 16)) >> 8) |
201 ((data & (0xffULL << 24)) >> 24);
202
203 return swap;
204}
205
206static inline unsigned long long __data2host8(unsigned long long data)
207{
208 unsigned long long swap;
209
210 if (host_bigendian == file_bigendian)
211 return data;
212
213 swap = ((data & 0xffULL) << 56) |
214 ((data & (0xffULL << 8)) << 40) |
215 ((data & (0xffULL << 16)) << 24) |
216 ((data & (0xffULL << 24)) << 8) |
217 ((data & (0xffULL << 32)) >> 8) |
218 ((data & (0xffULL << 40)) >> 24) |
219 ((data & (0xffULL << 48)) >> 40) |
220 ((data & (0xffULL << 56)) >> 56);
221
222 return swap;
223}
224
225#define data2host2(ptr) __data2host2(*(unsigned short *)ptr)
226#define data2host4(ptr) __data2host4(*(unsigned int *)ptr)
227#define data2host8(ptr) __data2host8(*(unsigned long long *)ptr)
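
Editor's note: the __data2host helpers byte-swap only when the trace file and the host disagree on endianness. A self-contained sketch of the 4-byte case (local stand-in flags and a demo function name, not the externs declared above):

#include <stdio.h>

static int file_bigendian = 1;	/* hypothetical: big-endian trace file */
static int host_bigendian;	/* 0: little-endian host */

static unsigned int demo_data2host4(unsigned int data)
{
	if (host_bigendian == file_bigendian)
		return data;
	return ((data & 0xffULL) << 24) |
	       ((data & (0xffULL << 8)) << 8) |
	       ((data & (0xffULL << 16)) >> 8) |
	       ((data & (0xffULL << 24)) >> 24);
}

int main(void)
{
	printf("0x%08x\n", demo_data2host4(0x12345678));	/* prints 0x78563412 */
	return 0;
}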
228
229extern int header_page_ts_offset;
230extern int header_page_ts_size;
231extern int header_page_size_offset;
232extern int header_page_size_size;
233extern int header_page_data_offset;
234extern int header_page_data_size;
235
236int parse_header_page(char *buf, unsigned long size);
237
238void read_tracing_data(struct perf_counter_attr *pattrs, int nb_counters);
239
240#endif /* _TRACE_EVENTS_H */
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index 68fe157d72fb..9de2329dd44d 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -39,10 +39,6 @@
39 39	/* Approximation of the length of the decimal representation of this type. */
40 40	#define decimal_length(x) ((int)(sizeof(x) * 2.56 + 0.5) + 1)
41 41
42	#if !defined(__APPLE__) && !defined(__FreeBSD__) && !defined(__USLC__) && !defined(_M_UNIX)
43	#define _XOPEN_SOURCE 600 /* glibc2 and AIX 5.3L need 500, OpenBSD needs 600 for S_ISLNK() */
44	#define _XOPEN_SOURCE_EXTENDED 1 /* AIX 5.3L needs this */
45	#endif
46 42	#define _ALL_SOURCE 1
47 43	#define _GNU_SOURCE 1
48 44	#define _BSD_SOURCE 1
@@ -83,6 +79,7 @@
83 79	#include <inttypes.h>
84 80	#include "../../../include/linux/magic.h"
85 81
82
86 83	#ifndef NO_ICONV
87 84	#include <iconv.h>
88 85	#endif
@@ -310,6 +307,7 @@ static inline int has_extension(const char *filename, const char *ext)
310 307	#undef isspace
311 308	#undef isdigit
312 309	#undef isalpha
310	#undef isprint
313 311	#undef isalnum
314 312	#undef tolower
315 313	#undef toupper
diff --git a/tools/perf/util/values.c b/tools/perf/util/values.c
new file mode 100644
index 000000000000..1c15e39f99e3
--- /dev/null
+++ b/tools/perf/util/values.c
@@ -0,0 +1,230 @@
1#include <stdlib.h>
2
3#include "util.h"
4#include "values.h"
5
6void perf_read_values_init(struct perf_read_values *values)
7{
8 values->threads_max = 16;
9 values->pid = malloc(values->threads_max * sizeof(*values->pid));
10 values->tid = malloc(values->threads_max * sizeof(*values->tid));
11 values->value = malloc(values->threads_max * sizeof(*values->value));
12 if (!values->pid || !values->tid || !values->value)
13 die("failed to allocate read_values threads arrays");
14 values->threads = 0;
15
16 values->counters_max = 16;
17 values->counterrawid = malloc(values->counters_max
18 * sizeof(*values->counterrawid));
19 values->countername = malloc(values->counters_max
20 * sizeof(*values->countername));
21 if (!values->counterrawid || !values->countername)
22 die("failed to allocate read_values counters arrays");
23 values->counters = 0;
24}
25
26void perf_read_values_destroy(struct perf_read_values *values)
27{
28 int i;
29
30 if (!values->threads_max || !values->counters_max)
31 return;
32
33 for (i = 0; i < values->threads; i++)
34 free(values->value[i]);
35 free(values->pid);
36 free(values->tid);
37 free(values->counterrawid);
38 for (i = 0; i < values->counters; i++)
39 free(values->countername[i]);
40 free(values->countername);
41}
42
43static void perf_read_values__enlarge_threads(struct perf_read_values *values)
44{
45 values->threads_max *= 2;
46 values->pid = realloc(values->pid,
47 values->threads_max * sizeof(*values->pid));
48 values->tid = realloc(values->tid,
49 values->threads_max * sizeof(*values->tid));
50 values->value = realloc(values->value,
51 values->threads_max * sizeof(*values->value));
52 if (!values->pid || !values->tid || !values->value)
53 die("failed to enlarge read_values threads arrays");
54}
55
56static int perf_read_values__findnew_thread(struct perf_read_values *values,
57 u32 pid, u32 tid)
58{
59 int i;
60
61 for (i = 0; i < values->threads; i++)
62 if (values->pid[i] == pid && values->tid[i] == tid)
63 return i;
64
65 if (values->threads == values->threads_max)
66 perf_read_values__enlarge_threads(values);
67
68 i = values->threads++;
69 values->pid[i] = pid;
70 values->tid[i] = tid;
71 values->value[i] = malloc(values->counters_max * sizeof(**values->value));
72 if (!values->value[i])
73 die("failed to allocate read_values counters array");
74
75 return i;
76}
77
78static void perf_read_values__enlarge_counters(struct perf_read_values *values)
79{
80 int i;
81
82 values->counters_max *= 2;
83 values->counterrawid = realloc(values->counterrawid,
84 values->counters_max * sizeof(*values->counterrawid));
85 values->countername = realloc(values->countername,
86 values->counters_max * sizeof(*values->countername));
87 if (!values->counterrawid || !values->countername)
88 die("failed to enlarge read_values counters arrays");
89
90 for (i = 0; i < values->threads; i++) {
91 values->value[i] = realloc(values->value[i],
92 values->counters_max * sizeof(**values->value));
93 if (!values->value[i])
94 die("failed to enlarge read_values counters arrays");
95 }
96}
97
98static int perf_read_values__findnew_counter(struct perf_read_values *values,
99 u64 rawid, const char *name)
100{
101 int i;
102
103 for (i = 0; i < values->counters; i++)
104 if (values->counterrawid[i] == rawid)
105 return i;
106
107 if (values->counters == values->counters_max)
108 perf_read_values__enlarge_counters(values);
109
110 i = values->counters++;
111 values->counterrawid[i] = rawid;
112 values->countername[i] = strdup(name);
113
114 return i;
115}
116
117void perf_read_values_add_value(struct perf_read_values *values,
118 u32 pid, u32 tid,
119 u64 rawid, const char *name, u64 value)
120{
121 int tindex, cindex;
122
123 tindex = perf_read_values__findnew_thread(values, pid, tid);
124 cindex = perf_read_values__findnew_counter(values, rawid, name);
125
126 values->value[tindex][cindex] = value;
127}
128
129static void perf_read_values__display_pretty(FILE *fp,
130 struct perf_read_values *values)
131{
132 int i, j;
133 int pidwidth, tidwidth;
134 int *counterwidth;
135
136 counterwidth = malloc(values->counters * sizeof(*counterwidth));
137 if (!counterwidth)
138 die("failed to allocate counterwidth array");
139 tidwidth = 3;
140 pidwidth = 3;
141 for (j = 0; j < values->counters; j++)
142 counterwidth[j] = strlen(values->countername[j]);
143 for (i = 0; i < values->threads; i++) {
144 int width;
145
146 width = snprintf(NULL, 0, "%d", values->pid[i]);
147 if (width > pidwidth)
148 pidwidth = width;
149 width = snprintf(NULL, 0, "%d", values->tid[i]);
150 if (width > tidwidth)
151 tidwidth = width;
152 for (j = 0; j < values->counters; j++) {
153 width = snprintf(NULL, 0, "%Lu", values->value[i][j]);
154 if (width > counterwidth[j])
155 counterwidth[j] = width;
156 }
157 }
158
159 fprintf(fp, "# %*s %*s", pidwidth, "PID", tidwidth, "TID");
160 for (j = 0; j < values->counters; j++)
161 fprintf(fp, " %*s", counterwidth[j], values->countername[j]);
162 fprintf(fp, "\n");
163
164 for (i = 0; i < values->threads; i++) {
165 fprintf(fp, " %*d %*d", pidwidth, values->pid[i],
166 tidwidth, values->tid[i]);
167 for (j = 0; j < values->counters; j++)
168 fprintf(fp, " %*Lu",
169 counterwidth[j], values->value[i][j]);
170 fprintf(fp, "\n");
171 }
172}
173
174static void perf_read_values__display_raw(FILE *fp,
175 struct perf_read_values *values)
176{
177 int width, pidwidth, tidwidth, namewidth, rawwidth, countwidth;
178 int i, j;
179
180 tidwidth = 3; /* TID */
181 pidwidth = 3; /* PID */
182 namewidth = 4; /* "Name" */
183 rawwidth = 3; /* "Raw" */
184 countwidth = 5; /* "Count" */
185
186 for (i = 0; i < values->threads; i++) {
187 width = snprintf(NULL, 0, "%d", values->pid[i]);
188 if (width > pidwidth)
189 pidwidth = width;
190 width = snprintf(NULL, 0, "%d", values->tid[i]);
191 if (width > tidwidth)
192 tidwidth = width;
193 }
194 for (j = 0; j < values->counters; j++) {
195 width = strlen(values->countername[j]);
196 if (width > namewidth)
197 namewidth = width;
198 width = snprintf(NULL, 0, "%llx", values->counterrawid[j]);
199 if (width > rawwidth)
200 rawwidth = width;
201 }
202 for (i = 0; i < values->threads; i++) {
203 for (j = 0; j < values->counters; j++) {
204 width = snprintf(NULL, 0, "%Lu", values->value[i][j]);
205 if (width > countwidth)
206 countwidth = width;
207 }
208 }
209
210 fprintf(fp, "# %*s %*s %*s %*s %*s\n",
211 pidwidth, "PID", tidwidth, "TID",
212 namewidth, "Name", rawwidth, "Raw",
213 countwidth, "Count");
214 for (i = 0; i < values->threads; i++)
215 for (j = 0; j < values->counters; j++)
216 fprintf(fp, " %*d %*d %*s %*llx %*Lu\n",
217 pidwidth, values->pid[i],
218 tidwidth, values->tid[i],
219 namewidth, values->countername[j],
220 rawwidth, values->counterrawid[j],
221 countwidth, values->value[i][j]);
222}
223
224void perf_read_values_display(FILE *fp, struct perf_read_values *values, int raw)
225{
226 if (raw)
227 perf_read_values__display_raw(fp, values);
228 else
229 perf_read_values__display_pretty(fp, values);
230}
diff --git a/tools/perf/util/values.h b/tools/perf/util/values.h
new file mode 100644
index 000000000000..cadf8cf2a590
--- /dev/null
+++ b/tools/perf/util/values.h
@@ -0,0 +1,27 @@
1#ifndef _PERF_VALUES_H
2#define _PERF_VALUES_H
3
4#include "types.h"
5
6struct perf_read_values {
7 int threads;
8 int threads_max;
9 u32 *pid, *tid;
10 int counters;
11 int counters_max;
12 u64 *counterrawid;
13 char **countername;
14 u64 **value;
15};
16
17void perf_read_values_init(struct perf_read_values *values);
18void perf_read_values_destroy(struct perf_read_values *values);
19
20void perf_read_values_add_value(struct perf_read_values *values,
21 u32 pid, u32 tid,
22 u64 rawid, const char *name, u64 value);
23
24void perf_read_values_display(FILE *fp, struct perf_read_values *values,
25 int raw);
26
27#endif /* _PERF_VALUES_H */
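
For reference, a minimal caller-side sketch of how the perf_read_values API added above fits together. This is not part of the patch: it assumes it is compiled and linked inside tools/perf (so that the util/values.h include path, the u32/u64 typedefs from util/types.h, and die() from util.h resolve), and the pids, tids, raw counter ids, names and counts are invented purely for illustration.

/* Hypothetical example, not from the patch: exercises the API declared
 * in util/values.h; build inside tools/perf and link against values.c. */
#include <stdio.h>
#include "util/values.h"

int main(void)
{
	struct perf_read_values values;

	/* Sets up the default 16-slot thread and counter arrays. */
	perf_read_values_init(&values);

	/*
	 * One call per (pid, tid, counter) sample; unknown threads and
	 * counters are added on first use, and the backing arrays are
	 * doubled as needed.  All numbers here are made up.
	 */
	perf_read_values_add_value(&values, 1234, 1234, 0x10, "cycles", 1000000);
	perf_read_values_add_value(&values, 1234, 1235, 0x10, "cycles", 250000);
	perf_read_values_add_value(&values, 1234, 1234, 0x11, "instructions", 800000);

	perf_read_values_display(stdout, &values, 0);	/* pretty: one column per counter */
	perf_read_values_display(stdout, &values, 1);	/* raw: one row per (thread, counter) */

	perf_read_values_destroy(&values);
	return 0;
}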